author     Linus Torvalds <torvalds@linux-foundation.org>  2013-05-10 10:48:05 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-05-10 10:48:05 -0400
commit     daf799cca8abbf7f3e253ecf1d41d244070773d7 (patch)
tree       6fb27ff60b820ae0eeb906c8a5d8d7f93f89cd8b /arch
parent     6019958d146a4f127dae727a930f902c92531e6e (diff)
parent     b22d1b6a91ca4260f869e349179ae53f18c664db (diff)
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
Pull MIPS updates from Ralf Baechle:

 - More work on DT support for various platforms

 - Various fixes that were too late to make it straight into 3.9

 - Improved platform support, in particular the Netlogic XLR and
   BCM63xx, and the SEAD3 and Malta eval boards.

 - Support for several Ralink SOC families.

 - Complete support for the microMIPS ASE which basically reencodes the
   existing MIPS32/MIPS64 ISA to use non-constant size instructions.

 - Some fallout from LTO work which removes old cruft and will generally
   make the MIPS kernel easier to maintain and resistant to compiler
   optimization, even in absence of LTO.

 - KVM support.  While MIPS has announced hardware virtualization
   extensions, this KVM extension uses trap-and-emulate mode for
   virtualization of MIPS32.  More KVM work to add support for the VZ
   hardware virtualization extensions and MIPS64 will probably already
   be merged for 3.11.

Most of this has been sitting in -next for a long time.  All defconfigs
have been build- or run-time tested except three for which fixes are
being sent by other maintainers.

Semantic conflict with the KVM updates resolved as per Ralf.

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (118 commits)
  MIPS: Add new GIC clockevent driver.
  MIPS: Formatting clean-ups for clocksources.
  MIPS: Refactor GIC clocksource code.
  MIPS: Move 'gic_frequency' to common location.
  MIPS: Move 'gic_present' to common location.
  MIPS: MIPS16e: Add unaligned access support.
  MIPS: MIPS16e: Support handling of delay slots.
  MIPS: MIPS16e: Add instruction formats.
  MIPS: microMIPS: Optimise 'strnlen' core library function.
  MIPS: microMIPS: Optimise 'strlen' core library function.
  MIPS: microMIPS: Optimise 'strncpy' core library function.
  MIPS: microMIPS: Optimise 'memset' core library function.
  MIPS: microMIPS: Add configuration option for microMIPS kernel.
  MIPS: microMIPS: Disable LL/SC and fix linker bug.
  MIPS: microMIPS: Add vdso support.
  MIPS: microMIPS: Add unaligned access support.
  MIPS: microMIPS: Support handling of delay slots.
  MIPS: microMIPS: Add support for exception handling.
  MIPS: microMIPS: Floating point support.
  MIPS: microMIPS: Fix macro naming in micro-assembler.
  ...
Diffstat (limited to 'arch')
-rw-r--r--  arch/mips/Kbuild | 4
-rw-r--r--  arch/mips/Kconfig | 52
-rw-r--r--  arch/mips/Makefile | 1
-rw-r--r--  arch/mips/alchemy/Kconfig | 3
-rw-r--r--  arch/mips/alchemy/Platform | 22
-rw-r--r--  arch/mips/ar7/memory.c | 1
-rw-r--r--  arch/mips/ath79/setup.c | 16
-rw-r--r--  arch/mips/bcm63xx/Kconfig | 4
-rw-r--r--  arch/mips/bcm63xx/boards/board_bcm963xx.c | 6
-rw-r--r--  arch/mips/bcm63xx/clk.c | 43
-rw-r--r--  arch/mips/bcm63xx/cpu.c | 142
-rw-r--r--  arch/mips/bcm63xx/dev-flash.c | 6
-rw-r--r--  arch/mips/bcm63xx/dev-spi.c | 26
-rw-r--r--  arch/mips/bcm63xx/irq.c | 22
-rw-r--r--  arch/mips/bcm63xx/prom.c | 2
-rw-r--r--  arch/mips/bcm63xx/reset.c | 28
-rw-r--r--  arch/mips/bcm63xx/setup.c | 5
-rw-r--r--  arch/mips/cavium-octeon/octeon-irq.c | 5
-rw-r--r--  arch/mips/configs/malta_defconfig | 69
-rw-r--r--  arch/mips/configs/malta_kvm_defconfig | 456
-rw-r--r--  arch/mips/configs/malta_kvm_guest_defconfig | 453
-rw-r--r--  arch/mips/configs/maltaaprp_defconfig | 195
-rw-r--r--  arch/mips/configs/maltasmtc_defconfig | 196
-rw-r--r--  arch/mips/configs/maltasmvp_defconfig | 199
-rw-r--r--  arch/mips/configs/maltaup_defconfig | 194
-rw-r--r--  arch/mips/configs/sead3_defconfig | 3
-rw-r--r--  arch/mips/configs/sead3micro_defconfig | 122
-rw-r--r--  arch/mips/fw/lib/Makefile | 2
-rw-r--r--  arch/mips/fw/lib/cmdline.c | 101
-rw-r--r--  arch/mips/include/asm/asm.h | 2
-rw-r--r--  arch/mips/include/asm/bootinfo.h | 1
-rw-r--r--  arch/mips/include/asm/branch.h | 40
-rw-r--r--  arch/mips/include/asm/cpu-features.h | 3
-rw-r--r--  arch/mips/include/asm/dma-coherence.h | 15
-rw-r--r--  arch/mips/include/asm/dma-mapping.h | 1
-rw-r--r--  arch/mips/include/asm/fpu_emulator.h | 6
-rw-r--r--  arch/mips/include/asm/fw/fw.h | 47
-rw-r--r--  arch/mips/include/asm/gic.h | 16
-rw-r--r--  arch/mips/include/asm/hazards.h | 371
-rw-r--r--  arch/mips/include/asm/inst.h | 12
-rw-r--r--  arch/mips/include/asm/irqflags.h | 153
-rw-r--r--  arch/mips/include/asm/kvm.h | 55
-rw-r--r--  arch/mips/include/asm/kvm_host.h | 667
-rw-r--r--  arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h | 11
-rw-r--r--  arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h | 141
-rw-r--r--  arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h | 11
-rw-r--r--  arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h | 2
-rw-r--r--  arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h | 105
-rw-r--r--  arch/mips/include/asm/mach-bcm63xx/ioremap.h | 1
-rw-r--r--  arch/mips/include/asm/mach-generic/dma-coherence.h | 5
-rw-r--r--  arch/mips/include/asm/mach-generic/spaces.h | 9
-rw-r--r--  arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h | 1
-rw-r--r--  arch/mips/include/asm/mach-ralink/mt7620.h | 84
-rw-r--r--  arch/mips/include/asm/mach-ralink/rt288x.h | 53
-rw-r--r--  arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h | 56
-rw-r--r--  arch/mips/include/asm/mach-ralink/rt305x.h | 27
-rw-r--r--  arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h | 56
-rw-r--r--  arch/mips/include/asm/mach-ralink/rt3883.h | 252
-rw-r--r--  arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h | 55
-rw-r--r--  arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h | 4
-rw-r--r--  arch/mips/include/asm/mips-boards/generic.h | 3
-rw-r--r--  arch/mips/include/asm/mips-boards/prom.h | 47
-rw-r--r--  arch/mips/include/asm/mips_machine.h | 4
-rw-r--r--  arch/mips/include/asm/mipsregs.h | 19
-rw-r--r--  arch/mips/include/asm/mmu_context.h | 116
-rw-r--r--  arch/mips/include/asm/netlogic/haldefs.h | 92
-rw-r--r--  arch/mips/include/asm/netlogic/mips-extns.h | 20
-rw-r--r--  arch/mips/include/asm/netlogic/xlp-hal/pic.h | 53
-rw-r--r--  arch/mips/include/asm/netlogic/xlp-hal/usb.h | 64
-rw-r--r--  arch/mips/include/asm/pgtable.h | 1
-rw-r--r--  arch/mips/include/asm/processor.h | 5
-rw-r--r--  arch/mips/include/asm/prom.h | 3
-rw-r--r--  arch/mips/include/asm/sn/sn_private.h | 2
-rw-r--r--  arch/mips/include/asm/sn/types.h | 1
-rw-r--r--  arch/mips/include/asm/spinlock.h | 120
-rw-r--r--  arch/mips/include/asm/stackframe.h | 12
-rw-r--r--  arch/mips/include/asm/thread_info.h | 8
-rw-r--r--  arch/mips/include/asm/time.h | 8
-rw-r--r--  arch/mips/include/asm/uaccess.h | 25
-rw-r--r--  arch/mips/include/asm/uasm.h | 84
-rw-r--r--  arch/mips/include/uapi/asm/inst.h | 564
-rw-r--r--  arch/mips/kernel/Makefile | 7
-rw-r--r--  arch/mips/kernel/asm-offsets.c | 66
-rw-r--r--  arch/mips/kernel/binfmt_elfo32.c | 4
-rw-r--r--  arch/mips/kernel/branch.c | 178
-rw-r--r--  arch/mips/kernel/cevt-gic.c | 104
-rw-r--r--  arch/mips/kernel/cevt-r4k.c | 13
-rw-r--r--  arch/mips/kernel/cpu-probe.c | 3
-rw-r--r--  arch/mips/kernel/csrc-gic.c | 13
-rw-r--r--  arch/mips/kernel/genex.S | 75
-rw-r--r--  arch/mips/kernel/irq-gic.c | 47
-rw-r--r--  arch/mips/kernel/mips_machine.c | 22
-rw-r--r--  arch/mips/kernel/proc.c | 6
-rw-r--r--  arch/mips/kernel/process.c | 101
-rw-r--r--  arch/mips/kernel/prom.c | 33
-rw-r--r--  arch/mips/kernel/scall32-o32.S | 9
-rw-r--r--  arch/mips/kernel/setup.c | 22
-rw-r--r--  arch/mips/kernel/signal.c | 9
-rw-r--r--  arch/mips/kernel/smp-mt.c | 3
-rw-r--r--  arch/mips/kernel/smp.c | 1
-rw-r--r--  arch/mips/kernel/smtc-asm.S | 3
-rw-r--r--  arch/mips/kernel/smtc.c | 10
-rw-r--r--  arch/mips/kernel/traps.c | 318
-rw-r--r--  arch/mips/kernel/unaligned.c | 1489
-rw-r--r--  arch/mips/kvm/00README.txt | 31
-rw-r--r--  arch/mips/kvm/Kconfig | 49
-rw-r--r--  arch/mips/kvm/Makefile | 13
-rw-r--r--  arch/mips/kvm/kvm_cb.c | 14
-rw-r--r--  arch/mips/kvm/kvm_locore.S | 650
-rw-r--r--  arch/mips/kvm/kvm_mips.c | 958
-rw-r--r--  arch/mips/kvm/kvm_mips_comm.h | 23
-rw-r--r--  arch/mips/kvm/kvm_mips_commpage.c | 37
-rw-r--r--  arch/mips/kvm/kvm_mips_dyntrans.c | 149
-rw-r--r--  arch/mips/kvm/kvm_mips_emul.c | 1826
-rw-r--r--  arch/mips/kvm/kvm_mips_int.c | 243
-rw-r--r--  arch/mips/kvm/kvm_mips_int.h | 49
-rw-r--r--  arch/mips/kvm/kvm_mips_opcode.h | 24
-rw-r--r--  arch/mips/kvm/kvm_mips_stats.c | 82
-rw-r--r--  arch/mips/kvm/kvm_tlb.c | 928
-rw-r--r--  arch/mips/kvm/kvm_trap_emul.c | 482
-rw-r--r--  arch/mips/kvm/trace.h | 46
-rw-r--r--  arch/mips/lib/bitops.c | 14
-rw-r--r--  arch/mips/lib/dump_tlb.c | 5
-rw-r--r--  arch/mips/lib/memset.S | 84
-rw-r--r--  arch/mips/lib/mips-atomic.c | 149
-rw-r--r--  arch/mips/lib/r3k_dump_tlb.c | 7
-rw-r--r--  arch/mips/lib/strlen_user.S | 9
-rw-r--r--  arch/mips/lib/strncpy_user.S | 32
-rw-r--r--  arch/mips/lib/strnlen_user.S | 2
-rw-r--r--  arch/mips/math-emu/cp1emu.c | 919
-rw-r--r--  arch/mips/math-emu/dsemul.c | 30
-rw-r--r--  arch/mips/mm/Makefile | 4
-rw-r--r--  arch/mips/mm/c-r4k.c | 30
-rw-r--r--  arch/mips/mm/cache.c | 1
-rw-r--r--  arch/mips/mm/dma-default.c | 25
-rw-r--r--  arch/mips/mm/page.c | 10
-rw-r--r--  arch/mips/mm/tlb-r3k.c | 20
-rw-r--r--  arch/mips/mm/tlb-r4k.c | 4
-rw-r--r--  arch/mips/mm/tlb-r8k.c | 2
-rw-r--r--  arch/mips/mm/tlbex.c | 132
-rw-r--r--  arch/mips/mm/uasm-micromips.c | 221
-rw-r--r--  arch/mips/mm/uasm-mips.c | 205
-rw-r--r--  arch/mips/mm/uasm.c | 326
-rw-r--r--  arch/mips/mti-malta/Makefile | 5
-rw-r--r--  arch/mips/mti-malta/Platform | 6
-rw-r--r--  arch/mips/mti-malta/malta-cmdline.c | 59
-rw-r--r--  arch/mips/mti-malta/malta-display.c | 38
-rw-r--r--  arch/mips/mti-malta/malta-init.c | 153
-rw-r--r--  arch/mips/mti-malta/malta-int.c | 4
-rw-r--r--  arch/mips/mti-malta/malta-memory.c | 104
-rw-r--r--  arch/mips/mti-malta/malta-setup.c | 87
-rw-r--r--  arch/mips/mti-malta/malta-time.c | 55
-rw-r--r--  arch/mips/mti-sead3/Makefile | 8
-rw-r--r--  arch/mips/mti-sead3/leds-sead3.c | 24
-rw-r--r--  arch/mips/mti-sead3/sead3-cmdline.c | 46
-rw-r--r--  arch/mips/mti-sead3/sead3-console.c | 2
-rw-r--r--  arch/mips/mti-sead3/sead3-display.c | 1
-rw-r--r--  arch/mips/mti-sead3/sead3-init.c | 130
-rw-r--r--  arch/mips/mti-sead3/sead3-int.c | 1
-rw-r--r--  arch/mips/mti-sead3/sead3-setup.c | 4
-rw-r--r--  arch/mips/mti-sead3/sead3-time.c | 1
-rw-r--r--  arch/mips/netlogic/Kconfig | 17
-rw-r--r--  arch/mips/netlogic/common/smp.c | 21
-rw-r--r--  arch/mips/netlogic/dts/Makefile | 1
-rw-r--r--  arch/mips/netlogic/dts/xlp_evp.dts | 2
-rw-r--r--  arch/mips/netlogic/dts/xlp_svp.dts | 124
-rw-r--r--  arch/mips/netlogic/xlp/nlm_hal.c | 62
-rw-r--r--  arch/mips/netlogic/xlp/setup.c | 22
-rw-r--r--  arch/mips/netlogic/xlp/usb-init.c | 49
-rw-r--r--  arch/mips/oprofile/op_model_mipsxx.c | 2
-rw-r--r--  arch/mips/pci/pci-ar71xx.c | 6
-rw-r--r--  arch/mips/pci/pci-ar724x.c | 18
-rw-r--r--  arch/mips/pci/pci-bcm63xx.c | 11
-rw-r--r--  arch/mips/powertv/init.c | 3
-rw-r--r--  arch/mips/powertv/init.h | 2
-rw-r--r--  arch/mips/powertv/memory.c | 1
-rw-r--r--  arch/mips/powertv/powertv_setup.c | 1
-rw-r--r--  arch/mips/ralink/Kconfig | 23
-rw-r--r--  arch/mips/ralink/Makefile | 3
-rw-r--r--  arch/mips/ralink/Platform | 18
-rw-r--r--  arch/mips/ralink/common.h | 11
-rw-r--r--  arch/mips/ralink/dts/Makefile | 3
-rw-r--r--  arch/mips/ralink/dts/mt7620a.dtsi | 58
-rw-r--r--  arch/mips/ralink/dts/mt7620a_eval.dts | 16
-rw-r--r--  arch/mips/ralink/dts/rt2880.dtsi | 58
-rw-r--r--  arch/mips/ralink/dts/rt2880_eval.dts | 46
-rw-r--r--  arch/mips/ralink/dts/rt3050.dtsi | 52
-rw-r--r--  arch/mips/ralink/dts/rt3052_eval.dts | 12
-rw-r--r--  arch/mips/ralink/dts/rt3883.dtsi | 58
-rw-r--r--  arch/mips/ralink/dts/rt3883_eval.dts | 16
-rw-r--r--  arch/mips/ralink/early_printk.c | 4
-rw-r--r--  arch/mips/ralink/irq.c | 5
-rw-r--r--  arch/mips/ralink/mt7620.c | 234
-rw-r--r--  arch/mips/ralink/of.c | 9
-rw-r--r--  arch/mips/ralink/rt288x.c | 143
-rw-r--r--  arch/mips/ralink/rt305x.c | 70
-rw-r--r--  arch/mips/ralink/rt3883.c | 246
-rw-r--r--  arch/mips/sgi-ip27/ip27-klnuma.c | 2
-rw-r--r--  arch/mips/sgi-ip27/ip27-memory.c | 16
-rw-r--r--  arch/mips/sgi-ip27/ip27-timer.c | 2
200 files changed, 16132 insertions, 2305 deletions
diff --git a/arch/mips/Kbuild b/arch/mips/Kbuild
index 7dd65cfae837..d2cfe45f332b 100644
--- a/arch/mips/Kbuild
+++ b/arch/mips/Kbuild
@@ -17,3 +17,7 @@ obj- := $(platform-)
 obj-y += kernel/
 obj-y += mm/
 obj-y += math-emu/
+
+ifdef CONFIG_KVM
+obj-y += kvm/
+endif
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index a90cfc702bb1..7a58ab933b20 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -304,7 +304,6 @@ config MIPS_MALTA
 	select HW_HAS_PCI
 	select I8253
 	select I8259
-	select MIPS_BOARDS_GEN
 	select MIPS_BONITO64
 	select MIPS_CPU_SCACHE
 	select PCI_GT64XXX_PCI0
@@ -335,12 +334,12 @@ config MIPS_SEAD3
 	select BOOT_RAW
 	select CEVT_R4K
 	select CSRC_R4K
+	select CSRC_GIC
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
 	select DMA_NONCOHERENT
 	select IRQ_CPU
 	select IRQ_GIC
-	select MIPS_BOARDS_GEN
 	select MIPS_CPU_SCACHE
 	select MIPS_MSC
 	select SYS_HAS_CPU_MIPS32_R1
@@ -352,6 +351,7 @@ config MIPS_SEAD3
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_SUPPORTS_SMARTMIPS
+	select SYS_SUPPORTS_MICROMIPS
 	select USB_ARCH_HAS_EHCI
 	select USB_EHCI_BIG_ENDIAN_DESC
 	select USB_EHCI_BIG_ENDIAN_MMIO
@@ -910,6 +910,9 @@ config CEVT_GT641XX
 config CEVT_R4K
 	bool
 
+config CEVT_GIC
+	bool
+
 config CEVT_SB1250
 	bool
 
@@ -982,9 +985,6 @@ config MIPS_MSC
 config MIPS_NILE4
 	bool
 
-config MIPS_DISABLE_OBSOLETE_IDE
-	bool
-
 config SYNC_R4K
 	bool
 
@@ -1075,9 +1075,6 @@ config IRQ_GT641XX
 config IRQ_GIC
 	bool
 
-config MIPS_BOARDS_GEN
-	bool
-
 config PCI_GT64XXX_PCI0
 	bool
 
@@ -1147,7 +1144,7 @@ config BOOT_ELF32
 
 config MIPS_L1_CACHE_SHIFT
 	int
-	default "4" if MACH_DECSTATION || MIKROTIK_RB532 || PMC_MSP4200_EVAL
+	default "4" if MACH_DECSTATION || MIKROTIK_RB532 || PMC_MSP4200_EVAL || SOC_RT288X
 	default "6" if MIPS_CPU_SCACHE
 	default "7" if SGI_IP22 || SGI_IP27 || SGI_IP28 || SNI_RM || CPU_CAVIUM_OCTEON
 	default "5"
@@ -1236,6 +1233,7 @@ config CPU_MIPS32_R2
 	select CPU_HAS_PREFETCH
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
+	select HAVE_KVM
 	help
 	  Choose this option to build a kernel for release 2 or later of the
 	  MIPS32 architecture. Most modern embedded systems with a 32-bit
@@ -1736,6 +1734,20 @@ config 64BIT
 
 endchoice
 
+config KVM_GUEST
+	bool "KVM Guest Kernel"
+	help
+	  Select this option if building a guest kernel for KVM (Trap & Emulate) mode
+
+config KVM_HOST_FREQ
+	int "KVM Host Processor Frequency (MHz)"
+	depends on KVM_GUEST
+	default 500
+	help
+	  Select this option if building a guest kernel for KVM to skip
+	  RTC emulation when determining guest CPU Frequency.  Instead, the guest
+	  processor frequency is automatically derived from the host frequency.
+
 choice
 	prompt "Kernel page size"
 	default PAGE_SIZE_4KB
@@ -1811,6 +1823,15 @@ config FORCE_MAX_ZONEORDER
 	  The page size is not necessarily 4KB.  Keep this in mind
 	  when choosing a value for this option.
 
+config CEVT_GIC
+	bool "Use GIC global counter for clock events"
+	depends on IRQ_GIC && !(MIPS_SEAD3 || MIPS_MT_SMTC)
+	help
+	  Use the GIC global counter for the clock events.  The R4K clock
+	  event driver is always present, so if the platform ends up not
+	  detecting a GIC, it will fall back to the R4K timer for the
+	  generation of clock events.
+
 config BOARD_SCACHE
 	bool
 
@@ -2016,6 +2037,7 @@ config SB1_PASS_2_1_WORKAROUNDS
 	depends on CPU_SB1 && CPU_SB1_PASS_2
 	default y
 
+
 config 64BIT_PHYS_ADDR
 	bool
 
@@ -2034,6 +2056,13 @@ config CPU_HAS_SMARTMIPS
 	  you don't know you probably don't have SmartMIPS and should say N
 	  here.
 
+config CPU_MICROMIPS
+	depends on SYS_SUPPORTS_MICROMIPS
+	bool "Build kernel using microMIPS ISA"
+	help
+	  When this option is enabled the kernel will be built using the
+	  microMIPS ISA
+
 config CPU_HAS_WB
 	bool
 
@@ -2096,6 +2125,9 @@ config SYS_SUPPORTS_HIGHMEM
 config SYS_SUPPORTS_SMARTMIPS
 	bool
 
+config SYS_SUPPORTS_MICROMIPS
+	bool
+
 config ARCH_FLATMEM_ENABLE
 	def_bool y
 	depends on !NUMA && !CPU_LOONGSON2
@@ -2556,3 +2588,5 @@ source "security/Kconfig"
 source "crypto/Kconfig"
 
 source "lib/Kconfig"
+
+source "arch/mips/kvm/Kconfig"
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 6f7978f95090..dd58a04ef4bc 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -114,6 +114,7 @@ cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*e
 cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le))
 
 cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,-msmartmips)
+cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips -mno-jals)
 
 cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \
 				   -fno-omit-frame-pointer
diff --git a/arch/mips/alchemy/Kconfig b/arch/mips/alchemy/Kconfig
index c8862bdc2ff2..7032ac7ecd1b 100644
--- a/arch/mips/alchemy/Kconfig
+++ b/arch/mips/alchemy/Kconfig
@@ -31,7 +31,6 @@ config MIPS_DB1000
 	select ALCHEMY_GPIOINT_AU1000
 	select DMA_NONCOHERENT
 	select HW_HAS_PCI
-	select MIPS_DISABLE_OBSOLETE_IDE
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_HAS_EARLY_PRINTK
@@ -41,7 +40,6 @@ config MIPS_DB1235
 	select ARCH_REQUIRE_GPIOLIB
 	select HW_HAS_PCI
 	select DMA_COHERENT
-	select MIPS_DISABLE_OBSOLETE_IDE
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_HAS_EARLY_PRINTK
 
@@ -57,7 +55,6 @@ config MIPS_GPR
 	select ALCHEMY_GPIOINT_AU1000
 	select HW_HAS_PCI
 	select DMA_NONCOHERENT
-	select MIPS_DISABLE_OBSOLETE_IDE
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_HAS_EARLY_PRINTK
 
diff --git a/arch/mips/alchemy/Platform b/arch/mips/alchemy/Platform
index fa1bdd1aea15..b3afcdd8d77a 100644
--- a/arch/mips/alchemy/Platform
+++ b/arch/mips/alchemy/Platform
@@ -5,32 +5,14 @@ platform-$(CONFIG_MIPS_ALCHEMY) += alchemy/common/
 
 
 #
-# AMD Alchemy Pb1100 eval board
-#
-platform-$(CONFIG_MIPS_PB1100) += alchemy/devboards/
-load-$(CONFIG_MIPS_PB1100) += 0xffffffff80100000
-
-#
-# AMD Alchemy Pb1500 eval board
-#
-platform-$(CONFIG_MIPS_PB1500) += alchemy/devboards/
-load-$(CONFIG_MIPS_PB1500) += 0xffffffff80100000
-
-#
-# AMD Alchemy Pb1550 eval board
-#
-platform-$(CONFIG_MIPS_PB1550) += alchemy/devboards/
-load-$(CONFIG_MIPS_PB1550) += 0xffffffff80100000
-
-#
-# AMD Alchemy Db1000/Db1500/Db1100 eval boards
+# AMD Alchemy Db1000/Db1500/Pb1500/Db1100/Pb1100 eval boards
 #
 platform-$(CONFIG_MIPS_DB1000) += alchemy/devboards/
 cflags-$(CONFIG_MIPS_DB1000) += -I$(srctree)/arch/mips/include/asm/mach-db1x00
 load-$(CONFIG_MIPS_DB1000) += 0xffffffff80100000
 
 #
-# AMD Alchemy Db1200/Pb1200/Db1550/Db1300 eval boards
+# AMD Alchemy Db1200/Pb1200/Db1550/Pb1550/Db1300 eval boards
 #
 platform-$(CONFIG_MIPS_DB1235) += alchemy/devboards/
 cflags-$(CONFIG_MIPS_DB1235) += -I$(srctree)/arch/mips/include/asm/mach-db1x00
diff --git a/arch/mips/ar7/memory.c b/arch/mips/ar7/memory.c
index 28abfeef09d6..92dfa481205b 100644
--- a/arch/mips/ar7/memory.c
+++ b/arch/mips/ar7/memory.c
@@ -30,7 +30,6 @@
 #include <asm/sections.h>
 
 #include <asm/mach-ar7/ar7.h>
-#include <asm/mips-boards/prom.h>
 
 static int __init memsize(void)
 {
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index d5b3c9057018..a0233a2c1988 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -51,20 +51,6 @@ static void ath79_halt(void)
 	cpu_wait();
 }
 
-static void __init ath79_detect_mem_size(void)
-{
-	unsigned long size;
-
-	for (size = ATH79_MEM_SIZE_MIN; size < ATH79_MEM_SIZE_MAX;
-	     size <<= 1) {
-		if (!memcmp(ath79_detect_mem_size,
-			    ath79_detect_mem_size + size, 1024))
-			break;
-	}
-
-	add_memory_region(0, size, BOOT_MEM_RAM);
-}
-
 static void __init ath79_detect_sys_type(void)
 {
 	char *chip = "????";
@@ -212,7 +198,7 @@ void __init plat_mem_setup(void)
 					 AR71XX_DDR_CTRL_SIZE);
 
 	ath79_detect_sys_type();
-	ath79_detect_mem_size();
+	detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
 	ath79_clocks_init();
 
 	_machine_restart = ath79_restart;
diff --git a/arch/mips/bcm63xx/Kconfig b/arch/mips/bcm63xx/Kconfig
index d03e8799d1cf..5639662fd503 100644
--- a/arch/mips/bcm63xx/Kconfig
+++ b/arch/mips/bcm63xx/Kconfig
@@ -25,6 +25,10 @@ config BCM63XX_CPU_6358
 	bool "support 6358 CPU"
 	select HW_HAS_PCI
 
+config BCM63XX_CPU_6362
+	bool "support 6362 CPU"
+	select HW_HAS_PCI
+
 config BCM63XX_CPU_6368
 	bool "support 6368 CPU"
 	select HW_HAS_PCI
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c
index 9aa7d44898ed..a9505c4867e8 100644
--- a/arch/mips/bcm63xx/boards/board_bcm963xx.c
+++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c
@@ -726,11 +726,11 @@ void __init board_prom_init(void)
 	u32 val;
 
 	/* read base address of boot chip select (0)
-	 * 6328 does not have MPI but boots from a fixed address
+	 * 6328/6362 do not have MPI but boot from a fixed address
 	 */
-	if (BCMCPU_IS_6328())
+	if (BCMCPU_IS_6328() || BCMCPU_IS_6362()) {
 		val = 0x18000000;
-	else {
+	} else {
 		val = bcm_mpi_readl(MPI_CSBASE_REG(0));
 		val &= MPI_CSBASE_BASE_MASK;
 	}
diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c
index b9e948d59430..c726a97fc798 100644
--- a/arch/mips/bcm63xx/clk.c
+++ b/arch/mips/bcm63xx/clk.c
@@ -15,7 +15,13 @@
 #include <bcm63xx_io.h>
 #include <bcm63xx_regs.h>
 #include <bcm63xx_reset.h>
-#include <bcm63xx_clk.h>
+
+struct clk {
+	void (*set)(struct clk *, int);
+	unsigned int rate;
+	unsigned int usage;
+	int id;
+};
 
 static DEFINE_MUTEX(clocks_mutex);
 
@@ -119,11 +125,18 @@ static struct clk clk_ephy = {
 	 */
 static void enetsw_set(struct clk *clk, int enable)
 {
-	if (!BCMCPU_IS_6368())
+	if (BCMCPU_IS_6328())
+		bcm_hwclock_set(CKCTL_6328_ROBOSW_EN, enable);
+	else if (BCMCPU_IS_6362())
+		bcm_hwclock_set(CKCTL_6362_ROBOSW_EN, enable);
+	else if (BCMCPU_IS_6368())
+		bcm_hwclock_set(CKCTL_6368_ROBOSW_EN |
+				CKCTL_6368_SWPKT_USB_EN |
+				CKCTL_6368_SWPKT_SAR_EN,
+				enable);
+	else
 		return;
-	bcm_hwclock_set(CKCTL_6368_ROBOSW_EN |
-			CKCTL_6368_SWPKT_USB_EN |
-			CKCTL_6368_SWPKT_SAR_EN, enable);
+
 	if (enable) {
 		/* reset switch core afer clock change */
 		bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 1);
@@ -160,6 +173,8 @@ static void usbh_set(struct clk *clk, int enable)
 		bcm_hwclock_set(CKCTL_6328_USBH_EN, enable);
 	else if (BCMCPU_IS_6348())
 		bcm_hwclock_set(CKCTL_6348_USBH_EN, enable);
+	else if (BCMCPU_IS_6362())
+		bcm_hwclock_set(CKCTL_6362_USBH_EN, enable);
 	else if (BCMCPU_IS_6368())
 		bcm_hwclock_set(CKCTL_6368_USBH_EN, enable);
 }
@@ -175,6 +190,8 @@ static void usbd_set(struct clk *clk, int enable)
 {
 	if (BCMCPU_IS_6328())
 		bcm_hwclock_set(CKCTL_6328_USBD_EN, enable);
+	else if (BCMCPU_IS_6362())
+		bcm_hwclock_set(CKCTL_6362_USBD_EN, enable);
 	else if (BCMCPU_IS_6368())
 		bcm_hwclock_set(CKCTL_6368_USBD_EN, enable);
 }
@@ -196,6 +213,8 @@ static void spi_set(struct clk *clk, int enable)
 		mask = CKCTL_6348_SPI_EN;
 	else if (BCMCPU_IS_6358())
 		mask = CKCTL_6358_SPI_EN;
+	else if (BCMCPU_IS_6362())
+		mask = CKCTL_6362_SPI_EN;
 	else
 		/* BCMCPU_IS_6368 */
 		mask = CKCTL_6368_SPI_EN;
@@ -236,7 +255,10 @@ static struct clk clk_xtm = {
 	 */
 static void ipsec_set(struct clk *clk, int enable)
 {
-	bcm_hwclock_set(CKCTL_6368_IPSEC_EN, enable);
+	if (BCMCPU_IS_6362())
+		bcm_hwclock_set(CKCTL_6362_IPSEC_EN, enable);
+	else if (BCMCPU_IS_6368())
+		bcm_hwclock_set(CKCTL_6368_IPSEC_EN, enable);
 }
 
 static struct clk clk_ipsec = {
@@ -249,7 +271,10 @@ static struct clk clk_ipsec = {
 
 static void pcie_set(struct clk *clk, int enable)
 {
-	bcm_hwclock_set(CKCTL_6328_PCIE_EN, enable);
+	if (BCMCPU_IS_6328())
+		bcm_hwclock_set(CKCTL_6328_PCIE_EN, enable);
+	else if (BCMCPU_IS_6362())
+		bcm_hwclock_set(CKCTL_6362_PCIE_EN, enable);
 }
 
 static struct clk clk_pcie = {
@@ -315,9 +340,9 @@ struct clk *clk_get(struct device *dev, const char *id)
 		return &clk_periph;
 	if (BCMCPU_IS_6358() && !strcmp(id, "pcm"))
 		return &clk_pcm;
-	if (BCMCPU_IS_6368() && !strcmp(id, "ipsec"))
+	if ((BCMCPU_IS_6362() || BCMCPU_IS_6368()) && !strcmp(id, "ipsec"))
 		return &clk_ipsec;
-	if (BCMCPU_IS_6328() && !strcmp(id, "pcie"))
+	if ((BCMCPU_IS_6328() || BCMCPU_IS_6362()) && !strcmp(id, "pcie"))
 		return &clk_pcie;
 	return ERR_PTR(-ENOENT);
 }
diff --git a/arch/mips/bcm63xx/cpu.c b/arch/mips/bcm63xx/cpu.c
index a7afb289b15a..79fe32df5e96 100644
--- a/arch/mips/bcm63xx/cpu.c
+++ b/arch/mips/bcm63xx/cpu.c
@@ -25,7 +25,7 @@ const int *bcm63xx_irqs;
 EXPORT_SYMBOL(bcm63xx_irqs);
 
 static u16 bcm63xx_cpu_id;
-static u16 bcm63xx_cpu_rev;
+static u8 bcm63xx_cpu_rev;
 static unsigned int bcm63xx_cpu_freq;
 static unsigned int bcm63xx_memory_size;
 
@@ -71,6 +71,15 @@ static const int bcm6358_irqs[] = {
 
 };
 
+static const unsigned long bcm6362_regs_base[] = {
+	__GEN_CPU_REGS_TABLE(6362)
+};
+
+static const int bcm6362_irqs[] = {
+	__GEN_CPU_IRQ_TABLE(6362)
+
+};
+
 static const unsigned long bcm6368_regs_base[] = {
 	__GEN_CPU_REGS_TABLE(6368)
 };
@@ -87,7 +96,7 @@ u16 __bcm63xx_get_cpu_id(void)
 
 EXPORT_SYMBOL(__bcm63xx_get_cpu_id);
 
-u16 bcm63xx_get_cpu_rev(void)
+u8 bcm63xx_get_cpu_rev(void)
 {
 	return bcm63xx_cpu_rev;
 }
@@ -169,6 +178,42 @@ static unsigned int detect_cpu_clock(void)
 		return (16 * 1000000 * n1 * n2) / m1;
 	}
 
+	case BCM6362_CPU_ID:
+	{
+		unsigned int tmp, mips_pll_fcvo;
+
+		tmp = bcm_misc_readl(MISC_STRAPBUS_6362_REG);
+		mips_pll_fcvo = (tmp & STRAPBUS_6362_FCVO_MASK)
+				>> STRAPBUS_6362_FCVO_SHIFT;
+		switch (mips_pll_fcvo) {
+		case 0x03:
+		case 0x0b:
+		case 0x13:
+		case 0x1b:
+			return 240000000;
+		case 0x04:
+		case 0x0c:
+		case 0x14:
+		case 0x1c:
+			return 160000000;
+		case 0x05:
+		case 0x0e:
+		case 0x16:
+		case 0x1e:
+		case 0x1f:
+			return 400000000;
+		case 0x06:
+			return 440000000;
+		case 0x07:
+		case 0x17:
+			return 384000000;
+		case 0x15:
+		case 0x1d:
+			return 200000000;
+		default:
+			return 320000000;
+		}
+	}
 	case BCM6368_CPU_ID:
 	{
 		unsigned int tmp, p1, p2, ndiv, m1;
@@ -205,7 +250,7 @@ static unsigned int detect_memory_size(void)
 	unsigned int cols = 0, rows = 0, is_32bits = 0, banks = 0;
 	u32 val;
 
-	if (BCMCPU_IS_6328())
+	if (BCMCPU_IS_6328() || BCMCPU_IS_6362())
 		return bcm_ddr_readl(DDR_CSEND_REG) << 24;
 
 	if (BCMCPU_IS_6345()) {
@@ -240,53 +285,27 @@ static unsigned int detect_memory_size(void)
 
 void __init bcm63xx_cpu_init(void)
 {
-	unsigned int tmp, expected_cpu_id;
+	unsigned int tmp;
 	struct cpuinfo_mips *c = &current_cpu_data;
 	unsigned int cpu = smp_processor_id();
+	u32 chipid_reg;
 
 	/* soc registers location depends on cpu type */
-	expected_cpu_id = 0;
+	chipid_reg = 0;
 
 	switch (c->cputype) {
 	case CPU_BMIPS3300:
-		if ((read_c0_prid() & 0xff00) == PRID_IMP_BMIPS3300_ALT) {
-			expected_cpu_id = BCM6348_CPU_ID;
-			bcm63xx_regs_base = bcm6348_regs_base;
-			bcm63xx_irqs = bcm6348_irqs;
-		} else {
+		if ((read_c0_prid() & 0xff00) != PRID_IMP_BMIPS3300_ALT)
 			__cpu_name[cpu] = "Broadcom BCM6338";
-			expected_cpu_id = BCM6338_CPU_ID;
-			bcm63xx_regs_base = bcm6338_regs_base;
-			bcm63xx_irqs = bcm6338_irqs;
-		}
-		break;
+		/* fall-through */
 	case CPU_BMIPS32:
-		expected_cpu_id = BCM6345_CPU_ID;
-		bcm63xx_regs_base = bcm6345_regs_base;
-		bcm63xx_irqs = bcm6345_irqs;
+		chipid_reg = BCM_6345_PERF_BASE;
 		break;
 	case CPU_BMIPS4350:
-		if ((read_c0_prid() & 0xf0) == 0x10) {
-			expected_cpu_id = BCM6358_CPU_ID;
-			bcm63xx_regs_base = bcm6358_regs_base;
-			bcm63xx_irqs = bcm6358_irqs;
-		} else {
-			/* all newer chips have the same chip id location */
-			u16 chip_id = bcm_readw(BCM_6368_PERF_BASE);
-
-			switch (chip_id) {
-			case BCM6328_CPU_ID:
-				expected_cpu_id = BCM6328_CPU_ID;
-				bcm63xx_regs_base = bcm6328_regs_base;
-				bcm63xx_irqs = bcm6328_irqs;
-				break;
-			case BCM6368_CPU_ID:
-				expected_cpu_id = BCM6368_CPU_ID;
-				bcm63xx_regs_base = bcm6368_regs_base;
-				bcm63xx_irqs = bcm6368_irqs;
-				break;
-			}
-		}
+		if ((read_c0_prid() & 0xf0) == 0x10)
+			chipid_reg = BCM_6345_PERF_BASE;
+		else
+			chipid_reg = BCM_6368_PERF_BASE;
 		break;
 	}
 
@@ -294,20 +313,47 @@ void __init bcm63xx_cpu_init(void)
 	 * really early to panic, but delaying panic would not help since we
 	 * will never get any working console
 	 */
-	if (!expected_cpu_id)
+	if (!chipid_reg)
 		panic("unsupported Broadcom CPU");
 
-	/*
-	 * bcm63xx_regs_base is set, we can access soc registers
-	 */
-
-	/* double check CPU type */
-	tmp = bcm_perf_readl(PERF_REV_REG);
+	/* read out CPU type */
+	tmp = bcm_readl(chipid_reg);
 	bcm63xx_cpu_id = (tmp & REV_CHIPID_MASK) >> REV_CHIPID_SHIFT;
 	bcm63xx_cpu_rev = (tmp & REV_REVID_MASK) >> REV_REVID_SHIFT;
 
-	if (bcm63xx_cpu_id != expected_cpu_id)
-		panic("bcm63xx CPU id mismatch");
+	switch (bcm63xx_cpu_id) {
+	case BCM6328_CPU_ID:
+		bcm63xx_regs_base = bcm6328_regs_base;
+		bcm63xx_irqs = bcm6328_irqs;
+		break;
+	case BCM6338_CPU_ID:
+		bcm63xx_regs_base = bcm6338_regs_base;
+		bcm63xx_irqs = bcm6338_irqs;
+		break;
+	case BCM6345_CPU_ID:
+		bcm63xx_regs_base = bcm6345_regs_base;
+		bcm63xx_irqs = bcm6345_irqs;
+		break;
+	case BCM6348_CPU_ID:
+		bcm63xx_regs_base = bcm6348_regs_base;
+		bcm63xx_irqs = bcm6348_irqs;
+		break;
+	case BCM6358_CPU_ID:
+		bcm63xx_regs_base = bcm6358_regs_base;
+		bcm63xx_irqs = bcm6358_irqs;
+		break;
+	case BCM6362_CPU_ID:
+		bcm63xx_regs_base = bcm6362_regs_base;
+		bcm63xx_irqs = bcm6362_irqs;
+		break;
+	case BCM6368_CPU_ID:
+		bcm63xx_regs_base = bcm6368_regs_base;
+		bcm63xx_irqs = bcm6368_irqs;
+		break;
+	default:
+		panic("unsupported broadcom CPU %x", bcm63xx_cpu_id);
+		break;
+	}
 
 	bcm63xx_cpu_freq = detect_cpu_clock();
 	bcm63xx_memory_size = detect_memory_size();
diff --git a/arch/mips/bcm63xx/dev-flash.c b/arch/mips/bcm63xx/dev-flash.c
index 58371c7deac2..588d1ec622e4 100644
--- a/arch/mips/bcm63xx/dev-flash.c
+++ b/arch/mips/bcm63xx/dev-flash.c
@@ -77,6 +77,12 @@ static int __init bcm63xx_detect_flash_type(void)
 		return BCM63XX_FLASH_TYPE_PARALLEL;
 	else
 		return BCM63XX_FLASH_TYPE_SERIAL;
+	case BCM6362_CPU_ID:
+		val = bcm_misc_readl(MISC_STRAPBUS_6362_REG);
+		if (val & STRAPBUS_6362_BOOT_SEL_SERIAL)
+			return BCM63XX_FLASH_TYPE_SERIAL;
+		else
+			return BCM63XX_FLASH_TYPE_NAND;
 	case BCM6368_CPU_ID:
 		val = bcm_gpio_readl(GPIO_STRAPBUS_REG);
 		switch (val & STRAPBUS_6368_BOOT_SEL_MASK) {
diff --git a/arch/mips/bcm63xx/dev-spi.c b/arch/mips/bcm63xx/dev-spi.c
index e97fd60e92ef..3065bb61820d 100644
--- a/arch/mips/bcm63xx/dev-spi.c
+++ b/arch/mips/bcm63xx/dev-spi.c
@@ -22,10 +22,6 @@
 /*
  * register offsets
  */
-static const unsigned long bcm6338_regs_spi[] = {
-	__GEN_SPI_REGS_TABLE(6338)
-};
-
 static const unsigned long bcm6348_regs_spi[] = {
 	__GEN_SPI_REGS_TABLE(6348)
 };
@@ -34,23 +30,15 @@ static const unsigned long bcm6358_regs_spi[] = {
 	__GEN_SPI_REGS_TABLE(6358)
 };
 
-static const unsigned long bcm6368_regs_spi[] = {
-	__GEN_SPI_REGS_TABLE(6368)
-};
-
 const unsigned long *bcm63xx_regs_spi;
 EXPORT_SYMBOL(bcm63xx_regs_spi);
 
 static __init void bcm63xx_spi_regs_init(void)
 {
-	if (BCMCPU_IS_6338())
-		bcm63xx_regs_spi = bcm6338_regs_spi;
-	if (BCMCPU_IS_6348())
+	if (BCMCPU_IS_6338() || BCMCPU_IS_6348())
 		bcm63xx_regs_spi = bcm6348_regs_spi;
-	if (BCMCPU_IS_6358())
+	if (BCMCPU_IS_6358() || BCMCPU_IS_6362() || BCMCPU_IS_6368())
 		bcm63xx_regs_spi = bcm6358_regs_spi;
-	if (BCMCPU_IS_6368())
-		bcm63xx_regs_spi = bcm6368_regs_spi;
 }
 #else
 static __init void bcm63xx_spi_regs_init(void) { }
@@ -93,13 +81,13 @@ int __init bcm63xx_spi_register(void)
 	spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI);
 
 	if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
-		spi_resources[0].end += BCM_6338_RSET_SPI_SIZE - 1;
-		spi_pdata.fifo_size = SPI_6338_MSG_DATA_SIZE;
-		spi_pdata.msg_type_shift = SPI_6338_MSG_TYPE_SHIFT;
-		spi_pdata.msg_ctl_width = SPI_6338_MSG_CTL_WIDTH;
+		spi_resources[0].end += BCM_6348_RSET_SPI_SIZE - 1;
+		spi_pdata.fifo_size = SPI_6348_MSG_DATA_SIZE;
+		spi_pdata.msg_type_shift = SPI_6348_MSG_TYPE_SHIFT;
+		spi_pdata.msg_ctl_width = SPI_6348_MSG_CTL_WIDTH;
 	}
 
-	if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) {
+	if (BCMCPU_IS_6358() || BCMCPU_IS_6362() || BCMCPU_IS_6368()) {
 		spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1;
 		spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE;
 		spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT;
diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c
index da24c2bd9b7c..c0ab3887f42e 100644
--- a/arch/mips/bcm63xx/irq.c
+++ b/arch/mips/bcm63xx/irq.c
@@ -82,6 +82,17 @@ static void __internal_irq_unmask_64(unsigned int irq) __maybe_unused;
 #define ext_irq_cfg_reg1	PERF_EXTIRQ_CFG_REG_6358
 #define ext_irq_cfg_reg2	0
 #endif
+#ifdef CONFIG_BCM63XX_CPU_6362
+#define irq_stat_reg		PERF_IRQSTAT_6362_REG
+#define irq_mask_reg		PERF_IRQMASK_6362_REG
+#define irq_bits		64
+#define is_ext_irq_cascaded	1
+#define ext_irq_start		(BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE)
+#define ext_irq_end		(BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE)
+#define ext_irq_count		4
+#define ext_irq_cfg_reg1	PERF_EXTIRQ_CFG_REG_6362
+#define ext_irq_cfg_reg2	0
+#endif
 #ifdef CONFIG_BCM63XX_CPU_6368
 #define irq_stat_reg		PERF_IRQSTAT_6368_REG
 #define irq_mask_reg		PERF_IRQMASK_6368_REG
@@ -170,6 +181,16 @@ static void bcm63xx_init_irq(void)
 		ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
 		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
 		break;
+	case BCM6362_CPU_ID:
+		irq_stat_addr += PERF_IRQSTAT_6362_REG;
+		irq_mask_addr += PERF_IRQMASK_6362_REG;
+		irq_bits = 64;
+		ext_irq_count = 4;
+		is_ext_irq_cascaded = 1;
+		ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
+		ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
+		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
+		break;
 	case BCM6368_CPU_ID:
 		irq_stat_addr += PERF_IRQSTAT_6368_REG;
 		irq_mask_addr += PERF_IRQMASK_6368_REG;
@@ -458,6 +479,7 @@ static int bcm63xx_external_irq_set_type(struct irq_data *d,
 	case BCM6338_CPU_ID:
 	case BCM6345_CPU_ID:
 	case BCM6358_CPU_ID:
+	case BCM6362_CPU_ID:
 	case BCM6368_CPU_ID:
 		if (levelsense)
 			reg |= EXTIRQ_CFG_LEVELSENSE(irq);
diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c
index 10eaff458071..fd698087fbfd 100644
--- a/arch/mips/bcm63xx/prom.c
+++ b/arch/mips/bcm63xx/prom.c
@@ -36,6 +36,8 @@ void __init prom_init(void)
 		mask = CKCTL_6348_ALL_SAFE_EN;
 	else if (BCMCPU_IS_6358())
 		mask = CKCTL_6358_ALL_SAFE_EN;
+	else if (BCMCPU_IS_6362())
+		mask = CKCTL_6362_ALL_SAFE_EN;
 	else if (BCMCPU_IS_6368())
 		mask = CKCTL_6368_ALL_SAFE_EN;
 	else
diff --git a/arch/mips/bcm63xx/reset.c b/arch/mips/bcm63xx/reset.c
index 68a31bb90cbf..317931c6cf58 100644
--- a/arch/mips/bcm63xx/reset.c
+++ b/arch/mips/bcm63xx/reset.c
@@ -85,6 +85,20 @@
 #define BCM6358_RESET_PCIE	0
 #define BCM6358_RESET_PCIE_EXT	0
 
+#define BCM6362_RESET_SPI	SOFTRESET_6362_SPI_MASK
+#define BCM6362_RESET_ENET	0
+#define BCM6362_RESET_USBH	SOFTRESET_6362_USBH_MASK
+#define BCM6362_RESET_USBD	SOFTRESET_6362_USBS_MASK
+#define BCM6362_RESET_DSL	0
+#define BCM6362_RESET_SAR	SOFTRESET_6362_SAR_MASK
+#define BCM6362_RESET_EPHY	SOFTRESET_6362_EPHY_MASK
+#define BCM6362_RESET_ENETSW	SOFTRESET_6362_ENETSW_MASK
+#define BCM6362_RESET_PCM	SOFTRESET_6362_PCM_MASK
+#define BCM6362_RESET_MPI	0
+#define BCM6362_RESET_PCIE	(SOFTRESET_6362_PCIE_MASK | \
+				 SOFTRESET_6362_PCIE_CORE_MASK)
+#define BCM6362_RESET_PCIE_EXT	SOFTRESET_6362_PCIE_EXT_MASK
+
 #define BCM6368_RESET_SPI	SOFTRESET_6368_SPI_MASK
 #define BCM6368_RESET_ENET	0
 #define BCM6368_RESET_USBH	SOFTRESET_6368_USBH_MASK
@@ -119,6 +133,10 @@ static const u32 bcm6358_reset_bits[] = {
 	__GEN_RESET_BITS_TABLE(6358)
 };
 
+static const u32 bcm6362_reset_bits[] = {
+	__GEN_RESET_BITS_TABLE(6362)
+};
+
 static const u32 bcm6368_reset_bits[] = {
 	__GEN_RESET_BITS_TABLE(6368)
 };
@@ -140,6 +158,9 @@ static int __init bcm63xx_reset_bits_init(void)
 	} else if (BCMCPU_IS_6358()) {
 		reset_reg = PERF_SOFTRESET_6358_REG;
 		bcm63xx_reset_bits = bcm6358_reset_bits;
+	} else if (BCMCPU_IS_6362()) {
+		reset_reg = PERF_SOFTRESET_6362_REG;
+		bcm63xx_reset_bits = bcm6362_reset_bits;
 	} else if (BCMCPU_IS_6368()) {
 		reset_reg = PERF_SOFTRESET_6368_REG;
 		bcm63xx_reset_bits = bcm6368_reset_bits;
@@ -182,6 +203,13 @@ static const u32 bcm63xx_reset_bits[] = {
 #define reset_reg PERF_SOFTRESET_6358_REG
 #endif
 
+#ifdef CONFIG_BCM63XX_CPU_6362
+static const u32 bcm63xx_reset_bits[] = {
+	__GEN_RESET_BITS_TABLE(6362)
+};
+#define reset_reg PERF_SOFTRESET_6362_REG
+#endif
+
 #ifdef CONFIG_BCM63XX_CPU_6368
 static const u32 bcm63xx_reset_bits[] = {
 	__GEN_RESET_BITS_TABLE(6368)
diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c
index 35e18e98beb9..24a24445db64 100644
--- a/arch/mips/bcm63xx/setup.c
+++ b/arch/mips/bcm63xx/setup.c
@@ -83,6 +83,9 @@ void bcm63xx_machine_reboot(void)
 	case BCM6358_CPU_ID:
 		perf_regs[0] = PERF_EXTIRQ_CFG_REG_6358;
 		break;
+	case BCM6362_CPU_ID:
+		perf_regs[0] = PERF_EXTIRQ_CFG_REG_6362;
+		break;
 	}
 
 	for (i = 0; i < 2; i++) {
@@ -126,7 +129,7 @@ static void __bcm63xx_machine_reboot(char *p)
 const char *get_system_type(void)
 {
 	static char buf[128];
-	snprintf(buf, sizeof(buf), "bcm63xx/%s (0x%04x/0x%04X)",
+	snprintf(buf, sizeof(buf), "bcm63xx/%s (0x%04x/0x%02X)",
 		 board_get_name(),
 		 bcm63xx_get_cpu_id(), bcm63xx_get_cpu_rev());
 	return buf;
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 156aa6143e11..a22f06a6f7ca 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -1032,9 +1032,8 @@ static int octeon_irq_gpio_map_common(struct irq_domain *d,
 	if (!octeon_irq_virq_in_range(virq))
 		return -EINVAL;
 
-	hw += gpiod->base_hwirq;
-	line = hw >> 6;
-	bit = hw & 63;
+	line = (hw + gpiod->base_hwirq) >> 6;
+	bit = (hw + gpiod->base_hwirq) & 63;
 	if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0)
 		return -EINVAL;
 
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index cd732e5b4fd5..ce1d3eeeb737 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -2,30 +2,21 @@ CONFIG_MIPS_MALTA=y
 CONFIG_CPU_LITTLE_ENDIAN=y
 CONFIG_CPU_MIPS32_R2=y
 CONFIG_MIPS_MT_SMP=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
 CONFIG_HZ_100=y
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
 CONFIG_LOG_BUF_SHIFT=15
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_RELAY=y
 CONFIG_NAMESPACES=y
-CONFIG_UTS_NS=y
-CONFIG_IPC_NS=y
-CONFIG_PID_NS=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_RELAY=y
 CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
 CONFIG_PCI=y
-CONFIG_PM=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
@@ -41,8 +32,6 @@ CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
-CONFIG_NET_IPGRE_BROADCAST=y
 CONFIG_IP_MROUTE=y
 CONFIG_IP_PIMSM_V1=y
 CONFIG_IP_PIMSM_V2=y
@@ -65,7 +54,6 @@ CONFIG_IPV6_MROUTE=y
 CONFIG_IPV6_PIMSM_V2=y
 CONFIG_NETWORK_SECMARK=y
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
@@ -136,23 +124,15 @@ CONFIG_IP_VS_DH=m
 CONFIG_IP_VS_SH=m
 CONFIG_IP_VS_SED=m
 CONFIG_IP_VS_NQ=m
-CONFIG_IP_VS_FTP=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -162,8 +142,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
-CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
 CONFIG_IP6_NF_MATCH_FRAG=m
@@ -173,7 +151,6 @@ CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
@@ -247,12 +224,10 @@ CONFIG_MAC80211=m
 CONFIG_MAC80211_RC_PID=y
 CONFIG_MAC80211_RC_DEFAULT_PID=y
 CONFIG_MAC80211_MESH=y
-CONFIG_MAC80211_LEDS=y
 CONFIG_RFKILL=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_CONNECTOR=m
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_OOPS=m
@@ -271,7 +246,6 @@ CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
-# CONFIG_MISC_DEVICES is not set
 CONFIG_IDE=y
 CONFIG_BLK_DEV_IDECD=y
 CONFIG_IDE_GENERIC=y
@@ -317,13 +291,19 @@ CONFIG_DM_MIRROR=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_NETDEVICES=y
-CONFIG_IFB=m
-CONFIG_DUMMY=m
 CONFIG_BONDING=m
-CONFIG_MACVLAN=m
+CONFIG_DUMMY=m
 CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
 CONFIG_TUN=m
 CONFIG_VETH=m
+# CONFIG_NET_VENDOR_3COM is not set
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
 CONFIG_MARVELL_PHY=m
 CONFIG_DAVICOM_PHY=m
 CONFIG_QSEMI_PHY=m
@@ -334,14 +314,6 @@ CONFIG_SMSC_PHY=m
 CONFIG_BROADCOM_PHY=m
 CONFIG_ICPLUS_PHY=m
 CONFIG_REALTEK_PHY=m
-CONFIG_MDIO_BITBANG=m
-CONFIG_NET_ETHERNET=y
-CONFIG_AX88796=m
-CONFIG_NET_PCI=y
-CONFIG_PCNET32=y
-CONFIG_TC35815=m
-CONFIG_CHELSIO_T3=m
-CONFIG_NETXEN_NIC=m
 CONFIG_ATMEL=m
 CONFIG_PCI_ATMEL=m
 CONFIG_PRISM54=m
@@ -352,15 +324,7 @@ CONFIG_HOSTAP_PLX=m
 CONFIG_HOSTAP_PCI=m
 CONFIG_IPW2100=m
 CONFIG_IPW2100_MONITOR=y
-CONFIG_IPW2200=m
-CONFIG_IPW2200_MONITOR=y
-CONFIG_IPW2200_PROMISCUOUS=y
-CONFIG_IPW2200_QOS=y
 CONFIG_LIBERTAS=m
-CONFIG_HERMES=m
-CONFIG_PLX_HERMES=m
-CONFIG_TMD_HERMES=m
-CONFIG_NORTEL_HERMES=m
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO_I8042 is not set
@@ -373,12 +337,6 @@ CONFIG_FB_CIRRUS=y
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_HID=m
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_TRIGGER_TIMER=m
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=m
-CONFIG_LEDS_TRIGGER_BACKLIGHT=m
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_CMOS=y
 CONFIG_UIO=m
@@ -398,7 +356,6 @@ CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
 CONFIG_QUOTA=y
 CONFIG_QFMT_V2=y
-CONFIG_AUTOFS_FS=y
 CONFIG_FUSE_FS=m
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
@@ -425,7 +382,6 @@ CONFIG_ROMFS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=y
 CONFIG_NFSD_V3=y
@@ -466,7 +422,6 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_LRW=m
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
new file mode 100644
index 000000000000..341bb47204d6
--- /dev/null
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -0,0 +1,456 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_MIPS_MT_SMP=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PCI=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
143CONFIG_IP_NF_ARPTABLES=m
144CONFIG_IP_NF_ARPFILTER=m
145CONFIG_IP_NF_ARP_MANGLE=m
146CONFIG_NF_CONNTRACK_IPV6=m
147CONFIG_IP6_NF_MATCH_AH=m
148CONFIG_IP6_NF_MATCH_EUI64=m
149CONFIG_IP6_NF_MATCH_FRAG=m
150CONFIG_IP6_NF_MATCH_OPTS=m
151CONFIG_IP6_NF_MATCH_HL=m
152CONFIG_IP6_NF_MATCH_IPV6HEADER=m
153CONFIG_IP6_NF_MATCH_MH=m
154CONFIG_IP6_NF_MATCH_RT=m
155CONFIG_IP6_NF_TARGET_HL=m
156CONFIG_IP6_NF_FILTER=m
157CONFIG_IP6_NF_TARGET_REJECT=m
158CONFIG_IP6_NF_MANGLE=m
159CONFIG_IP6_NF_RAW=m
160CONFIG_BRIDGE_NF_EBTABLES=m
161CONFIG_BRIDGE_EBT_BROUTE=m
162CONFIG_BRIDGE_EBT_T_FILTER=m
163CONFIG_BRIDGE_EBT_T_NAT=m
164CONFIG_BRIDGE_EBT_802_3=m
165CONFIG_BRIDGE_EBT_AMONG=m
166CONFIG_BRIDGE_EBT_ARP=m
167CONFIG_BRIDGE_EBT_IP=m
168CONFIG_BRIDGE_EBT_IP6=m
169CONFIG_BRIDGE_EBT_LIMIT=m
170CONFIG_BRIDGE_EBT_MARK=m
171CONFIG_BRIDGE_EBT_PKTTYPE=m
172CONFIG_BRIDGE_EBT_STP=m
173CONFIG_BRIDGE_EBT_VLAN=m
174CONFIG_BRIDGE_EBT_ARPREPLY=m
175CONFIG_BRIDGE_EBT_DNAT=m
176CONFIG_BRIDGE_EBT_MARK_T=m
177CONFIG_BRIDGE_EBT_REDIRECT=m
178CONFIG_BRIDGE_EBT_SNAT=m
179CONFIG_BRIDGE_EBT_LOG=m
180CONFIG_BRIDGE_EBT_ULOG=m
181CONFIG_BRIDGE_EBT_NFLOG=m
182CONFIG_IP_SCTP=m
183CONFIG_BRIDGE=m
184CONFIG_VLAN_8021Q=m
185CONFIG_VLAN_8021Q_GVRP=y
186CONFIG_ATALK=m
187CONFIG_DEV_APPLETALK=m
188CONFIG_IPDDP=m
189CONFIG_IPDDP_ENCAP=y
190CONFIG_IPDDP_DECAP=y
191CONFIG_PHONET=m
192CONFIG_NET_SCHED=y
193CONFIG_NET_SCH_CBQ=m
194CONFIG_NET_SCH_HTB=m
195CONFIG_NET_SCH_HFSC=m
196CONFIG_NET_SCH_PRIO=m
197CONFIG_NET_SCH_RED=m
198CONFIG_NET_SCH_SFQ=m
199CONFIG_NET_SCH_TEQL=m
200CONFIG_NET_SCH_TBF=m
201CONFIG_NET_SCH_GRED=m
202CONFIG_NET_SCH_DSMARK=m
203CONFIG_NET_SCH_NETEM=m
204CONFIG_NET_SCH_INGRESS=m
205CONFIG_NET_CLS_BASIC=m
206CONFIG_NET_CLS_TCINDEX=m
207CONFIG_NET_CLS_ROUTE4=m
208CONFIG_NET_CLS_FW=m
209CONFIG_NET_CLS_U32=m
210CONFIG_NET_CLS_RSVP=m
211CONFIG_NET_CLS_RSVP6=m
212CONFIG_NET_CLS_FLOW=m
213CONFIG_NET_CLS_ACT=y
214CONFIG_NET_ACT_POLICE=y
215CONFIG_NET_ACT_GACT=m
216CONFIG_GACT_PROB=y
217CONFIG_NET_ACT_MIRRED=m
218CONFIG_NET_ACT_IPT=m
219CONFIG_NET_ACT_NAT=m
220CONFIG_NET_ACT_PEDIT=m
221CONFIG_NET_ACT_SIMP=m
222CONFIG_NET_ACT_SKBEDIT=m
223CONFIG_NET_CLS_IND=y
224CONFIG_CFG80211=m
225CONFIG_MAC80211=m
226CONFIG_MAC80211_RC_PID=y
227CONFIG_MAC80211_RC_DEFAULT_PID=y
228CONFIG_MAC80211_MESH=y
229CONFIG_RFKILL=m
230CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
231CONFIG_CONNECTOR=m
232CONFIG_MTD=y
233CONFIG_MTD_CHAR=y
234CONFIG_MTD_BLOCK=y
235CONFIG_MTD_OOPS=m
236CONFIG_MTD_CFI=y
237CONFIG_MTD_CFI_INTELEXT=y
238CONFIG_MTD_CFI_AMDSTD=y
239CONFIG_MTD_CFI_STAA=y
240CONFIG_MTD_PHYSMAP=y
241CONFIG_MTD_UBI=m
242CONFIG_MTD_UBI_GLUEBI=m
243CONFIG_BLK_DEV_FD=m
244CONFIG_BLK_DEV_UMEM=m
245CONFIG_BLK_DEV_LOOP=m
246CONFIG_BLK_DEV_CRYPTOLOOP=m
247CONFIG_BLK_DEV_NBD=m
248CONFIG_BLK_DEV_RAM=y
249CONFIG_CDROM_PKTCDVD=m
250CONFIG_ATA_OVER_ETH=m
251CONFIG_IDE=y
252CONFIG_BLK_DEV_IDECD=y
253CONFIG_IDE_GENERIC=y
254CONFIG_BLK_DEV_GENERIC=y
255CONFIG_BLK_DEV_PIIX=y
256CONFIG_BLK_DEV_IT8213=m
257CONFIG_BLK_DEV_TC86C001=m
258CONFIG_RAID_ATTRS=m
259CONFIG_SCSI=m
260CONFIG_SCSI_TGT=m
261CONFIG_BLK_DEV_SD=m
262CONFIG_CHR_DEV_ST=m
263CONFIG_CHR_DEV_OSST=m
264CONFIG_BLK_DEV_SR=m
265CONFIG_BLK_DEV_SR_VENDOR=y
266CONFIG_CHR_DEV_SG=m
267CONFIG_SCSI_MULTI_LUN=y
268CONFIG_SCSI_CONSTANTS=y
269CONFIG_SCSI_LOGGING=y
270CONFIG_SCSI_SCAN_ASYNC=y
271CONFIG_SCSI_FC_ATTRS=m
272CONFIG_ISCSI_TCP=m
273CONFIG_BLK_DEV_3W_XXXX_RAID=m
274CONFIG_SCSI_3W_9XXX=m
275CONFIG_SCSI_ACARD=m
276CONFIG_SCSI_AACRAID=m
277CONFIG_SCSI_AIC7XXX=m
278CONFIG_AIC7XXX_RESET_DELAY_MS=15000
279# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
280CONFIG_MD=y
281CONFIG_BLK_DEV_MD=m
282CONFIG_MD_LINEAR=m
283CONFIG_MD_RAID0=m
284CONFIG_MD_RAID1=m
285CONFIG_MD_RAID10=m
286CONFIG_MD_RAID456=m
287CONFIG_MD_MULTIPATH=m
288CONFIG_MD_FAULTY=m
289CONFIG_BLK_DEV_DM=m
290CONFIG_DM_CRYPT=m
291CONFIG_DM_SNAPSHOT=m
292CONFIG_DM_MIRROR=m
293CONFIG_DM_ZERO=m
294CONFIG_DM_MULTIPATH=m
295CONFIG_NETDEVICES=y
296CONFIG_BONDING=m
297CONFIG_DUMMY=m
298CONFIG_EQUALIZER=m
299CONFIG_IFB=m
300CONFIG_MACVLAN=m
301CONFIG_TUN=m
302CONFIG_VETH=m
303CONFIG_PCNET32=y
304CONFIG_CHELSIO_T3=m
305CONFIG_AX88796=m
306CONFIG_NETXEN_NIC=m
307CONFIG_TC35815=m
308CONFIG_MARVELL_PHY=m
309CONFIG_DAVICOM_PHY=m
310CONFIG_QSEMI_PHY=m
311CONFIG_LXT_PHY=m
312CONFIG_CICADA_PHY=m
313CONFIG_VITESSE_PHY=m
314CONFIG_SMSC_PHY=m
315CONFIG_BROADCOM_PHY=m
316CONFIG_ICPLUS_PHY=m
317CONFIG_REALTEK_PHY=m
318CONFIG_ATMEL=m
319CONFIG_PCI_ATMEL=m
320CONFIG_PRISM54=m
321CONFIG_HOSTAP=m
322CONFIG_HOSTAP_FIRMWARE=y
323CONFIG_HOSTAP_FIRMWARE_NVRAM=y
324CONFIG_HOSTAP_PLX=m
325CONFIG_HOSTAP_PCI=m
326CONFIG_IPW2100=m
327CONFIG_IPW2100_MONITOR=y
328CONFIG_LIBERTAS=m
329# CONFIG_INPUT_KEYBOARD is not set
330# CONFIG_INPUT_MOUSE is not set
331# CONFIG_SERIO_I8042 is not set
332CONFIG_VT_HW_CONSOLE_BINDING=y
333CONFIG_SERIAL_8250=y
334CONFIG_SERIAL_8250_CONSOLE=y
335# CONFIG_HWMON is not set
336CONFIG_FB=y
337CONFIG_FB_CIRRUS=y
338# CONFIG_VGA_CONSOLE is not set
339CONFIG_FRAMEBUFFER_CONSOLE=y
340CONFIG_HID=m
341CONFIG_RTC_CLASS=y
342CONFIG_RTC_DRV_CMOS=y
343CONFIG_UIO=m
344CONFIG_UIO_CIF=m
345CONFIG_EXT2_FS=y
346CONFIG_EXT3_FS=y
347CONFIG_REISERFS_FS=m
348CONFIG_REISERFS_PROC_INFO=y
349CONFIG_REISERFS_FS_XATTR=y
350CONFIG_REISERFS_FS_POSIX_ACL=y
351CONFIG_REISERFS_FS_SECURITY=y
352CONFIG_JFS_FS=m
353CONFIG_JFS_POSIX_ACL=y
354CONFIG_JFS_SECURITY=y
355CONFIG_XFS_FS=m
356CONFIG_XFS_QUOTA=y
357CONFIG_XFS_POSIX_ACL=y
358CONFIG_QUOTA=y
359CONFIG_QFMT_V2=y
360CONFIG_FUSE_FS=m
361CONFIG_ISO9660_FS=m
362CONFIG_JOLIET=y
363CONFIG_ZISOFS=y
364CONFIG_UDF_FS=m
365CONFIG_MSDOS_FS=m
366CONFIG_VFAT_FS=m
367CONFIG_PROC_KCORE=y
368CONFIG_TMPFS=y
369CONFIG_CONFIGFS_FS=y
370CONFIG_AFFS_FS=m
371CONFIG_HFS_FS=m
372CONFIG_HFSPLUS_FS=m
373CONFIG_BEFS_FS=m
374CONFIG_BFS_FS=m
375CONFIG_EFS_FS=m
376CONFIG_JFFS2_FS=m
377CONFIG_JFFS2_FS_XATTR=y
378CONFIG_JFFS2_COMPRESSION_OPTIONS=y
379CONFIG_JFFS2_RUBIN=y
380CONFIG_CRAMFS=m
381CONFIG_VXFS_FS=m
382CONFIG_MINIX_FS=m
383CONFIG_ROMFS_FS=m
384CONFIG_SYSV_FS=m
385CONFIG_UFS_FS=m
386CONFIG_NFS_FS=y
387CONFIG_ROOT_NFS=y
388CONFIG_NFSD=y
389CONFIG_NFSD_V3=y
390CONFIG_NLS_CODEPAGE_437=m
391CONFIG_NLS_CODEPAGE_737=m
392CONFIG_NLS_CODEPAGE_775=m
393CONFIG_NLS_CODEPAGE_850=m
394CONFIG_NLS_CODEPAGE_852=m
395CONFIG_NLS_CODEPAGE_855=m
396CONFIG_NLS_CODEPAGE_857=m
397CONFIG_NLS_CODEPAGE_860=m
398CONFIG_NLS_CODEPAGE_861=m
399CONFIG_NLS_CODEPAGE_862=m
400CONFIG_NLS_CODEPAGE_863=m
401CONFIG_NLS_CODEPAGE_864=m
402CONFIG_NLS_CODEPAGE_865=m
403CONFIG_NLS_CODEPAGE_866=m
404CONFIG_NLS_CODEPAGE_869=m
405CONFIG_NLS_CODEPAGE_936=m
406CONFIG_NLS_CODEPAGE_950=m
407CONFIG_NLS_CODEPAGE_932=m
408CONFIG_NLS_CODEPAGE_949=m
409CONFIG_NLS_CODEPAGE_874=m
410CONFIG_NLS_ISO8859_8=m
411CONFIG_NLS_CODEPAGE_1250=m
412CONFIG_NLS_CODEPAGE_1251=m
413CONFIG_NLS_ASCII=m
414CONFIG_NLS_ISO8859_1=m
415CONFIG_NLS_ISO8859_2=m
416CONFIG_NLS_ISO8859_3=m
417CONFIG_NLS_ISO8859_4=m
418CONFIG_NLS_ISO8859_5=m
419CONFIG_NLS_ISO8859_6=m
420CONFIG_NLS_ISO8859_7=m
421CONFIG_NLS_ISO8859_9=m
422CONFIG_NLS_ISO8859_13=m
423CONFIG_NLS_ISO8859_14=m
424CONFIG_NLS_ISO8859_15=m
425CONFIG_NLS_KOI8_R=m
426CONFIG_NLS_KOI8_U=m
427CONFIG_RCU_CPU_STALL_TIMEOUT=60
428CONFIG_ENABLE_DEFAULT_TRACERS=y
429CONFIG_CRYPTO_NULL=m
430CONFIG_CRYPTO_CRYPTD=m
431CONFIG_CRYPTO_LRW=m
432CONFIG_CRYPTO_PCBC=m
433CONFIG_CRYPTO_HMAC=y
434CONFIG_CRYPTO_XCBC=m
435CONFIG_CRYPTO_MD4=m
436CONFIG_CRYPTO_SHA256=m
437CONFIG_CRYPTO_SHA512=m
438CONFIG_CRYPTO_TGR192=m
439CONFIG_CRYPTO_WP512=m
440CONFIG_CRYPTO_ANUBIS=m
441CONFIG_CRYPTO_BLOWFISH=m
442CONFIG_CRYPTO_CAMELLIA=m
443CONFIG_CRYPTO_CAST5=m
444CONFIG_CRYPTO_CAST6=m
445CONFIG_CRYPTO_FCRYPT=m
446CONFIG_CRYPTO_KHAZAD=m
447CONFIG_CRYPTO_SERPENT=m
448CONFIG_CRYPTO_TEA=m
449CONFIG_CRYPTO_TWOFISH=m
450# CONFIG_CRYPTO_ANSI_CPRNG is not set
451CONFIG_CRC16=m
452CONFIG_VIRTUALIZATION=y
453CONFIG_KVM=m
454CONFIG_KVM_MIPS_DYN_TRANS=y
455CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS=y
456CONFIG_VHOST_NET=m
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
new file mode 100644
index 000000000000..2b8558b71080
--- /dev/null
+++ b/arch/mips/configs/malta_kvm_guest_defconfig
@@ -0,0 +1,453 @@
1CONFIG_MIPS_MALTA=y
2CONFIG_CPU_LITTLE_ENDIAN=y
3CONFIG_CPU_MIPS32_R2=y
4CONFIG_KVM_GUEST=y
5CONFIG_PAGE_SIZE_16KB=y
6CONFIG_HZ_100=y
7CONFIG_SYSVIPC=y
8CONFIG_NO_HZ=y
9CONFIG_HIGH_RES_TIMERS=y
10CONFIG_LOG_BUF_SHIFT=15
11CONFIG_NAMESPACES=y
12CONFIG_RELAY=y
13CONFIG_BLK_DEV_INITRD=y
14CONFIG_EXPERT=y
15# CONFIG_COMPAT_BRK is not set
16CONFIG_SLAB=y
17CONFIG_MODULES=y
18CONFIG_MODULE_UNLOAD=y
19CONFIG_MODVERSIONS=y
20CONFIG_MODULE_SRCVERSION_ALL=y
21CONFIG_PCI=y
22CONFIG_PACKET=y
23CONFIG_UNIX=y
24CONFIG_XFRM_USER=m
25CONFIG_NET_KEY=y
26CONFIG_NET_KEY_MIGRATE=y
27CONFIG_INET=y
28CONFIG_IP_MULTICAST=y
29CONFIG_IP_ADVANCED_ROUTER=y
30CONFIG_IP_MULTIPLE_TABLES=y
31CONFIG_IP_ROUTE_MULTIPATH=y
32CONFIG_IP_ROUTE_VERBOSE=y
33CONFIG_IP_PNP=y
34CONFIG_IP_PNP_DHCP=y
35CONFIG_IP_PNP_BOOTP=y
36CONFIG_NET_IPIP=m
37CONFIG_IP_MROUTE=y
38CONFIG_IP_PIMSM_V1=y
39CONFIG_IP_PIMSM_V2=y
40CONFIG_SYN_COOKIES=y
41CONFIG_INET_AH=m
42CONFIG_INET_ESP=m
43CONFIG_INET_IPCOMP=m
44CONFIG_INET_XFRM_MODE_TRANSPORT=m
45CONFIG_INET_XFRM_MODE_TUNNEL=m
46CONFIG_TCP_MD5SIG=y
47CONFIG_IPV6_PRIVACY=y
48CONFIG_IPV6_ROUTER_PREF=y
49CONFIG_IPV6_ROUTE_INFO=y
50CONFIG_IPV6_OPTIMISTIC_DAD=y
51CONFIG_INET6_AH=m
52CONFIG_INET6_ESP=m
53CONFIG_INET6_IPCOMP=m
54CONFIG_IPV6_TUNNEL=m
55CONFIG_IPV6_MROUTE=y
56CONFIG_IPV6_PIMSM_V2=y
57CONFIG_NETWORK_SECMARK=y
58CONFIG_NETFILTER=y
59CONFIG_NF_CONNTRACK=m
60CONFIG_NF_CONNTRACK_SECMARK=y
61CONFIG_NF_CONNTRACK_EVENTS=y
62CONFIG_NF_CT_PROTO_DCCP=m
63CONFIG_NF_CT_PROTO_UDPLITE=m
64CONFIG_NF_CONNTRACK_AMANDA=m
65CONFIG_NF_CONNTRACK_FTP=m
66CONFIG_NF_CONNTRACK_H323=m
67CONFIG_NF_CONNTRACK_IRC=m
68CONFIG_NF_CONNTRACK_PPTP=m
69CONFIG_NF_CONNTRACK_SANE=m
70CONFIG_NF_CONNTRACK_SIP=m
71CONFIG_NF_CONNTRACK_TFTP=m
72CONFIG_NF_CT_NETLINK=m
73CONFIG_NETFILTER_TPROXY=m
74CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
75CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
76CONFIG_NETFILTER_XT_TARGET_MARK=m
77CONFIG_NETFILTER_XT_TARGET_NFLOG=m
78CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
79CONFIG_NETFILTER_XT_TARGET_TPROXY=m
80CONFIG_NETFILTER_XT_TARGET_TRACE=m
81CONFIG_NETFILTER_XT_TARGET_SECMARK=m
82CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
83CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
84CONFIG_NETFILTER_XT_MATCH_COMMENT=m
85CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
86CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
87CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
88CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
89CONFIG_NETFILTER_XT_MATCH_DCCP=m
90CONFIG_NETFILTER_XT_MATCH_ESP=m
91CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
92CONFIG_NETFILTER_XT_MATCH_HELPER=m
93CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
94CONFIG_NETFILTER_XT_MATCH_LENGTH=m
95CONFIG_NETFILTER_XT_MATCH_LIMIT=m
96CONFIG_NETFILTER_XT_MATCH_MAC=m
97CONFIG_NETFILTER_XT_MATCH_MARK=m
98CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
99CONFIG_NETFILTER_XT_MATCH_OWNER=m
100CONFIG_NETFILTER_XT_MATCH_POLICY=m
101CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
102CONFIG_NETFILTER_XT_MATCH_QUOTA=m
103CONFIG_NETFILTER_XT_MATCH_RATEEST=m
104CONFIG_NETFILTER_XT_MATCH_REALM=m
105CONFIG_NETFILTER_XT_MATCH_RECENT=m
106CONFIG_NETFILTER_XT_MATCH_SOCKET=m
107CONFIG_NETFILTER_XT_MATCH_STATE=m
108CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
109CONFIG_NETFILTER_XT_MATCH_STRING=m
110CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
111CONFIG_NETFILTER_XT_MATCH_TIME=m
112CONFIG_NETFILTER_XT_MATCH_U32=m
113CONFIG_IP_VS=m
114CONFIG_IP_VS_IPV6=y
115CONFIG_IP_VS_PROTO_TCP=y
116CONFIG_IP_VS_PROTO_UDP=y
117CONFIG_IP_VS_PROTO_ESP=y
118CONFIG_IP_VS_PROTO_AH=y
119CONFIG_IP_VS_RR=m
120CONFIG_IP_VS_WRR=m
121CONFIG_IP_VS_LC=m
122CONFIG_IP_VS_WLC=m
123CONFIG_IP_VS_LBLC=m
124CONFIG_IP_VS_LBLCR=m
125CONFIG_IP_VS_DH=m
126CONFIG_IP_VS_SH=m
127CONFIG_IP_VS_SED=m
128CONFIG_IP_VS_NQ=m
129CONFIG_NF_CONNTRACK_IPV4=m
130CONFIG_IP_NF_QUEUE=m
131CONFIG_IP_NF_IPTABLES=m
132CONFIG_IP_NF_MATCH_AH=m
133CONFIG_IP_NF_MATCH_ECN=m
134CONFIG_IP_NF_MATCH_TTL=m
135CONFIG_IP_NF_FILTER=m
136CONFIG_IP_NF_TARGET_REJECT=m
137CONFIG_IP_NF_TARGET_ULOG=m
138CONFIG_IP_NF_MANGLE=m
139CONFIG_IP_NF_TARGET_CLUSTERIP=m
140CONFIG_IP_NF_TARGET_ECN=m
141CONFIG_IP_NF_TARGET_TTL=m
142CONFIG_IP_NF_RAW=m
143CONFIG_IP_NF_ARPTABLES=m
144CONFIG_IP_NF_ARPFILTER=m
145CONFIG_IP_NF_ARP_MANGLE=m
146CONFIG_NF_CONNTRACK_IPV6=m
147CONFIG_IP6_NF_MATCH_AH=m
148CONFIG_IP6_NF_MATCH_EUI64=m
149CONFIG_IP6_NF_MATCH_FRAG=m
150CONFIG_IP6_NF_MATCH_OPTS=m
151CONFIG_IP6_NF_MATCH_HL=m
152CONFIG_IP6_NF_MATCH_IPV6HEADER=m
153CONFIG_IP6_NF_MATCH_MH=m
154CONFIG_IP6_NF_MATCH_RT=m
155CONFIG_IP6_NF_TARGET_HL=m
156CONFIG_IP6_NF_FILTER=m
157CONFIG_IP6_NF_TARGET_REJECT=m
158CONFIG_IP6_NF_MANGLE=m
159CONFIG_IP6_NF_RAW=m
160CONFIG_BRIDGE_NF_EBTABLES=m
161CONFIG_BRIDGE_EBT_BROUTE=m
162CONFIG_BRIDGE_EBT_T_FILTER=m
163CONFIG_BRIDGE_EBT_T_NAT=m
164CONFIG_BRIDGE_EBT_802_3=m
165CONFIG_BRIDGE_EBT_AMONG=m
166CONFIG_BRIDGE_EBT_ARP=m
167CONFIG_BRIDGE_EBT_IP=m
168CONFIG_BRIDGE_EBT_IP6=m
169CONFIG_BRIDGE_EBT_LIMIT=m
170CONFIG_BRIDGE_EBT_MARK=m
171CONFIG_BRIDGE_EBT_PKTTYPE=m
172CONFIG_BRIDGE_EBT_STP=m
173CONFIG_BRIDGE_EBT_VLAN=m
174CONFIG_BRIDGE_EBT_ARPREPLY=m
175CONFIG_BRIDGE_EBT_DNAT=m
176CONFIG_BRIDGE_EBT_MARK_T=m
177CONFIG_BRIDGE_EBT_REDIRECT=m
178CONFIG_BRIDGE_EBT_SNAT=m
179CONFIG_BRIDGE_EBT_LOG=m
180CONFIG_BRIDGE_EBT_ULOG=m
181CONFIG_BRIDGE_EBT_NFLOG=m
182CONFIG_IP_SCTP=m
183CONFIG_BRIDGE=m
184CONFIG_VLAN_8021Q=m
185CONFIG_VLAN_8021Q_GVRP=y
186CONFIG_ATALK=m
187CONFIG_DEV_APPLETALK=m
188CONFIG_IPDDP=m
189CONFIG_IPDDP_ENCAP=y
190CONFIG_IPDDP_DECAP=y
191CONFIG_PHONET=m
192CONFIG_NET_SCHED=y
193CONFIG_NET_SCH_CBQ=m
194CONFIG_NET_SCH_HTB=m
195CONFIG_NET_SCH_HFSC=m
196CONFIG_NET_SCH_PRIO=m
197CONFIG_NET_SCH_RED=m
198CONFIG_NET_SCH_SFQ=m
199CONFIG_NET_SCH_TEQL=m
200CONFIG_NET_SCH_TBF=m
201CONFIG_NET_SCH_GRED=m
202CONFIG_NET_SCH_DSMARK=m
203CONFIG_NET_SCH_NETEM=m
204CONFIG_NET_SCH_INGRESS=m
205CONFIG_NET_CLS_BASIC=m
206CONFIG_NET_CLS_TCINDEX=m
207CONFIG_NET_CLS_ROUTE4=m
208CONFIG_NET_CLS_FW=m
209CONFIG_NET_CLS_U32=m
210CONFIG_NET_CLS_RSVP=m
211CONFIG_NET_CLS_RSVP6=m
212CONFIG_NET_CLS_FLOW=m
213CONFIG_NET_CLS_ACT=y
214CONFIG_NET_ACT_POLICE=y
215CONFIG_NET_ACT_GACT=m
216CONFIG_GACT_PROB=y
217CONFIG_NET_ACT_MIRRED=m
218CONFIG_NET_ACT_IPT=m
219CONFIG_NET_ACT_NAT=m
220CONFIG_NET_ACT_PEDIT=m
221CONFIG_NET_ACT_SIMP=m
222CONFIG_NET_ACT_SKBEDIT=m
223CONFIG_NET_CLS_IND=y
224CONFIG_CFG80211=m
225CONFIG_MAC80211=m
226CONFIG_MAC80211_RC_PID=y
227CONFIG_MAC80211_RC_DEFAULT_PID=y
228CONFIG_MAC80211_MESH=y
229CONFIG_RFKILL=m
230CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
231CONFIG_CONNECTOR=m
232CONFIG_MTD=y
233CONFIG_MTD_CHAR=y
234CONFIG_MTD_BLOCK=y
235CONFIG_MTD_OOPS=m
236CONFIG_MTD_CFI=y
237CONFIG_MTD_CFI_INTELEXT=y
238CONFIG_MTD_CFI_AMDSTD=y
239CONFIG_MTD_CFI_STAA=y
240CONFIG_MTD_PHYSMAP=y
241CONFIG_MTD_UBI=m
242CONFIG_MTD_UBI_GLUEBI=m
243CONFIG_BLK_DEV_FD=m
244CONFIG_BLK_DEV_UMEM=m
245CONFIG_BLK_DEV_LOOP=m
246CONFIG_BLK_DEV_CRYPTOLOOP=m
247CONFIG_BLK_DEV_NBD=m
248CONFIG_BLK_DEV_RAM=y
249CONFIG_CDROM_PKTCDVD=m
250CONFIG_ATA_OVER_ETH=m
251CONFIG_VIRTIO_BLK=y
252CONFIG_IDE=y
253CONFIG_BLK_DEV_IDECD=y
254CONFIG_IDE_GENERIC=y
255CONFIG_BLK_DEV_GENERIC=y
256CONFIG_BLK_DEV_PIIX=y
257CONFIG_BLK_DEV_IT8213=m
258CONFIG_BLK_DEV_TC86C001=m
259CONFIG_RAID_ATTRS=m
260CONFIG_SCSI=m
261CONFIG_SCSI_TGT=m
262CONFIG_BLK_DEV_SD=m
263CONFIG_CHR_DEV_ST=m
264CONFIG_CHR_DEV_OSST=m
265CONFIG_BLK_DEV_SR=m
266CONFIG_BLK_DEV_SR_VENDOR=y
267CONFIG_CHR_DEV_SG=m
268CONFIG_SCSI_MULTI_LUN=y
269CONFIG_SCSI_CONSTANTS=y
270CONFIG_SCSI_LOGGING=y
271CONFIG_SCSI_SCAN_ASYNC=y
272CONFIG_SCSI_FC_ATTRS=m
273CONFIG_ISCSI_TCP=m
274CONFIG_BLK_DEV_3W_XXXX_RAID=m
275CONFIG_SCSI_3W_9XXX=m
276CONFIG_SCSI_ACARD=m
277CONFIG_SCSI_AACRAID=m
278CONFIG_SCSI_AIC7XXX=m
279CONFIG_AIC7XXX_RESET_DELAY_MS=15000
280# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
281CONFIG_MD=y
282CONFIG_BLK_DEV_MD=m
283CONFIG_MD_LINEAR=m
284CONFIG_MD_RAID0=m
285CONFIG_MD_RAID1=m
286CONFIG_MD_RAID10=m
287CONFIG_MD_RAID456=m
288CONFIG_MD_MULTIPATH=m
289CONFIG_MD_FAULTY=m
290CONFIG_BLK_DEV_DM=m
291CONFIG_DM_CRYPT=m
292CONFIG_DM_SNAPSHOT=m
293CONFIG_DM_MIRROR=m
294CONFIG_DM_ZERO=m
295CONFIG_DM_MULTIPATH=m
296CONFIG_NETDEVICES=y
297CONFIG_BONDING=m
298CONFIG_DUMMY=m
299CONFIG_EQUALIZER=m
300CONFIG_IFB=m
301CONFIG_MACVLAN=m
302CONFIG_TUN=m
303CONFIG_VETH=m
304CONFIG_VIRTIO_NET=y
305CONFIG_PCNET32=y
306CONFIG_CHELSIO_T3=m
307CONFIG_AX88796=m
308CONFIG_NETXEN_NIC=m
309CONFIG_TC35815=m
310CONFIG_MARVELL_PHY=m
311CONFIG_DAVICOM_PHY=m
312CONFIG_QSEMI_PHY=m
313CONFIG_LXT_PHY=m
314CONFIG_CICADA_PHY=m
315CONFIG_VITESSE_PHY=m
316CONFIG_SMSC_PHY=m
317CONFIG_BROADCOM_PHY=m
318CONFIG_ICPLUS_PHY=m
319CONFIG_REALTEK_PHY=m
320CONFIG_ATMEL=m
321CONFIG_PCI_ATMEL=m
322CONFIG_PRISM54=m
323CONFIG_HOSTAP=m
324CONFIG_HOSTAP_FIRMWARE=y
325CONFIG_HOSTAP_FIRMWARE_NVRAM=y
326CONFIG_HOSTAP_PLX=m
327CONFIG_HOSTAP_PCI=m
328CONFIG_IPW2100=m
329CONFIG_IPW2100_MONITOR=y
330CONFIG_LIBERTAS=m
331# CONFIG_INPUT_KEYBOARD is not set
332# CONFIG_INPUT_MOUSE is not set
333# CONFIG_SERIO_I8042 is not set
334CONFIG_VT_HW_CONSOLE_BINDING=y
335CONFIG_SERIAL_8250=y
336CONFIG_SERIAL_8250_CONSOLE=y
337# CONFIG_HWMON is not set
338CONFIG_FB=y
339CONFIG_FB_CIRRUS=y
340# CONFIG_VGA_CONSOLE is not set
341CONFIG_FRAMEBUFFER_CONSOLE=y
342CONFIG_HID=m
343CONFIG_RTC_CLASS=y
344CONFIG_RTC_DRV_CMOS=y
345CONFIG_UIO=m
346CONFIG_UIO_CIF=m
347CONFIG_VIRTIO_PCI=y
348CONFIG_VIRTIO_BALLOON=y
349CONFIG_VIRTIO_MMIO=y
350CONFIG_EXT2_FS=y
351CONFIG_EXT3_FS=y
352CONFIG_REISERFS_FS=m
353CONFIG_REISERFS_PROC_INFO=y
354CONFIG_REISERFS_FS_XATTR=y
355CONFIG_REISERFS_FS_POSIX_ACL=y
356CONFIG_REISERFS_FS_SECURITY=y
357CONFIG_JFS_FS=m
358CONFIG_JFS_POSIX_ACL=y
359CONFIG_JFS_SECURITY=y
360CONFIG_XFS_FS=m
361CONFIG_XFS_QUOTA=y
362CONFIG_XFS_POSIX_ACL=y
363CONFIG_QUOTA=y
364CONFIG_QFMT_V2=y
365CONFIG_FUSE_FS=m
366CONFIG_ISO9660_FS=m
367CONFIG_JOLIET=y
368CONFIG_ZISOFS=y
369CONFIG_UDF_FS=m
370CONFIG_MSDOS_FS=m
371CONFIG_VFAT_FS=m
372CONFIG_PROC_KCORE=y
373CONFIG_TMPFS=y
374CONFIG_AFFS_FS=m
375CONFIG_HFS_FS=m
376CONFIG_HFSPLUS_FS=m
377CONFIG_BEFS_FS=m
378CONFIG_BFS_FS=m
379CONFIG_EFS_FS=m
380CONFIG_JFFS2_FS=m
381CONFIG_JFFS2_FS_XATTR=y
382CONFIG_JFFS2_COMPRESSION_OPTIONS=y
383CONFIG_JFFS2_RUBIN=y
384CONFIG_CRAMFS=m
385CONFIG_VXFS_FS=m
386CONFIG_MINIX_FS=m
387CONFIG_ROMFS_FS=m
388CONFIG_SYSV_FS=m
389CONFIG_UFS_FS=m
390CONFIG_NFS_FS=y
391CONFIG_ROOT_NFS=y
392CONFIG_NFSD=y
393CONFIG_NFSD_V3=y
394CONFIG_NLS_CODEPAGE_437=m
395CONFIG_NLS_CODEPAGE_737=m
396CONFIG_NLS_CODEPAGE_775=m
397CONFIG_NLS_CODEPAGE_850=m
398CONFIG_NLS_CODEPAGE_852=m
399CONFIG_NLS_CODEPAGE_855=m
400CONFIG_NLS_CODEPAGE_857=m
401CONFIG_NLS_CODEPAGE_860=m
402CONFIG_NLS_CODEPAGE_861=m
403CONFIG_NLS_CODEPAGE_862=m
404CONFIG_NLS_CODEPAGE_863=m
405CONFIG_NLS_CODEPAGE_864=m
406CONFIG_NLS_CODEPAGE_865=m
407CONFIG_NLS_CODEPAGE_866=m
408CONFIG_NLS_CODEPAGE_869=m
409CONFIG_NLS_CODEPAGE_936=m
410CONFIG_NLS_CODEPAGE_950=m
411CONFIG_NLS_CODEPAGE_932=m
412CONFIG_NLS_CODEPAGE_949=m
413CONFIG_NLS_CODEPAGE_874=m
414CONFIG_NLS_ISO8859_8=m
415CONFIG_NLS_CODEPAGE_1250=m
416CONFIG_NLS_CODEPAGE_1251=m
417CONFIG_NLS_ASCII=m
418CONFIG_NLS_ISO8859_1=m
419CONFIG_NLS_ISO8859_2=m
420CONFIG_NLS_ISO8859_3=m
421CONFIG_NLS_ISO8859_4=m
422CONFIG_NLS_ISO8859_5=m
423CONFIG_NLS_ISO8859_6=m
424CONFIG_NLS_ISO8859_7=m
425CONFIG_NLS_ISO8859_9=m
426CONFIG_NLS_ISO8859_13=m
427CONFIG_NLS_ISO8859_14=m
428CONFIG_NLS_ISO8859_15=m
429CONFIG_NLS_KOI8_R=m
430CONFIG_NLS_KOI8_U=m
431CONFIG_CRYPTO_NULL=m
432CONFIG_CRYPTO_CRYPTD=m
433CONFIG_CRYPTO_LRW=m
434CONFIG_CRYPTO_PCBC=m
435CONFIG_CRYPTO_HMAC=y
436CONFIG_CRYPTO_XCBC=m
437CONFIG_CRYPTO_MD4=m
438CONFIG_CRYPTO_SHA256=m
439CONFIG_CRYPTO_SHA512=m
440CONFIG_CRYPTO_TGR192=m
441CONFIG_CRYPTO_WP512=m
442CONFIG_CRYPTO_ANUBIS=m
443CONFIG_CRYPTO_BLOWFISH=m
444CONFIG_CRYPTO_CAMELLIA=m
445CONFIG_CRYPTO_CAST5=m
446CONFIG_CRYPTO_CAST6=m
447CONFIG_CRYPTO_FCRYPT=m
448CONFIG_CRYPTO_KHAZAD=m
449CONFIG_CRYPTO_SERPENT=m
450CONFIG_CRYPTO_TEA=m
451CONFIG_CRYPTO_TWOFISH=m
452# CONFIG_CRYPTO_ANSI_CPRNG is not set
453CONFIG_CRC16=m
diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig
new file mode 100644
index 000000000000..93057a760dfa
--- /dev/null
+++ b/arch/mips/configs/maltaaprp_defconfig
@@ -0,0 +1,195 @@
1CONFIG_MIPS_MALTA=y
2CONFIG_CPU_LITTLE_ENDIAN=y
3CONFIG_CPU_MIPS32_R2=y
4CONFIG_MIPS_VPE_LOADER=y
5CONFIG_MIPS_VPE_APSP_API=y
6CONFIG_HZ_100=y
7CONFIG_LOCALVERSION="aprp"
8CONFIG_SYSVIPC=y
9CONFIG_POSIX_MQUEUE=y
10CONFIG_AUDIT=y
11CONFIG_IKCONFIG=y
12CONFIG_IKCONFIG_PROC=y
13CONFIG_LOG_BUF_SHIFT=15
14CONFIG_SYSCTL_SYSCALL=y
15CONFIG_EMBEDDED=y
16CONFIG_SLAB=y
17CONFIG_MODULES=y
18CONFIG_MODULE_UNLOAD=y
19CONFIG_MODVERSIONS=y
20CONFIG_MODULE_SRCVERSION_ALL=y
21# CONFIG_BLK_DEV_BSG is not set
22CONFIG_PCI=y
23# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
24CONFIG_NET=y
25CONFIG_PACKET=y
26CONFIG_UNIX=y
27CONFIG_XFRM_USER=m
28CONFIG_NET_KEY=y
29CONFIG_INET=y
30CONFIG_IP_MULTICAST=y
31CONFIG_IP_ADVANCED_ROUTER=y
32CONFIG_IP_MULTIPLE_TABLES=y
33CONFIG_IP_ROUTE_MULTIPATH=y
34CONFIG_IP_ROUTE_VERBOSE=y
35CONFIG_IP_PNP=y
36CONFIG_IP_PNP_DHCP=y
37CONFIG_IP_PNP_BOOTP=y
38CONFIG_NET_IPIP=m
39CONFIG_IP_MROUTE=y
40CONFIG_IP_PIMSM_V1=y
41CONFIG_IP_PIMSM_V2=y
42CONFIG_SYN_COOKIES=y
43CONFIG_INET_AH=m
44CONFIG_INET_ESP=m
45CONFIG_INET_IPCOMP=m
46# CONFIG_INET_LRO is not set
47CONFIG_IPV6_PRIVACY=y
48CONFIG_INET6_AH=m
49CONFIG_INET6_ESP=m
50CONFIG_INET6_IPCOMP=m
51CONFIG_IPV6_TUNNEL=m
52CONFIG_BRIDGE=m
53CONFIG_VLAN_8021Q=m
54CONFIG_ATALK=m
55CONFIG_DEV_APPLETALK=m
56CONFIG_IPDDP=m
57CONFIG_IPDDP_ENCAP=y
58CONFIG_IPDDP_DECAP=y
59CONFIG_NET_SCHED=y
60CONFIG_NET_SCH_CBQ=m
61CONFIG_NET_SCH_HTB=m
62CONFIG_NET_SCH_HFSC=m
63CONFIG_NET_SCH_PRIO=m
64CONFIG_NET_SCH_RED=m
65CONFIG_NET_SCH_SFQ=m
66CONFIG_NET_SCH_TEQL=m
67CONFIG_NET_SCH_TBF=m
68CONFIG_NET_SCH_GRED=m
69CONFIG_NET_SCH_DSMARK=m
70CONFIG_NET_SCH_NETEM=m
71CONFIG_NET_SCH_INGRESS=m
72CONFIG_NET_CLS_BASIC=m
73CONFIG_NET_CLS_TCINDEX=m
74CONFIG_NET_CLS_ROUTE4=m
75CONFIG_NET_CLS_FW=m
76CONFIG_NET_CLS_U32=m
77CONFIG_NET_CLS_RSVP=m
78CONFIG_NET_CLS_RSVP6=m
79CONFIG_NET_CLS_ACT=y
80CONFIG_NET_ACT_POLICE=y
81CONFIG_NET_CLS_IND=y
82# CONFIG_WIRELESS is not set
83CONFIG_BLK_DEV_LOOP=y
84CONFIG_BLK_DEV_CRYPTOLOOP=m
85CONFIG_IDE=y
86# CONFIG_IDE_PROC_FS is not set
87# CONFIG_IDEPCI_PCIBUS_ORDER is not set
88CONFIG_BLK_DEV_GENERIC=y
89CONFIG_BLK_DEV_PIIX=y
90CONFIG_SCSI=y
91CONFIG_BLK_DEV_SD=y
92CONFIG_CHR_DEV_SG=y
93# CONFIG_SCSI_LOWLEVEL is not set
94CONFIG_NETDEVICES=y
95# CONFIG_NET_VENDOR_3COM is not set
96# CONFIG_NET_VENDOR_ADAPTEC is not set
97# CONFIG_NET_VENDOR_ALTEON is not set
98CONFIG_PCNET32=y
99# CONFIG_NET_VENDOR_ATHEROS is not set
100# CONFIG_NET_VENDOR_BROADCOM is not set
101# CONFIG_NET_VENDOR_BROCADE is not set
102# CONFIG_NET_VENDOR_CHELSIO is not set
103# CONFIG_NET_VENDOR_CISCO is not set
104# CONFIG_NET_VENDOR_DEC is not set
105# CONFIG_NET_VENDOR_DLINK is not set
106# CONFIG_NET_VENDOR_EMULEX is not set
107# CONFIG_NET_VENDOR_EXAR is not set
108# CONFIG_NET_VENDOR_HP is not set
109# CONFIG_NET_VENDOR_INTEL is not set
110# CONFIG_NET_VENDOR_MARVELL is not set
111# CONFIG_NET_VENDOR_MELLANOX is not set
112# CONFIG_NET_VENDOR_MICREL is not set
113# CONFIG_NET_VENDOR_MYRI is not set
114# CONFIG_NET_VENDOR_NATSEMI is not set
115# CONFIG_NET_VENDOR_NVIDIA is not set
116# CONFIG_NET_VENDOR_OKI is not set
117# CONFIG_NET_PACKET_ENGINE is not set
118# CONFIG_NET_VENDOR_QLOGIC is not set
119# CONFIG_NET_VENDOR_REALTEK is not set
120# CONFIG_NET_VENDOR_RDC is not set
121# CONFIG_NET_VENDOR_SEEQ is not set
122# CONFIG_NET_VENDOR_SILAN is not set
123# CONFIG_NET_VENDOR_SIS is not set
124# CONFIG_NET_VENDOR_SMSC is not set
125# CONFIG_NET_VENDOR_STMICRO is not set
126# CONFIG_NET_VENDOR_SUN is not set
127# CONFIG_NET_VENDOR_TEHUTI is not set
128# CONFIG_NET_VENDOR_TI is not set
129# CONFIG_NET_VENDOR_TOSHIBA is not set
130# CONFIG_NET_VENDOR_VIA is not set
131# CONFIG_WLAN is not set
132# CONFIG_VT is not set
133CONFIG_LEGACY_PTY_COUNT=16
134CONFIG_SERIAL_8250=y
135CONFIG_SERIAL_8250_CONSOLE=y
136CONFIG_HW_RANDOM=y
137# CONFIG_HWMON is not set
138CONFIG_VIDEO_OUTPUT_CONTROL=m
139CONFIG_FB=y
140CONFIG_FIRMWARE_EDID=y
141CONFIG_FB_MATROX=y
142CONFIG_FB_MATROX_G=y
143CONFIG_USB=y
144CONFIG_USB_EHCI_HCD=y
145# CONFIG_USB_EHCI_TT_NEWSCHED is not set
146CONFIG_USB_UHCI_HCD=y
147CONFIG_USB_STORAGE=y
148CONFIG_NEW_LEDS=y
149CONFIG_LEDS_CLASS=y
150CONFIG_LEDS_TRIGGERS=y
151CONFIG_LEDS_TRIGGER_TIMER=y
152CONFIG_LEDS_TRIGGER_IDE_DISK=y
153CONFIG_LEDS_TRIGGER_HEARTBEAT=y
154CONFIG_LEDS_TRIGGER_BACKLIGHT=y
155CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
156CONFIG_RTC_CLASS=y
157CONFIG_RTC_DRV_CMOS=y
158CONFIG_EXT2_FS=y
159CONFIG_EXT3_FS=y
160# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
161CONFIG_XFS_FS=y
162CONFIG_XFS_QUOTA=y
163CONFIG_XFS_POSIX_ACL=y
164CONFIG_QUOTA=y
165CONFIG_QFMT_V2=y
166CONFIG_MSDOS_FS=m
167CONFIG_VFAT_FS=m
168CONFIG_PROC_KCORE=y
169CONFIG_TMPFS=y
170CONFIG_NFS_FS=y
171CONFIG_ROOT_NFS=y
172CONFIG_CIFS=m
173CONFIG_CIFS_WEAK_PW_HASH=y
174CONFIG_CIFS_XATTR=y
175CONFIG_CIFS_POSIX=y
176CONFIG_NLS_CODEPAGE_437=m
177CONFIG_NLS_ISO8859_1=m
178# CONFIG_FTRACE is not set
179CONFIG_CRYPTO_NULL=m
180CONFIG_CRYPTO_PCBC=m
181CONFIG_CRYPTO_HMAC=y
182CONFIG_CRYPTO_MICHAEL_MIC=m
183CONFIG_CRYPTO_SHA512=m
184CONFIG_CRYPTO_TGR192=m
185CONFIG_CRYPTO_WP512=m
186CONFIG_CRYPTO_ANUBIS=m
187CONFIG_CRYPTO_BLOWFISH=m
188CONFIG_CRYPTO_CAST5=m
189CONFIG_CRYPTO_CAST6=m
190CONFIG_CRYPTO_KHAZAD=m
191CONFIG_CRYPTO_SERPENT=m
192CONFIG_CRYPTO_TEA=m
193CONFIG_CRYPTO_TWOFISH=m
194# CONFIG_CRYPTO_ANSI_CPRNG is not set
195# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/maltasmtc_defconfig b/arch/mips/configs/maltasmtc_defconfig
new file mode 100644
index 000000000000..4e54b75d89be
--- /dev/null
+++ b/arch/mips/configs/maltasmtc_defconfig
@@ -0,0 +1,196 @@
1CONFIG_MIPS_MALTA=y
2CONFIG_CPU_LITTLE_ENDIAN=y
3CONFIG_CPU_MIPS32_R2=y
4CONFIG_MIPS_MT_SMTC=y
5# CONFIG_MIPS_MT_FPAFF is not set
6CONFIG_NR_CPUS=9
7CONFIG_HZ_48=y
8CONFIG_LOCALVERSION="smtc"
9CONFIG_SYSVIPC=y
10CONFIG_POSIX_MQUEUE=y
11CONFIG_AUDIT=y
12CONFIG_IKCONFIG=y
13CONFIG_IKCONFIG_PROC=y
14CONFIG_LOG_BUF_SHIFT=15
15CONFIG_SYSCTL_SYSCALL=y
16CONFIG_EMBEDDED=y
17CONFIG_SLAB=y
18CONFIG_MODULES=y
19CONFIG_MODULE_UNLOAD=y
20CONFIG_MODVERSIONS=y
21CONFIG_MODULE_SRCVERSION_ALL=y
22# CONFIG_BLK_DEV_BSG is not set
23CONFIG_PCI=y
24# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
25CONFIG_NET=y
26CONFIG_PACKET=y
27CONFIG_UNIX=y
28CONFIG_XFRM_USER=m
29CONFIG_NET_KEY=y
30CONFIG_INET=y
31CONFIG_IP_MULTICAST=y
32CONFIG_IP_ADVANCED_ROUTER=y
33CONFIG_IP_MULTIPLE_TABLES=y
34CONFIG_IP_ROUTE_MULTIPATH=y
35CONFIG_IP_ROUTE_VERBOSE=y
36CONFIG_IP_PNP=y
37CONFIG_IP_PNP_DHCP=y
38CONFIG_IP_PNP_BOOTP=y
39CONFIG_NET_IPIP=m
40CONFIG_IP_MROUTE=y
41CONFIG_IP_PIMSM_V1=y
42CONFIG_IP_PIMSM_V2=y
43CONFIG_SYN_COOKIES=y
44CONFIG_INET_AH=m
45CONFIG_INET_ESP=m
46CONFIG_INET_IPCOMP=m
47# CONFIG_INET_LRO is not set
48CONFIG_IPV6_PRIVACY=y
49CONFIG_INET6_AH=m
50CONFIG_INET6_ESP=m
51CONFIG_INET6_IPCOMP=m
52CONFIG_IPV6_TUNNEL=m
53CONFIG_BRIDGE=m
54CONFIG_VLAN_8021Q=m
55CONFIG_ATALK=m
56CONFIG_DEV_APPLETALK=m
57CONFIG_IPDDP=m
58CONFIG_IPDDP_ENCAP=y
59CONFIG_IPDDP_DECAP=y
60CONFIG_NET_SCHED=y
61CONFIG_NET_SCH_CBQ=m
62CONFIG_NET_SCH_HTB=m
63CONFIG_NET_SCH_HFSC=m
64CONFIG_NET_SCH_PRIO=m
65CONFIG_NET_SCH_RED=m
66CONFIG_NET_SCH_SFQ=m
67CONFIG_NET_SCH_TEQL=m
68CONFIG_NET_SCH_TBF=m
69CONFIG_NET_SCH_GRED=m
70CONFIG_NET_SCH_DSMARK=m
71CONFIG_NET_SCH_NETEM=m
72CONFIG_NET_SCH_INGRESS=m
73CONFIG_NET_CLS_BASIC=m
74CONFIG_NET_CLS_TCINDEX=m
75CONFIG_NET_CLS_ROUTE4=m
76CONFIG_NET_CLS_FW=m
77CONFIG_NET_CLS_U32=m
78CONFIG_NET_CLS_RSVP=m
79CONFIG_NET_CLS_RSVP6=m
80CONFIG_NET_CLS_ACT=y
81CONFIG_NET_ACT_POLICE=y
82CONFIG_NET_CLS_IND=y
83# CONFIG_WIRELESS is not set
84CONFIG_BLK_DEV_LOOP=y
85CONFIG_BLK_DEV_CRYPTOLOOP=m
86CONFIG_IDE=y
87# CONFIG_IDE_PROC_FS is not set
88# CONFIG_IDEPCI_PCIBUS_ORDER is not set
89CONFIG_BLK_DEV_GENERIC=y
90CONFIG_BLK_DEV_PIIX=y
91CONFIG_SCSI=y
92CONFIG_BLK_DEV_SD=y
93CONFIG_CHR_DEV_SG=y
94# CONFIG_SCSI_LOWLEVEL is not set
95CONFIG_NETDEVICES=y
96# CONFIG_NET_VENDOR_3COM is not set
97# CONFIG_NET_VENDOR_ADAPTEC is not set
98# CONFIG_NET_VENDOR_ALTEON is not set
99CONFIG_PCNET32=y
100# CONFIG_NET_VENDOR_ATHEROS is not set
101# CONFIG_NET_VENDOR_BROADCOM is not set
102# CONFIG_NET_VENDOR_BROCADE is not set
103# CONFIG_NET_VENDOR_CHELSIO is not set
104# CONFIG_NET_VENDOR_CISCO is not set
105# CONFIG_NET_VENDOR_DEC is not set
106# CONFIG_NET_VENDOR_DLINK is not set
107# CONFIG_NET_VENDOR_EMULEX is not set
108# CONFIG_NET_VENDOR_EXAR is not set
109# CONFIG_NET_VENDOR_HP is not set
110# CONFIG_NET_VENDOR_INTEL is not set
111# CONFIG_NET_VENDOR_MARVELL is not set
112# CONFIG_NET_VENDOR_MELLANOX is not set
113# CONFIG_NET_VENDOR_MICREL is not set
114# CONFIG_NET_VENDOR_MYRI is not set
115# CONFIG_NET_VENDOR_NATSEMI is not set
116# CONFIG_NET_VENDOR_NVIDIA is not set
117# CONFIG_NET_VENDOR_OKI is not set
118# CONFIG_NET_PACKET_ENGINE is not set
119# CONFIG_NET_VENDOR_QLOGIC is not set
120# CONFIG_NET_VENDOR_REALTEK is not set
121# CONFIG_NET_VENDOR_RDC is not set
122# CONFIG_NET_VENDOR_SEEQ is not set
123# CONFIG_NET_VENDOR_SILAN is not set
124# CONFIG_NET_VENDOR_SIS is not set
125# CONFIG_NET_VENDOR_SMSC is not set
126# CONFIG_NET_VENDOR_STMICRO is not set
127# CONFIG_NET_VENDOR_SUN is not set
128# CONFIG_NET_VENDOR_TEHUTI is not set
129# CONFIG_NET_VENDOR_TI is not set
130# CONFIG_NET_VENDOR_TOSHIBA is not set
131# CONFIG_NET_VENDOR_VIA is not set
132# CONFIG_WLAN is not set
133# CONFIG_VT is not set
134CONFIG_LEGACY_PTY_COUNT=16
135CONFIG_SERIAL_8250=y
136CONFIG_SERIAL_8250_CONSOLE=y
137CONFIG_HW_RANDOM=y
138# CONFIG_HWMON is not set
139CONFIG_VIDEO_OUTPUT_CONTROL=m
140CONFIG_FB=y
141CONFIG_FIRMWARE_EDID=y
142CONFIG_FB_MATROX=y
143CONFIG_FB_MATROX_G=y
144CONFIG_USB=y
145CONFIG_USB_EHCI_HCD=y
146# CONFIG_USB_EHCI_TT_NEWSCHED is not set
147CONFIG_USB_UHCI_HCD=y
148CONFIG_USB_STORAGE=y
149CONFIG_NEW_LEDS=y
150CONFIG_LEDS_CLASS=y
151CONFIG_LEDS_TRIGGERS=y
152CONFIG_LEDS_TRIGGER_TIMER=y
153CONFIG_LEDS_TRIGGER_IDE_DISK=y
154CONFIG_LEDS_TRIGGER_HEARTBEAT=y
155CONFIG_LEDS_TRIGGER_BACKLIGHT=y
156CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
157CONFIG_RTC_CLASS=y
158CONFIG_RTC_DRV_CMOS=y
159CONFIG_EXT2_FS=y
160CONFIG_EXT3_FS=y
161# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
162CONFIG_XFS_FS=y
163CONFIG_XFS_QUOTA=y
164CONFIG_XFS_POSIX_ACL=y
165CONFIG_QUOTA=y
166CONFIG_QFMT_V2=y
167CONFIG_MSDOS_FS=m
168CONFIG_VFAT_FS=m
169CONFIG_PROC_KCORE=y
170CONFIG_TMPFS=y
171CONFIG_NFS_FS=y
172CONFIG_ROOT_NFS=y
173CONFIG_CIFS=m
174CONFIG_CIFS_WEAK_PW_HASH=y
175CONFIG_CIFS_XATTR=y
176CONFIG_CIFS_POSIX=y
177CONFIG_NLS_CODEPAGE_437=m
178CONFIG_NLS_ISO8859_1=m
179# CONFIG_FTRACE is not set
180CONFIG_CRYPTO_NULL=m
181CONFIG_CRYPTO_PCBC=m
182CONFIG_CRYPTO_HMAC=y
183CONFIG_CRYPTO_MICHAEL_MIC=m
184CONFIG_CRYPTO_SHA512=m
185CONFIG_CRYPTO_TGR192=m
186CONFIG_CRYPTO_WP512=m
187CONFIG_CRYPTO_ANUBIS=m
188CONFIG_CRYPTO_BLOWFISH=m
189CONFIG_CRYPTO_CAST5=m
190CONFIG_CRYPTO_CAST6=m
191CONFIG_CRYPTO_KHAZAD=m
192CONFIG_CRYPTO_SERPENT=m
193CONFIG_CRYPTO_TEA=m
194CONFIG_CRYPTO_TWOFISH=m
195# CONFIG_CRYPTO_ANSI_CPRNG is not set
196# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig
new file mode 100644
index 000000000000..8a666021b870
--- /dev/null
+++ b/arch/mips/configs/maltasmvp_defconfig
@@ -0,0 +1,199 @@
1CONFIG_MIPS_MALTA=y
2CONFIG_CPU_LITTLE_ENDIAN=y
3CONFIG_CPU_MIPS32_R2=y
4CONFIG_MIPS_MT_SMP=y
5CONFIG_SCHED_SMT=y
6CONFIG_MIPS_CMP=y
7CONFIG_NR_CPUS=8
8CONFIG_HZ_100=y
9CONFIG_LOCALVERSION="cmp"
10CONFIG_SYSVIPC=y
11CONFIG_POSIX_MQUEUE=y
12CONFIG_AUDIT=y
13CONFIG_NO_HZ=y
14CONFIG_IKCONFIG=y
15CONFIG_IKCONFIG_PROC=y
16CONFIG_LOG_BUF_SHIFT=15
17CONFIG_SYSCTL_SYSCALL=y
18CONFIG_EMBEDDED=y
19CONFIG_SLAB=y
20CONFIG_MODULES=y
21CONFIG_MODULE_UNLOAD=y
22CONFIG_MODVERSIONS=y
23CONFIG_MODULE_SRCVERSION_ALL=y
24# CONFIG_BLK_DEV_BSG is not set
25CONFIG_PCI=y
26# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
27CONFIG_NET=y
28CONFIG_PACKET=y
29CONFIG_UNIX=y
30CONFIG_XFRM_USER=m
31CONFIG_NET_KEY=y
32CONFIG_INET=y
33CONFIG_IP_MULTICAST=y
34CONFIG_IP_ADVANCED_ROUTER=y
35CONFIG_IP_MULTIPLE_TABLES=y
36CONFIG_IP_ROUTE_MULTIPATH=y
37CONFIG_IP_ROUTE_VERBOSE=y
38CONFIG_IP_PNP=y
39CONFIG_IP_PNP_DHCP=y
40CONFIG_IP_PNP_BOOTP=y
41CONFIG_NET_IPIP=m
42CONFIG_IP_MROUTE=y
43CONFIG_IP_PIMSM_V1=y
44CONFIG_IP_PIMSM_V2=y
45CONFIG_SYN_COOKIES=y
46CONFIG_INET_AH=m
47CONFIG_INET_ESP=m
48CONFIG_INET_IPCOMP=m
49# CONFIG_INET_LRO is not set
50CONFIG_IPV6_PRIVACY=y
51CONFIG_INET6_AH=m
52CONFIG_INET6_ESP=m
53CONFIG_INET6_IPCOMP=m
54CONFIG_IPV6_TUNNEL=m
55CONFIG_BRIDGE=m
56CONFIG_VLAN_8021Q=m
57CONFIG_ATALK=m
58CONFIG_DEV_APPLETALK=m
59CONFIG_IPDDP=m
60CONFIG_IPDDP_ENCAP=y
61CONFIG_IPDDP_DECAP=y
62CONFIG_NET_SCHED=y
63CONFIG_NET_SCH_CBQ=m
64CONFIG_NET_SCH_HTB=m
65CONFIG_NET_SCH_HFSC=m
66CONFIG_NET_SCH_PRIO=m
67CONFIG_NET_SCH_RED=m
68CONFIG_NET_SCH_SFQ=m
69CONFIG_NET_SCH_TEQL=m
70CONFIG_NET_SCH_TBF=m
71CONFIG_NET_SCH_GRED=m
72CONFIG_NET_SCH_DSMARK=m
73CONFIG_NET_SCH_NETEM=m
74CONFIG_NET_SCH_INGRESS=m
75CONFIG_NET_CLS_BASIC=m
76CONFIG_NET_CLS_TCINDEX=m
77CONFIG_NET_CLS_ROUTE4=m
78CONFIG_NET_CLS_FW=m
79CONFIG_NET_CLS_U32=m
80CONFIG_NET_CLS_RSVP=m
81CONFIG_NET_CLS_RSVP6=m
82CONFIG_NET_CLS_ACT=y
83CONFIG_NET_ACT_POLICE=y
84CONFIG_NET_CLS_IND=y
85# CONFIG_WIRELESS is not set
86CONFIG_BLK_DEV_LOOP=y
87CONFIG_BLK_DEV_CRYPTOLOOP=m
88CONFIG_IDE=y
89# CONFIG_IDE_PROC_FS is not set
90# CONFIG_IDEPCI_PCIBUS_ORDER is not set
91CONFIG_BLK_DEV_GENERIC=y
92CONFIG_BLK_DEV_PIIX=y
93CONFIG_SCSI=y
94CONFIG_BLK_DEV_SD=y
95CONFIG_CHR_DEV_SG=y
96# CONFIG_SCSI_LOWLEVEL is not set
97CONFIG_NETDEVICES=y
98# CONFIG_NET_VENDOR_3COM is not set
99# CONFIG_NET_VENDOR_ADAPTEC is not set
100# CONFIG_NET_VENDOR_ALTEON is not set
101CONFIG_PCNET32=y
102# CONFIG_NET_VENDOR_ATHEROS is not set
103# CONFIG_NET_VENDOR_BROADCOM is not set
104# CONFIG_NET_VENDOR_BROCADE is not set
105# CONFIG_NET_VENDOR_CHELSIO is not set
106# CONFIG_NET_VENDOR_CISCO is not set
107# CONFIG_NET_VENDOR_DEC is not set
108# CONFIG_NET_VENDOR_DLINK is not set
109# CONFIG_NET_VENDOR_EMULEX is not set
110# CONFIG_NET_VENDOR_EXAR is not set
111# CONFIG_NET_VENDOR_HP is not set
112# CONFIG_NET_VENDOR_INTEL is not set
113# CONFIG_NET_VENDOR_MARVELL is not set
114# CONFIG_NET_VENDOR_MELLANOX is not set
115# CONFIG_NET_VENDOR_MICREL is not set
116# CONFIG_NET_VENDOR_MYRI is not set
117# CONFIG_NET_VENDOR_NATSEMI is not set
118# CONFIG_NET_VENDOR_NVIDIA is not set
119# CONFIG_NET_VENDOR_OKI is not set
120# CONFIG_NET_PACKET_ENGINE is not set
121# CONFIG_NET_VENDOR_QLOGIC is not set
122# CONFIG_NET_VENDOR_REALTEK is not set
123# CONFIG_NET_VENDOR_RDC is not set
124# CONFIG_NET_VENDOR_SEEQ is not set
125# CONFIG_NET_VENDOR_SILAN is not set
126# CONFIG_NET_VENDOR_SIS is not set
127# CONFIG_NET_VENDOR_SMSC is not set
128# CONFIG_NET_VENDOR_STMICRO is not set
129# CONFIG_NET_VENDOR_SUN is not set
130# CONFIG_NET_VENDOR_TEHUTI is not set
131# CONFIG_NET_VENDOR_TI is not set
132# CONFIG_NET_VENDOR_TOSHIBA is not set
133# CONFIG_NET_VENDOR_VIA is not set
134# CONFIG_NET_VENDOR_WIZNET is not set
135# CONFIG_WLAN is not set
136# CONFIG_VT is not set
137CONFIG_LEGACY_PTY_COUNT=4
138CONFIG_SERIAL_8250=y
139CONFIG_SERIAL_8250_CONSOLE=y
140CONFIG_HW_RANDOM=y
141# CONFIG_HWMON is not set
142CONFIG_VIDEO_OUTPUT_CONTROL=m
143CONFIG_FB=y
144CONFIG_FIRMWARE_EDID=y
145CONFIG_FB_MATROX=y
146CONFIG_FB_MATROX_G=y
147CONFIG_USB=y
148CONFIG_USB_EHCI_HCD=y
149# CONFIG_USB_EHCI_TT_NEWSCHED is not set
150CONFIG_USB_UHCI_HCD=y
151CONFIG_USB_STORAGE=y
152CONFIG_NEW_LEDS=y
153CONFIG_LEDS_CLASS=y
154CONFIG_LEDS_TRIGGERS=y
155CONFIG_LEDS_TRIGGER_TIMER=y
156CONFIG_LEDS_TRIGGER_IDE_DISK=y
157CONFIG_LEDS_TRIGGER_HEARTBEAT=y
158CONFIG_LEDS_TRIGGER_BACKLIGHT=y
159CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
160CONFIG_RTC_CLASS=y
161CONFIG_RTC_DRV_CMOS=y
162CONFIG_EXT2_FS=y
163CONFIG_EXT3_FS=y
164# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
165CONFIG_XFS_FS=y
166CONFIG_XFS_QUOTA=y
167CONFIG_XFS_POSIX_ACL=y
168CONFIG_QUOTA=y
169CONFIG_QFMT_V2=y
170CONFIG_MSDOS_FS=m
171CONFIG_VFAT_FS=m
172CONFIG_PROC_KCORE=y
173CONFIG_TMPFS=y
174CONFIG_NFS_FS=y
175CONFIG_ROOT_NFS=y
176CONFIG_CIFS=m
177CONFIG_CIFS_WEAK_PW_HASH=y
178CONFIG_CIFS_XATTR=y
179CONFIG_CIFS_POSIX=y
180CONFIG_NLS_CODEPAGE_437=m
181CONFIG_NLS_ISO8859_1=m
182# CONFIG_FTRACE is not set
183CONFIG_CRYPTO_NULL=m
184CONFIG_CRYPTO_PCBC=m
185CONFIG_CRYPTO_HMAC=y
186CONFIG_CRYPTO_MICHAEL_MIC=m
187CONFIG_CRYPTO_SHA512=m
188CONFIG_CRYPTO_TGR192=m
189CONFIG_CRYPTO_WP512=m
190CONFIG_CRYPTO_ANUBIS=m
191CONFIG_CRYPTO_BLOWFISH=m
192CONFIG_CRYPTO_CAST5=m
193CONFIG_CRYPTO_CAST6=m
194CONFIG_CRYPTO_KHAZAD=m
195CONFIG_CRYPTO_SERPENT=m
196CONFIG_CRYPTO_TEA=m
197CONFIG_CRYPTO_TWOFISH=m
198# CONFIG_CRYPTO_ANSI_CPRNG is not set
199# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig
new file mode 100644
index 000000000000..9868fc9c1133
--- /dev/null
+++ b/arch/mips/configs/maltaup_defconfig
@@ -0,0 +1,194 @@
1CONFIG_MIPS_MALTA=y
2CONFIG_CPU_LITTLE_ENDIAN=y
3CONFIG_CPU_MIPS32_R2=y
4CONFIG_HZ_100=y
5CONFIG_LOCALVERSION="up"
6CONFIG_SYSVIPC=y
7CONFIG_POSIX_MQUEUE=y
8CONFIG_AUDIT=y
9CONFIG_NO_HZ=y
10CONFIG_IKCONFIG=y
11CONFIG_IKCONFIG_PROC=y
12CONFIG_LOG_BUF_SHIFT=15
13CONFIG_SYSCTL_SYSCALL=y
14CONFIG_EMBEDDED=y
15CONFIG_SLAB=y
16CONFIG_MODULES=y
17CONFIG_MODULE_UNLOAD=y
18CONFIG_MODVERSIONS=y
19CONFIG_MODULE_SRCVERSION_ALL=y
20# CONFIG_BLK_DEV_BSG is not set
21CONFIG_PCI=y
22# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
23CONFIG_NET=y
24CONFIG_PACKET=y
25CONFIG_UNIX=y
26CONFIG_XFRM_USER=m
27CONFIG_NET_KEY=y
28CONFIG_INET=y
29CONFIG_IP_MULTICAST=y
30CONFIG_IP_ADVANCED_ROUTER=y
31CONFIG_IP_MULTIPLE_TABLES=y
32CONFIG_IP_ROUTE_MULTIPATH=y
33CONFIG_IP_ROUTE_VERBOSE=y
34CONFIG_IP_PNP=y
35CONFIG_IP_PNP_DHCP=y
36CONFIG_IP_PNP_BOOTP=y
37CONFIG_NET_IPIP=m
38CONFIG_IP_MROUTE=y
39CONFIG_IP_PIMSM_V1=y
40CONFIG_IP_PIMSM_V2=y
41CONFIG_SYN_COOKIES=y
42CONFIG_INET_AH=m
43CONFIG_INET_ESP=m
44CONFIG_INET_IPCOMP=m
45# CONFIG_INET_LRO is not set
46CONFIG_IPV6_PRIVACY=y
47CONFIG_INET6_AH=m
48CONFIG_INET6_ESP=m
49CONFIG_INET6_IPCOMP=m
50CONFIG_IPV6_TUNNEL=m
51CONFIG_BRIDGE=m
52CONFIG_VLAN_8021Q=m
53CONFIG_ATALK=m
54CONFIG_DEV_APPLETALK=m
55CONFIG_IPDDP=m
56CONFIG_IPDDP_ENCAP=y
57CONFIG_IPDDP_DECAP=y
58CONFIG_NET_SCHED=y
59CONFIG_NET_SCH_CBQ=m
60CONFIG_NET_SCH_HTB=m
61CONFIG_NET_SCH_HFSC=m
62CONFIG_NET_SCH_PRIO=m
63CONFIG_NET_SCH_RED=m
64CONFIG_NET_SCH_SFQ=m
65CONFIG_NET_SCH_TEQL=m
66CONFIG_NET_SCH_TBF=m
67CONFIG_NET_SCH_GRED=m
68CONFIG_NET_SCH_DSMARK=m
69CONFIG_NET_SCH_NETEM=m
70CONFIG_NET_SCH_INGRESS=m
71CONFIG_NET_CLS_BASIC=m
72CONFIG_NET_CLS_TCINDEX=m
73CONFIG_NET_CLS_ROUTE4=m
74CONFIG_NET_CLS_FW=m
75CONFIG_NET_CLS_U32=m
76CONFIG_NET_CLS_RSVP=m
77CONFIG_NET_CLS_RSVP6=m
78CONFIG_NET_CLS_ACT=y
79CONFIG_NET_ACT_POLICE=y
80CONFIG_NET_CLS_IND=y
81# CONFIG_WIRELESS is not set
82CONFIG_BLK_DEV_LOOP=y
83CONFIG_BLK_DEV_CRYPTOLOOP=m
84CONFIG_IDE=y
85# CONFIG_IDE_PROC_FS is not set
86# CONFIG_IDEPCI_PCIBUS_ORDER is not set
87CONFIG_BLK_DEV_GENERIC=y
88CONFIG_BLK_DEV_PIIX=y
89CONFIG_SCSI=y
90CONFIG_BLK_DEV_SD=y
91CONFIG_CHR_DEV_SG=y
92# CONFIG_SCSI_LOWLEVEL is not set
93CONFIG_NETDEVICES=y
94# CONFIG_NET_VENDOR_3COM is not set
95# CONFIG_NET_VENDOR_ADAPTEC is not set
96# CONFIG_NET_VENDOR_ALTEON is not set
97CONFIG_PCNET32=y
98# CONFIG_NET_VENDOR_ATHEROS is not set
99# CONFIG_NET_VENDOR_BROADCOM is not set
100# CONFIG_NET_VENDOR_BROCADE is not set
101# CONFIG_NET_VENDOR_CHELSIO is not set
102# CONFIG_NET_VENDOR_CISCO is not set
103# CONFIG_NET_VENDOR_DEC is not set
104# CONFIG_NET_VENDOR_DLINK is not set
105# CONFIG_NET_VENDOR_EMULEX is not set
106# CONFIG_NET_VENDOR_EXAR is not set
107# CONFIG_NET_VENDOR_HP is not set
108# CONFIG_NET_VENDOR_INTEL is not set
109# CONFIG_NET_VENDOR_MARVELL is not set
110# CONFIG_NET_VENDOR_MELLANOX is not set
111# CONFIG_NET_VENDOR_MICREL is not set
112# CONFIG_NET_VENDOR_MYRI is not set
113# CONFIG_NET_VENDOR_NATSEMI is not set
114# CONFIG_NET_VENDOR_NVIDIA is not set
115# CONFIG_NET_VENDOR_OKI is not set
116# CONFIG_NET_PACKET_ENGINE is not set
117# CONFIG_NET_VENDOR_QLOGIC is not set
118# CONFIG_NET_VENDOR_REALTEK is not set
119# CONFIG_NET_VENDOR_RDC is not set
120# CONFIG_NET_VENDOR_SEEQ is not set
121# CONFIG_NET_VENDOR_SILAN is not set
122# CONFIG_NET_VENDOR_SIS is not set
123# CONFIG_NET_VENDOR_SMSC is not set
124# CONFIG_NET_VENDOR_STMICRO is not set
125# CONFIG_NET_VENDOR_SUN is not set
126# CONFIG_NET_VENDOR_TEHUTI is not set
127# CONFIG_NET_VENDOR_TI is not set
128# CONFIG_NET_VENDOR_TOSHIBA is not set
129# CONFIG_NET_VENDOR_VIA is not set
130# CONFIG_WLAN is not set
131# CONFIG_VT is not set
132CONFIG_LEGACY_PTY_COUNT=16
133CONFIG_SERIAL_8250=y
134CONFIG_SERIAL_8250_CONSOLE=y
135CONFIG_HW_RANDOM=y
136# CONFIG_HWMON is not set
137CONFIG_VIDEO_OUTPUT_CONTROL=m
138CONFIG_FB=y
139CONFIG_FIRMWARE_EDID=y
140CONFIG_FB_MATROX=y
141CONFIG_FB_MATROX_G=y
142CONFIG_USB=y
143CONFIG_USB_EHCI_HCD=y
144# CONFIG_USB_EHCI_TT_NEWSCHED is not set
145CONFIG_USB_UHCI_HCD=y
146CONFIG_USB_STORAGE=y
147CONFIG_NEW_LEDS=y
148CONFIG_LEDS_CLASS=y
149CONFIG_LEDS_TRIGGERS=y
150CONFIG_LEDS_TRIGGER_TIMER=y
151CONFIG_LEDS_TRIGGER_IDE_DISK=y
152CONFIG_LEDS_TRIGGER_HEARTBEAT=y
153CONFIG_LEDS_TRIGGER_BACKLIGHT=y
154CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
155CONFIG_RTC_CLASS=y
156CONFIG_RTC_DRV_CMOS=y
157CONFIG_EXT2_FS=y
158CONFIG_EXT3_FS=y
159# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
160CONFIG_XFS_FS=y
161CONFIG_XFS_QUOTA=y
162CONFIG_XFS_POSIX_ACL=y
163CONFIG_QUOTA=y
164CONFIG_QFMT_V2=y
165CONFIG_MSDOS_FS=m
166CONFIG_VFAT_FS=m
167CONFIG_PROC_KCORE=y
168CONFIG_TMPFS=y
169CONFIG_NFS_FS=y
170CONFIG_ROOT_NFS=y
171CONFIG_CIFS=m
172CONFIG_CIFS_WEAK_PW_HASH=y
173CONFIG_CIFS_XATTR=y
174CONFIG_CIFS_POSIX=y
175CONFIG_NLS_CODEPAGE_437=m
176CONFIG_NLS_ISO8859_1=m
177# CONFIG_FTRACE is not set
178CONFIG_CRYPTO_NULL=m
179CONFIG_CRYPTO_PCBC=m
180CONFIG_CRYPTO_HMAC=y
181CONFIG_CRYPTO_MICHAEL_MIC=m
182CONFIG_CRYPTO_SHA512=m
183CONFIG_CRYPTO_TGR192=m
184CONFIG_CRYPTO_WP512=m
185CONFIG_CRYPTO_ANUBIS=m
186CONFIG_CRYPTO_BLOWFISH=m
187CONFIG_CRYPTO_CAST5=m
188CONFIG_CRYPTO_CAST6=m
189CONFIG_CRYPTO_KHAZAD=m
190CONFIG_CRYPTO_SERPENT=m
191CONFIG_CRYPTO_TEA=m
192CONFIG_CRYPTO_TWOFISH=m
193# CONFIG_CRYPTO_ANSI_CPRNG is not set
194# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/sead3_defconfig b/arch/mips/configs/sead3_defconfig
index e3eec68d9132..0abe681c11a0 100644
--- a/arch/mips/configs/sead3_defconfig
+++ b/arch/mips/configs/sead3_defconfig
@@ -2,7 +2,6 @@ CONFIG_MIPS_SEAD3=y
2CONFIG_CPU_LITTLE_ENDIAN=y
3CONFIG_CPU_MIPS32_R2=y
4CONFIG_HZ_100=y
5CONFIG_EXPERIMENTAL=y
5CONFIG_SYSVIPC=y
6CONFIG_POSIX_MQUEUE=y
7CONFIG_NO_HZ=y
@@ -115,10 +114,8 @@ CONFIG_NLS_ISO8859_1=y
114CONFIG_NLS_ISO8859_15=y
115CONFIG_NLS_UTF8=y
116# CONFIG_FTRACE is not set
118CONFIG_CRYPTO=y
117CONFIG_CRYPTO_CBC=y
118CONFIG_CRYPTO_ECB=y
121CONFIG_CRYPTO_AES=y
119CONFIG_CRYPTO_ARC4=y
120# CONFIG_CRYPTO_ANSI_CPRNG is not set
121# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/sead3micro_defconfig b/arch/mips/configs/sead3micro_defconfig
new file mode 100644
index 000000000000..2a0da5bf4b64
--- /dev/null
+++ b/arch/mips/configs/sead3micro_defconfig
@@ -0,0 +1,122 @@
1CONFIG_MIPS_SEAD3=y
2CONFIG_CPU_LITTLE_ENDIAN=y
3CONFIG_CPU_MIPS32_R2=y
4CONFIG_CPU_MICROMIPS=y
5CONFIG_HZ_100=y
6CONFIG_SYSVIPC=y
7CONFIG_POSIX_MQUEUE=y
8CONFIG_NO_HZ=y
9CONFIG_HIGH_RES_TIMERS=y
10CONFIG_IKCONFIG=y
11CONFIG_IKCONFIG_PROC=y
12CONFIG_LOG_BUF_SHIFT=15
13CONFIG_EMBEDDED=y
14CONFIG_SLAB=y
15CONFIG_PROFILING=y
16CONFIG_OPROFILE=y
17CONFIG_MODULES=y
18# CONFIG_BLK_DEV_BSG is not set
19# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
20CONFIG_NET=y
21CONFIG_PACKET=y
22CONFIG_UNIX=y
23CONFIG_INET=y
24CONFIG_IP_PNP=y
25CONFIG_IP_PNP_DHCP=y
26CONFIG_IP_PNP_BOOTP=y
27# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
28# CONFIG_INET_XFRM_MODE_TUNNEL is not set
29# CONFIG_INET_XFRM_MODE_BEET is not set
30# CONFIG_INET_LRO is not set
31# CONFIG_INET_DIAG is not set
32# CONFIG_IPV6 is not set
33# CONFIG_WIRELESS is not set
34CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
35CONFIG_MTD=y
36CONFIG_MTD_CHAR=y
37CONFIG_MTD_BLOCK=y
38CONFIG_MTD_CFI=y
39CONFIG_MTD_CFI_INTELEXT=y
40CONFIG_MTD_PHYSMAP=y
41CONFIG_MTD_UBI=y
42CONFIG_MTD_UBI_GLUEBI=y
43CONFIG_BLK_DEV_LOOP=y
44CONFIG_BLK_DEV_CRYPTOLOOP=m
45CONFIG_SCSI=y
46# CONFIG_SCSI_PROC_FS is not set
47CONFIG_BLK_DEV_SD=y
48CONFIG_CHR_DEV_SG=y
49# CONFIG_SCSI_LOWLEVEL is not set
50CONFIG_NETDEVICES=y
51CONFIG_SMSC911X=y
52# CONFIG_NET_VENDOR_WIZNET is not set
53CONFIG_MARVELL_PHY=y
54CONFIG_DAVICOM_PHY=y
55CONFIG_QSEMI_PHY=y
56CONFIG_LXT_PHY=y
57CONFIG_CICADA_PHY=y
58CONFIG_VITESSE_PHY=y
59CONFIG_SMSC_PHY=y
60CONFIG_BROADCOM_PHY=y
61CONFIG_ICPLUS_PHY=y
62# CONFIG_WLAN is not set
63# CONFIG_INPUT_MOUSEDEV is not set
64# CONFIG_INPUT_KEYBOARD is not set
65# CONFIG_INPUT_MOUSE is not set
66# CONFIG_SERIO is not set
67# CONFIG_CONSOLE_TRANSLATIONS is not set
68CONFIG_VT_HW_CONSOLE_BINDING=y
69CONFIG_LEGACY_PTY_COUNT=32
70CONFIG_SERIAL_8250=y
71CONFIG_SERIAL_8250_CONSOLE=y
72CONFIG_SERIAL_8250_NR_UARTS=2
73CONFIG_SERIAL_8250_RUNTIME_UARTS=2
74# CONFIG_HW_RANDOM is not set
75CONFIG_I2C=y
76# CONFIG_I2C_COMPAT is not set
77CONFIG_I2C_CHARDEV=y
78# CONFIG_I2C_HELPER_AUTO is not set
79CONFIG_SPI=y
80CONFIG_SENSORS_ADT7475=y
81CONFIG_BACKLIGHT_LCD_SUPPORT=y
82CONFIG_LCD_CLASS_DEVICE=y
83CONFIG_BACKLIGHT_CLASS_DEVICE=y
84# CONFIG_VGA_CONSOLE is not set
85CONFIG_USB=y
86CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
87CONFIG_USB_EHCI_HCD=y
88CONFIG_USB_EHCI_ROOT_HUB_TT=y
89CONFIG_USB_STORAGE=y
90CONFIG_MMC=y
91CONFIG_MMC_DEBUG=y
92CONFIG_MMC_SPI=y
93CONFIG_NEW_LEDS=y
94CONFIG_LEDS_CLASS=y
95CONFIG_LEDS_TRIGGERS=y
96CONFIG_LEDS_TRIGGER_HEARTBEAT=y
97CONFIG_RTC_CLASS=y
98CONFIG_RTC_DRV_M41T80=y
99CONFIG_EXT3_FS=y
100# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
101CONFIG_XFS_FS=y
102CONFIG_XFS_QUOTA=y
103CONFIG_XFS_POSIX_ACL=y
104CONFIG_QUOTA=y
105# CONFIG_PRINT_QUOTA_WARNING is not set
106CONFIG_MSDOS_FS=m
107CONFIG_VFAT_FS=m
108CONFIG_TMPFS=y
109CONFIG_JFFS2_FS=y
110CONFIG_NFS_FS=y
111CONFIG_ROOT_NFS=y
112CONFIG_NLS_CODEPAGE_437=y
113CONFIG_NLS_ASCII=y
114CONFIG_NLS_ISO8859_1=y
115CONFIG_NLS_ISO8859_15=y
116CONFIG_NLS_UTF8=y
117# CONFIG_FTRACE is not set
118CONFIG_CRYPTO_CBC=y
119CONFIG_CRYPTO_ECB=y
120CONFIG_CRYPTO_ARC4=y
121# CONFIG_CRYPTO_ANSI_CPRNG is not set
122# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/fw/lib/Makefile b/arch/mips/fw/lib/Makefile
index 84befc968fc4..529150516777 100644
--- a/arch/mips/fw/lib/Makefile
+++ b/arch/mips/fw/lib/Makefile
@@ -2,4 +2,6 @@
2# Makefile for generic prom monitor library routines under Linux.
3#
4
5lib-y += cmdline.o
6
7lib-$(CONFIG_64BIT) += call_o32.o
diff --git a/arch/mips/fw/lib/cmdline.c b/arch/mips/fw/lib/cmdline.c
new file mode 100644
index 000000000000..ffd0345780ae
--- /dev/null
+++ b/arch/mips/fw/lib/cmdline.c
@@ -0,0 +1,101 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7 */
8#include <linux/init.h>
9#include <linux/kernel.h>
10#include <linux/string.h>
11
12#include <asm/addrspace.h>
13#include <asm/fw/fw.h>
14
15int fw_argc;
16int *_fw_argv;
17int *_fw_envp;
18
19void __init fw_init_cmdline(void)
20{
21 int i;
22
23 /* Validate command line parameters. */
24 if ((fw_arg0 >= CKSEG0) || (fw_arg1 < CKSEG0)) {
25 fw_argc = 0;
26 _fw_argv = NULL;
27 } else {
28 fw_argc = (fw_arg0 & 0x0000ffff);
29 _fw_argv = (int *)fw_arg1;
30 }
31
32 /* Validate environment pointer. */
33 if (fw_arg2 < CKSEG0)
34 _fw_envp = NULL;
35 else
36 _fw_envp = (int *)fw_arg2;
37
38 for (i = 1; i < fw_argc; i++) {
39 strlcat(arcs_cmdline, fw_argv(i), COMMAND_LINE_SIZE);
40 if (i < (fw_argc - 1))
41 strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
42 }
43}
44
45char * __init fw_getcmdline(void)
46{
47 return &(arcs_cmdline[0]);
48}
49
50char *fw_getenv(char *envname)
51{
52 char *result = NULL;
53
54 if (_fw_envp != NULL) {
55 /*
56 * Return a pointer to the given environment variable.
57 * YAMON uses "name", "value" pairs, while U-Boot uses
58 * "name=value".
59 */
60 int i, yamon, index = 0;
61
62 yamon = (strchr(fw_envp(index), '=') == NULL);
63 i = strlen(envname);
64
65 while (fw_envp(index)) {
66 if (strncmp(envname, fw_envp(index), i) == 0) {
67 if (yamon) {
68 result = fw_envp(index + 1);
69 break;
70 } else if (fw_envp(index)[i] == '=') {
71 result = (fw_envp(index + 1) + i);
72 break;
73 }
74 }
75
76 /* Increment array index. */
77 if (yamon)
78 index += 2;
79 else
80 index += 1;
81 }
82 }
83
84 return result;
85}
86
87unsigned long fw_getenvl(char *envname)
88{
89 unsigned long envl = 0UL;
90 char *str;
91 long val;
92 int tmp;
93
94 str = fw_getenv(envname);
95 if (str) {
96 tmp = kstrtol(str, 0, &val);
97 envl = (unsigned long)val;
98 }
99
100 return envl;
101}
diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h
index 164a21e65b42..879691d194af 100644
--- a/arch/mips/include/asm/asm.h
+++ b/arch/mips/include/asm/asm.h
@@ -296,6 +296,7 @@ symbol = value
296#define LONG_SUBU subu
297#define LONG_L lw
298#define LONG_S sw
299#define LONG_SP swp
300#define LONG_SLL sll
301#define LONG_SLLV sllv
302#define LONG_SRL srl
@@ -318,6 +319,7 @@ symbol = value
319#define LONG_SUBU dsubu
320#define LONG_L ld
321#define LONG_S sd
322#define LONG_SP sdp
323#define LONG_SLL dsll
324#define LONG_SLLV dsllv
325#define LONG_SRL dsrl
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index b71dd5b16085..4d2cdea5aa37 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -104,6 +104,7 @@ struct boot_mem_map {
104extern struct boot_mem_map boot_mem_map;
105
106extern void add_memory_region(phys_t start, phys_t size, long type);
107extern void detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max);
108
109extern void prom_init(void);
110extern void prom_free_prom_memory(void);
diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h
index 888766ae1f85..e28a3e0eb3cb 100644
--- a/arch/mips/include/asm/branch.h
+++ b/arch/mips/include/asm/branch.h
@@ -11,6 +11,14 @@
11#include <asm/ptrace.h>
12#include <asm/inst.h>
13
14extern int __isa_exception_epc(struct pt_regs *regs);
15extern int __compute_return_epc(struct pt_regs *regs);
16extern int __compute_return_epc_for_insn(struct pt_regs *regs,
17 union mips_instruction insn);
18extern int __microMIPS_compute_return_epc(struct pt_regs *regs);
19extern int __MIPS16e_compute_return_epc(struct pt_regs *regs);
20
21
22static inline int delay_slot(struct pt_regs *regs)
23{
24 return regs->cp0_cause & CAUSEF_BD;
@@ -18,20 +26,27 @@ static inline int delay_slot(struct pt_regs *regs)
26
27static inline unsigned long exception_epc(struct pt_regs *regs)
28{
21 if (!delay_slot(regs))
29 if (likely(!delay_slot(regs)))
30 return regs->cp0_epc;
31
32 if (get_isa16_mode(regs->cp0_epc))
33 return __isa_exception_epc(regs);
34
35 return regs->cp0_epc + 4;
36}
37
38#define BRANCH_LIKELY_TAKEN 0x0001
39
29extern int __compute_return_epc(struct pt_regs *regs);
30extern int __compute_return_epc_for_insn(struct pt_regs *regs,
31 union mips_instruction insn);
32
40static inline int compute_return_epc(struct pt_regs *regs)
41{
42 if (get_isa16_mode(regs->cp0_epc)) {
43 if (cpu_has_mmips)
44 return __microMIPS_compute_return_epc(regs);
45 if (cpu_has_mips16)
46 return __MIPS16e_compute_return_epc(regs);
47 return regs->cp0_epc;
48 }
49
50 if (!delay_slot(regs)) {
51 regs->cp0_epc += 4;
52 return 0;
@@ -40,4 +55,19 @@ static inline int compute_return_epc(struct pt_regs *regs)
55 return __compute_return_epc(regs);
56}
57
58static inline int MIPS16e_compute_return_epc(struct pt_regs *regs,
59 union mips16e_instruction *inst)
60{
61 if (likely(!delay_slot(regs))) {
62 if (inst->ri.opcode == MIPS16e_extend_op) {
63 regs->cp0_epc += 4;
64 return 0;
65 }
66 regs->cp0_epc += 2;
67 return 0;
68 }
69
70 return __MIPS16e_compute_return_epc(regs);
71}
72
43#endif /* _ASM_BRANCH_H */ 73#endif /* _ASM_BRANCH_H */
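The reworked compute_return_epc() above keys off the ISA bit (bit 0) of the EPC to decide whether it is stepping over classic 4-byte MIPS32/64 instructions or the variable-length microMIPS/MIPS16e encodings. A standalone sketch of that dispatch follows; fake_regs, isa16_mode() and BD_BIT are illustrative stand-ins for pt_regs, get_isa16_mode() and CAUSEF_BD, not kernel code.

/*
 * Standalone sketch only; not the kernel implementation.
 */
#include <stdio.h>

struct fake_regs {
	unsigned long cp0_epc;
	unsigned long cp0_cause;
};

#define BD_BIT (1ul << 31)	/* branch-delay flag, like CAUSEF_BD */

static int isa16_mode(unsigned long epc)
{
	return epc & 0x1;	/* microMIPS/MIPS16e set bit 0 of the EPC */
}

static void advance_epc(struct fake_regs *regs)
{
	if (isa16_mode(regs->cp0_epc)) {
		/* compressed ISAs need a real decoder for 16/32-bit insns */
		printf("would decode microMIPS/MIPS16e insn at %#lx\n",
		       regs->cp0_epc & ~1ul);
		return;
	}
	if (!(regs->cp0_cause & BD_BIT)) {
		regs->cp0_epc += 4;	/* classic MIPS: fixed 4-byte step */
		return;
	}
	/* in a delay slot the kernel emulates the branch instead */
	printf("delay slot: would call __compute_return_epc()\n");
}

int main(void)
{
	struct fake_regs r = { .cp0_epc = 0x80001000, .cp0_cause = 0 };

	advance_epc(&r);
	printf("epc now %#lx\n", r.cp0_epc);
	return 0;
}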
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 1a57e8b4d092..e5ec8fcd8afa 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -113,6 +113,9 @@
113#ifndef cpu_has_pindexed_dcache 113#ifndef cpu_has_pindexed_dcache
114#define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX) 114#define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX)
115#endif 115#endif
116#ifndef cpu_has_local_ebase
117#define cpu_has_local_ebase 1
118#endif
116 119
117/* 120/*
118 * I-Cache snoops remote store. This only matters on SMP. Some multiprocessors 121 * I-Cache snoops remote store. This only matters on SMP. Some multiprocessors
diff --git a/arch/mips/include/asm/dma-coherence.h b/arch/mips/include/asm/dma-coherence.h
new file mode 100644
index 000000000000..242cbb3ca582
--- /dev/null
+++ b/arch/mips/include/asm/dma-coherence.h
@@ -0,0 +1,15 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
7 *
8 */
9#ifndef __ASM_DMA_COHERENCE_H
10#define __ASM_DMA_COHERENCE_H
11
12extern int coherentio;
13extern int hw_coherentio;
14
15#endif
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index f8fc74b6cb47..84238c574d5e 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -2,6 +2,7 @@
2#define _ASM_DMA_MAPPING_H 2#define _ASM_DMA_MAPPING_H
3 3
4#include <asm/scatterlist.h> 4#include <asm/scatterlist.h>
5#include <asm/dma-coherence.h>
5#include <asm/cache.h> 6#include <asm/cache.h>
6#include <asm-generic/dma-coherent.h> 7#include <asm-generic/dma-coherent.h>
7 8
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index 3b4092705567..2abb587d5ab4 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -54,6 +54,12 @@ do { \
54extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir, 54extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
55 unsigned long cpc); 55 unsigned long cpc);
56extern int do_dsemulret(struct pt_regs *xcp); 56extern int do_dsemulret(struct pt_regs *xcp);
57extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
58 struct mips_fpu_struct *ctx, int has_fpu,
59 void *__user *fault_addr);
60int process_fpemu_return(int sig, void __user *fault_addr);
61int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
62 unsigned long *contpc);
57 63
58/* 64/*
59 * Instruction inserted following the badinst to further tag the sequence 65 * Instruction inserted following the badinst to further tag the sequence
diff --git a/arch/mips/include/asm/fw/fw.h b/arch/mips/include/asm/fw/fw.h
new file mode 100644
index 000000000000..d6c50a7e9ede
--- /dev/null
+++ b/arch/mips/include/asm/fw/fw.h
@@ -0,0 +1,47 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2012 MIPS Technologies, Inc.
7 */
8#ifndef __ASM_FW_H_
9#define __ASM_FW_H_
10
11#include <asm/bootinfo.h> /* For cleaner code... */
12
13enum fw_memtypes {
14 fw_dontuse,
15 fw_code,
16 fw_free,
17};
18
19typedef struct {
20 unsigned long base; /* Within KSEG0 */
21 unsigned int size; /* bytes */
22 enum fw_memtypes type; /* fw_memtypes */
23} fw_memblock_t;
24
25/* Maximum number of memory block descriptors. */
26#define FW_MAX_MEMBLOCKS 32
27
28extern int fw_argc;
29extern int *_fw_argv;
30extern int *_fw_envp;
31
32/*
33 * Most firmware, such as YAMON and PMON, passes arguments and environment
34 * variables as 32-bit pointers. These macros take care of the sign extension.
35 */
36#define fw_argv(index) ((char *)(long)_fw_argv[(index)])
37#define fw_envp(index) ((char *)(long)_fw_envp[(index)])
38
39extern void fw_init_cmdline(void);
40extern char *fw_getcmdline(void);
41extern fw_memblock_t *fw_getmdesc(void);
42extern void fw_meminit(void);
43extern char *fw_getenv(char *name);
44extern unsigned long fw_getenvl(char *name);
45extern void fw_init_early_console(char port);
46
47#endif /* __ASM_FW_H_ */
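The fw_argv()/fw_envp() macros above rely on a (char *)(long) double cast: the 32-bit value handed over by the firmware is first sign-extended to long and only then turned into a pointer, which recreates the canonical 64-bit form of a KSEG0 address. A minimal user-space illustration of the same idiom (the value 0x80002000 is made up):

#include <stdio.h>

/* Same double cast as fw_argv()/fw_envp(): int -> long sign-extends,
 * long -> pointer keeps the extended bits. */
#define to_ptr(v32)	((char *)(long)(v32))

int main(void)
{
	int fw_val = (int)0x80002000;	/* 32-bit KSEG0 pointer from firmware */

	/*
	 * On a 64-bit build this prints 0xffffffff80002000, the properly
	 * sign-extended KSEG0 address; zero-extending instead would give
	 * the bogus 0x0000000080002000.
	 */
	printf("%p\n", (void *)to_ptr(fw_val));
	return 0;
}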
diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h
index bdc9786ab5a7..7153b32de18e 100644
--- a/arch/mips/include/asm/gic.h
+++ b/arch/mips/include/asm/gic.h
@@ -202,7 +202,7 @@
202#define GIC_VPE_WD_COUNT0_OFS 0x0094 202#define GIC_VPE_WD_COUNT0_OFS 0x0094
203#define GIC_VPE_WD_INITIAL0_OFS 0x0098 203#define GIC_VPE_WD_INITIAL0_OFS 0x0098
204#define GIC_VPE_COMPARE_LO_OFS 0x00a0 204#define GIC_VPE_COMPARE_LO_OFS 0x00a0
205#define GIC_VPE_COMPARE_HI 0x00a4 205#define GIC_VPE_COMPARE_HI_OFS 0x00a4
206 206
207#define GIC_VPE_EIC_SHADOW_SET_BASE 0x0100 207#define GIC_VPE_EIC_SHADOW_SET_BASE 0x0100
208#define GIC_VPE_EIC_SS(intr) \ 208#define GIC_VPE_EIC_SS(intr) \
@@ -359,7 +359,11 @@ struct gic_shared_intr_map {
359/* Map an interrupt to pin X; the GIC will then generate the vector (X+1). */ 359/* Map an interrupt to pin X; the GIC will then generate the vector (X+1). */
360#define GIC_PIN_TO_VEC_OFFSET (1) 360#define GIC_PIN_TO_VEC_OFFSET (1)
361 361
362extern int gic_present; 362#include <linux/clocksource.h>
363#include <linux/irq.h>
364
365extern unsigned int gic_present;
366extern unsigned int gic_frequency;
363extern unsigned long _gic_base; 367extern unsigned long _gic_base;
364extern unsigned int gic_irq_base; 368extern unsigned int gic_irq_base;
365extern unsigned int gic_irq_flags[]; 369extern unsigned int gic_irq_flags[];
@@ -368,18 +372,20 @@ extern struct gic_shared_intr_map gic_shared_intr_map[];
368extern void gic_init(unsigned long gic_base_addr, 372extern void gic_init(unsigned long gic_base_addr,
369 unsigned long gic_addrspace_size, struct gic_intr_map *intrmap, 373 unsigned long gic_addrspace_size, struct gic_intr_map *intrmap,
370 unsigned int intrmap_size, unsigned int irqbase); 374 unsigned int intrmap_size, unsigned int irqbase);
371
372extern void gic_clocksource_init(unsigned int); 375extern void gic_clocksource_init(unsigned int);
373extern unsigned int gic_get_int(void); 376extern unsigned int gic_compare_int (void);
377extern cycle_t gic_read_count(void);
378extern cycle_t gic_read_compare(void);
379extern void gic_write_compare(cycle_t cnt);
374extern void gic_send_ipi(unsigned int intr); 380extern void gic_send_ipi(unsigned int intr);
375extern unsigned int plat_ipi_call_int_xlate(unsigned int); 381extern unsigned int plat_ipi_call_int_xlate(unsigned int);
376extern unsigned int plat_ipi_resched_int_xlate(unsigned int); 382extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
377extern void gic_bind_eic_interrupt(int irq, int set); 383extern void gic_bind_eic_interrupt(int irq, int set);
378extern unsigned int gic_get_timer_pending(void); 384extern unsigned int gic_get_timer_pending(void);
385extern unsigned int gic_get_int(void);
379extern void gic_enable_interrupt(int irq_vec); 386extern void gic_enable_interrupt(int irq_vec);
380extern void gic_disable_interrupt(int irq_vec); 387extern void gic_disable_interrupt(int irq_vec);
381extern void gic_irq_ack(struct irq_data *d); 388extern void gic_irq_ack(struct irq_data *d);
382extern void gic_finish_irq(struct irq_data *d); 389extern void gic_finish_irq(struct irq_data *d);
383extern void gic_platform_init(int irqs, struct irq_chip *irq_controller); 390extern void gic_platform_init(int irqs, struct irq_chip *irq_controller);
384
385#endif /* _ASM_GICREGS_H */ 391#endif /* _ASM_GICREGS_H */
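gic_read_count(), gic_write_compare() and gic_frequency above are the hooks the new GIC clocksource/clockevent code builds on. A rough kernel-side sketch of how a clocksource read callback could wrap gic_read_count(), assuming the 3.10-era clocksource API; this is not the actual driver:

#include <linux/clocksource.h>
#include <asm/gic.h>

/* Illustrative only; the real code lives in the GIC clocksource driver. */
static cycle_t gic_hpt_read(struct clocksource *cs)
{
	return gic_read_count();	/* 64-bit shared GIC counter */
}

static struct clocksource gic_clocksource_sketch = {
	.name	= "GIC-sketch",
	.read	= gic_hpt_read,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
 * Registration would then look roughly like:
 *	clocksource_register_hz(&gic_clocksource_sketch, gic_frequency);
 */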
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index 44d6a5bde4a1..e3ee92d4dbe7 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -10,34 +10,13 @@
10#ifndef _ASM_HAZARDS_H 10#ifndef _ASM_HAZARDS_H
11#define _ASM_HAZARDS_H 11#define _ASM_HAZARDS_H
12 12
13#ifdef __ASSEMBLY__ 13#include <linux/stringify.h>
14#define ASMMACRO(name, code...) .macro name; code; .endm
15#else
16
17#include <asm/cpu-features.h>
18
19#define ASMMACRO(name, code...) \
20__asm__(".macro " #name "; " #code "; .endm"); \
21 \
22static inline void name(void) \
23{ \
24 __asm__ __volatile__ (#name); \
25}
26
27/*
28 * MIPS R2 instruction hazard barrier. Needs to be called as a subroutine.
29 */
30extern void mips_ihb(void);
31
32#endif
33 14
34ASMMACRO(_ssnop, 15#define ___ssnop \
35 sll $0, $0, 1 16 sll $0, $0, 1
36 )
37 17
38ASMMACRO(_ehb, 18#define ___ehb \
39 sll $0, $0, 3 19 sll $0, $0, 3
40 )
41 20
42/* 21/*
43 * TLB hazards 22 * TLB hazards
@@ -48,24 +27,24 @@ ASMMACRO(_ehb,
48 * MIPSR2 defines ehb for hazard avoidance 27 * MIPSR2 defines ehb for hazard avoidance
49 */ 28 */
50 29
51ASMMACRO(mtc0_tlbw_hazard, 30#define __mtc0_tlbw_hazard \
52 _ehb 31 ___ehb
53 ) 32
54ASMMACRO(tlbw_use_hazard, 33#define __tlbw_use_hazard \
55 _ehb 34 ___ehb
56 ) 35
57ASMMACRO(tlb_probe_hazard, 36#define __tlb_probe_hazard \
58 _ehb 37 ___ehb
59 ) 38
60ASMMACRO(irq_enable_hazard, 39#define __irq_enable_hazard \
61 _ehb 40 ___ehb
62 ) 41
63ASMMACRO(irq_disable_hazard, 42#define __irq_disable_hazard \
64 _ehb 43 ___ehb
65 ) 44
66ASMMACRO(back_to_back_c0_hazard, 45#define __back_to_back_c0_hazard \
67 _ehb 46 ___ehb
68 ) 47
69/* 48/*
70 * gcc has a tradition of miscompiling the previous construct using the 49 * gcc has a tradition of miscompiling the previous construct using the
71 * address of a label as argument to inline assembler. Gas otoh has the 50 * address of a label as argument to inline assembler. Gas otoh has the
@@ -94,24 +73,42 @@ do { \
94 * These are slightly complicated by the fact that we guarantee R1 kernels to 73 * These are slightly complicated by the fact that we guarantee R1 kernels to
95 * run fine on R2 processors. 74 * run fine on R2 processors.
96 */ 75 */
97ASMMACRO(mtc0_tlbw_hazard, 76
98 _ssnop; _ssnop; _ehb 77#define __mtc0_tlbw_hazard \
99 ) 78 ___ssnop; \
100ASMMACRO(tlbw_use_hazard, 79 ___ssnop; \
101 _ssnop; _ssnop; _ssnop; _ehb 80 ___ehb
102 ) 81
103ASMMACRO(tlb_probe_hazard, 82#define __tlbw_use_hazard \
104 _ssnop; _ssnop; _ssnop; _ehb 83 ___ssnop; \
105 ) 84 ___ssnop; \
106ASMMACRO(irq_enable_hazard, 85 ___ssnop; \
107 _ssnop; _ssnop; _ssnop; _ehb 86 ___ehb
108 ) 87
109ASMMACRO(irq_disable_hazard, 88#define __tlb_probe_hazard \
110 _ssnop; _ssnop; _ssnop; _ehb 89 ___ssnop; \
111 ) 90 ___ssnop; \
112ASMMACRO(back_to_back_c0_hazard, 91 ___ssnop; \
113 _ssnop; _ssnop; _ssnop; _ehb 92 ___ehb
114 ) 93
94#define __irq_enable_hazard \
95 ___ssnop; \
96 ___ssnop; \
97 ___ssnop; \
98 ___ehb
99
100#define __irq_disable_hazard \
101 ___ssnop; \
102 ___ssnop; \
103 ___ssnop; \
104 ___ehb
105
106#define __back_to_back_c0_hazard \
107 ___ssnop; \
108 ___ssnop; \
109 ___ssnop; \
110 ___ehb
111
115/* 112/*
116 * gcc has a tradition of miscompiling the previous construct using the 113 * gcc has a tradition of miscompiling the previous construct using the
117 * address of a label as argument to inline assembler. Gas otoh has the 114 * address of a label as argument to inline assembler. Gas otoh has the
@@ -147,18 +144,18 @@ do { \
147 * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer. 144 * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
148 */ 145 */
149 146
150ASMMACRO(mtc0_tlbw_hazard, 147#define __mtc0_tlbw_hazard
151 ) 148
152ASMMACRO(tlbw_use_hazard, 149#define __tlbw_use_hazard
153 ) 150
154ASMMACRO(tlb_probe_hazard, 151#define __tlb_probe_hazard
155 ) 152
156ASMMACRO(irq_enable_hazard, 153#define __irq_enable_hazard
157 ) 154
158ASMMACRO(irq_disable_hazard, 155#define __irq_disable_hazard
159 ) 156
160ASMMACRO(back_to_back_c0_hazard, 157#define __back_to_back_c0_hazard
161 ) 158
162#define instruction_hazard() do { } while (0) 159#define instruction_hazard() do { } while (0)
163 160
164#elif defined(CONFIG_CPU_SB1) 161#elif defined(CONFIG_CPU_SB1)
@@ -166,19 +163,21 @@ ASMMACRO(back_to_back_c0_hazard,
166/* 163/*
167 * Mostly like R4000 for historic reasons 164 * Mostly like R4000 for historic reasons
168 */ 165 */
169ASMMACRO(mtc0_tlbw_hazard, 166#define __mtc0_tlbw_hazard
170 ) 167
171ASMMACRO(tlbw_use_hazard, 168#define __tlbw_use_hazard
172 ) 169
173ASMMACRO(tlb_probe_hazard, 170#define __tlb_probe_hazard
174 ) 171
175ASMMACRO(irq_enable_hazard, 172#define __irq_enable_hazard
176 ) 173
177ASMMACRO(irq_disable_hazard, 174#define __irq_disable_hazard \
178 _ssnop; _ssnop; _ssnop 175 ___ssnop; \
179 ) 176 ___ssnop; \
180ASMMACRO(back_to_back_c0_hazard, 177 ___ssnop
181 ) 178
179#define __back_to_back_c0_hazard
180
182#define instruction_hazard() do { } while (0) 181#define instruction_hazard() do { } while (0)
183 182
184#else 183#else
@@ -192,24 +191,35 @@ ASMMACRO(back_to_back_c0_hazard,
192 * hazard so this is nice trick to have an optimal code for a range of 191 * hazard so this is nice trick to have an optimal code for a range of
193 * processors. 192 * processors.
194 */ 193 */
195ASMMACRO(mtc0_tlbw_hazard, 194#define __mtc0_tlbw_hazard \
196 nop; nop 195 nop; \
197 ) 196 nop
198ASMMACRO(tlbw_use_hazard, 197
199 nop; nop; nop 198#define __tlbw_use_hazard \
200 ) 199 nop; \
201ASMMACRO(tlb_probe_hazard, 200 nop; \
202 nop; nop; nop 201 nop
203 ) 202
204ASMMACRO(irq_enable_hazard, 203#define __tlb_probe_hazard \
205 _ssnop; _ssnop; _ssnop; 204 nop; \
206 ) 205 nop; \
207ASMMACRO(irq_disable_hazard, 206 nop
208 nop; nop; nop 207
209 ) 208#define __irq_enable_hazard \
210ASMMACRO(back_to_back_c0_hazard, 209 ___ssnop; \
211 _ssnop; _ssnop; _ssnop; 210 ___ssnop; \
212 ) 211 ___ssnop
212
213#define __irq_disable_hazard \
214 nop; \
215 nop; \
216 nop
217
218#define __back_to_back_c0_hazard \
219 ___ssnop; \
220 ___ssnop; \
221 ___ssnop
222
213#define instruction_hazard() do { } while (0) 223#define instruction_hazard() do { } while (0)
214 224
215#endif 225#endif
@@ -218,32 +228,137 @@ ASMMACRO(back_to_back_c0_hazard,
218/* FPU hazards */ 228/* FPU hazards */
219 229
220#if defined(CONFIG_CPU_SB1) 230#if defined(CONFIG_CPU_SB1)
221ASMMACRO(enable_fpu_hazard, 231
222 .set push; 232#define __enable_fpu_hazard \
223 .set mips64; 233 .set push; \
224 .set noreorder; 234 .set mips64; \
225 _ssnop; 235 .set noreorder; \
226 bnezl $0, .+4; 236 ___ssnop; \
227 _ssnop; 237 bnezl $0, .+4; \
228 .set pop 238 ___ssnop; \
229) 239 .set pop
230ASMMACRO(disable_fpu_hazard, 240
231) 241#define __disable_fpu_hazard
232 242
233#elif defined(CONFIG_CPU_MIPSR2) 243#elif defined(CONFIG_CPU_MIPSR2)
234ASMMACRO(enable_fpu_hazard, 244
235 _ehb 245#define __enable_fpu_hazard \
236) 246 ___ehb
237ASMMACRO(disable_fpu_hazard, 247
238 _ehb 248#define __disable_fpu_hazard \
239) 249 ___ehb
250
240#else 251#else
241ASMMACRO(enable_fpu_hazard, 252
242 nop; nop; nop; nop 253#define __enable_fpu_hazard \
243) 254 nop; \
244ASMMACRO(disable_fpu_hazard, 255 nop; \
245 _ehb 256 nop; \
246) 257 nop
258
259#define __disable_fpu_hazard \
260 ___ehb
261
247#endif 262#endif
248 263
264#ifdef __ASSEMBLY__
265
266#define _ssnop ___ssnop
267#define _ehb ___ehb
268#define mtc0_tlbw_hazard __mtc0_tlbw_hazard
269#define tlbw_use_hazard __tlbw_use_hazard
270#define tlb_probe_hazard __tlb_probe_hazard
271#define irq_enable_hazard __irq_enable_hazard
272#define irq_disable_hazard __irq_disable_hazard
273#define back_to_back_c0_hazard __back_to_back_c0_hazard
274#define enable_fpu_hazard __enable_fpu_hazard
275#define disable_fpu_hazard __disable_fpu_hazard
276
277#else
278
279#define _ssnop() \
280do { \
281 __asm__ __volatile__( \
282 __stringify(___ssnop) \
283 ); \
284} while (0)
285
286#define _ehb() \
287do { \
288 __asm__ __volatile__( \
289 __stringify(___ehb) \
290 ); \
291} while (0)
292
293
294#define mtc0_tlbw_hazard() \
295do { \
296 __asm__ __volatile__( \
297 __stringify(__mtc0_tlbw_hazard) \
298 ); \
299} while (0)
300
301
302#define tlbw_use_hazard() \
303do { \
304 __asm__ __volatile__( \
305 __stringify(__tlbw_use_hazard) \
306 ); \
307} while (0)
308
309
310#define tlb_probe_hazard() \
311do { \
312 __asm__ __volatile__( \
313 __stringify(__tlb_probe_hazard) \
314 ); \
315} while (0)
316
317
318#define irq_enable_hazard() \
319do { \
320 __asm__ __volatile__( \
321 __stringify(__irq_enable_hazard) \
322 ); \
323} while (0)
324
325
326#define irq_disable_hazard() \
327do { \
328 __asm__ __volatile__( \
329 __stringify(__irq_disable_hazard) \
330 ); \
331} while (0)
332
333
334#define back_to_back_c0_hazard() \
335do { \
336 __asm__ __volatile__( \
337 __stringify(__back_to_back_c0_hazard) \
338 ); \
339} while (0)
340
341
342#define enable_fpu_hazard() \
343do { \
344 __asm__ __volatile__( \
345 __stringify(__enable_fpu_hazard) \
346 ); \
347} while (0)
348
349
350#define disable_fpu_hazard() \
351do { \
352 __asm__ __volatile__( \
353 __stringify(__disable_fpu_hazard) \
354 ); \
355} while (0)
356
357/*
358 * MIPS R2 instruction hazard barrier. Needs to be called as a subroutine.
359 */
360extern void mips_ihb(void);
361
362#endif /* __ASSEMBLY__ */
363
249#endif /* _ASM_HAZARDS_H */ 364#endif /* _ASM_HAZARDS_H */
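The hazards.h rewrite above replaces the ASMMACRO() construct with ordinary preprocessor macros: the same __*_hazard definition now expands directly in .S files and gets passed through __stringify() into an inline-asm body in C, so each hazard sequence exists in exactly one place. A self-contained sketch of that pattern outside the kernel (my_barrier and __my_barrier are made-up names):

#include <stdio.h>

/* Local equivalents of linux/stringify.h, to keep the sketch standalone. */
#define __stringify_1(x...)	#x
#define __stringify(x...)	__stringify_1(x)

/* One definition of the instruction sequence, usable from asm and C. */
#define __my_barrier		\
	nop;			\
	nop

static inline void my_barrier(void)
{
	/* stringifies to "nop; nop" inside the inline asm */
	__asm__ __volatile__(__stringify(__my_barrier) : : : "memory");
}

int main(void)
{
	my_barrier();
	puts("barrier emitted");
	return 0;
}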
diff --git a/arch/mips/include/asm/inst.h b/arch/mips/include/asm/inst.h
index f1eadf764071..22912f78401c 100644
--- a/arch/mips/include/asm/inst.h
+++ b/arch/mips/include/asm/inst.h
@@ -73,4 +73,16 @@
73 73
74typedef unsigned int mips_instruction; 74typedef unsigned int mips_instruction;
75 75
76/* microMIPS instruction decode structure. Do NOT export!!! */
77struct mm_decoded_insn {
78 mips_instruction insn;
79 mips_instruction next_insn;
80 int pc_inc;
81 int next_pc_inc;
82 int micro_mips_mode;
83};
84
85/* Recode table from 16-bit register notation to 32-bit GPR. Do NOT export!!! */
86extern const int reg16to32[];
87
76#endif /* _ASM_INST_H */ 88#endif /* _ASM_INST_H */
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 9f3384c789d7..45c00951888b 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -14,53 +14,48 @@
14#ifndef __ASSEMBLY__ 14#ifndef __ASSEMBLY__
15 15
16#include <linux/compiler.h> 16#include <linux/compiler.h>
17#include <linux/stringify.h>
17#include <asm/hazards.h> 18#include <asm/hazards.h>
18 19
19#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) 20#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
20 21
21__asm__( 22static inline void arch_local_irq_disable(void)
22 " .macro arch_local_irq_disable\n" 23{
24 __asm__ __volatile__(
23 " .set push \n" 25 " .set push \n"
24 " .set noat \n" 26 " .set noat \n"
25 " di \n" 27 " di \n"
26 " irq_disable_hazard \n" 28 " " __stringify(__irq_disable_hazard) " \n"
27 " .set pop \n" 29 " .set pop \n"
28 " .endm \n"); 30 : /* no outputs */
29 31 : /* no inputs */
30static inline void arch_local_irq_disable(void) 32 : "memory");
31{
32 __asm__ __volatile__(
33 "arch_local_irq_disable"
34 : /* no outputs */
35 : /* no inputs */
36 : "memory");
37} 33}
38 34
35static inline unsigned long arch_local_irq_save(void)
36{
37 unsigned long flags;
39 38
40__asm__( 39 asm __volatile__(
41 " .macro arch_local_irq_save result \n"
42 " .set push \n" 40 " .set push \n"
43 " .set reorder \n" 41 " .set reorder \n"
44 " .set noat \n" 42 " .set noat \n"
45 " di \\result \n" 43 " di %[flags] \n"
46 " andi \\result, 1 \n" 44 " andi %[flags], 1 \n"
47 " irq_disable_hazard \n" 45 " " __stringify(__irq_disable_hazard) " \n"
48 " .set pop \n" 46 " .set pop \n"
49 " .endm \n"); 47 : [flags] "=r" (flags)
48 : /* no inputs */
49 : "memory");
50 50
51static inline unsigned long arch_local_irq_save(void)
52{
53 unsigned long flags;
54 asm volatile("arch_local_irq_save\t%0"
55 : "=r" (flags)
56 : /* no inputs */
57 : "memory");
58 return flags; 51 return flags;
59} 52}
60 53
54static inline void arch_local_irq_restore(unsigned long flags)
55{
56 unsigned long __tmp1;
61 57
62__asm__( 58 __asm__ __volatile__(
63 " .macro arch_local_irq_restore flags \n"
64 " .set push \n" 59 " .set push \n"
65 " .set noreorder \n" 60 " .set noreorder \n"
66 " .set noat \n" 61 " .set noat \n"
@@ -69,7 +64,7 @@ __asm__(
69 * Slow, but doesn't suffer from a relatively unlikely race 64 * Slow, but doesn't suffer from a relatively unlikely race
70 * condition we're having since days 1. 65 * condition we're having since days 1.
71 */ 66 */
72 " beqz \\flags, 1f \n" 67 " beqz %[flags], 1f \n"
73 " di \n" 68 " di \n"
74 " ei \n" 69 " ei \n"
75 "1: \n" 70 "1: \n"
@@ -78,33 +73,44 @@ __asm__(
78 * Fast, dangerous. Life is fun, life is good. 73 * Fast, dangerous. Life is fun, life is good.
79 */ 74 */
80 " mfc0 $1, $12 \n" 75 " mfc0 $1, $12 \n"
81 " ins $1, \\flags, 0, 1 \n" 76 " ins $1, %[flags], 0, 1 \n"
82 " mtc0 $1, $12 \n" 77 " mtc0 $1, $12 \n"
83#endif 78#endif
84 " irq_disable_hazard \n" 79 " " __stringify(__irq_disable_hazard) " \n"
85 " .set pop \n" 80 " .set pop \n"
86 " .endm \n"); 81 : [flags] "=r" (__tmp1)
87 82 : "0" (flags)
88static inline void arch_local_irq_restore(unsigned long flags) 83 : "memory");
89{
90 unsigned long __tmp1;
91
92 __asm__ __volatile__(
93 "arch_local_irq_restore\t%0"
94 : "=r" (__tmp1)
95 : "0" (flags)
96 : "memory");
97} 84}
98 85
99static inline void __arch_local_irq_restore(unsigned long flags) 86static inline void __arch_local_irq_restore(unsigned long flags)
100{ 87{
101 unsigned long __tmp1;
102
103 __asm__ __volatile__( 88 __asm__ __volatile__(
104 "arch_local_irq_restore\t%0" 89 " .set push \n"
105 : "=r" (__tmp1) 90 " .set noreorder \n"
106 : "0" (flags) 91 " .set noat \n"
107 : "memory"); 92#if defined(CONFIG_IRQ_CPU)
93 /*
94 * Slow, but doesn't suffer from a relatively unlikely race
95 * condition we're having since days 1.
96 */
97 " beqz %[flags], 1f \n"
98 " di \n"
99 " ei \n"
100 "1: \n"
101#else
102 /*
103 * Fast, dangerous. Life is fun, life is good.
104 */
105 " mfc0 $1, $12 \n"
106 " ins $1, %[flags], 0, 1 \n"
107 " mtc0 $1, $12 \n"
108#endif
109 " " __stringify(__irq_disable_hazard) " \n"
110 " .set pop \n"
111 : [flags] "=r" (flags)
112 : "0" (flags)
113 : "memory");
108} 114}
109#else 115#else
110/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */ 116/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
@@ -115,8 +121,18 @@ void __arch_local_irq_restore(unsigned long flags);
115#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */ 121#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
116 122
117 123
118__asm__( 124extern void smtc_ipi_replay(void);
119 " .macro arch_local_irq_enable \n" 125
126static inline void arch_local_irq_enable(void)
127{
128#ifdef CONFIG_MIPS_MT_SMTC
129 /*
130 * SMTC kernel needs to do a software replay of queued
131 * IPIs, at the cost of call overhead on each local_irq_enable()
132 */
133 smtc_ipi_replay();
134#endif
135 __asm__ __volatile__(
120 " .set push \n" 136 " .set push \n"
121 " .set reorder \n" 137 " .set reorder \n"
122 " .set noat \n" 138 " .set noat \n"
@@ -133,45 +149,28 @@ __asm__(
133 " xori $1,0x1e \n" 149 " xori $1,0x1e \n"
134 " mtc0 $1,$12 \n" 150 " mtc0 $1,$12 \n"
135#endif 151#endif
136 " irq_enable_hazard \n" 152 " " __stringify(__irq_enable_hazard) " \n"
137 " .set pop \n" 153 " .set pop \n"
138 " .endm"); 154 : /* no outputs */
139 155 : /* no inputs */
140extern void smtc_ipi_replay(void); 156 : "memory");
141
142static inline void arch_local_irq_enable(void)
143{
144#ifdef CONFIG_MIPS_MT_SMTC
145 /*
146 * SMTC kernel needs to do a software replay of queued
147 * IPIs, at the cost of call overhead on each local_irq_enable()
148 */
149 smtc_ipi_replay();
150#endif
151 __asm__ __volatile__(
152 "arch_local_irq_enable"
153 : /* no outputs */
154 : /* no inputs */
155 : "memory");
156} 157}
157 158
159static inline unsigned long arch_local_save_flags(void)
160{
161 unsigned long flags;
158 162
159__asm__( 163 asm __volatile__(
160 " .macro arch_local_save_flags flags \n"
161 " .set push \n" 164 " .set push \n"
162 " .set reorder \n" 165 " .set reorder \n"
163#ifdef CONFIG_MIPS_MT_SMTC 166#ifdef CONFIG_MIPS_MT_SMTC
164 " mfc0 \\flags, $2, 1 \n" 167 " mfc0 %[flags], $2, 1 \n"
165#else 168#else
166 " mfc0 \\flags, $12 \n" 169 " mfc0 %[flags], $12 \n"
167#endif 170#endif
168 " .set pop \n" 171 " .set pop \n"
169 " .endm \n"); 172 : [flags] "=r" (flags));
170 173
171static inline unsigned long arch_local_save_flags(void)
172{
173 unsigned long flags;
174 asm volatile("arch_local_save_flags %0" : "=r" (flags));
175 return flags; 174 return flags;
176} 175}
177 176
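Besides turning the asm macros into inline functions, the irqflags.h change above switches from positional "%0" operands to named operands such as %[flags], which keeps the longer asm bodies readable. A small standalone example of named operands, written as MIPS asm to match the file (build with a MIPS toolchain; low_bit is a made-up helper):

#include <stdio.h>

/* andi %[flags], %[in], 1 -- same named-operand style as irqflags.h. */
static unsigned long low_bit(unsigned long v)
{
	unsigned long flags;

	__asm__ __volatile__(
	"	andi	%[flags], %[in], 1	\n"
	: [flags] "=r" (flags)
	: [in] "r" (v));

	return flags;
}

int main(void)
{
	printf("%lu\n", low_bit(5));	/* prints 1 */
	return 0;
}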
diff --git a/arch/mips/include/asm/kvm.h b/arch/mips/include/asm/kvm.h
new file mode 100644
index 000000000000..85789eacbf18
--- /dev/null
+++ b/arch/mips/include/asm/kvm.h
@@ -0,0 +1,55 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7* Authors: Sanjay Lal <sanjayl@kymasys.com>
8*/
9
10#ifndef __LINUX_KVM_MIPS_H
11#define __LINUX_KVM_MIPS_H
12
13#include <linux/types.h>
14
15#define __KVM_MIPS
16
17#define N_MIPS_COPROC_REGS 32
18#define N_MIPS_COPROC_SEL 8
19
20/* for KVM_GET_REGS and KVM_SET_REGS */
21struct kvm_regs {
22 __u32 gprs[32];
23 __u32 hi;
24 __u32 lo;
25 __u32 pc;
26
27 __u32 cp0reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
28};
29
30/* for KVM_GET_SREGS and KVM_SET_SREGS */
31struct kvm_sregs {
32};
33
34/* for KVM_GET_FPU and KVM_SET_FPU */
35struct kvm_fpu {
36};
37
38struct kvm_debug_exit_arch {
39};
40
41/* for KVM_SET_GUEST_DEBUG */
42struct kvm_guest_debug_arch {
43};
44
45struct kvm_mips_interrupt {
46 /* in */
47 __u32 cpu;
48 __u32 irq;
49};
50
51/* definition of registers in kvm_run */
52struct kvm_sync_regs {
53};
54
55#endif /* __LINUX_KVM_MIPS_H */
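The struct kvm_regs layout above is what a MIPS-aware VMM exchanges with the kernel through the standard KVM_GET_REGS/KVM_SET_REGS vcpu ioctls, as the header comment says. A rough user-space sketch, with error handling and the usual KVM_GET_API_VERSION check omitted, assuming the installed linux/kvm.h exposes this MIPS layout:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
	struct kvm_regs regs;

	/* Read back the guest GPRs/hi/lo/pc defined in asm/kvm.h above. */
	if (ioctl(vcpu, KVM_GET_REGS, &regs) == 0)
		printf("guest pc = 0x%x\n", regs.pc);
	return 0;
}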
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
new file mode 100644
index 000000000000..e68781e18387
--- /dev/null
+++ b/arch/mips/include/asm/kvm_host.h
@@ -0,0 +1,667 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7* Authors: Sanjay Lal <sanjayl@kymasys.com>
8*/
9
10#ifndef __MIPS_KVM_HOST_H__
11#define __MIPS_KVM_HOST_H__
12
13#include <linux/mutex.h>
14#include <linux/hrtimer.h>
15#include <linux/interrupt.h>
16#include <linux/types.h>
17#include <linux/kvm.h>
18#include <linux/kvm_types.h>
19#include <linux/threads.h>
20#include <linux/spinlock.h>
21
22
23#define KVM_MAX_VCPUS 1
24#define KVM_USER_MEM_SLOTS 8
25/* memory slots that are not exposed to userspace */
26#define KVM_PRIVATE_MEM_SLOTS 0
27
28#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
29
30/* Don't support huge pages */
31#define KVM_HPAGE_GFN_SHIFT(x) 0
32
33/* We don't currently support large pages. */
34#define KVM_NR_PAGE_SIZES 1
35#define KVM_PAGES_PER_HPAGE(x) 1
36
37
38
39/* Special address that contains the comm page, used for reducing # of traps */
40#define KVM_GUEST_COMMPAGE_ADDR 0x0
41
42#define KVM_GUEST_KERNEL_MODE(vcpu) ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
43 ((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
44
45#define KVM_GUEST_KUSEG 0x00000000UL
46#define KVM_GUEST_KSEG0 0x40000000UL
47#define KVM_GUEST_KSEG23 0x60000000UL
48#define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0x60000000)
49#define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff)
50
51#define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
52#define KVM_GUEST_CKSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
53#define KVM_GUEST_CKSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
54
55/*
56 * Map an address to a certain kernel segment
57 */
58#define KVM_GUEST_KSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
59#define KVM_GUEST_KSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
60#define KVM_GUEST_KSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
61
62#define KVM_INVALID_PAGE 0xdeadbeef
63#define KVM_INVALID_INST 0xdeadbeef
64#define KVM_INVALID_ADDR 0xdeadbeef
65
66#define KVM_MALTA_GUEST_RTC_ADDR 0xb8000070UL
67
68#define GUEST_TICKS_PER_JIFFY (40000000/HZ)
69#define MS_TO_NS(x) (x * 1E6L)
70
71#define CAUSEB_DC 27
72#define CAUSEF_DC (_ULCAST_(1) << 27)
73
74struct kvm;
75struct kvm_run;
76struct kvm_vcpu;
77struct kvm_interrupt;
78
79extern atomic_t kvm_mips_instance;
80extern pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
81extern void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
82extern bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
83
84struct kvm_vm_stat {
85 u32 remote_tlb_flush;
86};
87
88struct kvm_vcpu_stat {
89 u32 wait_exits;
90 u32 cache_exits;
91 u32 signal_exits;
92 u32 int_exits;
93 u32 cop_unusable_exits;
94 u32 tlbmod_exits;
95 u32 tlbmiss_ld_exits;
96 u32 tlbmiss_st_exits;
97 u32 addrerr_st_exits;
98 u32 addrerr_ld_exits;
99 u32 syscall_exits;
100 u32 resvd_inst_exits;
101 u32 break_inst_exits;
102 u32 flush_dcache_exits;
103 u32 halt_wakeup;
104};
105
106enum kvm_mips_exit_types {
107 WAIT_EXITS,
108 CACHE_EXITS,
109 SIGNAL_EXITS,
110 INT_EXITS,
111 COP_UNUSABLE_EXITS,
112 TLBMOD_EXITS,
113 TLBMISS_LD_EXITS,
114 TLBMISS_ST_EXITS,
115 ADDRERR_ST_EXITS,
116 ADDRERR_LD_EXITS,
117 SYSCALL_EXITS,
118 RESVD_INST_EXITS,
119 BREAK_INST_EXITS,
120 FLUSH_DCACHE_EXITS,
121 MAX_KVM_MIPS_EXIT_TYPES
122};
123
124struct kvm_arch_memory_slot {
125};
126
127struct kvm_arch {
128 /* Guest GVA->HPA page table */
129 unsigned long *guest_pmap;
130 unsigned long guest_pmap_npages;
131
132 /* Wired host TLB used for the commpage */
133 int commpage_tlb;
134};
135
136#define N_MIPS_COPROC_REGS 32
137#define N_MIPS_COPROC_SEL 8
138
139struct mips_coproc {
140 unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
141#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
142 unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
143#endif
144};
145
146/*
147 * Coprocessor 0 register names
148 */
149#define MIPS_CP0_TLB_INDEX 0
150#define MIPS_CP0_TLB_RANDOM 1
151#define MIPS_CP0_TLB_LOW 2
152#define MIPS_CP0_TLB_LO0 2
153#define MIPS_CP0_TLB_LO1 3
154#define MIPS_CP0_TLB_CONTEXT 4
155#define MIPS_CP0_TLB_PG_MASK 5
156#define MIPS_CP0_TLB_WIRED 6
157#define MIPS_CP0_HWRENA 7
158#define MIPS_CP0_BAD_VADDR 8
159#define MIPS_CP0_COUNT 9
160#define MIPS_CP0_TLB_HI 10
161#define MIPS_CP0_COMPARE 11
162#define MIPS_CP0_STATUS 12
163#define MIPS_CP0_CAUSE 13
164#define MIPS_CP0_EXC_PC 14
165#define MIPS_CP0_PRID 15
166#define MIPS_CP0_CONFIG 16
167#define MIPS_CP0_LLADDR 17
168#define MIPS_CP0_WATCH_LO 18
169#define MIPS_CP0_WATCH_HI 19
170#define MIPS_CP0_TLB_XCONTEXT 20
171#define MIPS_CP0_ECC 26
172#define MIPS_CP0_CACHE_ERR 27
173#define MIPS_CP0_TAG_LO 28
174#define MIPS_CP0_TAG_HI 29
175#define MIPS_CP0_ERROR_PC 30
176#define MIPS_CP0_DEBUG 23
177#define MIPS_CP0_DEPC 24
178#define MIPS_CP0_PERFCNT 25
179#define MIPS_CP0_ERRCTL 26
180#define MIPS_CP0_DATA_LO 28
181#define MIPS_CP0_DATA_HI 29
182#define MIPS_CP0_DESAVE 31
183
184#define MIPS_CP0_CONFIG_SEL 0
185#define MIPS_CP0_CONFIG1_SEL 1
186#define MIPS_CP0_CONFIG2_SEL 2
187#define MIPS_CP0_CONFIG3_SEL 3
188
189/* Config0 register bits */
190#define CP0C0_M 31
191#define CP0C0_K23 28
192#define CP0C0_KU 25
193#define CP0C0_MDU 20
194#define CP0C0_MM 17
195#define CP0C0_BM 16
196#define CP0C0_BE 15
197#define CP0C0_AT 13
198#define CP0C0_AR 10
199#define CP0C0_MT 7
200#define CP0C0_VI 3
201#define CP0C0_K0 0
202
203/* Config1 register bits */
204#define CP0C1_M 31
205#define CP0C1_MMU 25
206#define CP0C1_IS 22
207#define CP0C1_IL 19
208#define CP0C1_IA 16
209#define CP0C1_DS 13
210#define CP0C1_DL 10
211#define CP0C1_DA 7
212#define CP0C1_C2 6
213#define CP0C1_MD 5
214#define CP0C1_PC 4
215#define CP0C1_WR 3
216#define CP0C1_CA 2
217#define CP0C1_EP 1
218#define CP0C1_FP 0
219
220/* Config2 Register bits */
221#define CP0C2_M 31
222#define CP0C2_TU 28
223#define CP0C2_TS 24
224#define CP0C2_TL 20
225#define CP0C2_TA 16
226#define CP0C2_SU 12
227#define CP0C2_SS 8
228#define CP0C2_SL 4
229#define CP0C2_SA 0
230
231/* Config3 Register bits */
232#define CP0C3_M 31
233#define CP0C3_ISA_ON_EXC 16
234#define CP0C3_ULRI 13
235#define CP0C3_DSPP 10
236#define CP0C3_LPA 7
237#define CP0C3_VEIC 6
238#define CP0C3_VInt 5
239#define CP0C3_SP 4
240#define CP0C3_MT 2
241#define CP0C3_SM 1
242#define CP0C3_TL 0
243
244/* Have config1, Cacheable, noncoherent, write-back, write allocate*/
245#define MIPS_CONFIG0 \
246 ((1 << CP0C0_M) | (0x3 << CP0C0_K0))
247
248/* Have config2, no coprocessor2 attached, no MDMX support attached,
249 no performance counters, no watch registers,
250 no code compression, EJTAG present, no FPU */
251#define MIPS_CONFIG1 \
252((1 << CP0C1_M) | \
253 (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) | \
254 (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) | \
255 (0 << CP0C1_FP))
256
257/* Have config3, no tertiary/secondary caches implemented */
258#define MIPS_CONFIG2 \
259((1 << CP0C2_M))
260
261/* No config4, no DSP ASE, no large physaddr (PABITS),
262 no external interrupt controller, no vectored interrupts,
263 no 1kb pages, no SmartMIPS ASE, no trace logic */
264#define MIPS_CONFIG3 \
265((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) | \
266 (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) | \
267 (0 << CP0C3_SM) | (0 << CP0C3_TL))
268
269/* MMU types, the first four entries have the same layout as the
270 CP0C0_MT field. */
271enum mips_mmu_types {
272 MMU_TYPE_NONE,
273 MMU_TYPE_R4000,
274 MMU_TYPE_RESERVED,
275 MMU_TYPE_FMT,
276 MMU_TYPE_R3000,
277 MMU_TYPE_R6000,
278 MMU_TYPE_R8000
279};
280
281/*
282 * Trap codes
283 */
284#define T_INT 0 /* Interrupt pending */
285#define T_TLB_MOD 1 /* TLB modified fault */
286#define T_TLB_LD_MISS 2 /* TLB miss on load or ifetch */
287#define T_TLB_ST_MISS 3 /* TLB miss on a store */
288#define T_ADDR_ERR_LD 4 /* Address error on a load or ifetch */
289#define T_ADDR_ERR_ST 5 /* Address error on a store */
290#define T_BUS_ERR_IFETCH 6 /* Bus error on an ifetch */
291#define T_BUS_ERR_LD_ST 7 /* Bus error on a load or store */
292#define T_SYSCALL 8 /* System call */
293#define T_BREAK 9 /* Breakpoint */
294#define T_RES_INST 10 /* Reserved instruction exception */
295#define T_COP_UNUSABLE 11 /* Coprocessor unusable */
296#define T_OVFLOW 12 /* Arithmetic overflow */
297
298/*
299 * Trap definitions added for r4000 port.
300 */
301#define T_TRAP 13 /* Trap instruction */
302#define T_VCEI 14 /* Virtual coherency exception */
303#define T_FPE 15 /* Floating point exception */
304#define T_WATCH 23 /* Watch address reference */
305#define T_VCED 31 /* Virtual coherency data */
306
307/* Resume Flags */
308#define RESUME_FLAG_DR (1<<0) /* Reload guest nonvolatile state? */
309#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
310
311#define RESUME_GUEST 0
312#define RESUME_GUEST_DR RESUME_FLAG_DR
313#define RESUME_HOST RESUME_FLAG_HOST
314
315enum emulation_result {
316 EMULATE_DONE, /* no further processing */
317 EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
318 EMULATE_FAIL, /* can't emulate this instruction */
319 EMULATE_WAIT, /* WAIT instruction */
320 EMULATE_PRIV_FAIL,
321};
322
323#define MIPS3_PG_G 0x00000001 /* Global; ignore ASID if in lo0 & lo1 */
324#define MIPS3_PG_V 0x00000002 /* Valid */
325#define MIPS3_PG_NV 0x00000000
326#define MIPS3_PG_D 0x00000004 /* Dirty */
327
328#define mips3_paddr_to_tlbpfn(x) \
329 (((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
330#define mips3_tlbpfn_to_paddr(x) \
331 ((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)
332
333#define MIPS3_PG_SHIFT 6
334#define MIPS3_PG_FRAME 0x3fffffc0
335
336#define VPN2_MASK 0xffffe000
337#define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G))
338#define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
339#define TLB_ASID(x) (ASID_MASK((x).tlb_hi))
340#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V))
341
342struct kvm_mips_tlb {
343 long tlb_mask;
344 long tlb_hi;
345 long tlb_lo0;
346 long tlb_lo1;
347};
348
349#define KVM_MIPS_GUEST_TLB_SIZE 64
350struct kvm_vcpu_arch {
351 void *host_ebase, *guest_ebase;
352 unsigned long host_stack;
353 unsigned long host_gp;
354
355 /* Host CP0 registers used when handling exits from guest */
356 unsigned long host_cp0_badvaddr;
357 unsigned long host_cp0_cause;
358 unsigned long host_cp0_epc;
359 unsigned long host_cp0_entryhi;
360 uint32_t guest_inst;
361
362 /* GPRS */
363 unsigned long gprs[32];
364 unsigned long hi;
365 unsigned long lo;
366 unsigned long pc;
367
368 /* FPU State */
369 struct mips_fpu_struct fpu;
370
371 /* COP0 State */
372 struct mips_coproc *cop0;
373
374 /* Host KSEG0 address of the EI/DI offset */
375 void *kseg0_commpage;
376
377 u32 io_gpr; /* GPR used as IO source/target */
378
379 /* Used to calibrate the virtual count register for the guest */
380 int32_t host_cp0_count;
381
382 /* Bitmask of exceptions that are pending */
383 unsigned long pending_exceptions;
384
385 /* Bitmask of pending exceptions to be cleared */
386 unsigned long pending_exceptions_clr;
387
388 unsigned long pending_load_cause;
389
390 /* Save/Restore the entryhi register when we are preempted/scheduled back in */
391 unsigned long preempt_entryhi;
392
393 /* S/W Based TLB for guest */
394 struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];
395
396 /* Cached guest kernel/user ASIDs */
397 uint32_t guest_user_asid[NR_CPUS];
398 uint32_t guest_kernel_asid[NR_CPUS];
399 struct mm_struct guest_kernel_mm, guest_user_mm;
400
401 struct kvm_mips_tlb shadow_tlb[NR_CPUS][KVM_MIPS_GUEST_TLB_SIZE];
402
403
404 struct hrtimer comparecount_timer;
405
406 int last_sched_cpu;
407
408 /* WAIT executed */
409 int wait;
410};
411
412
413#define kvm_read_c0_guest_index(cop0) (cop0->reg[MIPS_CP0_TLB_INDEX][0])
414#define kvm_write_c0_guest_index(cop0, val) (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
415#define kvm_read_c0_guest_entrylo0(cop0) (cop0->reg[MIPS_CP0_TLB_LO0][0])
416#define kvm_read_c0_guest_entrylo1(cop0) (cop0->reg[MIPS_CP0_TLB_LO1][0])
417#define kvm_read_c0_guest_context(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
418#define kvm_write_c0_guest_context(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
419#define kvm_read_c0_guest_userlocal(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
420#define kvm_read_c0_guest_pagemask(cop0) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
421#define kvm_write_c0_guest_pagemask(cop0, val) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
422#define kvm_read_c0_guest_wired(cop0) (cop0->reg[MIPS_CP0_TLB_WIRED][0])
423#define kvm_write_c0_guest_wired(cop0, val) (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
424#define kvm_read_c0_guest_badvaddr(cop0) (cop0->reg[MIPS_CP0_BAD_VADDR][0])
425#define kvm_write_c0_guest_badvaddr(cop0, val) (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
426#define kvm_read_c0_guest_count(cop0) (cop0->reg[MIPS_CP0_COUNT][0])
427#define kvm_write_c0_guest_count(cop0, val) (cop0->reg[MIPS_CP0_COUNT][0] = (val))
428#define kvm_read_c0_guest_entryhi(cop0) (cop0->reg[MIPS_CP0_TLB_HI][0])
429#define kvm_write_c0_guest_entryhi(cop0, val) (cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
430#define kvm_read_c0_guest_compare(cop0) (cop0->reg[MIPS_CP0_COMPARE][0])
431#define kvm_write_c0_guest_compare(cop0, val) (cop0->reg[MIPS_CP0_COMPARE][0] = (val))
432#define kvm_read_c0_guest_status(cop0) (cop0->reg[MIPS_CP0_STATUS][0])
433#define kvm_write_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] = (val))
434#define kvm_read_c0_guest_intctl(cop0) (cop0->reg[MIPS_CP0_STATUS][1])
435#define kvm_write_c0_guest_intctl(cop0, val) (cop0->reg[MIPS_CP0_STATUS][1] = (val))
436#define kvm_read_c0_guest_cause(cop0) (cop0->reg[MIPS_CP0_CAUSE][0])
437#define kvm_write_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] = (val))
438#define kvm_read_c0_guest_epc(cop0) (cop0->reg[MIPS_CP0_EXC_PC][0])
439#define kvm_write_c0_guest_epc(cop0, val) (cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
440#define kvm_read_c0_guest_prid(cop0) (cop0->reg[MIPS_CP0_PRID][0])
441#define kvm_write_c0_guest_prid(cop0, val) (cop0->reg[MIPS_CP0_PRID][0] = (val))
442#define kvm_read_c0_guest_ebase(cop0) (cop0->reg[MIPS_CP0_PRID][1])
443#define kvm_write_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] = (val))
444#define kvm_read_c0_guest_config(cop0) (cop0->reg[MIPS_CP0_CONFIG][0])
445#define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1])
446#define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2])
447#define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3])
448#define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7])
449#define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
450#define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
451#define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
452#define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
453#define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
454#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
455#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
456
457#define kvm_set_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
458#define kvm_clear_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
459#define kvm_set_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] |= (val))
460#define kvm_clear_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] &= ~(val))
461#define kvm_change_c0_guest_cause(cop0, change, val) \
462{ \
463 kvm_clear_c0_guest_cause(cop0, change); \
464 kvm_set_c0_guest_cause(cop0, ((val) & (change))); \
465}
466#define kvm_set_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] |= (val))
467#define kvm_clear_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
468#define kvm_change_c0_guest_ebase(cop0, change, val) \
469{ \
470 kvm_clear_c0_guest_ebase(cop0, change); \
471 kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
472}
473
474
475struct kvm_mips_callbacks {
476 int (*handle_cop_unusable) (struct kvm_vcpu *vcpu);
477 int (*handle_tlb_mod) (struct kvm_vcpu *vcpu);
478 int (*handle_tlb_ld_miss) (struct kvm_vcpu *vcpu);
479 int (*handle_tlb_st_miss) (struct kvm_vcpu *vcpu);
480 int (*handle_addr_err_st) (struct kvm_vcpu *vcpu);
481 int (*handle_addr_err_ld) (struct kvm_vcpu *vcpu);
482 int (*handle_syscall) (struct kvm_vcpu *vcpu);
483 int (*handle_res_inst) (struct kvm_vcpu *vcpu);
484 int (*handle_break) (struct kvm_vcpu *vcpu);
485 int (*vm_init) (struct kvm *kvm);
486 int (*vcpu_init) (struct kvm_vcpu *vcpu);
487 int (*vcpu_setup) (struct kvm_vcpu *vcpu);
488 gpa_t(*gva_to_gpa) (gva_t gva);
489 void (*queue_timer_int) (struct kvm_vcpu *vcpu);
490 void (*dequeue_timer_int) (struct kvm_vcpu *vcpu);
491 void (*queue_io_int) (struct kvm_vcpu *vcpu,
492 struct kvm_mips_interrupt *irq);
493 void (*dequeue_io_int) (struct kvm_vcpu *vcpu,
494 struct kvm_mips_interrupt *irq);
495 int (*irq_deliver) (struct kvm_vcpu *vcpu, unsigned int priority,
496 uint32_t cause);
497 int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority,
498 uint32_t cause);
499 int (*vcpu_ioctl_get_regs) (struct kvm_vcpu *vcpu,
500 struct kvm_regs *regs);
501 int (*vcpu_ioctl_set_regs) (struct kvm_vcpu *vcpu,
502 struct kvm_regs *regs);
503};
504extern struct kvm_mips_callbacks *kvm_mips_callbacks;
505int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
506
507/* Debug: dump vcpu state */
508int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
509
510/* Trampoline ASM routine to start running in "Guest" context */
511extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
512
513/* TLB handling */
514uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
515
516uint32_t kvm_get_user_asid(struct kvm_vcpu *vcpu);
517
518uint32_t kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
519
520extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
521 struct kvm_vcpu *vcpu);
522
523extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
524 struct kvm_vcpu *vcpu);
525
526extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
527 struct kvm_mips_tlb *tlb,
528 unsigned long *hpa0,
529 unsigned long *hpa1);
530
531extern enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
532 uint32_t *opc,
533 struct kvm_run *run,
534 struct kvm_vcpu *vcpu);
535
536extern enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause,
537 uint32_t *opc,
538 struct kvm_run *run,
539 struct kvm_vcpu *vcpu);
540
541extern void kvm_mips_dump_host_tlbs(void);
542extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
543extern void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu);
544extern void kvm_mips_flush_host_tlb(int skip_kseg0);
545extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
546extern int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index);
547
548extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
549 unsigned long entryhi);
550extern int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr);
551extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
552 unsigned long gva);
553extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
554 struct kvm_vcpu *vcpu);
555extern void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu);
556extern void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu);
557extern void kvm_local_flush_tlb_all(void);
558extern void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu);
559extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu);
560extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
561extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
562
563/* Emulation */
564uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu);
565enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause);
566
567extern enum emulation_result kvm_mips_emulate_inst(unsigned long cause,
568 uint32_t *opc,
569 struct kvm_run *run,
570 struct kvm_vcpu *vcpu);
571
572extern enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
573 uint32_t *opc,
574 struct kvm_run *run,
575 struct kvm_vcpu *vcpu);
576
577extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
578 uint32_t *opc,
579 struct kvm_run *run,
580 struct kvm_vcpu *vcpu);
581
582extern enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
583 uint32_t *opc,
584 struct kvm_run *run,
585 struct kvm_vcpu *vcpu);
586
587extern enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
588 uint32_t *opc,
589 struct kvm_run *run,
590 struct kvm_vcpu *vcpu);
591
592extern enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
593 uint32_t *opc,
594 struct kvm_run *run,
595 struct kvm_vcpu *vcpu);
596
597extern enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
598 uint32_t *opc,
599 struct kvm_run *run,
600 struct kvm_vcpu *vcpu);
601
602extern enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
603 uint32_t *opc,
604 struct kvm_run *run,
605 struct kvm_vcpu *vcpu);
606
607extern enum emulation_result kvm_mips_handle_ri(unsigned long cause,
608 uint32_t *opc,
609 struct kvm_run *run,
610 struct kvm_vcpu *vcpu);
611
612extern enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
613 uint32_t *opc,
614 struct kvm_run *run,
615 struct kvm_vcpu *vcpu);
616
617extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
618 uint32_t *opc,
619 struct kvm_run *run,
620 struct kvm_vcpu *vcpu);
621
622extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
623 struct kvm_run *run);
624
625enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu);
626
627enum emulation_result kvm_mips_check_privilege(unsigned long cause,
628 uint32_t *opc,
629 struct kvm_run *run,
630 struct kvm_vcpu *vcpu);
631
632enum emulation_result kvm_mips_emulate_cache(uint32_t inst,
633 uint32_t *opc,
634 uint32_t cause,
635 struct kvm_run *run,
636 struct kvm_vcpu *vcpu);
637enum emulation_result kvm_mips_emulate_CP0(uint32_t inst,
638 uint32_t *opc,
639 uint32_t cause,
640 struct kvm_run *run,
641 struct kvm_vcpu *vcpu);
642enum emulation_result kvm_mips_emulate_store(uint32_t inst,
643 uint32_t cause,
644 struct kvm_run *run,
645 struct kvm_vcpu *vcpu);
646enum emulation_result kvm_mips_emulate_load(uint32_t inst,
647 uint32_t cause,
648 struct kvm_run *run,
649 struct kvm_vcpu *vcpu);
650
651/* Dynamic binary translation */
652extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
653 struct kvm_vcpu *vcpu);
654extern int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
655 struct kvm_vcpu *vcpu);
656extern int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc,
657 struct kvm_vcpu *vcpu);
658extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
659 struct kvm_vcpu *vcpu);
660
661/* Misc */
662extern void mips32_SyncICache(unsigned long addr, unsigned long size);
663extern int kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
664extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
665
666
667#endif /* __MIPS_KVM_HOST_H__ */
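All of the kvm_read_c0_guest_*()/kvm_write_c0_guest_*() helpers above are plain indexed accesses into mips_coproc.reg[reg][sel]; nothing is trapped or virtualized at this layer. A short kernel-side usage sketch (sketch_tick is a made-up function, not part of the KVM/MIPS code):

#include <linux/kvm_host.h>

/* Illustrative only: bump the guest Compare register past guest Count. */
static void sketch_tick(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long count = kvm_read_c0_guest_count(cop0);

	/* expands to cop0->reg[MIPS_CP0_COMPARE][0] = count + 1000 */
	kvm_write_c0_guest_compare(cop0, count + 1000);
}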
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h
deleted file mode 100644
index 8fcf8df4418a..000000000000
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef BCM63XX_CLK_H_
2#define BCM63XX_CLK_H_
3
4struct clk {
5 void (*set)(struct clk *, int);
6 unsigned int rate;
7 unsigned int usage;
8 int id;
9};
10
11#endif /* ! BCM63XX_CLK_H_ */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
index cb922b9cb0e9..336228990808 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
@@ -14,11 +14,12 @@
14#define BCM6345_CPU_ID 0x6345 14#define BCM6345_CPU_ID 0x6345
15#define BCM6348_CPU_ID 0x6348 15#define BCM6348_CPU_ID 0x6348
16#define BCM6358_CPU_ID 0x6358 16#define BCM6358_CPU_ID 0x6358
17#define BCM6362_CPU_ID 0x6362
17#define BCM6368_CPU_ID 0x6368 18#define BCM6368_CPU_ID 0x6368
18 19
19void __init bcm63xx_cpu_init(void); 20void __init bcm63xx_cpu_init(void);
20u16 __bcm63xx_get_cpu_id(void); 21u16 __bcm63xx_get_cpu_id(void);
21u16 bcm63xx_get_cpu_rev(void); 22u8 bcm63xx_get_cpu_rev(void);
22unsigned int bcm63xx_get_cpu_freq(void); 23unsigned int bcm63xx_get_cpu_freq(void);
23 24
24#ifdef CONFIG_BCM63XX_CPU_6328 25#ifdef CONFIG_BCM63XX_CPU_6328
@@ -86,6 +87,20 @@ unsigned int bcm63xx_get_cpu_freq(void);
86# define BCMCPU_IS_6358() (0) 87# define BCMCPU_IS_6358() (0)
87#endif 88#endif
88 89
90#ifdef CONFIG_BCM63XX_CPU_6362
91# ifdef bcm63xx_get_cpu_id
92# undef bcm63xx_get_cpu_id
93# define bcm63xx_get_cpu_id() __bcm63xx_get_cpu_id()
94# define BCMCPU_RUNTIME_DETECT
95# else
96# define bcm63xx_get_cpu_id() BCM6362_CPU_ID
97# endif
98# define BCMCPU_IS_6362() (bcm63xx_get_cpu_id() == BCM6362_CPU_ID)
99#else
100# define BCMCPU_IS_6362() (0)
101#endif
102
103
89#ifdef CONFIG_BCM63XX_CPU_6368 104#ifdef CONFIG_BCM63XX_CPU_6368
90# ifdef bcm63xx_get_cpu_id 105# ifdef bcm63xx_get_cpu_id
91# undef bcm63xx_get_cpu_id 106# undef bcm63xx_get_cpu_id
@@ -406,6 +421,62 @@ enum bcm63xx_regs_set {
406 421
407 422
408/* 423/*
424 * 6362 register sets base address
425 */
426#define BCM_6362_DSL_LMEM_BASE (0xdeadbeef)
427#define BCM_6362_PERF_BASE (0xb0000000)
428#define BCM_6362_TIMER_BASE (0xb0000040)
429#define BCM_6362_WDT_BASE (0xb000005c)
430#define BCM_6362_UART0_BASE (0xb0000100)
431#define BCM_6362_UART1_BASE (0xb0000120)
432#define BCM_6362_GPIO_BASE (0xb0000080)
433#define BCM_6362_SPI_BASE (0xb0000800)
434#define BCM_6362_HSSPI_BASE (0xb0001000)
435#define BCM_6362_UDC0_BASE (0xdeadbeef)
436#define BCM_6362_USBDMA_BASE (0xb000c000)
437#define BCM_6362_OHCI0_BASE (0xb0002600)
438#define BCM_6362_OHCI_PRIV_BASE (0xdeadbeef)
439#define BCM_6362_USBH_PRIV_BASE (0xb0002700)
440#define BCM_6362_USBD_BASE (0xb0002400)
441#define BCM_6362_MPI_BASE (0xdeadbeef)
442#define BCM_6362_PCMCIA_BASE (0xdeadbeef)
443#define BCM_6362_PCIE_BASE (0xb0e40000)
444#define BCM_6362_SDRAM_REGS_BASE (0xdeadbeef)
445#define BCM_6362_DSL_BASE (0xdeadbeef)
446#define BCM_6362_UBUS_BASE (0xdeadbeef)
447#define BCM_6362_ENET0_BASE (0xdeadbeef)
448#define BCM_6362_ENET1_BASE (0xdeadbeef)
449#define BCM_6362_ENETDMA_BASE (0xb000d800)
450#define BCM_6362_ENETDMAC_BASE (0xb000da00)
451#define BCM_6362_ENETDMAS_BASE (0xb000dc00)
452#define BCM_6362_ENETSW_BASE (0xb0e00000)
453#define BCM_6362_EHCI0_BASE (0xb0002500)
454#define BCM_6362_SDRAM_BASE (0xdeadbeef)
455#define BCM_6362_MEMC_BASE (0xdeadbeef)
456#define BCM_6362_DDR_BASE (0xb0003000)
457#define BCM_6362_M2M_BASE (0xdeadbeef)
458#define BCM_6362_ATM_BASE (0xdeadbeef)
459#define BCM_6362_XTM_BASE (0xb0007800)
460#define BCM_6362_XTMDMA_BASE (0xb000b800)
461#define BCM_6362_XTMDMAC_BASE (0xdeadbeef)
462#define BCM_6362_XTMDMAS_BASE (0xdeadbeef)
463#define BCM_6362_PCM_BASE (0xb000a800)
464#define BCM_6362_PCMDMA_BASE (0xdeadbeef)
465#define BCM_6362_PCMDMAC_BASE (0xdeadbeef)
466#define BCM_6362_PCMDMAS_BASE (0xdeadbeef)
467#define BCM_6362_RNG_BASE (0xdeadbeef)
468#define BCM_6362_MISC_BASE (0xb0001800)
469
470#define BCM_6362_NAND_REG_BASE (0xb0000200)
471#define BCM_6362_NAND_CACHE_BASE (0xb0000600)
472#define BCM_6362_LED_BASE (0xb0001900)
473#define BCM_6362_IPSEC_BASE (0xb0002800)
474#define BCM_6362_IPSEC_DMA_BASE (0xb000d000)
475#define BCM_6362_WLAN_CHIPCOMMON_BASE (0xb0004000)
476#define BCM_6362_WLAN_D11_BASE (0xb0005000)
477#define BCM_6362_WLAN_SHIM_BASE (0xb0007000)
478
479/*
409 * 6368 register sets base address 480 * 6368 register sets base address
410 */ 481 */
411#define BCM_6368_DSL_LMEM_BASE (0xdeadbeef) 482#define BCM_6368_DSL_LMEM_BASE (0xdeadbeef)
@@ -564,6 +635,9 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set)
564#ifdef CONFIG_BCM63XX_CPU_6358 635#ifdef CONFIG_BCM63XX_CPU_6358
565 __GEN_RSET(6358) 636 __GEN_RSET(6358)
566#endif 637#endif
638#ifdef CONFIG_BCM63XX_CPU_6362
639 __GEN_RSET(6362)
640#endif
567#ifdef CONFIG_BCM63XX_CPU_6368 641#ifdef CONFIG_BCM63XX_CPU_6368
568 __GEN_RSET(6368) 642 __GEN_RSET(6368)
569#endif 643#endif
@@ -820,6 +894,71 @@ enum bcm63xx_irq {
820#define BCM_6358_EXT_IRQ3 (IRQ_INTERNAL_BASE + 28) 894#define BCM_6358_EXT_IRQ3 (IRQ_INTERNAL_BASE + 28)
821 895
822/* 896/*
897 * 6362 irqs
898 */
899#define BCM_6362_HIGH_IRQ_BASE (IRQ_INTERNAL_BASE + 32)
900
901#define BCM_6362_TIMER_IRQ (IRQ_INTERNAL_BASE + 0)
902#define BCM_6362_SPI_IRQ (IRQ_INTERNAL_BASE + 2)
903#define BCM_6362_UART0_IRQ (IRQ_INTERNAL_BASE + 3)
904#define BCM_6362_UART1_IRQ (IRQ_INTERNAL_BASE + 4)
905#define BCM_6362_DSL_IRQ (IRQ_INTERNAL_BASE + 28)
906#define BCM_6362_UDC0_IRQ 0
907#define BCM_6362_ENET0_IRQ 0
908#define BCM_6362_ENET1_IRQ 0
909#define BCM_6362_ENET_PHY_IRQ (IRQ_INTERNAL_BASE + 14)
910#define BCM_6362_HSSPI_IRQ (IRQ_INTERNAL_BASE + 5)
911#define BCM_6362_OHCI0_IRQ (IRQ_INTERNAL_BASE + 9)
912#define BCM_6362_EHCI0_IRQ (IRQ_INTERNAL_BASE + 10)
913#define BCM_6362_USBD_IRQ (IRQ_INTERNAL_BASE + 11)
914#define BCM_6362_USBD_RXDMA0_IRQ (IRQ_INTERNAL_BASE + 20)
915#define BCM_6362_USBD_TXDMA0_IRQ (IRQ_INTERNAL_BASE + 21)
916#define BCM_6362_USBD_RXDMA1_IRQ (IRQ_INTERNAL_BASE + 22)
917#define BCM_6362_USBD_TXDMA1_IRQ (IRQ_INTERNAL_BASE + 23)
918#define BCM_6362_USBD_RXDMA2_IRQ (IRQ_INTERNAL_BASE + 24)
919#define BCM_6362_USBD_TXDMA2_IRQ (IRQ_INTERNAL_BASE + 25)
920#define BCM_6362_PCMCIA_IRQ 0
921#define BCM_6362_ENET0_RXDMA_IRQ 0
922#define BCM_6362_ENET0_TXDMA_IRQ 0
923#define BCM_6362_ENET1_RXDMA_IRQ 0
924#define BCM_6362_ENET1_TXDMA_IRQ 0
925#define BCM_6362_PCI_IRQ (IRQ_INTERNAL_BASE + 30)
926#define BCM_6362_ATM_IRQ 0
927#define BCM_6362_ENETSW_RXDMA0_IRQ (BCM_6362_HIGH_IRQ_BASE + 0)
928#define BCM_6362_ENETSW_RXDMA1_IRQ (BCM_6362_HIGH_IRQ_BASE + 1)
929#define BCM_6362_ENETSW_RXDMA2_IRQ (BCM_6362_HIGH_IRQ_BASE + 2)
930#define BCM_6362_ENETSW_RXDMA3_IRQ (BCM_6362_HIGH_IRQ_BASE + 3)
931#define BCM_6362_ENETSW_TXDMA0_IRQ 0
932#define BCM_6362_ENETSW_TXDMA1_IRQ 0
933#define BCM_6362_ENETSW_TXDMA2_IRQ 0
934#define BCM_6362_ENETSW_TXDMA3_IRQ 0
935#define BCM_6362_XTM_IRQ 0
936#define BCM_6362_XTM_DMA0_IRQ (BCM_6362_HIGH_IRQ_BASE + 12)
937
938#define BCM_6362_RING_OSC_IRQ (IRQ_INTERNAL_BASE + 1)
939#define BCM_6362_WLAN_GPIO_IRQ (IRQ_INTERNAL_BASE + 6)
940#define BCM_6362_WLAN_IRQ (IRQ_INTERNAL_BASE + 7)
941#define BCM_6362_IPSEC_IRQ (IRQ_INTERNAL_BASE + 8)
942#define BCM_6362_NAND_IRQ (IRQ_INTERNAL_BASE + 12)
943#define BCM_6362_PCM_IRQ (IRQ_INTERNAL_BASE + 13)
944#define BCM_6362_DG_IRQ (IRQ_INTERNAL_BASE + 15)
945#define BCM_6362_EPHY_ENERGY0_IRQ (IRQ_INTERNAL_BASE + 16)
946#define BCM_6362_EPHY_ENERGY1_IRQ (IRQ_INTERNAL_BASE + 17)
947#define BCM_6362_EPHY_ENERGY2_IRQ (IRQ_INTERNAL_BASE + 18)
948#define BCM_6362_EPHY_ENERGY3_IRQ (IRQ_INTERNAL_BASE + 19)
949#define BCM_6362_IPSEC_DMA0_IRQ (IRQ_INTERNAL_BASE + 26)
950#define BCM_6362_IPSEC_DMA1_IRQ (IRQ_INTERNAL_BASE + 27)
951#define BCM_6362_FAP0_IRQ (IRQ_INTERNAL_BASE + 29)
952#define BCM_6362_PCM_DMA0_IRQ (BCM_6362_HIGH_IRQ_BASE + 4)
953#define BCM_6362_PCM_DMA1_IRQ (BCM_6362_HIGH_IRQ_BASE + 5)
954#define BCM_6362_DECT0_IRQ (BCM_6362_HIGH_IRQ_BASE + 6)
955#define BCM_6362_DECT1_IRQ (BCM_6362_HIGH_IRQ_BASE + 7)
956#define BCM_6362_EXT_IRQ0 (BCM_6362_HIGH_IRQ_BASE + 8)
957#define BCM_6362_EXT_IRQ1 (BCM_6362_HIGH_IRQ_BASE + 9)
958#define BCM_6362_EXT_IRQ2 (BCM_6362_HIGH_IRQ_BASE + 10)
959#define BCM_6362_EXT_IRQ3 (BCM_6362_HIGH_IRQ_BASE + 11)
960
961/*
823 * 6368 irqs 962 * 6368 irqs
824 */ 963 */
825#define BCM_6368_HIGH_IRQ_BASE (IRQ_INTERNAL_BASE + 32) 964#define BCM_6368_HIGH_IRQ_BASE (IRQ_INTERNAL_BASE + 32)
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
index b0184cf02575..c426cabc620a 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
@@ -71,18 +71,13 @@ static inline unsigned long bcm63xx_spireg(enum bcm63xx_regs_spi reg)
71 71
72 return bcm63xx_regs_spi[reg]; 72 return bcm63xx_regs_spi[reg];
73#else 73#else
74#ifdef CONFIG_BCM63XX_CPU_6338 74#if defined(CONFIG_BCM63XX_CPU_6338) || defined(CONFIG_BCM63XX_CPU_6348)
75 __GEN_SPI_RSET(6338)
76#endif
77#ifdef CONFIG_BCM63XX_CPU_6348
78 __GEN_SPI_RSET(6348) 75 __GEN_SPI_RSET(6348)
79#endif 76#endif
80#ifdef CONFIG_BCM63XX_CPU_6358 77#if defined(CONFIG_BCM63XX_CPU_6358) || defined(CONFIG_BCM63XX_CPU_6362) || \
78 defined(CONFIG_BCM63XX_CPU_6368)
81 __GEN_SPI_RSET(6358) 79 __GEN_SPI_RSET(6358)
82#endif 80#endif
83#ifdef CONFIG_BCM63XX_CPU_6368
84 __GEN_SPI_RSET(6368)
85#endif
86#endif 81#endif
87 return 0; 82 return 0;
88} 83}
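The consolidation above works because the 6338 and 6348 SPI blocks share one register layout, as do the 6358, 6362 and 6368 blocks. A hedged plain-C sketch of that two-layout dispatch follows; it is illustrative only and is not the kernel's actual __GEN_SPI_RSET() expansion, which is selected by the CONFIG_BCM63XX_CPU_* options rather than a runtime CPU id.

/* Illustrative sketch: the two SPI register layouts that survive this patch. */
enum bcm63xx_spi_layout {
	SPI_LAYOUT_6348,	/* shared by 6338 and 6348 */
	SPI_LAYOUT_6358,	/* shared by 6358, 6362 and 6368 */
};

static enum bcm63xx_spi_layout bcm63xx_spi_layout(int cpu)
{
	switch (cpu) {
	case 6338:
	case 6348:
		return SPI_LAYOUT_6348;
	default:		/* 6358, 6362, 6368 */
		return SPI_LAYOUT_6358;
	}
}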
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
index 0a9891f7580d..35baa1a60a64 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
@@ -17,6 +17,8 @@ static inline unsigned long bcm63xx_gpio_count(void)
17 return 8; 17 return 8;
18 case BCM6345_CPU_ID: 18 case BCM6345_CPU_ID:
19 return 16; 19 return 16;
20 case BCM6362_CPU_ID:
21 return 48;
20 case BCM6368_CPU_ID: 22 case BCM6368_CPU_ID:
21 return 38; 23 return 38;
22 case BCM6348_CPU_ID: 24 case BCM6348_CPU_ID:
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
index 81b4702f792a..3203fe49b34d 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
@@ -10,7 +10,7 @@
10#define REV_CHIPID_SHIFT 16 10#define REV_CHIPID_SHIFT 16
11#define REV_CHIPID_MASK (0xffff << REV_CHIPID_SHIFT) 11#define REV_CHIPID_MASK (0xffff << REV_CHIPID_SHIFT)
12#define REV_REVID_SHIFT 0 12#define REV_REVID_SHIFT 0
13#define REV_REVID_MASK (0xffff << REV_REVID_SHIFT) 13#define REV_REVID_MASK (0xff << REV_REVID_SHIFT)
14 14
15/* Clock Control register */ 15/* Clock Control register */
16#define PERF_CKCTL_REG 0x4 16#define PERF_CKCTL_REG 0x4
@@ -112,6 +112,39 @@
112 CKCTL_6358_USBSU_EN | \ 112 CKCTL_6358_USBSU_EN | \
113 CKCTL_6358_EPHY_EN) 113 CKCTL_6358_EPHY_EN)
114 114
115#define CKCTL_6362_ADSL_QPROC_EN (1 << 1)
116#define CKCTL_6362_ADSL_AFE_EN (1 << 2)
117#define CKCTL_6362_ADSL_EN (1 << 3)
118#define CKCTL_6362_MIPS_EN (1 << 4)
119#define CKCTL_6362_WLAN_OCP_EN (1 << 5)
120#define CKCTL_6362_SWPKT_USB_EN (1 << 7)
121#define CKCTL_6362_SWPKT_SAR_EN (1 << 8)
122#define CKCTL_6362_SAR_EN (1 << 9)
123#define CKCTL_6362_ROBOSW_EN (1 << 10)
124#define CKCTL_6362_PCM_EN (1 << 11)
125#define CKCTL_6362_USBD_EN (1 << 12)
126#define CKCTL_6362_USBH_EN (1 << 13)
127#define CKCTL_6362_IPSEC_EN (1 << 14)
128#define CKCTL_6362_SPI_EN (1 << 15)
129#define CKCTL_6362_HSSPI_EN (1 << 16)
130#define CKCTL_6362_PCIE_EN (1 << 17)
131#define CKCTL_6362_FAP_EN (1 << 18)
132#define CKCTL_6362_PHYMIPS_EN (1 << 19)
133#define CKCTL_6362_NAND_EN (1 << 20)
134
135#define CKCTL_6362_ALL_SAFE_EN (CKCTL_6362_PHYMIPS_EN | \
136 CKCTL_6362_ADSL_QPROC_EN | \
137 CKCTL_6362_ADSL_AFE_EN | \
138 CKCTL_6362_ADSL_EN | \
139 CKCTL_6362_SAR_EN | \
140 CKCTL_6362_PCM_EN | \
141 CKCTL_6362_IPSEC_EN | \
142 CKCTL_6362_USBD_EN | \
143 CKCTL_6362_USBH_EN | \
144 CKCTL_6362_ROBOSW_EN | \
145 CKCTL_6362_PCIE_EN)
146
147
115#define CKCTL_6368_VDSL_QPROC_EN (1 << 2) 148#define CKCTL_6368_VDSL_QPROC_EN (1 << 2)
116#define CKCTL_6368_VDSL_AFE_EN (1 << 3) 149#define CKCTL_6368_VDSL_AFE_EN (1 << 3)
117#define CKCTL_6368_VDSL_BONDING_EN (1 << 4) 150#define CKCTL_6368_VDSL_BONDING_EN (1 << 4)
@@ -153,6 +186,7 @@
153#define PERF_IRQMASK_6345_REG 0xc 186#define PERF_IRQMASK_6345_REG 0xc
154#define PERF_IRQMASK_6348_REG 0xc 187#define PERF_IRQMASK_6348_REG 0xc
155#define PERF_IRQMASK_6358_REG 0xc 188#define PERF_IRQMASK_6358_REG 0xc
189#define PERF_IRQMASK_6362_REG 0x20
156#define PERF_IRQMASK_6368_REG 0x20 190#define PERF_IRQMASK_6368_REG 0x20
157 191
158/* Interrupt Status register */ 192/* Interrupt Status register */
@@ -161,6 +195,7 @@
161#define PERF_IRQSTAT_6345_REG 0x10 195#define PERF_IRQSTAT_6345_REG 0x10
162#define PERF_IRQSTAT_6348_REG 0x10 196#define PERF_IRQSTAT_6348_REG 0x10
163#define PERF_IRQSTAT_6358_REG 0x10 197#define PERF_IRQSTAT_6358_REG 0x10
198#define PERF_IRQSTAT_6362_REG 0x28
164#define PERF_IRQSTAT_6368_REG 0x28 199#define PERF_IRQSTAT_6368_REG 0x28
165 200
166/* External Interrupt Configuration register */ 201/* External Interrupt Configuration register */
@@ -169,6 +204,7 @@
169#define PERF_EXTIRQ_CFG_REG_6345 0x14 204#define PERF_EXTIRQ_CFG_REG_6345 0x14
170#define PERF_EXTIRQ_CFG_REG_6348 0x14 205#define PERF_EXTIRQ_CFG_REG_6348 0x14
171#define PERF_EXTIRQ_CFG_REG_6358 0x14 206#define PERF_EXTIRQ_CFG_REG_6358 0x14
207#define PERF_EXTIRQ_CFG_REG_6362 0x18
172#define PERF_EXTIRQ_CFG_REG_6368 0x18 208#define PERF_EXTIRQ_CFG_REG_6368 0x18
173 209
174#define PERF_EXTIRQ_CFG_REG2_6368 0x1c 210#define PERF_EXTIRQ_CFG_REG2_6368 0x1c
@@ -197,6 +233,7 @@
197#define PERF_SOFTRESET_REG 0x28 233#define PERF_SOFTRESET_REG 0x28
198#define PERF_SOFTRESET_6328_REG 0x10 234#define PERF_SOFTRESET_6328_REG 0x10
199#define PERF_SOFTRESET_6358_REG 0x34 235#define PERF_SOFTRESET_6358_REG 0x34
236#define PERF_SOFTRESET_6362_REG 0x10
200#define PERF_SOFTRESET_6368_REG 0x10 237#define PERF_SOFTRESET_6368_REG 0x10
201 238
202#define SOFTRESET_6328_SPI_MASK (1 << 0) 239#define SOFTRESET_6328_SPI_MASK (1 << 0)
@@ -259,6 +296,22 @@
259#define SOFTRESET_6358_PCM_MASK (1 << 13) 296#define SOFTRESET_6358_PCM_MASK (1 << 13)
260#define SOFTRESET_6358_ADSL_MASK (1 << 14) 297#define SOFTRESET_6358_ADSL_MASK (1 << 14)
261 298
299#define SOFTRESET_6362_SPI_MASK (1 << 0)
300#define SOFTRESET_6362_IPSEC_MASK (1 << 1)
301#define SOFTRESET_6362_EPHY_MASK (1 << 2)
302#define SOFTRESET_6362_SAR_MASK (1 << 3)
303#define SOFTRESET_6362_ENETSW_MASK (1 << 4)
304#define SOFTRESET_6362_USBS_MASK (1 << 5)
305#define SOFTRESET_6362_USBH_MASK (1 << 6)
306#define SOFTRESET_6362_PCM_MASK (1 << 7)
307#define SOFTRESET_6362_PCIE_CORE_MASK (1 << 8)
308#define SOFTRESET_6362_PCIE_MASK (1 << 9)
309#define SOFTRESET_6362_PCIE_EXT_MASK (1 << 10)
310#define SOFTRESET_6362_WLAN_SHIM_MASK (1 << 11)
311#define SOFTRESET_6362_DDR_PHY_MASK (1 << 12)
312#define SOFTRESET_6362_FAP_MASK (1 << 13)
313#define SOFTRESET_6362_WLAN_UBUS_MASK (1 << 14)
314
262#define SOFTRESET_6368_SPI_MASK (1 << 0) 315#define SOFTRESET_6368_SPI_MASK (1 << 0)
263#define SOFTRESET_6368_MPI_MASK (1 << 3) 316#define SOFTRESET_6368_MPI_MASK (1 << 3)
264#define SOFTRESET_6368_EPHY_MASK (1 << 6) 317#define SOFTRESET_6368_EPHY_MASK (1 << 6)
@@ -1223,24 +1276,7 @@
1223 * _REG relative to RSET_SPI 1276 * _REG relative to RSET_SPI
1224 *************************************************************************/ 1277 *************************************************************************/
1225 1278
1226/* BCM 6338 SPI core */ 1279/* BCM 6338/6348 SPI core */
1227#define SPI_6338_CMD 0x00 /* 16-bits register */
1228#define SPI_6338_INT_STATUS 0x02
1229#define SPI_6338_INT_MASK_ST 0x03
1230#define SPI_6338_INT_MASK 0x04
1231#define SPI_6338_ST 0x05
1232#define SPI_6338_CLK_CFG 0x06
1233#define SPI_6338_FILL_BYTE 0x07
1234#define SPI_6338_MSG_TAIL 0x09
1235#define SPI_6338_RX_TAIL 0x0b
1236#define SPI_6338_MSG_CTL 0x40 /* 8-bits register */
1237#define SPI_6338_MSG_CTL_WIDTH 8
1238#define SPI_6338_MSG_DATA 0x41
1239#define SPI_6338_MSG_DATA_SIZE 0x3f
1240#define SPI_6338_RX_DATA 0x80
1241#define SPI_6338_RX_DATA_SIZE 0x3f
1242
1243/* BCM 6348 SPI core */
1244#define SPI_6348_CMD 0x00 /* 16-bits register */ 1280#define SPI_6348_CMD 0x00 /* 16-bits register */
1245#define SPI_6348_INT_STATUS 0x02 1281#define SPI_6348_INT_STATUS 0x02
1246#define SPI_6348_INT_MASK_ST 0x03 1282#define SPI_6348_INT_MASK_ST 0x03
@@ -1257,7 +1293,7 @@
1257#define SPI_6348_RX_DATA 0x80 1293#define SPI_6348_RX_DATA 0x80
1258#define SPI_6348_RX_DATA_SIZE 0x3f 1294#define SPI_6348_RX_DATA_SIZE 0x3f
1259 1295
1260/* BCM 6358 SPI core */ 1296/* BCM 6358/6362/6368 SPI core */
1261#define SPI_6358_MSG_CTL 0x00 /* 16-bits register */ 1297#define SPI_6358_MSG_CTL 0x00 /* 16-bits register */
1262#define SPI_6358_MSG_CTL_WIDTH 16 1298#define SPI_6358_MSG_CTL_WIDTH 16
1263#define SPI_6358_MSG_DATA 0x02 1299#define SPI_6358_MSG_DATA 0x02
@@ -1274,23 +1310,6 @@
1274#define SPI_6358_MSG_TAIL 0x709 1310#define SPI_6358_MSG_TAIL 0x709
1275#define SPI_6358_RX_TAIL 0x70B 1311#define SPI_6358_RX_TAIL 0x70B
1276 1312
1277/* BCM 6358 SPI core */
1278#define SPI_6368_MSG_CTL 0x00 /* 16-bits register */
1279#define SPI_6368_MSG_CTL_WIDTH 16
1280#define SPI_6368_MSG_DATA 0x02
1281#define SPI_6368_MSG_DATA_SIZE 0x21e
1282#define SPI_6368_RX_DATA 0x400
1283#define SPI_6368_RX_DATA_SIZE 0x220
1284#define SPI_6368_CMD 0x700 /* 16-bits register */
1285#define SPI_6368_INT_STATUS 0x702
1286#define SPI_6368_INT_MASK_ST 0x703
1287#define SPI_6368_INT_MASK 0x704
1288#define SPI_6368_ST 0x705
1289#define SPI_6368_CLK_CFG 0x706
1290#define SPI_6368_FILL_BYTE 0x707
1291#define SPI_6368_MSG_TAIL 0x709
1292#define SPI_6368_RX_TAIL 0x70B
1293
1294/* Shared SPI definitions */ 1313/* Shared SPI definitions */
1295 1314
1296/* Message configuration */ 1315/* Message configuration */
@@ -1298,10 +1317,8 @@
1298#define SPI_HD_W 0x01 1317#define SPI_HD_W 0x01
1299#define SPI_HD_R 0x02 1318#define SPI_HD_R 0x02
1300#define SPI_BYTE_CNT_SHIFT 0 1319#define SPI_BYTE_CNT_SHIFT 0
1301#define SPI_6338_MSG_TYPE_SHIFT 6
1302#define SPI_6348_MSG_TYPE_SHIFT 6 1320#define SPI_6348_MSG_TYPE_SHIFT 6
1303#define SPI_6358_MSG_TYPE_SHIFT 14 1321#define SPI_6358_MSG_TYPE_SHIFT 14
1304#define SPI_6368_MSG_TYPE_SHIFT 14
1305 1322
1306/* Command */ 1323/* Command */
1307#define SPI_CMD_NOOP 0x00 1324#define SPI_CMD_NOOP 0x00
@@ -1348,10 +1365,18 @@
1348/************************************************************************* 1365/*************************************************************************
1349 * _REG relative to RSET_MISC 1366 * _REG relative to RSET_MISC
1350 *************************************************************************/ 1367 *************************************************************************/
1351#define MISC_SERDES_CTRL_REG 0x0 1368#define MISC_SERDES_CTRL_6328_REG 0x0
1369#define MISC_SERDES_CTRL_6362_REG 0x4
1352#define SERDES_PCIE_EN (1 << 0) 1370#define SERDES_PCIE_EN (1 << 0)
1353#define SERDES_PCIE_EXD_EN (1 << 15) 1371#define SERDES_PCIE_EXD_EN (1 << 15)
1354 1372
1373#define MISC_STRAPBUS_6362_REG 0x14
1374#define STRAPBUS_6362_FCVO_SHIFT 1
1375#define STRAPBUS_6362_HSSPI_CLK_FAST (1 << 13)
1376#define STRAPBUS_6362_FCVO_MASK (0x1f << STRAPBUS_6362_FCVO_SHIFT)
1377#define STRAPBUS_6362_BOOT_SEL_SERIAL (1 << 15)
1378#define STRAPBUS_6362_BOOT_SEL_NAND (0 << 15)
1379
1355#define MISC_STRAPBUS_6328_REG 0x240 1380#define MISC_STRAPBUS_6328_REG 0x240
1356#define STRAPBUS_6328_FCVO_SHIFT 7 1381#define STRAPBUS_6328_FCVO_SHIFT 7
1357#define STRAPBUS_6328_FCVO_MASK (0x1f << STRAPBUS_6328_FCVO_SHIFT) 1382#define STRAPBUS_6328_FCVO_MASK (0x1f << STRAPBUS_6328_FCVO_SHIFT)
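The new STRAPBUS_6362 bits are plain shift/mask pairs. A minimal decoding sketch is shown below; it assumes the raw MISC_STRAPBUS_6362_REG word has already been read from the MISC block, and the masks are repeated from the hunk above so the snippet stands alone.

#include <stdbool.h>
#include <stdint.h>

#define STRAPBUS_6362_FCVO_SHIFT	1
#define STRAPBUS_6362_FCVO_MASK		(0x1f << STRAPBUS_6362_FCVO_SHIFT)
#define STRAPBUS_6362_BOOT_SEL_SERIAL	(1 << 15)

/* 5-bit frequency control value used to select the clock profile */
static unsigned int strapbus_6362_fcvo(uint32_t val)
{
	return (val & STRAPBUS_6362_FCVO_MASK) >> STRAPBUS_6362_FCVO_SHIFT;
}

/* bit 15 set selects serial boot, clear selects NAND boot */
static bool strapbus_6362_boot_serial(uint32_t val)
{
	return (val & STRAPBUS_6362_BOOT_SEL_SERIAL) != 0;
}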
diff --git a/arch/mips/include/asm/mach-bcm63xx/ioremap.h b/arch/mips/include/asm/mach-bcm63xx/ioremap.h
index 30931c42379d..94e3011ba7df 100644
--- a/arch/mips/include/asm/mach-bcm63xx/ioremap.h
+++ b/arch/mips/include/asm/mach-bcm63xx/ioremap.h
@@ -19,6 +19,7 @@ static inline int is_bcm63xx_internal_registers(phys_t offset)
19 return 1; 19 return 1;
20 break; 20 break;
21 case BCM6328_CPU_ID: 21 case BCM6328_CPU_ID:
22 case BCM6362_CPU_ID:
22 case BCM6368_CPU_ID: 23 case BCM6368_CPU_ID:
23 if (offset >= 0xb0000000 && offset < 0xb1000000) 24 if (offset >= 0xb0000000 && offset < 0xb1000000)
24 return 1; 25 return 1;
diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h
index 9c95177f7a7e..fe23034aaf72 100644
--- a/arch/mips/include/asm/mach-generic/dma-coherence.h
+++ b/arch/mips/include/asm/mach-generic/dma-coherence.h
@@ -61,9 +61,8 @@ static inline int plat_device_is_coherent(struct device *dev)
61{ 61{
62#ifdef CONFIG_DMA_COHERENT 62#ifdef CONFIG_DMA_COHERENT
63 return 1; 63 return 1;
64#endif 64#else
65#ifdef CONFIG_DMA_NONCOHERENT 65 return coherentio;
66 return 0;
67#endif 66#endif
68} 67}
69 68
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
index 73d717a75cb0..5b2f2e68e57f 100644
--- a/arch/mips/include/asm/mach-generic/spaces.h
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -20,14 +20,21 @@
20#endif 20#endif
21 21
22#ifdef CONFIG_32BIT 22#ifdef CONFIG_32BIT
23 23#ifdef CONFIG_KVM_GUEST
24#define CAC_BASE _AC(0x40000000, UL)
25#else
24#define CAC_BASE _AC(0x80000000, UL) 26#define CAC_BASE _AC(0x80000000, UL)
27#endif
25#define IO_BASE _AC(0xa0000000, UL) 28#define IO_BASE _AC(0xa0000000, UL)
26#define UNCAC_BASE _AC(0xa0000000, UL) 29#define UNCAC_BASE _AC(0xa0000000, UL)
27 30
28#ifndef MAP_BASE 31#ifndef MAP_BASE
32#ifdef CONFIG_KVM_GUEST
33#define MAP_BASE _AC(0x60000000, UL)
34#else
29#define MAP_BASE _AC(0xc0000000, UL) 35#define MAP_BASE _AC(0xc0000000, UL)
30#endif 36#endif
37#endif
31 38
32/* 39/*
33 * Memory above this physical address will be considered highmem. 40 * Memory above this physical address will be considered highmem.
diff --git a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
index 75fd8c0f986e..c0f3ef45c2c1 100644
--- a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
@@ -57,5 +57,6 @@
57#define cpu_has_vint 0 57#define cpu_has_vint 0
58#define cpu_has_vtag_icache 0 58#define cpu_has_vtag_icache 0
59#define cpu_has_watch 1 59#define cpu_has_watch 1
60#define cpu_has_local_ebase 0
60 61
61#endif /* __ASM_MACH_LOONGSON_CPU_FEATURE_OVERRIDES_H */ 62#endif /* __ASM_MACH_LOONGSON_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ralink/mt7620.h b/arch/mips/include/asm/mach-ralink/mt7620.h
new file mode 100644
index 000000000000..9809972ea882
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/mt7620.h
@@ -0,0 +1,84 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Parts of this file are based on Ralink's 2.6.21 BSP
7 *
8 * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
9 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
10 * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
11 */
12
13#ifndef _MT7620_REGS_H_
14#define _MT7620_REGS_H_
15
16#define MT7620_SYSC_BASE 0x10000000
17
18#define SYSC_REG_CHIP_NAME0 0x00
19#define SYSC_REG_CHIP_NAME1 0x04
20#define SYSC_REG_CHIP_REV 0x0c
21#define SYSC_REG_SYSTEM_CONFIG0 0x10
22#define SYSC_REG_SYSTEM_CONFIG1 0x14
23#define SYSC_REG_CPLL_CONFIG0 0x54
24#define SYSC_REG_CPLL_CONFIG1 0x58
25
26#define MT7620N_CHIP_NAME0 0x33365452
27#define MT7620N_CHIP_NAME1 0x20203235
28
29#define MT7620A_CHIP_NAME0 0x3637544d
30#define MT7620A_CHIP_NAME1 0x20203032
31
32#define CHIP_REV_PKG_MASK 0x1
33#define CHIP_REV_PKG_SHIFT 16
34#define CHIP_REV_VER_MASK 0xf
35#define CHIP_REV_VER_SHIFT 8
36#define CHIP_REV_ECO_MASK 0xf
37
38#define CPLL_SW_CONFIG_SHIFT 31
39#define CPLL_SW_CONFIG_MASK 0x1
40#define CPLL_CPU_CLK_SHIFT 24
41#define CPLL_CPU_CLK_MASK 0x1
42#define CPLL_MULT_RATIO_SHIFT 16
43#define CPLL_MULT_RATIO 0x7
44#define CPLL_DIV_RATIO_SHIFT 10
45#define CPLL_DIV_RATIO 0x3
46
47#define SYSCFG0_DRAM_TYPE_MASK 0x3
48#define SYSCFG0_DRAM_TYPE_SHIFT 4
49#define SYSCFG0_DRAM_TYPE_SDRAM 0
50#define SYSCFG0_DRAM_TYPE_DDR1 1
51#define SYSCFG0_DRAM_TYPE_DDR2 2
52
53#define MT7620_DRAM_BASE 0x0
54#define MT7620_SDRAM_SIZE_MIN 2
55#define MT7620_SDRAM_SIZE_MAX 64
56#define MT7620_DDR1_SIZE_MIN 32
57#define MT7620_DDR1_SIZE_MAX 128
58#define MT7620_DDR2_SIZE_MIN 32
59#define MT7620_DDR2_SIZE_MAX 256
60
61#define MT7620_GPIO_MODE_I2C BIT(0)
62#define MT7620_GPIO_MODE_UART0_SHIFT 2
63#define MT7620_GPIO_MODE_UART0_MASK 0x7
64#define MT7620_GPIO_MODE_UART0(x) ((x) << MT7620_GPIO_MODE_UART0_SHIFT)
65#define MT7620_GPIO_MODE_UARTF 0x0
66#define MT7620_GPIO_MODE_PCM_UARTF 0x1
67#define MT7620_GPIO_MODE_PCM_I2S 0x2
68#define MT7620_GPIO_MODE_I2S_UARTF 0x3
69#define MT7620_GPIO_MODE_PCM_GPIO 0x4
70#define MT7620_GPIO_MODE_GPIO_UARTF 0x5
71#define MT7620_GPIO_MODE_GPIO_I2S 0x6
72#define MT7620_GPIO_MODE_GPIO 0x7
73#define MT7620_GPIO_MODE_UART1 BIT(5)
74#define MT7620_GPIO_MODE_MDIO BIT(8)
75#define MT7620_GPIO_MODE_RGMII1 BIT(9)
76#define MT7620_GPIO_MODE_RGMII2 BIT(10)
77#define MT7620_GPIO_MODE_SPI BIT(11)
78#define MT7620_GPIO_MODE_SPI_REF_CLK BIT(12)
79#define MT7620_GPIO_MODE_WLED BIT(13)
80#define MT7620_GPIO_MODE_JTAG BIT(15)
81#define MT7620_GPIO_MODE_EPHY BIT(15)
82#define MT7620_GPIO_MODE_WDT BIT(22)
83
84#endif
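The CHIP_NAME registers hold the SoC name as ASCII, so identifying the part is two 32-bit compares. A hedged sketch, assuming the two register words were already read elsewhere (e.g. from an ioremap of MT7620_SYSC_BASE); the constants are repeated from the header above:

#include <stdint.h>

#define MT7620A_CHIP_NAME0	0x3637544d	/* "MT76" in little-endian ASCII */
#define MT7620A_CHIP_NAME1	0x20203032	/* "20  " */

static int is_mt7620a(uint32_t name0, uint32_t name1)
{
	return name0 == MT7620A_CHIP_NAME0 && name1 == MT7620A_CHIP_NAME1;
}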
diff --git a/arch/mips/include/asm/mach-ralink/rt288x.h b/arch/mips/include/asm/mach-ralink/rt288x.h
new file mode 100644
index 000000000000..03ad716acb42
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt288x.h
@@ -0,0 +1,53 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Parts of this file are based on Ralink's 2.6.21 BSP
7 *
8 * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
9 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
10 * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
11 */
12
13#ifndef _RT288X_REGS_H_
14#define _RT288X_REGS_H_
15
16#define RT2880_SYSC_BASE 0x00300000
17
18#define SYSC_REG_CHIP_NAME0 0x00
19#define SYSC_REG_CHIP_NAME1 0x04
20#define SYSC_REG_CHIP_ID 0x0c
21#define SYSC_REG_SYSTEM_CONFIG 0x10
22#define SYSC_REG_CLKCFG 0x30
23
24#define RT2880_CHIP_NAME0 0x38325452
25#define RT2880_CHIP_NAME1 0x20203038
26
27#define CHIP_ID_ID_MASK 0xff
28#define CHIP_ID_ID_SHIFT 8
29#define CHIP_ID_REV_MASK 0xff
30
31#define SYSTEM_CONFIG_CPUCLK_SHIFT 20
32#define SYSTEM_CONFIG_CPUCLK_MASK 0x3
33#define SYSTEM_CONFIG_CPUCLK_250 0x0
34#define SYSTEM_CONFIG_CPUCLK_266 0x1
35#define SYSTEM_CONFIG_CPUCLK_280 0x2
36#define SYSTEM_CONFIG_CPUCLK_300 0x3
37
38#define RT2880_GPIO_MODE_I2C BIT(0)
39#define RT2880_GPIO_MODE_UART0 BIT(1)
40#define RT2880_GPIO_MODE_SPI BIT(2)
41#define RT2880_GPIO_MODE_UART1 BIT(3)
42#define RT2880_GPIO_MODE_JTAG BIT(4)
43#define RT2880_GPIO_MODE_MDIO BIT(5)
44#define RT2880_GPIO_MODE_SDRAM BIT(6)
45#define RT2880_GPIO_MODE_PCI BIT(7)
46
47#define CLKCFG_SRAM_CS_N_WDT BIT(9)
48
49#define RT2880_SDRAM_BASE 0x08000000
50#define RT2880_MEM_SIZE_MIN 2
51#define RT2880_MEM_SIZE_MAX 128
52
53#endif
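SYSC_REG_SYSTEM_CONFIG encodes the RT2880 CPU clock as a 2-bit field. A small sketch of turning that field into a rate in MHz; the shift/mask values are repeated from the header above and the mapping follows the SYSTEM_CONFIG_CPUCLK_* encodings:

#include <stdint.h>

#define SYSTEM_CONFIG_CPUCLK_SHIFT	20
#define SYSTEM_CONFIG_CPUCLK_MASK	0x3

static unsigned int rt2880_cpu_clk_mhz(uint32_t syscfg)
{
	/* encodings 0..3 map to 250/266/280/300 MHz */
	static const unsigned int mhz[] = { 250, 266, 280, 300 };

	return mhz[(syscfg >> SYSTEM_CONFIG_CPUCLK_SHIFT) &
		   SYSTEM_CONFIG_CPUCLK_MASK];
}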
diff --git a/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h
new file mode 100644
index 000000000000..72fc10669199
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h
@@ -0,0 +1,56 @@
1/*
2 * Ralink RT288x specific CPU feature overrides
3 *
4 * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
5 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
6 *
7 * This file was derived from: include/asm-mips/cpu-features.h
8 * Copyright (C) 2003, 2004 Ralf Baechle
9 * Copyright (C) 2004 Maciej W. Rozycki
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 as published
13 * by the Free Software Foundation.
14 *
15 */
16#ifndef _RT288X_CPU_FEATURE_OVERRIDES_H
17#define _RT288X_CPU_FEATURE_OVERRIDES_H
18
19#define cpu_has_tlb 1
20#define cpu_has_4kex 1
21#define cpu_has_3k_cache 0
22#define cpu_has_4k_cache 1
23#define cpu_has_tx39_cache 0
24#define cpu_has_sb1_cache 0
25#define cpu_has_fpu 0
26#define cpu_has_32fpr 0
27#define cpu_has_counter 1
28#define cpu_has_watch 1
29#define cpu_has_divec 1
30
31#define cpu_has_prefetch 1
32#define cpu_has_ejtag 1
33#define cpu_has_llsc 1
34
35#define cpu_has_mips16 1
36#define cpu_has_mdmx 0
37#define cpu_has_mips3d 0
38#define cpu_has_smartmips 0
39
40#define cpu_has_mips32r1 1
41#define cpu_has_mips32r2 1
42#define cpu_has_mips64r1 0
43#define cpu_has_mips64r2 0
44
45#define cpu_has_dsp 0
46#define cpu_has_mipsmt 0
47
48#define cpu_has_64bits 0
49#define cpu_has_64bit_zero_reg 0
50#define cpu_has_64bit_gp_regs 0
51#define cpu_has_64bit_addresses 0
52
53#define cpu_dcache_line_size() 16
54#define cpu_icache_line_size() 16
55
56#endif /* _RT288X_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ralink/rt305x.h b/arch/mips/include/asm/mach-ralink/rt305x.h
index 7d344f2d7d0a..069bf37a6010 100644
--- a/arch/mips/include/asm/mach-ralink/rt305x.h
+++ b/arch/mips/include/asm/mach-ralink/rt305x.h
@@ -97,6 +97,14 @@ static inline int soc_is_rt5350(void)
97#define RT5350_SYSCFG0_CPUCLK_320 0x2 97#define RT5350_SYSCFG0_CPUCLK_320 0x2
98#define RT5350_SYSCFG0_CPUCLK_300 0x3 98#define RT5350_SYSCFG0_CPUCLK_300 0x3
99 99
100#define RT5350_SYSCFG0_DRAM_SIZE_SHIFT 12
101#define RT5350_SYSCFG0_DRAM_SIZE_MASK 7
102#define RT5350_SYSCFG0_DRAM_SIZE_2M 0
103#define RT5350_SYSCFG0_DRAM_SIZE_8M 1
104#define RT5350_SYSCFG0_DRAM_SIZE_16M 2
105#define RT5350_SYSCFG0_DRAM_SIZE_32M 3
106#define RT5350_SYSCFG0_DRAM_SIZE_64M 4
107
100/* multi function gpio pins */ 108/* multi function gpio pins */
101#define RT305X_GPIO_I2C_SD 1 109#define RT305X_GPIO_I2C_SD 1
102#define RT305X_GPIO_I2C_SCLK 2 110#define RT305X_GPIO_I2C_SCLK 2
@@ -136,4 +144,23 @@ static inline int soc_is_rt5350(void)
136#define RT305X_GPIO_MODE_SDRAM BIT(8) 144#define RT305X_GPIO_MODE_SDRAM BIT(8)
137#define RT305X_GPIO_MODE_RGMII BIT(9) 145#define RT305X_GPIO_MODE_RGMII BIT(9)
138 146
147#define RT3352_SYSC_REG_SYSCFG0 0x010
148#define RT3352_SYSC_REG_SYSCFG1 0x014
149#define RT3352_SYSC_REG_CLKCFG1 0x030
150#define RT3352_SYSC_REG_RSTCTRL 0x034
151#define RT3352_SYSC_REG_USB_PS 0x05c
152
153#define RT3352_CLKCFG0_XTAL_SEL BIT(20)
154#define RT3352_CLKCFG1_UPHY0_CLK_EN BIT(18)
155#define RT3352_CLKCFG1_UPHY1_CLK_EN BIT(20)
156#define RT3352_RSTCTRL_UHST BIT(22)
157#define RT3352_RSTCTRL_UDEV BIT(25)
158#define RT3352_SYSCFG1_USB0_HOST_MODE BIT(10)
159
160#define RT305X_SDRAM_BASE 0x00000000
161#define RT305X_MEM_SIZE_MIN 2
162#define RT305X_MEM_SIZE_MAX 64
163#define RT3352_MEM_SIZE_MIN 2
164#define RT3352_MEM_SIZE_MAX 256
165
139#endif 166#endif
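The new RT5350 DRAM size field follows the same shift/mask pattern. A sketch of translating it to megabytes, assuming the RT5350_SYSCFG0_DRAM_SIZE_* constants from the hunk above are in scope (e.g. via this header) and a uint32_t type is available:

static unsigned int rt5350_dram_size_mb(uint32_t syscfg0)
{
	switch ((syscfg0 >> RT5350_SYSCFG0_DRAM_SIZE_SHIFT) &
		RT5350_SYSCFG0_DRAM_SIZE_MASK) {
	case RT5350_SYSCFG0_DRAM_SIZE_2M:	return 2;
	case RT5350_SYSCFG0_DRAM_SIZE_8M:	return 8;
	case RT5350_SYSCFG0_DRAM_SIZE_16M:	return 16;
	case RT5350_SYSCFG0_DRAM_SIZE_32M:	return 32;
	case RT5350_SYSCFG0_DRAM_SIZE_64M:	return 64;
	default:				return 0;
	}
}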
diff --git a/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h
new file mode 100644
index 000000000000..917c28654552
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h
@@ -0,0 +1,56 @@
1/*
2 * Ralink RT305x specific CPU feature overrides
3 *
4 * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
5 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
6 *
7 * This file was derived from: include/asm-mips/cpu-features.h
8 * Copyright (C) 2003, 2004 Ralf Baechle
9 * Copyright (C) 2004 Maciej W. Rozycki
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 as published
13 * by the Free Software Foundation.
14 *
15 */
16#ifndef _RT305X_CPU_FEATURE_OVERRIDES_H
17#define _RT305X_CPU_FEATURE_OVERRIDES_H
18
19#define cpu_has_tlb 1
20#define cpu_has_4kex 1
21#define cpu_has_3k_cache 0
22#define cpu_has_4k_cache 1
23#define cpu_has_tx39_cache 0
24#define cpu_has_sb1_cache 0
25#define cpu_has_fpu 0
26#define cpu_has_32fpr 0
27#define cpu_has_counter 1
28#define cpu_has_watch 1
29#define cpu_has_divec 1
30
31#define cpu_has_prefetch 1
32#define cpu_has_ejtag 1
33#define cpu_has_llsc 1
34
35#define cpu_has_mips16 1
36#define cpu_has_mdmx 0
37#define cpu_has_mips3d 0
38#define cpu_has_smartmips 0
39
40#define cpu_has_mips32r1 1
41#define cpu_has_mips32r2 1
42#define cpu_has_mips64r1 0
43#define cpu_has_mips64r2 0
44
45#define cpu_has_dsp 1
46#define cpu_has_mipsmt 0
47
48#define cpu_has_64bits 0
49#define cpu_has_64bit_zero_reg 0
50#define cpu_has_64bit_gp_regs 0
51#define cpu_has_64bit_addresses 0
52
53#define cpu_dcache_line_size() 32
54#define cpu_icache_line_size() 32
55
56#endif /* _RT305X_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ralink/rt3883.h b/arch/mips/include/asm/mach-ralink/rt3883.h
new file mode 100644
index 000000000000..058382f37f92
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt3883.h
@@ -0,0 +1,252 @@
1/*
2 * Ralink RT3662/RT3883 SoC register definitions
3 *
4 * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 */
10
11#ifndef _RT3883_REGS_H_
12#define _RT3883_REGS_H_
13
14#include <linux/bitops.h>
15
16#define RT3883_SDRAM_BASE 0x00000000
17#define RT3883_SYSC_BASE 0x10000000
18#define RT3883_TIMER_BASE 0x10000100
19#define RT3883_INTC_BASE 0x10000200
20#define RT3883_MEMC_BASE 0x10000300
21#define RT3883_UART0_BASE 0x10000500
22#define RT3883_PIO_BASE 0x10000600
23#define RT3883_FSCC_BASE 0x10000700
24#define RT3883_NANDC_BASE 0x10000810
25#define RT3883_I2C_BASE 0x10000900
26#define RT3883_I2S_BASE 0x10000a00
27#define RT3883_SPI_BASE 0x10000b00
28#define RT3883_UART1_BASE 0x10000c00
29#define RT3883_PCM_BASE 0x10002000
30#define RT3883_GDMA_BASE 0x10002800
31#define RT3883_CODEC1_BASE 0x10003000
32#define RT3883_CODEC2_BASE 0x10003800
33#define RT3883_FE_BASE 0x10100000
34#define RT3883_ROM_BASE 0x10118000
35#define RT3883_USBDEV_BASE 0x10112000
36#define RT3883_PCI_BASE 0x10140000
37#define RT3883_WLAN_BASE 0x10180000
38#define RT3883_USBHOST_BASE 0x101c0000
39#define RT3883_BOOT_BASE 0x1c000000
40#define RT3883_SRAM_BASE 0x1e000000
41#define RT3883_PCIMEM_BASE 0x20000000
42
43#define RT3883_EHCI_BASE (RT3883_USBHOST_BASE)
44#define RT3883_OHCI_BASE (RT3883_USBHOST_BASE + 0x1000)
45
46#define RT3883_SYSC_SIZE 0x100
47#define RT3883_TIMER_SIZE 0x100
48#define RT3883_INTC_SIZE 0x100
49#define RT3883_MEMC_SIZE 0x100
50#define RT3883_UART0_SIZE 0x100
51#define RT3883_UART1_SIZE 0x100
52#define RT3883_PIO_SIZE 0x100
53#define RT3883_FSCC_SIZE 0x100
54#define RT3883_NANDC_SIZE 0x0f0
55#define RT3883_I2C_SIZE 0x100
56#define RT3883_I2S_SIZE 0x100
57#define RT3883_SPI_SIZE 0x100
58#define RT3883_PCM_SIZE 0x800
59#define RT3883_GDMA_SIZE 0x800
60#define RT3883_CODEC1_SIZE 0x800
61#define RT3883_CODEC2_SIZE 0x800
62#define RT3883_FE_SIZE 0x10000
63#define RT3883_ROM_SIZE 0x4000
64#define RT3883_USBDEV_SIZE 0x4000
65#define RT3883_PCI_SIZE 0x40000
66#define RT3883_WLAN_SIZE 0x40000
67#define RT3883_USBHOST_SIZE 0x40000
68#define RT3883_BOOT_SIZE (32 * 1024 * 1024)
69#define RT3883_SRAM_SIZE (32 * 1024 * 1024)
70
71/* SYSC registers */
72#define RT3883_SYSC_REG_CHIPID0_3 0x00 /* Chip ID 0 */
73#define RT3883_SYSC_REG_CHIPID4_7 0x04 /* Chip ID 1 */
74#define RT3883_SYSC_REG_REVID 0x0c /* Chip Revision Identification */
75#define RT3883_SYSC_REG_SYSCFG0 0x10 /* System Configuration 0 */
76#define RT3883_SYSC_REG_SYSCFG1 0x14 /* System Configuration 1 */
77#define RT3883_SYSC_REG_CLKCFG0 0x2c /* Clock Configuration 0 */
78#define RT3883_SYSC_REG_CLKCFG1 0x30 /* Clock Configuration 1 */
79#define RT3883_SYSC_REG_RSTCTRL 0x34 /* Reset Control*/
80#define RT3883_SYSC_REG_RSTSTAT 0x38 /* Reset Status*/
81#define RT3883_SYSC_REG_USB_PS 0x5c /* USB Power saving control */
82#define RT3883_SYSC_REG_GPIO_MODE 0x60 /* GPIO Purpose Select */
83#define RT3883_SYSC_REG_PCIE_CLK_GEN0 0x7c
84#define RT3883_SYSC_REG_PCIE_CLK_GEN1 0x80
85#define RT3883_SYSC_REG_PCIE_CLK_GEN2 0x84
86#define RT3883_SYSC_REG_PMU 0x88
87#define RT3883_SYSC_REG_PMU1 0x8c
88
89#define RT3883_CHIP_NAME0 0x38335452
90#define RT3883_CHIP_NAME1 0x20203338
91
92#define RT3883_REVID_VER_ID_MASK 0x0f
93#define RT3883_REVID_VER_ID_SHIFT 8
94#define RT3883_REVID_ECO_ID_MASK 0x0f
95
96#define RT3883_SYSCFG0_DRAM_TYPE_DDR2 BIT(17)
97#define RT3883_SYSCFG0_CPUCLK_SHIFT 8
98#define RT3883_SYSCFG0_CPUCLK_MASK 0x3
99#define RT3883_SYSCFG0_CPUCLK_250 0x0
100#define RT3883_SYSCFG0_CPUCLK_384 0x1
101#define RT3883_SYSCFG0_CPUCLK_480 0x2
102#define RT3883_SYSCFG0_CPUCLK_500 0x3
103
104#define RT3883_SYSCFG1_USB0_HOST_MODE BIT(10)
105#define RT3883_SYSCFG1_PCIE_RC_MODE BIT(8)
106#define RT3883_SYSCFG1_PCI_HOST_MODE BIT(7)
107#define RT3883_SYSCFG1_PCI_66M_MODE BIT(6)
108#define RT3883_SYSCFG1_GPIO2_AS_WDT_OUT BIT(2)
109
110#define RT3883_CLKCFG1_PCIE_CLK_EN BIT(21)
111#define RT3883_CLKCFG1_UPHY1_CLK_EN BIT(20)
112#define RT3883_CLKCFG1_PCI_CLK_EN BIT(19)
113#define RT3883_CLKCFG1_UPHY0_CLK_EN BIT(18)
114
115#define RT3883_GPIO_MODE_I2C BIT(0)
116#define RT3883_GPIO_MODE_SPI BIT(1)
117#define RT3883_GPIO_MODE_UART0_SHIFT 2
118#define RT3883_GPIO_MODE_UART0_MASK 0x7
119#define RT3883_GPIO_MODE_UART0(x) ((x) << RT3883_GPIO_MODE_UART0_SHIFT)
120#define RT3883_GPIO_MODE_UARTF 0x0
121#define RT3883_GPIO_MODE_PCM_UARTF 0x1
122#define RT3883_GPIO_MODE_PCM_I2S 0x2
123#define RT3883_GPIO_MODE_I2S_UARTF 0x3
124#define RT3883_GPIO_MODE_PCM_GPIO 0x4
125#define RT3883_GPIO_MODE_GPIO_UARTF 0x5
126#define RT3883_GPIO_MODE_GPIO_I2S 0x6
127#define RT3883_GPIO_MODE_GPIO 0x7
128#define RT3883_GPIO_MODE_UART1 BIT(5)
129#define RT3883_GPIO_MODE_JTAG BIT(6)
130#define RT3883_GPIO_MODE_MDIO BIT(7)
131#define RT3883_GPIO_MODE_GE1 BIT(9)
132#define RT3883_GPIO_MODE_GE2 BIT(10)
133#define RT3883_GPIO_MODE_PCI_SHIFT 11
134#define RT3883_GPIO_MODE_PCI_MASK 0x7
135#define RT3883_GPIO_MODE_PCI (RT3883_GPIO_MODE_PCI_MASK << RT3883_GPIO_MODE_PCI_SHIFT)
136#define RT3883_GPIO_MODE_LNA_A_SHIFT 16
137#define RT3883_GPIO_MODE_LNA_A_MASK 0x3
138#define _RT3883_GPIO_MODE_LNA_A(_x) ((_x) << RT3883_GPIO_MODE_LNA_A_SHIFT)
139#define RT3883_GPIO_MODE_LNA_A_GPIO 0x3
140#define RT3883_GPIO_MODE_LNA_A _RT3883_GPIO_MODE_LNA_A(RT3883_GPIO_MODE_LNA_A_MASK)
141#define RT3883_GPIO_MODE_LNA_G_SHIFT 18
142#define RT3883_GPIO_MODE_LNA_G_MASK 0x3
143#define _RT3883_GPIO_MODE_LNA_G(_x) ((_x) << RT3883_GPIO_MODE_LNA_G_SHIFT)
144#define RT3883_GPIO_MODE_LNA_G_GPIO 0x3
145#define RT3883_GPIO_MODE_LNA_G _RT3883_GPIO_MODE_LNA_G(RT3883_GPIO_MODE_LNA_G_MASK)
146
147#define RT3883_GPIO_I2C_SD 1
148#define RT3883_GPIO_I2C_SCLK 2
149#define RT3883_GPIO_SPI_CS0 3
150#define RT3883_GPIO_SPI_CLK 4
151#define RT3883_GPIO_SPI_MOSI 5
152#define RT3883_GPIO_SPI_MISO 6
153#define RT3883_GPIO_7 7
154#define RT3883_GPIO_10 10
155#define RT3883_GPIO_11 11
156#define RT3883_GPIO_14 14
157#define RT3883_GPIO_UART1_TXD 15
158#define RT3883_GPIO_UART1_RXD 16
159#define RT3883_GPIO_JTAG_TDO 17
160#define RT3883_GPIO_JTAG_TDI 18
161#define RT3883_GPIO_JTAG_TMS 19
162#define RT3883_GPIO_JTAG_TCLK 20
163#define RT3883_GPIO_JTAG_TRST_N 21
164#define RT3883_GPIO_MDIO_MDC 22
165#define RT3883_GPIO_MDIO_MDIO 23
166#define RT3883_GPIO_LNA_PE_A0 32
167#define RT3883_GPIO_LNA_PE_A1 33
168#define RT3883_GPIO_LNA_PE_A2 34
169#define RT3883_GPIO_LNA_PE_G0 35
170#define RT3883_GPIO_LNA_PE_G1 36
171#define RT3883_GPIO_LNA_PE_G2 37
172#define RT3883_GPIO_PCI_AD0 40
173#define RT3883_GPIO_PCI_AD31 71
174#define RT3883_GPIO_GE2_TXD0 72
175#define RT3883_GPIO_GE2_TXD1 73
176#define RT3883_GPIO_GE2_TXD2 74
177#define RT3883_GPIO_GE2_TXD3 75
178#define RT3883_GPIO_GE2_TXEN 76
179#define RT3883_GPIO_GE2_TXCLK 77
180#define RT3883_GPIO_GE2_RXD0 78
181#define RT3883_GPIO_GE2_RXD1 79
182#define RT3883_GPIO_GE2_RXD2 80
183#define RT3883_GPIO_GE2_RXD3 81
184#define RT3883_GPIO_GE2_RXDV 82
185#define RT3883_GPIO_GE2_RXCLK 83
186#define RT3883_GPIO_GE1_TXD0 84
187#define RT3883_GPIO_GE1_TXD1 85
188#define RT3883_GPIO_GE1_TXD2 86
189#define RT3883_GPIO_GE1_TXD3 87
190#define RT3883_GPIO_GE1_TXEN 88
191#define RT3883_GPIO_GE1_TXCLK 89
192#define RT3883_GPIO_GE1_RXD0 90
193#define RT3883_GPIO_GE1_RXD1 91
194#define RT3883_GPIO_GE1_RXD2 92
195#define RT3883_GPIO_GE1_RXD3 93
196#define RT3883_GPIO_GE1_RXDV 94
197#define RT3883_GPIO_GE1_RXCLK 95
198
199#define RT3883_RSTCTRL_PCIE_PCI_PDM BIT(27)
200#define RT3883_RSTCTRL_FLASH BIT(26)
201#define RT3883_RSTCTRL_UDEV BIT(25)
202#define RT3883_RSTCTRL_PCI BIT(24)
203#define RT3883_RSTCTRL_PCIE BIT(23)
204#define RT3883_RSTCTRL_UHST BIT(22)
205#define RT3883_RSTCTRL_FE BIT(21)
206#define RT3883_RSTCTRL_WLAN BIT(20)
207#define RT3883_RSTCTRL_UART1 BIT(29)
208#define RT3883_RSTCTRL_SPI BIT(18)
209#define RT3883_RSTCTRL_I2S BIT(17)
210#define RT3883_RSTCTRL_I2C BIT(16)
211#define RT3883_RSTCTRL_NAND BIT(15)
212#define RT3883_RSTCTRL_DMA BIT(14)
213#define RT3883_RSTCTRL_PIO BIT(13)
214#define RT3883_RSTCTRL_UART BIT(12)
215#define RT3883_RSTCTRL_PCM BIT(11)
216#define RT3883_RSTCTRL_MC BIT(10)
217#define RT3883_RSTCTRL_INTC BIT(9)
218#define RT3883_RSTCTRL_TIMER BIT(8)
219#define RT3883_RSTCTRL_SYS BIT(0)
220
221#define RT3883_INTC_INT_SYSCTL BIT(0)
222#define RT3883_INTC_INT_TIMER0 BIT(1)
223#define RT3883_INTC_INT_TIMER1 BIT(2)
224#define RT3883_INTC_INT_IA BIT(3)
225#define RT3883_INTC_INT_PCM BIT(4)
226#define RT3883_INTC_INT_UART0 BIT(5)
227#define RT3883_INTC_INT_PIO BIT(6)
228#define RT3883_INTC_INT_DMA BIT(7)
229#define RT3883_INTC_INT_NAND BIT(8)
230#define RT3883_INTC_INT_PERFC BIT(9)
231#define RT3883_INTC_INT_I2S BIT(10)
232#define RT3883_INTC_INT_UART1 BIT(12)
233#define RT3883_INTC_INT_UHST BIT(18)
234#define RT3883_INTC_INT_UDEV BIT(19)
235
236/* FLASH/SRAM/Codec Controller registers */
237#define RT3883_FSCC_REG_FLASH_CFG0 0x00
238#define RT3883_FSCC_REG_FLASH_CFG1 0x04
239#define RT3883_FSCC_REG_CODEC_CFG0 0x40
240#define RT3883_FSCC_REG_CODEC_CFG1 0x44
241
242#define RT3883_FLASH_CFG_WIDTH_SHIFT 26
243#define RT3883_FLASH_CFG_WIDTH_MASK 0x3
244#define RT3883_FLASH_CFG_WIDTH_8BIT 0x0
245#define RT3883_FLASH_CFG_WIDTH_16BIT 0x1
246#define RT3883_FLASH_CFG_WIDTH_32BIT 0x2
247
248#define RT3883_SDRAM_BASE 0x00000000
249#define RT3883_MEM_SIZE_MIN 2
250#define RT3883_MEM_SIZE_MAX 256
251
252#endif /* _RT3883_REGS_H_ */
diff --git a/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h
new file mode 100644
index 000000000000..181fbf4c976f
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h
@@ -0,0 +1,55 @@
1/*
2 * Ralink RT3662/RT3883 specific CPU feature overrides
3 *
4 * Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org>
5 *
6 * This file was derived from: include/asm-mips/cpu-features.h
7 * Copyright (C) 2003, 2004 Ralf Baechle
8 * Copyright (C) 2004 Maciej W. Rozycki
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published
12 * by the Free Software Foundation.
13 *
14 */
15#ifndef _RT3883_CPU_FEATURE_OVERRIDES_H
16#define _RT3883_CPU_FEATURE_OVERRIDES_H
17
18#define cpu_has_tlb 1
19#define cpu_has_4kex 1
20#define cpu_has_3k_cache 0
21#define cpu_has_4k_cache 1
22#define cpu_has_tx39_cache 0
23#define cpu_has_sb1_cache 0
24#define cpu_has_fpu 0
25#define cpu_has_32fpr 0
26#define cpu_has_counter 1
27#define cpu_has_watch 1
28#define cpu_has_divec 1
29
30#define cpu_has_prefetch 1
31#define cpu_has_ejtag 1
32#define cpu_has_llsc 1
33
34#define cpu_has_mips16 1
35#define cpu_has_mdmx 0
36#define cpu_has_mips3d 0
37#define cpu_has_smartmips 0
38
39#define cpu_has_mips32r1 1
40#define cpu_has_mips32r2 1
41#define cpu_has_mips64r1 0
42#define cpu_has_mips64r2 0
43
44#define cpu_has_dsp 1
45#define cpu_has_mipsmt 0
46
47#define cpu_has_64bits 0
48#define cpu_has_64bit_zero_reg 0
49#define cpu_has_64bit_gp_regs 0
50#define cpu_has_64bit_addresses 0
51
52#define cpu_dcache_line_size() 32
53#define cpu_icache_line_size() 32
54
55#endif /* _RT3883_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
index 193c0912d38e..bfbd7035d4c5 100644
--- a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
@@ -28,7 +28,11 @@
28/* #define cpu_has_prefetch ? */ 28/* #define cpu_has_prefetch ? */
29#define cpu_has_mcheck 1 29#define cpu_has_mcheck 1
30/* #define cpu_has_ejtag ? */ 30/* #define cpu_has_ejtag ? */
31#ifdef CONFIG_CPU_MICROMIPS
32#define cpu_has_llsc 0
33#else
31#define cpu_has_llsc 1 34#define cpu_has_llsc 1
35#endif
32/* #define cpu_has_vtag_icache ? */ 36/* #define cpu_has_vtag_icache ? */
33/* #define cpu_has_dc_aliases ? */ 37/* #define cpu_has_dc_aliases ? */
34/* #define cpu_has_ic_fills_f_dc ? */ 38/* #define cpu_has_ic_fills_f_dc ? */
diff --git a/arch/mips/include/asm/mips-boards/generic.h b/arch/mips/include/asm/mips-boards/generic.h
index 44a09a64160a..bd9746fbe4af 100644
--- a/arch/mips/include/asm/mips-boards/generic.h
+++ b/arch/mips/include/asm/mips-boards/generic.h
@@ -83,4 +83,7 @@ extern void mips_pcibios_init(void);
83#define mips_pcibios_init() do { } while (0) 83#define mips_pcibios_init() do { } while (0)
84#endif 84#endif
85 85
86extern void mips_scroll_message(void);
87extern void mips_display_message(const char *str);
88
86#endif /* __ASM_MIPS_BOARDS_GENERIC_H */ 89#endif /* __ASM_MIPS_BOARDS_GENERIC_H */
diff --git a/arch/mips/include/asm/mips-boards/prom.h b/arch/mips/include/asm/mips-boards/prom.h
deleted file mode 100644
index e7aed3e4ff58..000000000000
--- a/arch/mips/include/asm/mips-boards/prom.h
+++ /dev/null
@@ -1,47 +0,0 @@
1/*
2 * Carsten Langgaard, carstenl@mips.com
3 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
4 *
5 * ########################################################################
6 *
7 * This program is free software; you can distribute it and/or modify it
8 * under the terms of the GNU General Public License (Version 2) as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
19 *
20 * ########################################################################
21 *
22 * MIPS boards bootprom interface for the Linux kernel.
23 *
24 */
25
26#ifndef _MIPS_PROM_H
27#define _MIPS_PROM_H
28
29extern char *prom_getcmdline(void);
30extern char *prom_getenv(char *name);
31extern void prom_init_cmdline(void);
32extern void prom_meminit(void);
33extern void prom_fixup_mem_map(unsigned long start_mem, unsigned long end_mem);
34extern void mips_display_message(const char *str);
35extern void mips_display_word(unsigned int num);
36extern void mips_scroll_message(void);
37extern int get_ethernet_addr(char *ethernet_addr);
38
39/* Memory descriptor management. */
40#define PROM_MAX_PMEMBLOCKS 32
41struct prom_pmemblock {
42 unsigned long base; /* Within KSEG0. */
43 unsigned int size; /* In bytes. */
44 unsigned int type; /* free or prom memory */
45};
46
47#endif /* !(_MIPS_PROM_H) */
diff --git a/arch/mips/include/asm/mips_machine.h b/arch/mips/include/asm/mips_machine.h
index 363bb352c7f7..9d00aebe9842 100644
--- a/arch/mips/include/asm/mips_machine.h
+++ b/arch/mips/include/asm/mips_machine.h
@@ -42,13 +42,9 @@ extern long __mips_machines_end;
42#ifdef CONFIG_MIPS_MACHINE 42#ifdef CONFIG_MIPS_MACHINE
43int mips_machtype_setup(char *id) __init; 43int mips_machtype_setup(char *id) __init;
44void mips_machine_setup(void) __init; 44void mips_machine_setup(void) __init;
45void mips_set_machine_name(const char *name) __init;
46char *mips_get_machine_name(void);
47#else 45#else
48static inline int mips_machtype_setup(char *id) { return 1; } 46static inline int mips_machtype_setup(char *id) { return 1; }
49static inline void mips_machine_setup(void) { } 47static inline void mips_machine_setup(void) { }
50static inline void mips_set_machine_name(const char *name) { }
51static inline char *mips_get_machine_name(void) { return NULL; }
52#endif /* CONFIG_MIPS_MACHINE */ 48#endif /* CONFIG_MIPS_MACHINE */
53 49
54#endif /* __ASM_MIPS_MACHINE_H */ 50#endif /* __ASM_MIPS_MACHINE_H */
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 0da44d422f5b..87e6207b05e4 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -596,6 +596,7 @@
596#define MIPS_CONF3_RXI (_ULCAST_(1) << 12) 596#define MIPS_CONF3_RXI (_ULCAST_(1) << 12)
597#define MIPS_CONF3_ULRI (_ULCAST_(1) << 13) 597#define MIPS_CONF3_ULRI (_ULCAST_(1) << 13)
598#define MIPS_CONF3_ISA (_ULCAST_(3) << 14) 598#define MIPS_CONF3_ISA (_ULCAST_(3) << 14)
599#define MIPS_CONF3_ISA_OE (_ULCAST_(3) << 16)
599#define MIPS_CONF3_VZ (_ULCAST_(1) << 23) 600#define MIPS_CONF3_VZ (_ULCAST_(1) << 23)
600 601
601#define MIPS_CONF4_MMUSIZEEXT (_ULCAST_(255) << 0) 602#define MIPS_CONF4_MMUSIZEEXT (_ULCAST_(255) << 0)
@@ -623,6 +624,24 @@
623#ifndef __ASSEMBLY__ 624#ifndef __ASSEMBLY__
624 625
625/* 626/*
627 * Macros for handling the ISA mode bit for microMIPS.
628 */
629#define get_isa16_mode(x) ((x) & 0x1)
630#define msk_isa16_mode(x) ((x) & ~0x1)
631#define set_isa16_mode(x) do { (x) |= 0x1; } while(0)
632
633/*
634 * microMIPS instructions can be 16-bit or 32-bit in length. This
635 * returns a 1 if the instruction is 16-bit and a 0 if 32-bit.
636 */
637static inline int mm_insn_16bit(u16 insn)
638{
639 u16 opcode = (insn >> 10) & 0x7;
640
641 return (opcode >= 1 && opcode <= 3) ? 1 : 0;
642}
643
644/*
626 * Functions to access the R10000 performance counters. These are basically 645 * Functions to access the R10000 performance counters. These are basically
627 * mfc0 and mtc0 instructions from and to coprocessor register with a 5-bit 646 * mfc0 and mtc0 instructions from and to coprocessor register with a 5-bit
628 * performance counter number encoded into bits 1 ... 5 of the instruction. 647 * performance counter number encoded into bits 1 ... 5 of the instruction.
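Together, the ISA-bit macros and mm_insn_16bit() added above are enough to step through microMIPS text. A hedged sketch, not taken from the patch, that assumes the kernel's u16 type and the helpers above are in scope:

/* Illustrative only: mask off the ISA bit, inspect the first halfword and
 * advance by 2 or 4 bytes depending on whether it starts a 16-bit insn. */
static unsigned long mm_next_pc(unsigned long pc)
{
	u16 *ip = (u16 *)msk_isa16_mode(pc);

	return (unsigned long)ip + (mm_insn_16bit(*ip) ? 2 : 4);
}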
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index e81d719efcd1..1554721e4808 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -26,10 +26,15 @@
26 26
27#ifdef CONFIG_MIPS_PGD_C0_CONTEXT 27#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
28 28
29#define TLBMISS_HANDLER_SETUP_PGD(pgd) \ 29#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
30 tlbmiss_handler_setup_pgd((unsigned long)(pgd)) 30do { \
31 31 void (*tlbmiss_handler_setup_pgd)(unsigned long); \
32extern void tlbmiss_handler_setup_pgd(unsigned long pgd); 32 extern u32 tlbmiss_handler_setup_pgd_array[16]; \
33 \
34 tlbmiss_handler_setup_pgd = \
35 (__typeof__(tlbmiss_handler_setup_pgd)) tlbmiss_handler_setup_pgd_array; \
36 tlbmiss_handler_setup_pgd((unsigned long)(pgd)); \
37} while (0)
33 38
34#define TLBMISS_HANDLER_SETUP() \ 39#define TLBMISS_HANDLER_SETUP() \
35 do { \ 40 do { \
@@ -62,59 +67,88 @@ extern unsigned long pgd_current[];
62 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) 67 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
63#endif 68#endif
64#endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/ 69#endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
65#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
66
67#define ASID_INC 0x40
68#define ASID_MASK 0xfc0
69
70#elif defined(CONFIG_CPU_R8000)
71
72#define ASID_INC 0x10
73#define ASID_MASK 0xff0
74 70
75#elif defined(CONFIG_MIPS_MT_SMTC) 71#define ASID_INC(asid) \
76 72({ \
77#define ASID_INC 0x1 73 unsigned long __asid = asid; \
78extern unsigned long smtc_asid_mask; 74 __asm__("1:\taddiu\t%0,1\t\t\t\t# patched\n\t" \
79#define ASID_MASK (smtc_asid_mask) 75 ".section\t__asid_inc,\"a\"\n\t" \
80#define HW_ASID_MASK 0xff 76 ".word\t1b\n\t" \
81/* End SMTC/34K debug hack */ 77 ".previous" \
82#else /* FIXME: not correct for R6000 */ 78 :"=r" (__asid) \
83 79 :"0" (__asid)); \
84#define ASID_INC 0x1 80 __asid; \
85#define ASID_MASK 0xff 81})
82#define ASID_MASK(asid) \
83({ \
84 unsigned long __asid = asid; \
85 __asm__("1:\tandi\t%0,%1,0xfc0\t\t\t# patched\n\t" \
86 ".section\t__asid_mask,\"a\"\n\t" \
87 ".word\t1b\n\t" \
88 ".previous" \
89 :"=r" (__asid) \
90 :"r" (__asid)); \
91 __asid; \
92})
93#define ASID_VERSION_MASK \
94({ \
95 unsigned long __asid; \
96 __asm__("1:\taddiu\t%0,$0,0xff00\t\t\t\t# patched\n\t" \
97 ".section\t__asid_version_mask,\"a\"\n\t" \
98 ".word\t1b\n\t" \
99 ".previous" \
100 :"=r" (__asid)); \
101 __asid; \
102})
103#define ASID_FIRST_VERSION \
104({ \
 105 unsigned long __asid; \
106 __asm__("1:\tli\t%0,0x100\t\t\t\t# patched\n\t" \
107 ".section\t__asid_first_version,\"a\"\n\t" \
108 ".word\t1b\n\t" \
109 ".previous" \
110 :"=r" (__asid)); \
111 __asid; \
112})
113
114#define ASID_FIRST_VERSION_R3000 0x1000
115#define ASID_FIRST_VERSION_R4000 0x100
116#define ASID_FIRST_VERSION_R8000 0x1000
117#define ASID_FIRST_VERSION_RM9000 0x1000
86 118
119#ifdef CONFIG_MIPS_MT_SMTC
120#define SMTC_HW_ASID_MASK 0xff
121extern unsigned int smtc_asid_mask;
87#endif 122#endif
88 123
89#define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) 124#define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
90#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK) 125#define cpu_asid(cpu, mm) ASID_MASK(cpu_context((cpu), (mm)))
91#define asid_cache(cpu) (cpu_data[cpu].asid_cache) 126#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
92 127
93static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 128static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
94{ 129{
95} 130}
96 131
97/*
98 * All unused by hardware upper bits will be considered
99 * as a software asid extension.
100 */
101#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
102#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
103
104#ifndef CONFIG_MIPS_MT_SMTC 132#ifndef CONFIG_MIPS_MT_SMTC
105/* Normal, classic MIPS get_new_mmu_context */ 133/* Normal, classic MIPS get_new_mmu_context */
106static inline void 134static inline void
107get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) 135get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
108{ 136{
137 extern void kvm_local_flush_tlb_all(void);
109 unsigned long asid = asid_cache(cpu); 138 unsigned long asid = asid_cache(cpu);
110 139
111 if (! ((asid += ASID_INC) & ASID_MASK) ) { 140 if (!ASID_MASK((asid = ASID_INC(asid)))) {
112 if (cpu_has_vtag_icache) 141 if (cpu_has_vtag_icache)
113 flush_icache_all(); 142 flush_icache_all();
143#ifdef CONFIG_VIRTUALIZATION
144 kvm_local_flush_tlb_all(); /* start new asid cycle */
145#else
114 local_flush_tlb_all(); /* start new asid cycle */ 146 local_flush_tlb_all(); /* start new asid cycle */
147#endif
115 if (!asid) /* fix version if needed */ 148 if (!asid) /* fix version if needed */
116 asid = ASID_FIRST_VERSION; 149 asid = ASID_FIRST_VERSION;
117 } 150 }
151
118 cpu_context(cpu, mm) = asid_cache(cpu) = asid; 152 cpu_context(cpu, mm) = asid_cache(cpu) = asid;
119} 153}
120 154
@@ -133,7 +167,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
133{ 167{
134 int i; 168 int i;
135 169
136 for_each_online_cpu(i) 170 for_each_possible_cpu(i)
137 cpu_context(i, mm) = 0; 171 cpu_context(i, mm) = 0;
138 172
139 return 0; 173 return 0;
@@ -166,7 +200,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
166 * free up the ASID value for use and flush any old 200 * free up the ASID value for use and flush any old
167 * instances of it from the TLB. 201 * instances of it from the TLB.
168 */ 202 */
169 oldasid = (read_c0_entryhi() & ASID_MASK); 203 oldasid = ASID_MASK(read_c0_entryhi());
170 if(smtc_live_asid[mytlb][oldasid]) { 204 if(smtc_live_asid[mytlb][oldasid]) {
171 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); 205 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
172 if(smtc_live_asid[mytlb][oldasid] == 0) 206 if(smtc_live_asid[mytlb][oldasid] == 0)
@@ -177,7 +211,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
177 * having ASID_MASK smaller than the hardware maximum, 211 * having ASID_MASK smaller than the hardware maximum,
178 * make sure no "soft" bits become "hard"... 212 * make sure no "soft" bits become "hard"...
179 */ 213 */
180 write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | 214 write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
181 cpu_asid(cpu, next)); 215 cpu_asid(cpu, next));
182 ehb(); /* Make sure it propagates to TCStatus */ 216 ehb(); /* Make sure it propagates to TCStatus */
183 evpe(mtflags); 217 evpe(mtflags);
@@ -230,15 +264,15 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
230#ifdef CONFIG_MIPS_MT_SMTC 264#ifdef CONFIG_MIPS_MT_SMTC
231 /* See comments for similar code above */ 265 /* See comments for similar code above */
232 mtflags = dvpe(); 266 mtflags = dvpe();
233 oldasid = read_c0_entryhi() & ASID_MASK; 267 oldasid = ASID_MASK(read_c0_entryhi());
234 if(smtc_live_asid[mytlb][oldasid]) { 268 if(smtc_live_asid[mytlb][oldasid]) {
235 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); 269 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
236 if(smtc_live_asid[mytlb][oldasid] == 0) 270 if(smtc_live_asid[mytlb][oldasid] == 0)
237 smtc_flush_tlb_asid(oldasid); 271 smtc_flush_tlb_asid(oldasid);
238 } 272 }
239 /* See comments for similar code above */ 273 /* See comments for similar code above */
240 write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | 274 write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
241 cpu_asid(cpu, next)); 275 cpu_asid(cpu, next));
242 ehb(); /* Make sure it propagates to TCStatus */ 276 ehb(); /* Make sure it propagates to TCStatus */
243 evpe(mtflags); 277 evpe(mtflags);
244#else 278#else
@@ -275,14 +309,14 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
275#ifdef CONFIG_MIPS_MT_SMTC 309#ifdef CONFIG_MIPS_MT_SMTC
276 /* See comments for similar code above */ 310 /* See comments for similar code above */
277 prevvpe = dvpe(); 311 prevvpe = dvpe();
278 oldasid = (read_c0_entryhi() & ASID_MASK); 312 oldasid = ASID_MASK(read_c0_entryhi());
279 if (smtc_live_asid[mytlb][oldasid]) { 313 if (smtc_live_asid[mytlb][oldasid]) {
280 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); 314 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
281 if(smtc_live_asid[mytlb][oldasid] == 0) 315 if(smtc_live_asid[mytlb][oldasid] == 0)
282 smtc_flush_tlb_asid(oldasid); 316 smtc_flush_tlb_asid(oldasid);
283 } 317 }
284 /* See comments for similar code above */ 318 /* See comments for similar code above */
285 write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) 319 write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK)
286 | cpu_asid(cpu, mm)); 320 | cpu_asid(cpu, mm));
287 ehb(); /* Make sure it propagates to TCStatus */ 321 ehb(); /* Make sure it propagates to TCStatus */
288 evpe(prevvpe); 322 evpe(prevvpe);
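The ASID_INC/ASID_MASK/ASID_VERSION_MASK macros above each record the address of a single immediate-carrying instruction in a dedicated section (__asid_inc, __asid_mask, ...), the idea being that boot code can rewrite that immediate once the CPU's real ASID width is known. As a hedged illustration of the end result, not of the patching code itself: on a core with an 8-bit ASID the patched ASID_MASK() reduces to a constant mask.

/* Hedged illustration only: what ASID_MASK() amounts to after the patch
 * step on an R4000-style core; an R3000-style core would keep the 0xfc0
 * mask seen in the asm template. */
static inline unsigned long asid_mask_after_patch(unsigned long asid)
{
	return asid & 0xff;
}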
diff --git a/arch/mips/include/asm/netlogic/haldefs.h b/arch/mips/include/asm/netlogic/haldefs.h
index 419d8aef8569..79c7cccdc22c 100644
--- a/arch/mips/include/asm/netlogic/haldefs.h
+++ b/arch/mips/include/asm/netlogic/haldefs.h
@@ -35,42 +35,13 @@
35#ifndef __NLM_HAL_HALDEFS_H__ 35#ifndef __NLM_HAL_HALDEFS_H__
36#define __NLM_HAL_HALDEFS_H__ 36#define __NLM_HAL_HALDEFS_H__
37 37
38#include <linux/irqflags.h> /* for local_irq_disable */
39
38/* 40/*
39 * This file contains platform specific memory mapped IO implementation 41 * This file contains platform specific memory mapped IO implementation
40 * and will provide a way to read 32/64 bit memory mapped registers in 42 * and will provide a way to read 32/64 bit memory mapped registers in
41 * all ABIs 43 * all ABIs
42 */ 44 */
43#if !defined(CONFIG_64BIT) && defined(CONFIG_CPU_XLP)
44#error "o32 compile not supported on XLP yet"
45#endif
46/*
47 * For o32 compilation, we have to disable interrupts and enable KX bit to
48 * access 64 bit addresses or data.
49 *
50 * We need to disable interrupts because we save just the lower 32 bits of
51 * registers in interrupt handling. So if we get hit by an interrupt while
52 * using the upper 32 bits of a register, we lose.
53 */
54static inline uint32_t nlm_save_flags_kx(void)
55{
56 return change_c0_status(ST0_KX | ST0_IE, ST0_KX);
57}
58
59static inline uint32_t nlm_save_flags_cop2(void)
60{
61 return change_c0_status(ST0_CU2 | ST0_IE, ST0_CU2);
62}
63
64static inline void nlm_restore_flags(uint32_t sr)
65{
66 write_c0_status(sr);
67}
68
69/*
70 * The n64 implementations are simple, the o32 implementations when they
71 * are added, will have to disable interrupts and enable KX before doing
72 * 64 bit ops.
73 */
74static inline uint32_t 45static inline uint32_t
75nlm_read_reg(uint64_t base, uint32_t reg) 46nlm_read_reg(uint64_t base, uint32_t reg)
76{ 47{
@@ -87,13 +58,40 @@ nlm_write_reg(uint64_t base, uint32_t reg, uint32_t val)
87 *addr = val; 58 *addr = val;
88} 59}
89 60
61/*
62 * For o32 compilation, we have to disable interrupts to access 64 bit
63 * registers
64 *
65 * We need to disable interrupts because we save just the lower 32 bits of
66 * registers in interrupt handling. So if we get hit by an interrupt while
67 * using the upper 32 bits of a register, we lose.
68 */
69
90static inline uint64_t 70static inline uint64_t
91nlm_read_reg64(uint64_t base, uint32_t reg) 71nlm_read_reg64(uint64_t base, uint32_t reg)
92{ 72{
93 uint64_t addr = base + (reg >> 1) * sizeof(uint64_t); 73 uint64_t addr = base + (reg >> 1) * sizeof(uint64_t);
94 volatile uint64_t *ptr = (volatile uint64_t *)(long)addr; 74 volatile uint64_t *ptr = (volatile uint64_t *)(long)addr;
95 75 uint64_t val;
96 return *ptr; 76
77 if (sizeof(unsigned long) == 4) {
78 unsigned long flags;
79
80 local_irq_save(flags);
81 __asm__ __volatile__(
82 ".set push" "\n\t"
83 ".set mips64" "\n\t"
84 "ld %L0, %1" "\n\t"
85 "dsra32 %M0, %L0, 0" "\n\t"
86 "sll %L0, %L0, 0" "\n\t"
87 ".set pop" "\n"
88 : "=r" (val)
89 : "m" (*ptr));
90 local_irq_restore(flags);
91 } else
92 val = *ptr;
93
94 return val;
97} 95}
98 96
99static inline void 97static inline void
@@ -102,7 +100,25 @@ nlm_write_reg64(uint64_t base, uint32_t reg, uint64_t val)
102 uint64_t addr = base + (reg >> 1) * sizeof(uint64_t); 100 uint64_t addr = base + (reg >> 1) * sizeof(uint64_t);
103 volatile uint64_t *ptr = (volatile uint64_t *)(long)addr; 101 volatile uint64_t *ptr = (volatile uint64_t *)(long)addr;
104 102
105 *ptr = val; 103 if (sizeof(unsigned long) == 4) {
104 unsigned long flags;
105 uint64_t tmp;
106
107 local_irq_save(flags);
108 __asm__ __volatile__(
109 ".set push" "\n\t"
110 ".set mips64" "\n\t"
111 "dsll32 %L0, %L0, 0" "\n\t"
112 "dsrl32 %L0, %L0, 0" "\n\t"
113 "dsll32 %M0, %M0, 0" "\n\t"
114 "or %L0, %L0, %M0" "\n\t"
115 "sd %L0, %2" "\n\t"
116 ".set pop" "\n"
117 : "=r" (tmp)
118 : "0" (val), "m" (*ptr));
119 local_irq_restore(flags);
120 } else
121 *ptr = val;
106} 122}
107 123
108/* 124/*
@@ -143,14 +159,6 @@ nlm_pcicfg_base(uint32_t devoffset)
143 return nlm_io_base + devoffset; 159 return nlm_io_base + devoffset;
144} 160}
145 161
146static inline uint64_t
147nlm_xkphys_map_pcibar0(uint64_t pcibase)
148{
149 uint64_t paddr;
150
151 paddr = nlm_read_reg(pcibase, 0x4) & ~0xfu;
152 return (uint64_t)0x9000000000000000 | paddr;
153}
154#elif defined(CONFIG_CPU_XLR) 162#elif defined(CONFIG_CPU_XLR)
155 163
156static inline uint64_t 164static inline uint64_t
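
The o32 path above hands the 64-bit MMIO value back in a 32-bit register pair, which is what the dsra32/sll pair computes after the single 64-bit load. A minimal userspace analogue of that split, purely illustrative (split64() and the test value are made up, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Mirror the "sll %L0, %L0, 0" / "dsra32 %M0, %L0, 0" sequence in
 * nlm_read_reg64(): break one 64-bit value into the lo/hi words an o32
 * register pair would hold. */
static void split64(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)val;		/* low word, as "sll ..., 0" truncates */
	*hi = (uint32_t)(val >> 32);	/* high word, as "dsra32 ..., 0" extracts */
}

int main(void)
{
	uint32_t lo, hi;

	split64(0x1122334455667788ULL, &lo, &hi);
	printf("hi=%08x lo=%08x\n", hi, lo);	/* hi=11223344 lo=55667788 */
	return 0;
}
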
diff --git a/arch/mips/include/asm/netlogic/mips-extns.h b/arch/mips/include/asm/netlogic/mips-extns.h
index 8ad2e0f81719..f299d31d7c1a 100644
--- a/arch/mips/include/asm/netlogic/mips-extns.h
+++ b/arch/mips/include/asm/netlogic/mips-extns.h
@@ -38,21 +38,16 @@
38/* 38/*
39 * XLR and XLP interrupt request and interrupt mask registers 39 * XLR and XLP interrupt request and interrupt mask registers
40 */ 40 */
41#define read_c0_eirr() __read_64bit_c0_register($9, 6)
42#define read_c0_eimr() __read_64bit_c0_register($9, 7)
43#define write_c0_eirr(val) __write_64bit_c0_register($9, 6, val)
44
45/* 41/*
46 * Writing EIMR in 32 bit is a special case, the lower 8 bit of the 42 * NOTE: Do not save/restore flags around write_c0_eimr().
 47 * EIMR is shadowed in the status register, so we cannot save and 43 * On non-R2 platforms the saved flags contain the part of EIMR that is shadowed
 48 * restore status register for split read. 44 * in the STATUS register. Restoring flags will overwrite the lower 8 bits of EIMR.
45 *
46 * Call with interrupts disabled.
49 */ 47 */
50#define write_c0_eimr(val) \ 48#define write_c0_eimr(val) \
51do { \ 49do { \
52 if (sizeof(unsigned long) == 4) { \ 50 if (sizeof(unsigned long) == 4) { \
53 unsigned long __flags; \
54 \
55 local_irq_save(__flags); \
56 __asm__ __volatile__( \ 51 __asm__ __volatile__( \
57 ".set\tmips64\n\t" \ 52 ".set\tmips64\n\t" \
58 "dsll\t%L0, %L0, 32\n\t" \ 53 "dsll\t%L0, %L0, 32\n\t" \
@@ -62,8 +57,6 @@ do { \
62 "dmtc0\t%L0, $9, 7\n\t" \ 57 "dmtc0\t%L0, $9, 7\n\t" \
63 ".set\tmips0" \ 58 ".set\tmips0" \
64 : : "r" (val)); \ 59 : : "r" (val)); \
65 __flags = (__flags & 0xffff00ff) | (((val) & 0xff) << 8);\
66 local_irq_restore(__flags); \
67 } else \ 60 } else \
68 __write_64bit_c0_register($9, 7, (val)); \ 61 __write_64bit_c0_register($9, 7, (val)); \
69} while (0) 62} while (0)
@@ -128,7 +121,7 @@ static inline uint64_t read_c0_eirr_and_eimr(void)
128 uint64_t val; 121 uint64_t val;
129 122
130#ifdef CONFIG_64BIT 123#ifdef CONFIG_64BIT
131 val = read_c0_eimr() & read_c0_eirr(); 124 val = __read_64bit_c0_register($9, 6) & __read_64bit_c0_register($9, 7);
132#else 125#else
133 __asm__ __volatile__( 126 __asm__ __volatile__(
134 ".set push\n\t" 127 ".set push\n\t"
@@ -143,7 +136,6 @@ static inline uint64_t read_c0_eirr_and_eimr(void)
143 ".set pop" 136 ".set pop"
144 : "=r" (val)); 137 : "=r" (val));
145#endif 138#endif
146
147 return val; 139 return val;
148} 140}
149 141
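
The reason flags must not be restored around write_c0_eimr() is the shadow copy the comment describes: the low bits of EIMR are mirrored into STATUS bits 15..8 (the same placement the removed (val & 0xff) << 8 fixup used), so restoring a stale saved STATUS silently undoes part of the update. A tiny userspace model of that failure mode (variables and write_eimr() are stand-ins for illustration only):

#include <stdint.h>
#include <stdio.h>

static uint32_t status;	/* stand-in for CP0 STATUS */
static uint64_t eimr;	/* stand-in for the extended interrupt mask */

/* Model of write_c0_eimr(): EIMR bits 7..0 are mirrored into STATUS
 * bits 15..8, as in the fixup the old macro applied to the saved flags. */
static void write_eimr(uint64_t val)
{
	eimr = val;
	status = (status & ~0xff00u) | ((uint32_t)(val & 0xff) << 8);
}

int main(void)
{
	uint32_t saved = status;	/* "local_irq_save()" */

	write_eimr(0xff);		/* update the mask... */
	status = saved;			/* ..."local_irq_restore()" clobbers the mirror */
	printf("mirrored bits now %02x, expected ff\n", (status >> 8) & 0xff);
	return 0;
}
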
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/pic.h b/arch/mips/include/asm/netlogic/xlp-hal/pic.h
index 3df53017fe51..a981f4681a15 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/pic.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/pic.h
@@ -191,59 +191,6 @@
191#define PIC_IRT_PCIE_LINK_2_INDEX 80 191#define PIC_IRT_PCIE_LINK_2_INDEX 80
192#define PIC_IRT_PCIE_LINK_3_INDEX 81 192#define PIC_IRT_PCIE_LINK_3_INDEX 81
193#define PIC_IRT_PCIE_LINK_INDEX(num) ((num) + PIC_IRT_PCIE_LINK_0_INDEX) 193#define PIC_IRT_PCIE_LINK_INDEX(num) ((num) + PIC_IRT_PCIE_LINK_0_INDEX)
194/* 78 to 81 */
195#define PIC_NUM_NA_IRTS 32
196/* 82 to 113 */
197#define PIC_IRT_NA_0_INDEX 82
198#define PIC_IRT_NA_INDEX(num) ((num) + PIC_IRT_NA_0_INDEX)
199#define PIC_IRT_POE_INDEX 114
200
201#define PIC_NUM_USB_IRTS 6
202#define PIC_IRT_USB_0_INDEX 115
203#define PIC_IRT_EHCI_0_INDEX 115
204#define PIC_IRT_OHCI_0_INDEX 116
205#define PIC_IRT_OHCI_1_INDEX 117
206#define PIC_IRT_EHCI_1_INDEX 118
207#define PIC_IRT_OHCI_2_INDEX 119
208#define PIC_IRT_OHCI_3_INDEX 120
209#define PIC_IRT_USB_INDEX(num) ((num) + PIC_IRT_USB_0_INDEX)
210/* 115 to 120 */
211#define PIC_IRT_GDX_INDEX 121
212#define PIC_IRT_SEC_INDEX 122
213#define PIC_IRT_RSA_INDEX 123
214
215#define PIC_NUM_COMP_IRTS 4
216#define PIC_IRT_COMP_0_INDEX 124
217#define PIC_IRT_COMP_INDEX(num) ((num) + PIC_IRT_COMP_0_INDEX)
218/* 124 to 127 */
219#define PIC_IRT_GBU_INDEX 128
220#define PIC_IRT_ICC_0_INDEX 129 /* ICC - Inter Chip Coherency */
221#define PIC_IRT_ICC_1_INDEX 130
222#define PIC_IRT_ICC_2_INDEX 131
223#define PIC_IRT_CAM_INDEX 132
224#define PIC_IRT_UART_0_INDEX 133
225#define PIC_IRT_UART_1_INDEX 134
226#define PIC_IRT_I2C_0_INDEX 135
227#define PIC_IRT_I2C_1_INDEX 136
228#define PIC_IRT_SYS_0_INDEX 137
229#define PIC_IRT_SYS_1_INDEX 138
230#define PIC_IRT_JTAG_INDEX 139
231#define PIC_IRT_PIC_INDEX 140
232#define PIC_IRT_NBU_INDEX 141
233#define PIC_IRT_TCU_INDEX 142
234#define PIC_IRT_GCU_INDEX 143 /* GBC - Global Coherency */
235#define PIC_IRT_DMC_0_INDEX 144
236#define PIC_IRT_DMC_1_INDEX 145
237
238#define PIC_NUM_GPIO_IRTS 4
239#define PIC_IRT_GPIO_0_INDEX 146
240#define PIC_IRT_GPIO_INDEX(num) ((num) + PIC_IRT_GPIO_0_INDEX)
241
242/* 146 to 149 */
243#define PIC_IRT_NOR_INDEX 150
244#define PIC_IRT_NAND_INDEX 151
245#define PIC_IRT_SPI_INDEX 152
246#define PIC_IRT_MMC_INDEX 153
247 194
248#define PIC_CLOCK_TIMER 7 195#define PIC_CLOCK_TIMER 7
249#define PIC_IRQ_BASE 8 196#define PIC_IRQ_BASE 8
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/usb.h b/arch/mips/include/asm/netlogic/xlp-hal/usb.h
deleted file mode 100644
index a9cd350dfb6c..000000000000
--- a/arch/mips/include/asm/netlogic/xlp-hal/usb.h
+++ /dev/null
@@ -1,64 +0,0 @@
1/*
2 * Copyright (c) 2003-2012 Broadcom Corporation
3 * All Rights Reserved
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the Broadcom
9 * license below:
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
29 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
31 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
32 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#ifndef __NLM_HAL_USB_H__
36#define __NLM_HAL_USB_H__
37
38#define USB_CTL_0 0x01
39#define USB_PHY_0 0x0A
40#define USB_PHY_RESET 0x01
41#define USB_PHY_PORT_RESET_0 0x10
42#define USB_PHY_PORT_RESET_1 0x20
43#define USB_CONTROLLER_RESET 0x01
44#define USB_INT_STATUS 0x0E
45#define USB_INT_EN 0x0F
46#define USB_PHY_INTERRUPT_EN 0x01
47#define USB_OHCI_INTERRUPT_EN 0x02
48#define USB_OHCI_INTERRUPT1_EN 0x04
49#define USB_OHCI_INTERRUPT2_EN 0x08
50#define USB_CTRL_INTERRUPT_EN 0x10
51
52#ifndef __ASSEMBLY__
53
54#define nlm_read_usb_reg(b, r) nlm_read_reg(b, r)
55#define nlm_write_usb_reg(b, r, v) nlm_write_reg(b, r, v)
56#define nlm_get_usb_pcibase(node, inst) \
57 nlm_pcicfg_base(XLP_IO_USB_OFFSET(node, inst))
58#define nlm_get_usb_hcd_base(node, inst) \
59 nlm_xkphys_map_pcibar0(nlm_get_usb_pcibase(node, inst))
60#define nlm_get_usb_regbase(node, inst) \
61 (nlm_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
62
63#endif
64#endif /* __NLM_HAL_USB_H__ */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index fdc62fb5630d..8b8f6b393363 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -8,6 +8,7 @@
8#ifndef _ASM_PGTABLE_H 8#ifndef _ASM_PGTABLE_H
9#define _ASM_PGTABLE_H 9#define _ASM_PGTABLE_H
10 10
11#include <linux/mm_types.h>
11#include <linux/mmzone.h> 12#include <linux/mmzone.h>
12#ifdef CONFIG_32BIT 13#ifdef CONFIG_32BIT
13#include <asm/pgtable-32.h> 14#include <asm/pgtable-32.h>
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 2a5fa7abb346..71686c897dea 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -44,11 +44,16 @@ extern unsigned int vced_count, vcei_count;
44#define SPECIAL_PAGES_SIZE PAGE_SIZE 44#define SPECIAL_PAGES_SIZE PAGE_SIZE
45 45
46#ifdef CONFIG_32BIT 46#ifdef CONFIG_32BIT
47#ifdef CONFIG_KVM_GUEST
48/* User space process size is limited to 1GB in KVM Guest Mode */
49#define TASK_SIZE 0x3fff8000UL
50#else
47/* 51/*
48 * User space process size: 2GB. This is hardcoded into a few places, 52 * User space process size: 2GB. This is hardcoded into a few places,
49 * so don't change it unless you know what you are doing. 53 * so don't change it unless you know what you are doing.
50 */ 54 */
51#define TASK_SIZE 0x7fff8000UL 55#define TASK_SIZE 0x7fff8000UL
56#endif
52 57
53#ifdef __KERNEL__ 58#ifdef __KERNEL__
54#define STACK_TOP_MAX TASK_SIZE 59#define STACK_TOP_MAX TASK_SIZE
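
For reference, the two TASK_SIZE values work out to just under 1 GB and 2 GB of user address space respectively; a quick arithmetic check (plain userspace C, nothing kernel-specific assumed):

#include <stdio.h>

int main(void)
{
	unsigned long kvm_guest = 0x3fff8000UL;	/* TASK_SIZE with CONFIG_KVM_GUEST */
	unsigned long normal    = 0x7fff8000UL;	/* default 32-bit TASK_SIZE */

	printf("KVM guest: %lu MB, default: %lu MB\n",
	       kvm_guest >> 20, normal >> 20);	/* 1023 MB and 2047 MB */
	return 0;
}
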
diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h
index 8808bf548b99..1e7e0961064b 100644
--- a/arch/mips/include/asm/prom.h
+++ b/arch/mips/include/asm/prom.h
@@ -48,4 +48,7 @@ extern void __dt_setup_arch(struct boot_param_header *bph);
48static inline void device_tree_init(void) { } 48static inline void device_tree_init(void) { }
49#endif /* CONFIG_OF */ 49#endif /* CONFIG_OF */
50 50
51extern char *mips_get_machine_name(void);
52extern void mips_set_machine_name(const char *name);
53
51#endif /* __ASM_PROM_H */ 54#endif /* __ASM_PROM_H */
diff --git a/arch/mips/include/asm/sn/sn_private.h b/arch/mips/include/asm/sn/sn_private.h
index 1a2c3025bf28..fdfae43d8b99 100644
--- a/arch/mips/include/asm/sn/sn_private.h
+++ b/arch/mips/include/asm/sn/sn_private.h
@@ -14,6 +14,6 @@ extern void install_cpu_nmi_handler(int slice);
14extern void install_ipi(void); 14extern void install_ipi(void);
15extern void setup_replication_mask(void); 15extern void setup_replication_mask(void);
16extern void replicate_kernel_text(void); 16extern void replicate_kernel_text(void);
17extern pfn_t node_getfirstfree(cnodeid_t); 17extern unsigned long node_getfirstfree(cnodeid_t);
18 18
19#endif /* __ASM_SN_SN_PRIVATE_H */ 19#endif /* __ASM_SN_SN_PRIVATE_H */
diff --git a/arch/mips/include/asm/sn/types.h b/arch/mips/include/asm/sn/types.h
index c4813d67aec3..6d24d4e8b9ed 100644
--- a/arch/mips/include/asm/sn/types.h
+++ b/arch/mips/include/asm/sn/types.h
@@ -19,7 +19,6 @@ typedef signed char partid_t; /* partition ID type */
19typedef signed short moduleid_t; /* user-visible module number type */ 19typedef signed short moduleid_t; /* user-visible module number type */
20typedef signed short cmoduleid_t; /* kernel compact module id type */ 20typedef signed short cmoduleid_t; /* kernel compact module id type */
21typedef unsigned char clusterid_t; /* Clusterid of the cell */ 21typedef unsigned char clusterid_t; /* Clusterid of the cell */
22typedef unsigned long pfn_t;
23 22
24typedef dev_t vertex_hdl_t; /* hardware graph vertex handle */ 23typedef dev_t vertex_hdl_t; /* hardware graph vertex handle */
25 24
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 5130c88d6420..78d201fb6c87 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -71,7 +71,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
71 " nop \n" 71 " nop \n"
72 " srl %[my_ticket], %[ticket], 16 \n" 72 " srl %[my_ticket], %[ticket], 16 \n"
73 " andi %[ticket], %[ticket], 0xffff \n" 73 " andi %[ticket], %[ticket], 0xffff \n"
74 " andi %[my_ticket], %[my_ticket], 0xffff \n"
75 " bne %[ticket], %[my_ticket], 4f \n" 74 " bne %[ticket], %[my_ticket], 4f \n"
76 " subu %[ticket], %[my_ticket], %[ticket] \n" 75 " subu %[ticket], %[my_ticket], %[ticket] \n"
77 "2: \n" 76 "2: \n"
@@ -105,7 +104,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
105 " beqz %[my_ticket], 1b \n" 104 " beqz %[my_ticket], 1b \n"
106 " srl %[my_ticket], %[ticket], 16 \n" 105 " srl %[my_ticket], %[ticket], 16 \n"
107 " andi %[ticket], %[ticket], 0xffff \n" 106 " andi %[ticket], %[ticket], 0xffff \n"
108 " andi %[my_ticket], %[my_ticket], 0xffff \n"
109 " bne %[ticket], %[my_ticket], 4f \n" 107 " bne %[ticket], %[my_ticket], 4f \n"
110 " subu %[ticket], %[my_ticket], %[ticket] \n" 108 " subu %[ticket], %[my_ticket], %[ticket] \n"
111 "2: \n" 109 "2: \n"
@@ -153,7 +151,6 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
153 " \n" 151 " \n"
154 "1: ll %[ticket], %[ticket_ptr] \n" 152 "1: ll %[ticket], %[ticket_ptr] \n"
155 " srl %[my_ticket], %[ticket], 16 \n" 153 " srl %[my_ticket], %[ticket], 16 \n"
156 " andi %[my_ticket], %[my_ticket], 0xffff \n"
157 " andi %[now_serving], %[ticket], 0xffff \n" 154 " andi %[now_serving], %[ticket], 0xffff \n"
158 " bne %[my_ticket], %[now_serving], 3f \n" 155 " bne %[my_ticket], %[now_serving], 3f \n"
159 " addu %[ticket], %[ticket], %[inc] \n" 156 " addu %[ticket], %[ticket], %[inc] \n"
@@ -178,7 +175,6 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
178 " \n" 175 " \n"
179 "1: ll %[ticket], %[ticket_ptr] \n" 176 "1: ll %[ticket], %[ticket_ptr] \n"
180 " srl %[my_ticket], %[ticket], 16 \n" 177 " srl %[my_ticket], %[ticket], 16 \n"
181 " andi %[my_ticket], %[my_ticket], 0xffff \n"
182 " andi %[now_serving], %[ticket], 0xffff \n" 178 " andi %[now_serving], %[ticket], 0xffff \n"
183 " bne %[my_ticket], %[now_serving], 3f \n" 179 " bne %[my_ticket], %[now_serving], 3f \n"
184 " addu %[ticket], %[ticket], %[inc] \n" 180 " addu %[ticket], %[ticket], %[inc] \n"
@@ -242,25 +238,16 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
242 : "m" (rw->lock) 238 : "m" (rw->lock)
243 : "memory"); 239 : "memory");
244 } else { 240 } else {
245 __asm__ __volatile__( 241 do {
246 " .set noreorder # arch_read_lock \n" 242 __asm__ __volatile__(
247 "1: ll %1, %2 \n" 243 "1: ll %1, %2 # arch_read_lock \n"
248 " bltz %1, 3f \n" 244 " bltz %1, 1b \n"
249 " addu %1, 1 \n" 245 " addu %1, 1 \n"
250 "2: sc %1, %0 \n" 246 "2: sc %1, %0 \n"
251 " beqz %1, 1b \n" 247 : "=m" (rw->lock), "=&r" (tmp)
252 " nop \n" 248 : "m" (rw->lock)
253 " .subsection 2 \n" 249 : "memory");
254 "3: ll %1, %2 \n" 250 } while (unlikely(!tmp));
255 " bltz %1, 3b \n"
256 " addu %1, 1 \n"
257 " b 2b \n"
258 " nop \n"
259 " .previous \n"
260 " .set reorder \n"
261 : "=m" (rw->lock), "=&r" (tmp)
262 : "m" (rw->lock)
263 : "memory");
264 } 251 }
265 252
266 smp_llsc_mb(); 253 smp_llsc_mb();
@@ -285,21 +272,15 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
285 : "m" (rw->lock) 272 : "m" (rw->lock)
286 : "memory"); 273 : "memory");
287 } else { 274 } else {
288 __asm__ __volatile__( 275 do {
289 " .set noreorder # arch_read_unlock \n" 276 __asm__ __volatile__(
290 "1: ll %1, %2 \n" 277 "1: ll %1, %2 # arch_read_unlock \n"
291 " sub %1, 1 \n" 278 " sub %1, 1 \n"
292 " sc %1, %0 \n" 279 " sc %1, %0 \n"
293 " beqz %1, 2f \n" 280 : "=m" (rw->lock), "=&r" (tmp)
294 " nop \n" 281 : "m" (rw->lock)
295 " .subsection 2 \n" 282 : "memory");
296 "2: b 1b \n" 283 } while (unlikely(!tmp));
297 " nop \n"
298 " .previous \n"
299 " .set reorder \n"
300 : "=m" (rw->lock), "=&r" (tmp)
301 : "m" (rw->lock)
302 : "memory");
303 } 284 }
304} 285}
305 286
@@ -321,25 +302,16 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
321 : "m" (rw->lock) 302 : "m" (rw->lock)
322 : "memory"); 303 : "memory");
323 } else { 304 } else {
324 __asm__ __volatile__( 305 do {
325 " .set noreorder # arch_write_lock \n" 306 __asm__ __volatile__(
326 "1: ll %1, %2 \n" 307 "1: ll %1, %2 # arch_write_lock \n"
327 " bnez %1, 3f \n" 308 " bnez %1, 1b \n"
328 " lui %1, 0x8000 \n" 309 " lui %1, 0x8000 \n"
329 "2: sc %1, %0 \n" 310 "2: sc %1, %0 \n"
330 " beqz %1, 3f \n" 311 : "=m" (rw->lock), "=&r" (tmp)
331 " nop \n" 312 : "m" (rw->lock)
332 " .subsection 2 \n" 313 : "memory");
333 "3: ll %1, %2 \n" 314 } while (unlikely(!tmp));
334 " bnez %1, 3b \n"
335 " lui %1, 0x8000 \n"
336 " b 2b \n"
337 " nop \n"
338 " .previous \n"
339 " .set reorder \n"
340 : "=m" (rw->lock), "=&r" (tmp)
341 : "m" (rw->lock)
342 : "memory");
343 } 315 }
344 316
345 smp_llsc_mb(); 317 smp_llsc_mb();
@@ -424,25 +396,21 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
424 : "m" (rw->lock) 396 : "m" (rw->lock)
425 : "memory"); 397 : "memory");
426 } else { 398 } else {
427 __asm__ __volatile__( 399 do {
428 " .set noreorder # arch_write_trylock \n" 400 __asm__ __volatile__(
429 " li %2, 0 \n" 401 " ll %1, %3 # arch_write_trylock \n"
430 "1: ll %1, %3 \n" 402 " li %2, 0 \n"
431 " bnez %1, 2f \n" 403 " bnez %1, 2f \n"
432 " lui %1, 0x8000 \n" 404 " lui %1, 0x8000 \n"
433 " sc %1, %0 \n" 405 " sc %1, %0 \n"
434 " beqz %1, 3f \n" 406 " li %2, 1 \n"
435 " li %2, 1 \n" 407 "2: \n"
436 "2: \n" 408 : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
437 __WEAK_LLSC_MB 409 : "m" (rw->lock)
438 " .subsection 2 \n" 410 : "memory");
439 "3: b 1b \n" 411 } while (unlikely(!tmp));
440 " li %2, 0 \n" 412
441 " .previous \n" 413 smp_llsc_mb();
442 " .set reorder \n"
443 : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
444 : "m" (rw->lock)
445 : "memory");
446 } 414 }
447 415
448 return ret; 416 return ret;
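
The pattern in the rewritten rwlock paths above is one ll/sc attempt per iteration, with the retry expressed as a C loop instead of branches inside the asm block. A rough userspace analogue using C11 atomics in place of ll/sc (illustrative only; the real code keys the retry off the sc result in tmp):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int rwlock;	/* >= 0: number of readers, < 0: write-locked */

/* Analogue of the new arch_read_lock() shape: load, retry while a writer
 * holds the lock (the "bltz 1b"), otherwise try to publish count+1 and
 * loop until the store (here a weak CAS, there "sc") succeeds. */
static void read_lock(atomic_int *lock)
{
	int old;

	for (;;) {
		old = atomic_load(lock);
		if (old < 0)
			continue;	/* writer active, spin */
		if (atomic_compare_exchange_weak(lock, &old, old + 1))
			break;		/* "sc" succeeded */
	}
}

static void read_unlock(atomic_int *lock)
{
	atomic_fetch_sub(lock, 1);
}

int main(void)
{
	read_lock(&rwlock);
	printf("readers held: %d\n", atomic_load(&rwlock));
	read_unlock(&rwlock);
	return 0;
}
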
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index c99384018161..a89d1b10d027 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -139,7 +139,7 @@
1391: move ra, k0 1391: move ra, k0
140 li k0, 3 140 li k0, 3
141 mtc0 k0, $22 141 mtc0 k0, $22
142#endif /* CONFIG_CPU_LOONGSON2F */ 142#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
143#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) 143#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
144 lui k1, %hi(kernelsp) 144 lui k1, %hi(kernelsp)
145#else 145#else
@@ -189,6 +189,7 @@
189 LONG_S $0, PT_R0(sp) 189 LONG_S $0, PT_R0(sp)
190 mfc0 v1, CP0_STATUS 190 mfc0 v1, CP0_STATUS
191 LONG_S $2, PT_R2(sp) 191 LONG_S $2, PT_R2(sp)
192 LONG_S v1, PT_STATUS(sp)
192#ifdef CONFIG_MIPS_MT_SMTC 193#ifdef CONFIG_MIPS_MT_SMTC
193 /* 194 /*
194 * Ideally, these instructions would be shuffled in 195 * Ideally, these instructions would be shuffled in
@@ -200,21 +201,20 @@
200 LONG_S k0, PT_TCSTATUS(sp) 201 LONG_S k0, PT_TCSTATUS(sp)
201#endif /* CONFIG_MIPS_MT_SMTC */ 202#endif /* CONFIG_MIPS_MT_SMTC */
202 LONG_S $4, PT_R4(sp) 203 LONG_S $4, PT_R4(sp)
203 LONG_S $5, PT_R5(sp)
204 LONG_S v1, PT_STATUS(sp)
205 mfc0 v1, CP0_CAUSE 204 mfc0 v1, CP0_CAUSE
206 LONG_S $6, PT_R6(sp) 205 LONG_S $5, PT_R5(sp)
207 LONG_S $7, PT_R7(sp)
208 LONG_S v1, PT_CAUSE(sp) 206 LONG_S v1, PT_CAUSE(sp)
207 LONG_S $6, PT_R6(sp)
209 MFC0 v1, CP0_EPC 208 MFC0 v1, CP0_EPC
209 LONG_S $7, PT_R7(sp)
210#ifdef CONFIG_64BIT 210#ifdef CONFIG_64BIT
211 LONG_S $8, PT_R8(sp) 211 LONG_S $8, PT_R8(sp)
212 LONG_S $9, PT_R9(sp) 212 LONG_S $9, PT_R9(sp)
213#endif 213#endif
214 LONG_S v1, PT_EPC(sp)
214 LONG_S $25, PT_R25(sp) 215 LONG_S $25, PT_R25(sp)
215 LONG_S $28, PT_R28(sp) 216 LONG_S $28, PT_R28(sp)
216 LONG_S $31, PT_R31(sp) 217 LONG_S $31, PT_R31(sp)
217 LONG_S v1, PT_EPC(sp)
218 ori $28, sp, _THREAD_MASK 218 ori $28, sp, _THREAD_MASK
219 xori $28, _THREAD_MASK 219 xori $28, _THREAD_MASK
220#ifdef CONFIG_CPU_CAVIUM_OCTEON 220#ifdef CONFIG_CPU_CAVIUM_OCTEON
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 178f7924149a..895320e25662 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -58,8 +58,12 @@ struct thread_info {
58#define init_stack (init_thread_union.stack) 58#define init_stack (init_thread_union.stack)
59 59
60/* How to get the thread information struct from C. */ 60/* How to get the thread information struct from C. */
61register struct thread_info *__current_thread_info __asm__("$28"); 61static inline struct thread_info *current_thread_info(void)
62#define current_thread_info() __current_thread_info 62{
63 register struct thread_info *__current_thread_info __asm__("$28");
64
65 return __current_thread_info;
66}
63 67
64#endif /* !__ASSEMBLY__ */ 68#endif /* !__ASSEMBLY__ */
65 69
diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h
index debc8009bd58..2d7b9df4542d 100644
--- a/arch/mips/include/asm/time.h
+++ b/arch/mips/include/asm/time.h
@@ -52,13 +52,15 @@ extern int (*perf_irq)(void);
52 */ 52 */
53extern unsigned int __weak get_c0_compare_int(void); 53extern unsigned int __weak get_c0_compare_int(void);
54extern int r4k_clockevent_init(void); 54extern int r4k_clockevent_init(void);
55extern int smtc_clockevent_init(void);
56extern int gic_clockevent_init(void);
55 57
56static inline int mips_clockevent_init(void) 58static inline int mips_clockevent_init(void)
57{ 59{
58#ifdef CONFIG_MIPS_MT_SMTC 60#ifdef CONFIG_MIPS_MT_SMTC
59 extern int smtc_clockevent_init(void);
60
61 return smtc_clockevent_init(); 61 return smtc_clockevent_init();
62#elif defined(CONFIG_CEVT_GIC)
63 return (gic_clockevent_init() | r4k_clockevent_init());
62#elif defined(CONFIG_CEVT_R4K) 64#elif defined(CONFIG_CEVT_R4K)
63 return r4k_clockevent_init(); 65 return r4k_clockevent_init();
64#else 66#else
@@ -69,9 +71,7 @@ static inline int mips_clockevent_init(void)
69/* 71/*
70 * Initialize the count register as a clocksource 72 * Initialize the count register as a clocksource
71 */ 73 */
72#ifdef CONFIG_CSRC_R4K
73extern int init_r4k_clocksource(void); 74extern int init_r4k_clocksource(void);
74#endif
75 75
76static inline int init_mips_clocksource(void) 76static inline int init_mips_clocksource(void)
77{ 77{
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index bd87e36bf26a..f3fa3750f577 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -23,7 +23,11 @@
23 */ 23 */
24#ifdef CONFIG_32BIT 24#ifdef CONFIG_32BIT
25 25
26#define __UA_LIMIT 0x80000000UL 26#ifdef CONFIG_KVM_GUEST
27#define __UA_LIMIT 0x40000000UL
28#else
29#define __UA_LIMIT 0x80000000UL
30#endif
27 31
28#define __UA_ADDR ".word" 32#define __UA_ADDR ".word"
29#define __UA_LA "la" 33#define __UA_LA "la"
@@ -55,8 +59,13 @@ extern u64 __ua_limit;
55 * address in this range it's the process's problem, not ours :-) 59 * address in this range it's the process's problem, not ours :-)
56 */ 60 */
57 61
62#ifdef CONFIG_KVM_GUEST
63#define KERNEL_DS ((mm_segment_t) { 0x80000000UL })
64#define USER_DS ((mm_segment_t) { 0xC0000000UL })
65#else
58#define KERNEL_DS ((mm_segment_t) { 0UL }) 66#define KERNEL_DS ((mm_segment_t) { 0UL })
59#define USER_DS ((mm_segment_t) { __UA_LIMIT }) 67#define USER_DS ((mm_segment_t) { __UA_LIMIT })
68#endif
60 69
61#define VERIFY_READ 0 70#define VERIFY_READ 0
62#define VERIFY_WRITE 1 71#define VERIFY_WRITE 1
@@ -261,6 +270,7 @@ do { \
261 __asm__ __volatile__( \ 270 __asm__ __volatile__( \
262 "1: " insn " %1, %3 \n" \ 271 "1: " insn " %1, %3 \n" \
263 "2: \n" \ 272 "2: \n" \
273 " .insn \n" \
264 " .section .fixup,\"ax\" \n" \ 274 " .section .fixup,\"ax\" \n" \
265 "3: li %0, %4 \n" \ 275 "3: li %0, %4 \n" \
266 " j 2b \n" \ 276 " j 2b \n" \
@@ -287,7 +297,9 @@ do { \
287 __asm__ __volatile__( \ 297 __asm__ __volatile__( \
288 "1: lw %1, (%3) \n" \ 298 "1: lw %1, (%3) \n" \
289 "2: lw %D1, 4(%3) \n" \ 299 "2: lw %D1, 4(%3) \n" \
290 "3: .section .fixup,\"ax\" \n" \ 300 "3: \n" \
301 " .insn \n" \
302 " .section .fixup,\"ax\" \n" \
291 "4: li %0, %4 \n" \ 303 "4: li %0, %4 \n" \
292 " move %1, $0 \n" \ 304 " move %1, $0 \n" \
293 " move %D1, $0 \n" \ 305 " move %D1, $0 \n" \
@@ -355,6 +367,7 @@ do { \
355 __asm__ __volatile__( \ 367 __asm__ __volatile__( \
356 "1: " insn " %z2, %3 # __put_user_asm\n" \ 368 "1: " insn " %z2, %3 # __put_user_asm\n" \
357 "2: \n" \ 369 "2: \n" \
370 " .insn \n" \
358 " .section .fixup,\"ax\" \n" \ 371 " .section .fixup,\"ax\" \n" \
359 "3: li %0, %4 \n" \ 372 "3: li %0, %4 \n" \
360 " j 2b \n" \ 373 " j 2b \n" \
@@ -373,6 +386,7 @@ do { \
373 "1: sw %2, (%3) # __put_user_asm_ll32 \n" \ 386 "1: sw %2, (%3) # __put_user_asm_ll32 \n" \
374 "2: sw %D2, 4(%3) \n" \ 387 "2: sw %D2, 4(%3) \n" \
375 "3: \n" \ 388 "3: \n" \
389 " .insn \n" \
376 " .section .fixup,\"ax\" \n" \ 390 " .section .fixup,\"ax\" \n" \
377 "4: li %0, %4 \n" \ 391 "4: li %0, %4 \n" \
378 " j 3b \n" \ 392 " j 3b \n" \
@@ -524,6 +538,7 @@ do { \
524 __asm__ __volatile__( \ 538 __asm__ __volatile__( \
525 "1: " insn " %1, %3 \n" \ 539 "1: " insn " %1, %3 \n" \
526 "2: \n" \ 540 "2: \n" \
541 " .insn \n" \
527 " .section .fixup,\"ax\" \n" \ 542 " .section .fixup,\"ax\" \n" \
528 "3: li %0, %4 \n" \ 543 "3: li %0, %4 \n" \
529 " j 2b \n" \ 544 " j 2b \n" \
@@ -549,7 +564,9 @@ do { \
549 "1: ulw %1, (%3) \n" \ 564 "1: ulw %1, (%3) \n" \
550 "2: ulw %D1, 4(%3) \n" \ 565 "2: ulw %D1, 4(%3) \n" \
551 " move %0, $0 \n" \ 566 " move %0, $0 \n" \
552 "3: .section .fixup,\"ax\" \n" \ 567 "3: \n" \
568 " .insn \n" \
569 " .section .fixup,\"ax\" \n" \
553 "4: li %0, %4 \n" \ 570 "4: li %0, %4 \n" \
554 " move %1, $0 \n" \ 571 " move %1, $0 \n" \
555 " move %D1, $0 \n" \ 572 " move %D1, $0 \n" \
@@ -616,6 +633,7 @@ do { \
616 __asm__ __volatile__( \ 633 __asm__ __volatile__( \
617 "1: " insn " %z2, %3 # __put_user_unaligned_asm\n" \ 634 "1: " insn " %z2, %3 # __put_user_unaligned_asm\n" \
618 "2: \n" \ 635 "2: \n" \
636 " .insn \n" \
619 " .section .fixup,\"ax\" \n" \ 637 " .section .fixup,\"ax\" \n" \
620 "3: li %0, %4 \n" \ 638 "3: li %0, %4 \n" \
621 " j 2b \n" \ 639 " j 2b \n" \
@@ -634,6 +652,7 @@ do { \
634 "1: sw %2, (%3) # __put_user_unaligned_asm_ll32 \n" \ 652 "1: sw %2, (%3) # __put_user_unaligned_asm_ll32 \n" \
635 "2: sw %D2, 4(%3) \n" \ 653 "2: sw %D2, 4(%3) \n" \
636 "3: \n" \ 654 "3: \n" \
655 " .insn \n" \
637 " .section .fixup,\"ax\" \n" \ 656 " .section .fixup,\"ax\" \n" \
638 "4: li %0, %4 \n" \ 657 "4: li %0, %4 \n" \
639 " j 3b \n" \ 658 " j 3b \n" \
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index 058e941626a6..370d967725c2 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -6,7 +6,7 @@
6 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer 6 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
7 * Copyright (C) 2005 Maciej W. Rozycki 7 * Copyright (C) 2005 Maciej W. Rozycki
8 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) 8 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
9 * Copyright (C) 2012 MIPS Technologies, Inc. 9 * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
10 */ 10 */
11 11
12#include <linux/types.h> 12#include <linux/types.h>
@@ -22,44 +22,75 @@
22#define UASM_EXPORT_SYMBOL(sym) 22#define UASM_EXPORT_SYMBOL(sym)
23#endif 23#endif
24 24
25#define _UASM_ISA_CLASSIC 0
26#define _UASM_ISA_MICROMIPS 1
27
28#ifndef UASM_ISA
29#ifdef CONFIG_CPU_MICROMIPS
30#define UASM_ISA _UASM_ISA_MICROMIPS
31#else
32#define UASM_ISA _UASM_ISA_CLASSIC
33#endif
34#endif
35
36#if (UASM_ISA == _UASM_ISA_CLASSIC)
37#ifdef CONFIG_CPU_MICROMIPS
38#define ISAOPC(op) CL_uasm_i##op
39#define ISAFUNC(x) CL_##x
40#else
41#define ISAOPC(op) uasm_i##op
42#define ISAFUNC(x) x
43#endif
44#elif (UASM_ISA == _UASM_ISA_MICROMIPS)
45#ifdef CONFIG_CPU_MICROMIPS
46#define ISAOPC(op) uasm_i##op
47#define ISAFUNC(x) x
48#else
49#define ISAOPC(op) MM_uasm_i##op
50#define ISAFUNC(x) MM_##x
51#endif
52#else
53#error Unsupported micro-assembler ISA!!!
54#endif
55
25#define Ip_u1u2u3(op) \ 56#define Ip_u1u2u3(op) \
26void __uasminit \ 57void __uasminit \
27uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) 58ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
28 59
29#define Ip_u2u1u3(op) \ 60#define Ip_u2u1u3(op) \
30void __uasminit \ 61void __uasminit \
31uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) 62ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
32 63
33#define Ip_u3u1u2(op) \ 64#define Ip_u3u1u2(op) \
34void __uasminit \ 65void __uasminit \
35uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) 66ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
36 67
37#define Ip_u1u2s3(op) \ 68#define Ip_u1u2s3(op) \
38void __uasminit \ 69void __uasminit \
39uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c) 70ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
40 71
41#define Ip_u2s3u1(op) \ 72#define Ip_u2s3u1(op) \
42void __uasminit \ 73void __uasminit \
43uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c) 74ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
44 75
45#define Ip_u2u1s3(op) \ 76#define Ip_u2u1s3(op) \
46void __uasminit \ 77void __uasminit \
47uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c) 78ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
48 79
49#define Ip_u2u1msbu3(op) \ 80#define Ip_u2u1msbu3(op) \
50void __uasminit \ 81void __uasminit \
51uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \ 82ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
52 unsigned int d) 83 unsigned int d)
53 84
54#define Ip_u1u2(op) \ 85#define Ip_u1u2(op) \
55void __uasminit uasm_i##op(u32 **buf, unsigned int a, unsigned int b) 86void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
56 87
57#define Ip_u1s2(op) \ 88#define Ip_u1s2(op) \
58void __uasminit uasm_i##op(u32 **buf, unsigned int a, signed int b) 89void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, signed int b)
59 90
60#define Ip_u1(op) void __uasminit uasm_i##op(u32 **buf, unsigned int a) 91#define Ip_u1(op) void __uasminit ISAOPC(op)(u32 **buf, unsigned int a)
61 92
62#define Ip_0(op) void __uasminit uasm_i##op(u32 **buf) 93#define Ip_0(op) void __uasminit ISAOPC(op)(u32 **buf)
63 94
64Ip_u2u1s3(_addiu); 95Ip_u2u1s3(_addiu);
65Ip_u3u1u2(_addu); 96Ip_u3u1u2(_addu);
@@ -132,19 +163,20 @@ struct uasm_label {
132 int lab; 163 int lab;
133}; 164};
134 165
135void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid); 166void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr,
167 int lid);
136#ifdef CONFIG_64BIT 168#ifdef CONFIG_64BIT
137int uasm_in_compat_space_p(long addr); 169int ISAFUNC(uasm_in_compat_space_p)(long addr);
138#endif 170#endif
139int uasm_rel_hi(long val); 171int ISAFUNC(uasm_rel_hi)(long val);
140int uasm_rel_lo(long val); 172int ISAFUNC(uasm_rel_lo)(long val);
141void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr); 173void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr);
142void UASM_i_LA(u32 **buf, unsigned int rs, long addr); 174void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr);
143 175
144#define UASM_L_LA(lb) \ 176#define UASM_L_LA(lb) \
145static inline void __uasminit uasm_l##lb(struct uasm_label **lab, u32 *addr) \ 177static inline void __uasminit ISAFUNC(uasm_l##lb)(struct uasm_label **lab, u32 *addr) \
146{ \ 178{ \
147 uasm_build_label(lab, addr, label##lb); \ 179 ISAFUNC(uasm_build_label)(lab, addr, label##lb); \
148} 180}
149 181
150/* convenience macros for instructions */ 182/* convenience macros for instructions */
@@ -196,27 +228,27 @@ static inline void uasm_i_drotr_safe(u32 **p, unsigned int a1,
196 unsigned int a2, unsigned int a3) 228 unsigned int a2, unsigned int a3)
197{ 229{
198 if (a3 < 32) 230 if (a3 < 32)
199 uasm_i_drotr(p, a1, a2, a3); 231 ISAOPC(_drotr)(p, a1, a2, a3);
200 else 232 else
201 uasm_i_drotr32(p, a1, a2, a3 - 32); 233 ISAOPC(_drotr32)(p, a1, a2, a3 - 32);
202} 234}
203 235
204static inline void uasm_i_dsll_safe(u32 **p, unsigned int a1, 236static inline void uasm_i_dsll_safe(u32 **p, unsigned int a1,
205 unsigned int a2, unsigned int a3) 237 unsigned int a2, unsigned int a3)
206{ 238{
207 if (a3 < 32) 239 if (a3 < 32)
208 uasm_i_dsll(p, a1, a2, a3); 240 ISAOPC(_dsll)(p, a1, a2, a3);
209 else 241 else
210 uasm_i_dsll32(p, a1, a2, a3 - 32); 242 ISAOPC(_dsll32)(p, a1, a2, a3 - 32);
211} 243}
212 244
213static inline void uasm_i_dsrl_safe(u32 **p, unsigned int a1, 245static inline void uasm_i_dsrl_safe(u32 **p, unsigned int a1,
214 unsigned int a2, unsigned int a3) 246 unsigned int a2, unsigned int a3)
215{ 247{
216 if (a3 < 32) 248 if (a3 < 32)
217 uasm_i_dsrl(p, a1, a2, a3); 249 ISAOPC(_dsrl)(p, a1, a2, a3);
218 else 250 else
219 uasm_i_dsrl32(p, a1, a2, a3 - 32); 251 ISAOPC(_dsrl32)(p, a1, a2, a3 - 32);
220} 252}
221 253
222/* Handle relocations. */ 254/* Handle relocations. */
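
The ISAOPC()/ISAFUNC() wrappers above let the same uasm sources be built twice, once emitting classic MIPS and once emitting microMIPS, with the config option deciding which set of symbols keeps the unprefixed names. A stripped-down sketch of that name-selection trick (the two function bodies are assumptions for illustration; only the token-pasting idea matches the header, which also has a CL_ variant not shown here):

#include <stdio.h>

#ifdef CONFIG_CPU_MICROMIPS
#define ISAOPC(op)	MM_uasm_i##op		/* microMIPS build gets the MM_ set */
#else
#define ISAOPC(op)	uasm_i##op		/* classic build keeps the plain names */
#endif

static void uasm_i_addiu(int rt, int rs, int imm)
{
	printf("classic addiu $%d, $%d, %d\n", rt, rs, imm);
}

static void MM_uasm_i_addiu(int rt, int rs, int imm)
{
	printf("microMIPS addiu $%d, $%d, %d\n", rt, rs, imm);
}

int main(void)
{
	/* Token pasting turns this into a call to one of the two above. */
	ISAOPC(_addiu)(2, 3, 4);
	(void)uasm_i_addiu; (void)MM_uasm_i_addiu;	/* silence unused warnings */
	return 0;
}
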
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 4d078815eaa5..0f4aec2ad1e6 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -7,6 +7,7 @@
7 * 7 *
8 * Copyright (C) 1996, 2000 by Ralf Baechle 8 * Copyright (C) 1996, 2000 by Ralf Baechle
9 * Copyright (C) 2006 by Thiemo Seufer 9 * Copyright (C) 2006 by Thiemo Seufer
10 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
10 */ 11 */
11#ifndef _UAPI_ASM_INST_H 12#ifndef _UAPI_ASM_INST_H
12#define _UAPI_ASM_INST_H 13#define _UAPI_ASM_INST_H
@@ -193,6 +194,282 @@ enum lx_func {
193}; 194};
194 195
195/* 196/*
197 * (microMIPS) Major opcodes.
198 */
199enum mm_major_op {
200 mm_pool32a_op, mm_pool16a_op, mm_lbu16_op, mm_move16_op,
201 mm_addi32_op, mm_lbu32_op, mm_sb32_op, mm_lb32_op,
202 mm_pool32b_op, mm_pool16b_op, mm_lhu16_op, mm_andi16_op,
203 mm_addiu32_op, mm_lhu32_op, mm_sh32_op, mm_lh32_op,
204 mm_pool32i_op, mm_pool16c_op, mm_lwsp16_op, mm_pool16d_op,
205 mm_ori32_op, mm_pool32f_op, mm_reserved1_op, mm_reserved2_op,
206 mm_pool32c_op, mm_lwgp16_op, mm_lw16_op, mm_pool16e_op,
207 mm_xori32_op, mm_jals32_op, mm_addiupc_op, mm_reserved3_op,
208 mm_reserved4_op, mm_pool16f_op, mm_sb16_op, mm_beqz16_op,
209 mm_slti32_op, mm_beq32_op, mm_swc132_op, mm_lwc132_op,
210 mm_reserved5_op, mm_reserved6_op, mm_sh16_op, mm_bnez16_op,
211 mm_sltiu32_op, mm_bne32_op, mm_sdc132_op, mm_ldc132_op,
212 mm_reserved7_op, mm_reserved8_op, mm_swsp16_op, mm_b16_op,
213 mm_andi32_op, mm_j32_op, mm_sd32_op, mm_ld32_op,
214 mm_reserved11_op, mm_reserved12_op, mm_sw16_op, mm_li16_op,
215 mm_jalx32_op, mm_jal32_op, mm_sw32_op, mm_lw32_op,
216};
217
218/*
219 * (microMIPS) POOL32I minor opcodes.
220 */
221enum mm_32i_minor_op {
222 mm_bltz_op, mm_bltzal_op, mm_bgez_op, mm_bgezal_op,
223 mm_blez_op, mm_bnezc_op, mm_bgtz_op, mm_beqzc_op,
224 mm_tlti_op, mm_tgei_op, mm_tltiu_op, mm_tgeiu_op,
225 mm_tnei_op, mm_lui_op, mm_teqi_op, mm_reserved13_op,
226 mm_synci_op, mm_bltzals_op, mm_reserved14_op, mm_bgezals_op,
227 mm_bc2f_op, mm_bc2t_op, mm_reserved15_op, mm_reserved16_op,
228 mm_reserved17_op, mm_reserved18_op, mm_bposge64_op, mm_bposge32_op,
229 mm_bc1f_op, mm_bc1t_op, mm_reserved19_op, mm_reserved20_op,
230 mm_bc1any2f_op, mm_bc1any2t_op, mm_bc1any4f_op, mm_bc1any4t_op,
231};
232
233/*
234 * (microMIPS) POOL32A minor opcodes.
235 */
236enum mm_32a_minor_op {
237 mm_sll32_op = 0x000,
238 mm_ins_op = 0x00c,
239 mm_ext_op = 0x02c,
240 mm_pool32axf_op = 0x03c,
241 mm_srl32_op = 0x040,
242 mm_sra_op = 0x080,
243 mm_rotr_op = 0x0c0,
244 mm_lwxs_op = 0x118,
245 mm_addu32_op = 0x150,
246 mm_subu32_op = 0x1d0,
247 mm_and_op = 0x250,
248 mm_or32_op = 0x290,
249 mm_xor32_op = 0x310,
250};
251
252/*
253 * (microMIPS) POOL32B functions.
254 */
255enum mm_32b_func {
256 mm_lwc2_func = 0x0,
257 mm_lwp_func = 0x1,
258 mm_ldc2_func = 0x2,
259 mm_ldp_func = 0x4,
260 mm_lwm32_func = 0x5,
261 mm_cache_func = 0x6,
262 mm_ldm_func = 0x7,
263 mm_swc2_func = 0x8,
264 mm_swp_func = 0x9,
265 mm_sdc2_func = 0xa,
266 mm_sdp_func = 0xc,
267 mm_swm32_func = 0xd,
268 mm_sdm_func = 0xf,
269};
270
271/*
272 * (microMIPS) POOL32C functions.
273 */
274enum mm_32c_func {
275 mm_pref_func = 0x2,
276 mm_ll_func = 0x3,
277 mm_swr_func = 0x9,
278 mm_sc_func = 0xb,
279 mm_lwu_func = 0xe,
280};
281
282/*
283 * (microMIPS) POOL32AXF minor opcodes.
284 */
285enum mm_32axf_minor_op {
286 mm_mfc0_op = 0x003,
287 mm_mtc0_op = 0x00b,
288 mm_tlbp_op = 0x00d,
289 mm_jalr_op = 0x03c,
290 mm_tlbr_op = 0x04d,
291 mm_jalrhb_op = 0x07c,
292 mm_tlbwi_op = 0x08d,
293 mm_tlbwr_op = 0x0cd,
294 mm_jalrs_op = 0x13c,
295 mm_jalrshb_op = 0x17c,
296 mm_syscall_op = 0x22d,
297 mm_eret_op = 0x3cd,
298};
299
300/*
301 * (microMIPS) POOL32F minor opcodes.
302 */
303enum mm_32f_minor_op {
304 mm_32f_00_op = 0x00,
305 mm_32f_01_op = 0x01,
306 mm_32f_02_op = 0x02,
307 mm_32f_10_op = 0x08,
308 mm_32f_11_op = 0x09,
309 mm_32f_12_op = 0x0a,
310 mm_32f_20_op = 0x10,
311 mm_32f_30_op = 0x18,
312 mm_32f_40_op = 0x20,
313 mm_32f_41_op = 0x21,
314 mm_32f_42_op = 0x22,
315 mm_32f_50_op = 0x28,
316 mm_32f_51_op = 0x29,
317 mm_32f_52_op = 0x2a,
318 mm_32f_60_op = 0x30,
319 mm_32f_70_op = 0x38,
320 mm_32f_73_op = 0x3b,
321 mm_32f_74_op = 0x3c,
322};
323
324/*
325 * (microMIPS) POOL32F secondary minor opcodes.
326 */
327enum mm_32f_10_minor_op {
328 mm_lwxc1_op = 0x1,
329 mm_swxc1_op,
330 mm_ldxc1_op,
331 mm_sdxc1_op,
332 mm_luxc1_op,
333 mm_suxc1_op,
334};
335
336enum mm_32f_func {
337 mm_lwxc1_func = 0x048,
338 mm_swxc1_func = 0x088,
339 mm_ldxc1_func = 0x0c8,
340 mm_sdxc1_func = 0x108,
341};
342
343/*
344 * (microMIPS) POOL32F secondary minor opcodes.
345 */
346enum mm_32f_40_minor_op {
347 mm_fmovf_op,
348 mm_fmovt_op,
349};
350
351/*
352 * (microMIPS) POOL32F secondary minor opcodes.
353 */
354enum mm_32f_60_minor_op {
355 mm_fadd_op,
356 mm_fsub_op,
357 mm_fmul_op,
358 mm_fdiv_op,
359};
360
361/*
362 * (microMIPS) POOL32F secondary minor opcodes.
363 */
364enum mm_32f_70_minor_op {
365 mm_fmovn_op,
366 mm_fmovz_op,
367};
368
369/*
370 * (microMIPS) POOL32FXF secondary minor opcodes for POOL32F.
371 */
372enum mm_32f_73_minor_op {
373 mm_fmov0_op = 0x01,
374 mm_fcvtl_op = 0x04,
375 mm_movf0_op = 0x05,
376 mm_frsqrt_op = 0x08,
377 mm_ffloorl_op = 0x0c,
378 mm_fabs0_op = 0x0d,
379 mm_fcvtw_op = 0x24,
380 mm_movt0_op = 0x25,
381 mm_fsqrt_op = 0x28,
382 mm_ffloorw_op = 0x2c,
383 mm_fneg0_op = 0x2d,
384 mm_cfc1_op = 0x40,
385 mm_frecip_op = 0x48,
386 mm_fceill_op = 0x4c,
387 mm_fcvtd0_op = 0x4d,
388 mm_ctc1_op = 0x60,
389 mm_fceilw_op = 0x6c,
390 mm_fcvts0_op = 0x6d,
391 mm_mfc1_op = 0x80,
392 mm_fmov1_op = 0x81,
393 mm_movf1_op = 0x85,
394 mm_ftruncl_op = 0x8c,
395 mm_fabs1_op = 0x8d,
396 mm_mtc1_op = 0xa0,
397 mm_movt1_op = 0xa5,
398 mm_ftruncw_op = 0xac,
399 mm_fneg1_op = 0xad,
400 mm_froundl_op = 0xcc,
401 mm_fcvtd1_op = 0xcd,
402 mm_froundw_op = 0xec,
403 mm_fcvts1_op = 0xed,
404};
405
406/*
407 * (microMIPS) POOL16C minor opcodes.
408 */
409enum mm_16c_minor_op {
410 mm_lwm16_op = 0x04,
411 mm_swm16_op = 0x05,
412 mm_jr16_op = 0x18,
413 mm_jrc_op = 0x1a,
414 mm_jalr16_op = 0x1c,
415 mm_jalrs16_op = 0x1e,
416};
417
418/*
419 * (microMIPS) POOL16D minor opcodes.
420 */
421enum mm_16d_minor_op {
422 mm_addius5_func,
423 mm_addiusp_func,
424};
425
426/*
427 * (MIPS16e) opcodes.
428 */
429enum MIPS16e_ops {
430 MIPS16e_jal_op = 003,
431 MIPS16e_ld_op = 007,
432 MIPS16e_i8_op = 014,
433 MIPS16e_sd_op = 017,
434 MIPS16e_lb_op = 020,
435 MIPS16e_lh_op = 021,
436 MIPS16e_lwsp_op = 022,
437 MIPS16e_lw_op = 023,
438 MIPS16e_lbu_op = 024,
439 MIPS16e_lhu_op = 025,
440 MIPS16e_lwpc_op = 026,
441 MIPS16e_lwu_op = 027,
442 MIPS16e_sb_op = 030,
443 MIPS16e_sh_op = 031,
444 MIPS16e_swsp_op = 032,
445 MIPS16e_sw_op = 033,
446 MIPS16e_rr_op = 035,
447 MIPS16e_extend_op = 036,
448 MIPS16e_i64_op = 037,
449};
450
451enum MIPS16e_i64_func {
452 MIPS16e_ldsp_func,
453 MIPS16e_sdsp_func,
454 MIPS16e_sdrasp_func,
455 MIPS16e_dadjsp_func,
456 MIPS16e_ldpc_func,
457};
458
459enum MIPS16e_rr_func {
460 MIPS16e_jr_func,
461};
462
463enum MIPS6e_i8_func {
464 MIPS16e_swrasp_func = 02,
465};
466
467/*
468 * (microMIPS & MIPS16e) NOP instruction.
469 */
470#define MM_NOP16 0x0c00
471
472/*
196 * Damn ... bitfields depend from byteorder :-( 473 * Damn ... bitfields depend from byteorder :-(
197 */ 474 */
198#ifdef __MIPSEB__ 475#ifdef __MIPSEB__
@@ -311,6 +588,262 @@ struct v_format { /* MDMX vector format */
311 ;))))))) 588 ;)))))))
312}; 589};
313 590
591/*
592 * microMIPS instruction formats (32-bit length)
593 *
594 * NOTE:
595 * Parentheses denote whether the format is a microMIPS instruction or
596 * a MIPS32 instruction re-encoded for use in the microMIPS ASE.
597 */
598struct fb_format { /* FPU branch format (MIPS32) */
599 BITFIELD_FIELD(unsigned int opcode : 6,
600 BITFIELD_FIELD(unsigned int bc : 5,
601 BITFIELD_FIELD(unsigned int cc : 3,
602 BITFIELD_FIELD(unsigned int flag : 2,
603 BITFIELD_FIELD(signed int simmediate : 16,
604 ;)))))
605};
606
607struct fp0_format { /* FPU multiply and add format (MIPS32) */
608 BITFIELD_FIELD(unsigned int opcode : 6,
609 BITFIELD_FIELD(unsigned int fmt : 5,
610 BITFIELD_FIELD(unsigned int ft : 5,
611 BITFIELD_FIELD(unsigned int fs : 5,
612 BITFIELD_FIELD(unsigned int fd : 5,
613 BITFIELD_FIELD(unsigned int func : 6,
614 ;))))))
615};
616
617struct mm_fp0_format {	/* FPU multiply and add format (microMIPS) */
618 BITFIELD_FIELD(unsigned int opcode : 6,
619 BITFIELD_FIELD(unsigned int ft : 5,
620 BITFIELD_FIELD(unsigned int fs : 5,
621 BITFIELD_FIELD(unsigned int fd : 5,
622 BITFIELD_FIELD(unsigned int fmt : 3,
623 BITFIELD_FIELD(unsigned int op : 2,
624 BITFIELD_FIELD(unsigned int func : 6,
625 ;)))))))
626};
627
628struct fp1_format { /* FPU mfc1 and cfc1 format (MIPS32) */
629 BITFIELD_FIELD(unsigned int opcode : 6,
630 BITFIELD_FIELD(unsigned int op : 5,
631 BITFIELD_FIELD(unsigned int rt : 5,
632 BITFIELD_FIELD(unsigned int fs : 5,
633 BITFIELD_FIELD(unsigned int fd : 5,
634 BITFIELD_FIELD(unsigned int func : 6,
635 ;))))))
636};
637
638struct mm_fp1_format { /* FPU mfc1 and cfc1 format (microMIPS) */
639 BITFIELD_FIELD(unsigned int opcode : 6,
640 BITFIELD_FIELD(unsigned int rt : 5,
641 BITFIELD_FIELD(unsigned int fs : 5,
642 BITFIELD_FIELD(unsigned int fmt : 2,
643 BITFIELD_FIELD(unsigned int op : 8,
644 BITFIELD_FIELD(unsigned int func : 6,
645 ;))))))
646};
647
648struct mm_fp2_format { /* FPU movt and movf format (microMIPS) */
649 BITFIELD_FIELD(unsigned int opcode : 6,
650 BITFIELD_FIELD(unsigned int fd : 5,
651 BITFIELD_FIELD(unsigned int fs : 5,
652 BITFIELD_FIELD(unsigned int cc : 3,
653 BITFIELD_FIELD(unsigned int zero : 2,
654 BITFIELD_FIELD(unsigned int fmt : 2,
655 BITFIELD_FIELD(unsigned int op : 3,
656 BITFIELD_FIELD(unsigned int func : 6,
657 ;))))))))
658};
659
660struct mm_fp3_format { /* FPU abs and neg format (microMIPS) */
661 BITFIELD_FIELD(unsigned int opcode : 6,
662 BITFIELD_FIELD(unsigned int rt : 5,
663 BITFIELD_FIELD(unsigned int fs : 5,
664 BITFIELD_FIELD(unsigned int fmt : 3,
665 BITFIELD_FIELD(unsigned int op : 7,
666 BITFIELD_FIELD(unsigned int func : 6,
667 ;))))))
668};
669
670struct mm_fp4_format { /* FPU c.cond format (microMIPS) */
671 BITFIELD_FIELD(unsigned int opcode : 6,
672 BITFIELD_FIELD(unsigned int rt : 5,
673 BITFIELD_FIELD(unsigned int fs : 5,
674 BITFIELD_FIELD(unsigned int cc : 3,
675 BITFIELD_FIELD(unsigned int fmt : 3,
676 BITFIELD_FIELD(unsigned int cond : 4,
677 BITFIELD_FIELD(unsigned int func : 6,
678 ;)))))))
679};
680
681struct mm_fp5_format { /* FPU lwxc1 and swxc1 format (microMIPS) */
682 BITFIELD_FIELD(unsigned int opcode : 6,
683 BITFIELD_FIELD(unsigned int index : 5,
684 BITFIELD_FIELD(unsigned int base : 5,
685 BITFIELD_FIELD(unsigned int fd : 5,
686 BITFIELD_FIELD(unsigned int op : 5,
687 BITFIELD_FIELD(unsigned int func : 6,
688 ;))))))
689};
690
691struct fp6_format { /* FPU madd and msub format (MIPS IV) */
692 BITFIELD_FIELD(unsigned int opcode : 6,
693 BITFIELD_FIELD(unsigned int fr : 5,
694 BITFIELD_FIELD(unsigned int ft : 5,
695 BITFIELD_FIELD(unsigned int fs : 5,
696 BITFIELD_FIELD(unsigned int fd : 5,
697 BITFIELD_FIELD(unsigned int func : 6,
698 ;))))))
699};
700
701struct mm_fp6_format { /* FPU madd and msub format (microMIPS) */
702 BITFIELD_FIELD(unsigned int opcode : 6,
703 BITFIELD_FIELD(unsigned int ft : 5,
704 BITFIELD_FIELD(unsigned int fs : 5,
705 BITFIELD_FIELD(unsigned int fd : 5,
706 BITFIELD_FIELD(unsigned int fr : 5,
707 BITFIELD_FIELD(unsigned int func : 6,
708 ;))))))
709};
710
711struct mm_i_format { /* Immediate format (microMIPS) */
712 BITFIELD_FIELD(unsigned int opcode : 6,
713 BITFIELD_FIELD(unsigned int rt : 5,
714 BITFIELD_FIELD(unsigned int rs : 5,
715 BITFIELD_FIELD(signed int simmediate : 16,
716 ;))))
717};
718
719struct mm_m_format { /* Multi-word load/store format (microMIPS) */
720 BITFIELD_FIELD(unsigned int opcode : 6,
721 BITFIELD_FIELD(unsigned int rd : 5,
722 BITFIELD_FIELD(unsigned int base : 5,
723 BITFIELD_FIELD(unsigned int func : 4,
724 BITFIELD_FIELD(signed int simmediate : 12,
725 ;)))))
726};
727
728struct mm_x_format { /* Scaled indexed load format (microMIPS) */
729 BITFIELD_FIELD(unsigned int opcode : 6,
730 BITFIELD_FIELD(unsigned int index : 5,
731 BITFIELD_FIELD(unsigned int base : 5,
732 BITFIELD_FIELD(unsigned int rd : 5,
733 BITFIELD_FIELD(unsigned int func : 11,
734 ;)))))
735};
736
737/*
738 * microMIPS instruction formats (16-bit length)
739 */
740struct mm_b0_format { /* Unconditional branch format (microMIPS) */
741 BITFIELD_FIELD(unsigned int opcode : 6,
742 BITFIELD_FIELD(signed int simmediate : 10,
743 BITFIELD_FIELD(unsigned int : 16, /* Ignored */
744 ;)))
745};
746
747struct mm_b1_format { /* Conditional branch format (microMIPS) */
748 BITFIELD_FIELD(unsigned int opcode : 6,
749 BITFIELD_FIELD(unsigned int rs : 3,
750 BITFIELD_FIELD(signed int simmediate : 7,
751 BITFIELD_FIELD(unsigned int : 16, /* Ignored */
752 ;))))
753};
754
755struct mm16_m_format { /* Multi-word load/store format */
756 BITFIELD_FIELD(unsigned int opcode : 6,
757 BITFIELD_FIELD(unsigned int func : 4,
758 BITFIELD_FIELD(unsigned int rlist : 2,
759 BITFIELD_FIELD(unsigned int imm : 4,
760 BITFIELD_FIELD(unsigned int : 16, /* Ignored */
761 ;)))))
762};
763
764struct mm16_rb_format { /* Signed immediate format */
765 BITFIELD_FIELD(unsigned int opcode : 6,
766 BITFIELD_FIELD(unsigned int rt : 3,
767 BITFIELD_FIELD(unsigned int base : 3,
768 BITFIELD_FIELD(signed int simmediate : 4,
769 BITFIELD_FIELD(unsigned int : 16, /* Ignored */
770 ;)))))
771};
772
773struct mm16_r3_format { /* Load from global pointer format */
774 BITFIELD_FIELD(unsigned int opcode : 6,
775 BITFIELD_FIELD(unsigned int rt : 3,
776 BITFIELD_FIELD(signed int simmediate : 7,
777 BITFIELD_FIELD(unsigned int : 16, /* Ignored */
778 ;))))
779};
780
781struct mm16_r5_format { /* Load/store from stack pointer format */
782 BITFIELD_FIELD(unsigned int opcode : 6,
783 BITFIELD_FIELD(unsigned int rt : 5,
784 BITFIELD_FIELD(signed int simmediate : 5,
785 BITFIELD_FIELD(unsigned int : 16, /* Ignored */
786 ;))))
787};
788
789/*
790 * MIPS16e instruction formats (16-bit length)
791 */
792struct m16e_rr {
793 BITFIELD_FIELD(unsigned int opcode : 5,
794 BITFIELD_FIELD(unsigned int rx : 3,
795 BITFIELD_FIELD(unsigned int nd : 1,
796 BITFIELD_FIELD(unsigned int l : 1,
797 BITFIELD_FIELD(unsigned int ra : 1,
798 BITFIELD_FIELD(unsigned int func : 5,
799 ;))))))
800};
801
802struct m16e_jal {
803 BITFIELD_FIELD(unsigned int opcode : 5,
804 BITFIELD_FIELD(unsigned int x : 1,
805 BITFIELD_FIELD(unsigned int imm20_16 : 5,
806 BITFIELD_FIELD(signed int imm25_21 : 5,
807 ;))))
808};
809
810struct m16e_i64 {
811 BITFIELD_FIELD(unsigned int opcode : 5,
812 BITFIELD_FIELD(unsigned int func : 3,
813 BITFIELD_FIELD(unsigned int imm : 8,
814 ;)))
815};
816
817struct m16e_ri64 {
818 BITFIELD_FIELD(unsigned int opcode : 5,
819 BITFIELD_FIELD(unsigned int func : 3,
820 BITFIELD_FIELD(unsigned int ry : 3,
821 BITFIELD_FIELD(unsigned int imm : 5,
822 ;))))
823};
824
825struct m16e_ri {
826 BITFIELD_FIELD(unsigned int opcode : 5,
827 BITFIELD_FIELD(unsigned int rx : 3,
828 BITFIELD_FIELD(unsigned int imm : 8,
829 ;)))
830};
831
832struct m16e_rri {
833 BITFIELD_FIELD(unsigned int opcode : 5,
834 BITFIELD_FIELD(unsigned int rx : 3,
835 BITFIELD_FIELD(unsigned int ry : 3,
836 BITFIELD_FIELD(unsigned int imm : 5,
837 ;))))
838};
839
840struct m16e_i8 {
841 BITFIELD_FIELD(unsigned int opcode : 5,
842 BITFIELD_FIELD(unsigned int func : 3,
843 BITFIELD_FIELD(unsigned int imm : 8,
844 ;)))
845};
846
314union mips_instruction { 847union mips_instruction {
315 unsigned int word; 848 unsigned int word;
316 unsigned short halfword[2]; 849 unsigned short halfword[2];
@@ -326,6 +859,37 @@ union mips_instruction {
326 struct b_format b_format; 859 struct b_format b_format;
327 struct ps_format ps_format; 860 struct ps_format ps_format;
328 struct v_format v_format; 861 struct v_format v_format;
862 struct fb_format fb_format;
863 struct fp0_format fp0_format;
864 struct mm_fp0_format mm_fp0_format;
865 struct fp1_format fp1_format;
866 struct mm_fp1_format mm_fp1_format;
867 struct mm_fp2_format mm_fp2_format;
868 struct mm_fp3_format mm_fp3_format;
869 struct mm_fp4_format mm_fp4_format;
870 struct mm_fp5_format mm_fp5_format;
871 struct fp6_format fp6_format;
872 struct mm_fp6_format mm_fp6_format;
873 struct mm_i_format mm_i_format;
874 struct mm_m_format mm_m_format;
875 struct mm_x_format mm_x_format;
876 struct mm_b0_format mm_b0_format;
877 struct mm_b1_format mm_b1_format;
878 struct mm16_m_format mm16_m_format;
879 struct mm16_rb_format mm16_rb_format;
880 struct mm16_r3_format mm16_r3_format;
881 struct mm16_r5_format mm16_r5_format;
882};
883
884union mips16e_instruction {
885 unsigned int full : 16;
886 struct m16e_rr rr;
887 struct m16e_jal jal;
888 struct m16e_i64 i64;
889 struct m16e_ri64 ri64;
890 struct m16e_ri ri;
891 struct m16e_rri rri;
892 struct m16e_i8 i8;
329}; 893};
330 894
331#endif /* _UAPI_ASM_INST_H */ 895#endif /* _UAPI_ASM_INST_H */
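
The BITFIELD_FIELD structures above describe fixed bit layouts; the macro only hides the endian-dependent declaration order the existing comment complains about. The same mm_i_format layout (opcode:6, rt:5, rs:5, simmediate:16) can also be decoded with shifts and masks, which is sometimes easier to reason about. A self-contained sketch (the word built in main() is assembled field by field for the test, it is not claimed to be a real encoding):

#include <stdint.h>
#include <stdio.h>

struct i_fields {
	unsigned opcode;	/* bits 31..26 */
	unsigned rt;		/* bits 25..21 */
	unsigned rs;		/* bits 20..16 */
	int simmediate;		/* bits 15..0, sign-extended */
};

static struct i_fields decode_mm_i(uint32_t word)
{
	struct i_fields f = {
		.opcode     = (word >> 26) & 0x3f,
		.rt         = (word >> 21) & 0x1f,
		.rs         = (word >> 16) & 0x1f,
		.simmediate = (int16_t)(word & 0xffff),
	};
	return f;
}

int main(void)
{
	/* Build a word field by field, then decode it back. */
	uint32_t word = (0x0cu << 26) | (3u << 21) | (4u << 16) | 0xfffeu;
	struct i_fields f = decode_mm_i(word);

	printf("opcode=%u rt=%u rs=%u imm=%d\n",
	       f.opcode, f.rt, f.rs, f.simmediate);	/* 12 3 4 -2 */
	return 0;
}
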
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 520a908d45d6..6ad9e04bdf62 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -5,7 +5,7 @@
5extra-y := head.o vmlinux.lds 5extra-y := head.o vmlinux.lds
6 6
7obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ 7obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
8 ptrace.o reset.o setup.o signal.o syscall.o \ 8 prom.o ptrace.o reset.o setup.o signal.o syscall.o \
9 time.o topology.o traps.o unaligned.o watch.o vdso.o 9 time.o topology.o traps.o unaligned.o watch.o vdso.o
10 10
11ifdef CONFIG_FUNCTION_TRACER 11ifdef CONFIG_FUNCTION_TRACER
@@ -19,15 +19,16 @@ obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
19obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o 19obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
20obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o 20obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o
21obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o 21obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o
22obj-$(CONFIG_CEVT_GIC) += cevt-gic.o
22obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o 23obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
23obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o 24obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o
24obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o 25obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o
25obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o 26obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o
27obj-$(CONFIG_CSRC_GIC) += csrc-gic.o
26obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o 28obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o
27obj-$(CONFIG_CSRC_POWERTV) += csrc-powertv.o 29obj-$(CONFIG_CSRC_POWERTV) += csrc-powertv.o
28obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o 30obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o
29obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o 31obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o
30obj-$(CONFIG_CSRC_GIC) += csrc-gic.o
31obj-$(CONFIG_SYNC_R4K) += sync-r4k.o 32obj-$(CONFIG_SYNC_R4K) += sync-r4k.o
32 33
33obj-$(CONFIG_STACKTRACE) += stacktrace.o 34obj-$(CONFIG_STACKTRACE) += stacktrace.o
@@ -86,8 +87,6 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
86obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o 87obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o
87obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o 88obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o
88 89
89obj-$(CONFIG_OF) += prom.o
90
91CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) 90CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
92 91
93obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o 92obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 50285b2c7ffe..0845091ba480 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -17,6 +17,8 @@
17#include <asm/ptrace.h> 17#include <asm/ptrace.h>
18#include <asm/processor.h> 18#include <asm/processor.h>
19 19
20#include <linux/kvm_host.h>
21
20void output_ptreg_defines(void) 22void output_ptreg_defines(void)
21{ 23{
22 COMMENT("MIPS pt_regs offsets."); 24 COMMENT("MIPS pt_regs offsets.");
@@ -328,3 +330,67 @@ void output_pbe_defines(void)
328 BLANK(); 330 BLANK();
329} 331}
330#endif 332#endif
333
334void output_kvm_defines(void)
335{
336	COMMENT(" KVM/MIPS Specific offsets. ");
337 DEFINE(VCPU_ARCH_SIZE, sizeof(struct kvm_vcpu_arch));
338 OFFSET(VCPU_RUN, kvm_vcpu, run);
339 OFFSET(VCPU_HOST_ARCH, kvm_vcpu, arch);
340
341 OFFSET(VCPU_HOST_EBASE, kvm_vcpu_arch, host_ebase);
342 OFFSET(VCPU_GUEST_EBASE, kvm_vcpu_arch, guest_ebase);
343
344 OFFSET(VCPU_HOST_STACK, kvm_vcpu_arch, host_stack);
345 OFFSET(VCPU_HOST_GP, kvm_vcpu_arch, host_gp);
346
347 OFFSET(VCPU_HOST_CP0_BADVADDR, kvm_vcpu_arch, host_cp0_badvaddr);
348 OFFSET(VCPU_HOST_CP0_CAUSE, kvm_vcpu_arch, host_cp0_cause);
349 OFFSET(VCPU_HOST_EPC, kvm_vcpu_arch, host_cp0_epc);
350 OFFSET(VCPU_HOST_ENTRYHI, kvm_vcpu_arch, host_cp0_entryhi);
351
352 OFFSET(VCPU_GUEST_INST, kvm_vcpu_arch, guest_inst);
353
354 OFFSET(VCPU_R0, kvm_vcpu_arch, gprs[0]);
355 OFFSET(VCPU_R1, kvm_vcpu_arch, gprs[1]);
356 OFFSET(VCPU_R2, kvm_vcpu_arch, gprs[2]);
357 OFFSET(VCPU_R3, kvm_vcpu_arch, gprs[3]);
358 OFFSET(VCPU_R4, kvm_vcpu_arch, gprs[4]);
359 OFFSET(VCPU_R5, kvm_vcpu_arch, gprs[5]);
360 OFFSET(VCPU_R6, kvm_vcpu_arch, gprs[6]);
361 OFFSET(VCPU_R7, kvm_vcpu_arch, gprs[7]);
362 OFFSET(VCPU_R8, kvm_vcpu_arch, gprs[8]);
363 OFFSET(VCPU_R9, kvm_vcpu_arch, gprs[9]);
364 OFFSET(VCPU_R10, kvm_vcpu_arch, gprs[10]);
365 OFFSET(VCPU_R11, kvm_vcpu_arch, gprs[11]);
366 OFFSET(VCPU_R12, kvm_vcpu_arch, gprs[12]);
367 OFFSET(VCPU_R13, kvm_vcpu_arch, gprs[13]);
368 OFFSET(VCPU_R14, kvm_vcpu_arch, gprs[14]);
369 OFFSET(VCPU_R15, kvm_vcpu_arch, gprs[15]);
370 OFFSET(VCPU_R16, kvm_vcpu_arch, gprs[16]);
371 OFFSET(VCPU_R17, kvm_vcpu_arch, gprs[17]);
372 OFFSET(VCPU_R18, kvm_vcpu_arch, gprs[18]);
373 OFFSET(VCPU_R19, kvm_vcpu_arch, gprs[19]);
374 OFFSET(VCPU_R20, kvm_vcpu_arch, gprs[20]);
375 OFFSET(VCPU_R21, kvm_vcpu_arch, gprs[21]);
376 OFFSET(VCPU_R22, kvm_vcpu_arch, gprs[22]);
377 OFFSET(VCPU_R23, kvm_vcpu_arch, gprs[23]);
378 OFFSET(VCPU_R24, kvm_vcpu_arch, gprs[24]);
379 OFFSET(VCPU_R25, kvm_vcpu_arch, gprs[25]);
380 OFFSET(VCPU_R26, kvm_vcpu_arch, gprs[26]);
381 OFFSET(VCPU_R27, kvm_vcpu_arch, gprs[27]);
382 OFFSET(VCPU_R28, kvm_vcpu_arch, gprs[28]);
383 OFFSET(VCPU_R29, kvm_vcpu_arch, gprs[29]);
384 OFFSET(VCPU_R30, kvm_vcpu_arch, gprs[30]);
385 OFFSET(VCPU_R31, kvm_vcpu_arch, gprs[31]);
386 OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
387 OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
388 OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
389 OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
390 OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
391 OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);
392
393 OFFSET(COP0_TLB_HI, mips_coproc, reg[MIPS_CP0_TLB_HI][0]);
394 OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]);
395 BLANK();
396}
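
output_kvm_defines() uses the usual asm-offsets machinery: each OFFSET() line becomes a named constant that assembly (here the KVM world-switch code) can use instead of hard-coded structure offsets. A userspace imitation of the idea, with a made-up stand-in struct rather than the real kvm_vcpu_arch:

#include <stddef.h>
#include <stdio.h>

struct vcpu_arch_example {	/* stand-in layout, not the kernel's */
	unsigned long gprs[32];
	unsigned long lo, hi, pc;
};

/* The kernel's OFFSET() emits these through special asm markers that a
 * build script turns into a header; printing them is enough for the idea. */
#define OFFSET(sym, type, member) \
	printf("#define %-10s %zu\n", #sym, offsetof(struct type, member))

int main(void)
{
	OFFSET(VCPU_R0, vcpu_arch_example, gprs[0]);
	OFFSET(VCPU_LO, vcpu_arch_example, lo);
	OFFSET(VCPU_PC, vcpu_arch_example, pc);
	return 0;
}
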
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index 556a4357d7fc..97c5a1668e53 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -48,7 +48,11 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
48 __res; \ 48 __res; \
49}) 49})
50 50
51#ifdef CONFIG_KVM_GUEST
52#define TASK32_SIZE 0x3fff8000UL
53#else
51#define TASK32_SIZE 0x7fff8000UL 54#define TASK32_SIZE 0x7fff8000UL
55#endif
52#undef ELF_ET_DYN_BASE 56#undef ELF_ET_DYN_BASE
53#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) 57#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
54 58
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 83ffe950f710..46c2ad0703a0 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -14,10 +14,186 @@
14#include <asm/cpu.h> 14#include <asm/cpu.h>
15#include <asm/cpu-features.h> 15#include <asm/cpu-features.h>
16#include <asm/fpu.h> 16#include <asm/fpu.h>
17#include <asm/fpu_emulator.h>
17#include <asm/inst.h> 18#include <asm/inst.h>
18#include <asm/ptrace.h> 19#include <asm/ptrace.h>
19#include <asm/uaccess.h> 20#include <asm/uaccess.h>
20 21
22/*
23 * Calculate and return exception PC in case of branch delay slot
24 * for microMIPS and MIPS16e. It does not clear the ISA mode bit.
25 */
26int __isa_exception_epc(struct pt_regs *regs)
27{
28 unsigned short inst;
29 long epc = regs->cp0_epc;
30
31 /* Calculate exception PC in branch delay slot. */
32 if (__get_user(inst, (u16 __user *) msk_isa16_mode(epc))) {
33 /* This should never happen because delay slot was checked. */
34 force_sig(SIGSEGV, current);
35 return epc;
36 }
37 if (cpu_has_mips16) {
38 if (((union mips16e_instruction)inst).ri.opcode
39 == MIPS16e_jal_op)
40 epc += 4;
41 else
42 epc += 2;
43 } else if (mm_insn_16bit(inst))
44 epc += 2;
45 else
46 epc += 4;
47
48 return epc;
49}
50
51/*
52 * Compute return address and emulate branch in microMIPS mode after an
53 * exception only. It does not handle compact branches/jumps and cannot
54 * be used in interrupt context. (Compact branches/jumps do not cause
55 * exceptions.)
56 */
57int __microMIPS_compute_return_epc(struct pt_regs *regs)
58{
59 u16 __user *pc16;
60 u16 halfword;
61 unsigned int word;
62 unsigned long contpc;
63 struct mm_decoded_insn mminsn = { 0 };
64
65 mminsn.micro_mips_mode = 1;
66
67 /* This load never faults. */
68 pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
69 __get_user(halfword, pc16);
70 pc16++;
71 contpc = regs->cp0_epc + 2;
72 word = ((unsigned int)halfword << 16);
73 mminsn.pc_inc = 2;
74
75 if (!mm_insn_16bit(halfword)) {
76 __get_user(halfword, pc16);
77 pc16++;
78 contpc = regs->cp0_epc + 4;
79 mminsn.pc_inc = 4;
80 word |= halfword;
81 }
82 mminsn.insn = word;
83
84 if (get_user(halfword, pc16))
85 goto sigsegv;
86 mminsn.next_pc_inc = 2;
87 word = ((unsigned int)halfword << 16);
88
89 if (!mm_insn_16bit(halfword)) {
90 pc16++;
91 if (get_user(halfword, pc16))
92 goto sigsegv;
93 mminsn.next_pc_inc = 4;
94 word |= halfword;
95 }
96 mminsn.next_insn = word;
97
98 mm_isBranchInstr(regs, mminsn, &contpc);
99
100 regs->cp0_epc = contpc;
101
102 return 0;
103
104sigsegv:
105 force_sig(SIGSEGV, current);
106 return -EFAULT;
107}
108
109/*
110 * Compute return address and emulate branch in MIPS16e mode after an
111 * exception only. It does not handle compact branches/jumps and cannot
112 * be used in interrupt context. (Compact branches/jumps do not cause
113 * exceptions.)
114 */
115int __MIPS16e_compute_return_epc(struct pt_regs *regs)
116{
117 u16 __user *addr;
118 union mips16e_instruction inst;
119 u16 inst2;
120 u32 fullinst;
121 long epc;
122
123 epc = regs->cp0_epc;
124
125 /* Read the instruction. */
126 addr = (u16 __user *)msk_isa16_mode(epc);
127 if (__get_user(inst.full, addr)) {
128 force_sig(SIGSEGV, current);
129 return -EFAULT;
130 }
131
132 switch (inst.ri.opcode) {
133 case MIPS16e_extend_op:
134 regs->cp0_epc += 4;
135 return 0;
136
137 /*
138 * JAL and JALX in MIPS16e mode
139 */
140 case MIPS16e_jal_op:
141 addr += 1;
142 if (__get_user(inst2, addr)) {
143 force_sig(SIGSEGV, current);
144 return -EFAULT;
145 }
146 fullinst = ((unsigned)inst.full << 16) | inst2;
147 regs->regs[31] = epc + 6;
148 epc += 4;
149 epc >>= 28;
150 epc <<= 28;
151 /*
152	 * JAL:5 X:1 TARGET[20:16]:5 TARGET[25:21]:5 TARGET[15:0]:16
153 *
154 * ......TARGET[15:0].................TARGET[20:16]...........
155 * ......TARGET[25:21]
156 */
157 epc |=
158 ((fullinst & 0xffff) << 2) | ((fullinst & 0x3e00000) >> 3) |
159 ((fullinst & 0x1f0000) << 7);
160 if (!inst.jal.x)
161 set_isa16_mode(epc); /* Set ISA mode bit. */
162 regs->cp0_epc = epc;
163 return 0;
164
165 /*
166 * J(AL)R(C)
167 */
168 case MIPS16e_rr_op:
169 if (inst.rr.func == MIPS16e_jr_func) {
170
171 if (inst.rr.ra)
172 regs->cp0_epc = regs->regs[31];
173 else
174 regs->cp0_epc =
175 regs->regs[reg16to32[inst.rr.rx]];
176
177 if (inst.rr.l) {
178 if (inst.rr.nd)
179 regs->regs[31] = epc + 2;
180 else
181 regs->regs[31] = epc + 4;
182 }
183 return 0;
184 }
185 break;
186 }
187
188 /*
189 * All other cases have no branch delay slot and are 16-bits.
190 * Branches do not cause an exception.
191 */
192 regs->cp0_epc += 2;
193
194 return 0;
195}
196
21/** 197/**
22 * __compute_return_epc_for_insn - Computes the return address and do emulate 198 * __compute_return_epc_for_insn - Computes the return address and do emulate
23 * branch simulation, if required. 199 * branch simulation, if required.
@@ -129,6 +305,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
129 epc <<= 28; 305 epc <<= 28;
130 epc |= (insn.j_format.target << 2); 306 epc |= (insn.j_format.target << 2);
131 regs->cp0_epc = epc; 307 regs->cp0_epc = epc;
308 if (insn.i_format.opcode == jalx_op)
309 set_isa16_mode(regs->cp0_epc);
132 break; 310 break;
133 311
134 /* 312 /*
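
The JAL/JALX handling added to __MIPS16e_compute_return_epc() above is mostly bit surgery, so here is the same reconstruction pulled out into a standalone sketch. mips16e_jal_target() is an illustrative name, not a kernel function; epc is taken as the address already advanced past the two halfwords, and the ISA-mode bit handling is omitted.

    #include <stdio.h>

    /*
     * Rebuild the jump target from a 32-bit MIPS16e JAL/JALX pair using the
     * same masks and shifts as the hunk above.  In the encoding the immediate
     * is stored as TARGET[20:16] (insn bits 25:21), TARGET[25:21] (insn bits
     * 20:16) and TARGET[15:0] (insn bits 15:0).
     */
    static unsigned long mips16e_jal_target(unsigned long epc, unsigned int fullinst)
    {
            unsigned long target = epc & ~0x0fffffffUL;     /* keep the 256MB region  */

            target |= ((fullinst & 0x0000ffffU) << 2)       /* TARGET[15:0]  -> 17:2  */
                    | ((fullinst & 0x03e00000U) >> 3)       /* TARGET[20:16] -> 22:18 */
                    | ((fullinst & 0x001f0000U) << 7);      /* TARGET[25:21] -> 27:23 */
            return target;
    }

    int main(void)
    {
            /* Arbitrary example values, just to show the field placement. */
            printf("%#lx\n", mips16e_jal_target(0x80001008UL, 0x1a3f1234U));
            return 0;
    }
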
diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c
new file mode 100644
index 000000000000..730eaf92c018
--- /dev/null
+++ b/arch/mips/kernel/cevt-gic.c
@@ -0,0 +1,104 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2013 Imagination Technologies Ltd.
7 */
8#include <linux/clockchips.h>
9#include <linux/interrupt.h>
10#include <linux/percpu.h>
11#include <linux/smp.h>
12#include <linux/irq.h>
13
14#include <asm/time.h>
15#include <asm/gic.h>
16#include <asm/mips-boards/maltaint.h>
17
18DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device);
19int gic_timer_irq_installed;
20
21
22static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
23{
24 u64 cnt;
25 int res;
26
27 cnt = gic_read_count();
28 cnt += (u64)delta;
29 gic_write_compare(cnt);
30 res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
31 return res;
32}
33
34void gic_set_clock_mode(enum clock_event_mode mode,
35 struct clock_event_device *evt)
36{
37 /* Nothing to do ... */
38}
39
40irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
41{
42 struct clock_event_device *cd;
43 int cpu = smp_processor_id();
44
45 gic_write_compare(gic_read_compare());
46 cd = &per_cpu(gic_clockevent_device, cpu);
47 cd->event_handler(cd);
48 return IRQ_HANDLED;
49}
50
51struct irqaction gic_compare_irqaction = {
52 .handler = gic_compare_interrupt,
53 .flags = IRQF_PERCPU | IRQF_TIMER,
54 .name = "timer",
55};
56
57
58void gic_event_handler(struct clock_event_device *dev)
59{
60}
61
62int __cpuinit gic_clockevent_init(void)
63{
64 unsigned int cpu = smp_processor_id();
65 struct clock_event_device *cd;
66 unsigned int irq;
67
68 if (!cpu_has_counter || !gic_frequency)
69 return -ENXIO;
70
71 irq = MIPS_GIC_IRQ_BASE;
72
73 cd = &per_cpu(gic_clockevent_device, cpu);
74
75 cd->name = "MIPS GIC";
76 cd->features = CLOCK_EVT_FEAT_ONESHOT;
77
78 clockevent_set_clock(cd, gic_frequency);
79
80 /* Calculate the min / max delta */
81 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
82 cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
83
84 cd->rating = 300;
85 cd->irq = irq;
86 cd->cpumask = cpumask_of(cpu);
87 cd->set_next_event = gic_next_event;
88 cd->set_mode = gic_set_clock_mode;
89 cd->event_handler = gic_event_handler;
90
91 clockevents_register_device(cd);
92
93 GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_MAP), 0x80000002);
94 GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), GIC_VPE_SMASK_CMP_MSK);
95
96 if (gic_timer_irq_installed)
97 return 0;
98
99 gic_timer_irq_installed = 1;
100
101 setup_irq(irq, &gic_compare_irqaction);
102 irq_set_handler(irq, handle_percpu_irq);
103 return 0;
104}
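
One subtle point in gic_next_event() above is the final check that becomes -ETIME: after writing the compare register it re-reads the counter, and the subtract-then-signed-compare idiom reports whether the counter has already reached the programmed deadline, and keeps working across counter wrap. A scaled-down, stand-alone illustration of that idiom (32-bit here purely for brevity):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors "(int)(gic_read_count() - cnt) >= 0" from gic_next_event(). */
    static int deadline_missed(uint32_t now, uint32_t deadline)
    {
            return (int32_t)(now - deadline) >= 0;
    }

    int main(void)
    {
            printf("%d\n", deadline_missed(0x00000005, 0xfffffff0));  /* 1: already past it */
            printf("%d\n", deadline_missed(0xfffffff0, 0x00000005));  /* 0: still ahead     */
            return 0;
    }
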
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 07b847d77f5d..02033eaf8825 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -23,7 +23,6 @@
23 */ 23 */
24 24
25#ifndef CONFIG_MIPS_MT_SMTC 25#ifndef CONFIG_MIPS_MT_SMTC
26
27static int mips_next_event(unsigned long delta, 26static int mips_next_event(unsigned long delta,
28 struct clock_event_device *evt) 27 struct clock_event_device *evt)
29{ 28{
@@ -49,7 +48,6 @@ DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
49int cp0_timer_irq_installed; 48int cp0_timer_irq_installed;
50 49
51#ifndef CONFIG_MIPS_MT_SMTC 50#ifndef CONFIG_MIPS_MT_SMTC
52
53irqreturn_t c0_compare_interrupt(int irq, void *dev_id) 51irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
54{ 52{
55 const int r2 = cpu_has_mips_r2; 53 const int r2 = cpu_has_mips_r2;
@@ -74,6 +72,9 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
74 /* Clear Count/Compare Interrupt */ 72 /* Clear Count/Compare Interrupt */
75 write_c0_compare(read_c0_compare()); 73 write_c0_compare(read_c0_compare());
76 cd = &per_cpu(mips_clockevent_device, cpu); 74 cd = &per_cpu(mips_clockevent_device, cpu);
75#ifdef CONFIG_CEVT_GIC
76 if (!gic_present)
77#endif
77 cd->event_handler(cd); 78 cd->event_handler(cd);
78 } 79 }
79 80
@@ -118,6 +119,10 @@ int c0_compare_int_usable(void)
118 unsigned int delta; 119 unsigned int delta;
119 unsigned int cnt; 120 unsigned int cnt;
120 121
122#ifdef CONFIG_KVM_GUEST
123 return 1;
124#endif
125
121 /* 126 /*
122 * IP7 already pending? Try to clear it by acking the timer. 127 * IP7 already pending? Try to clear it by acking the timer.
123 */ 128 */
@@ -166,7 +171,6 @@ int c0_compare_int_usable(void)
166} 171}
167 172
168#ifndef CONFIG_MIPS_MT_SMTC 173#ifndef CONFIG_MIPS_MT_SMTC
169
170int __cpuinit r4k_clockevent_init(void) 174int __cpuinit r4k_clockevent_init(void)
171{ 175{
172 unsigned int cpu = smp_processor_id(); 176 unsigned int cpu = smp_processor_id();
@@ -206,6 +210,9 @@ int __cpuinit r4k_clockevent_init(void)
206 cd->set_mode = mips_set_clock_mode; 210 cd->set_mode = mips_set_clock_mode;
207 cd->event_handler = mips_event_handler; 211 cd->event_handler = mips_event_handler;
208 212
213#ifdef CONFIG_CEVT_GIC
214 if (!gic_present)
215#endif
209 clockevents_register_device(cd); 216 clockevents_register_device(cd);
210 217
211 if (cp0_timer_irq_installed) 218 if (cp0_timer_irq_installed)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 5fe66a0c3224..4bbffdb9024f 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -470,6 +470,9 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
470 c->options |= MIPS_CPU_ULRI; 470 c->options |= MIPS_CPU_ULRI;
471 if (config3 & MIPS_CONF3_ISA) 471 if (config3 & MIPS_CONF3_ISA)
472 c->options |= MIPS_CPU_MICROMIPS; 472 c->options |= MIPS_CPU_MICROMIPS;
473#ifdef CONFIG_CPU_MICROMIPS
474 write_c0_config3(read_c0_config3() | MIPS_CONF3_ISA_OE);
475#endif
473 if (config3 & MIPS_CONF3_VZ) 476 if (config3 & MIPS_CONF3_VZ)
474 c->ases |= MIPS_ASE_VZ; 477 c->ases |= MIPS_ASE_VZ;
475 478
diff --git a/arch/mips/kernel/csrc-gic.c b/arch/mips/kernel/csrc-gic.c
index 5dca24bce51b..e02620901117 100644
--- a/arch/mips/kernel/csrc-gic.c
+++ b/arch/mips/kernel/csrc-gic.c
@@ -5,23 +5,14 @@
5 * 5 *
6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7 */ 7 */
8#include <linux/clocksource.h>
9#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/time.h>
10 10
11#include <asm/time.h>
12#include <asm/gic.h> 11#include <asm/gic.h>
13 12
14static cycle_t gic_hpt_read(struct clocksource *cs) 13static cycle_t gic_hpt_read(struct clocksource *cs)
15{ 14{
16 unsigned int hi, hi2, lo; 15 return gic_read_count();
17
18 do {
19 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi);
20 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo);
21 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2);
22 } while (hi2 != hi);
23
24 return (((cycle_t) hi) << 32) + lo;
25} 16}
26 17
27static struct clocksource gic_clocksource = { 18static struct clocksource gic_clocksource = {
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index ecb347ce1b3d..5c2ba9f08a80 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -5,8 +5,8 @@
5 * 5 *
6 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle 6 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 * Copyright (C) 2001 MIPS Technologies, Inc.
9 * Copyright (C) 2002, 2007 Maciej W. Rozycki 8 * Copyright (C) 2002, 2007 Maciej W. Rozycki
9 * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved.
10 */ 10 */
11#include <linux/init.h> 11#include <linux/init.h>
12 12
@@ -21,8 +21,10 @@
21#include <asm/war.h> 21#include <asm/war.h>
22#include <asm/thread_info.h> 22#include <asm/thread_info.h>
23 23
24#ifdef CONFIG_MIPS_MT_SMTC
24#define PANIC_PIC(msg) \ 25#define PANIC_PIC(msg) \
25 .set push; \ 26 .set push; \
27 .set nomicromips; \
26 .set reorder; \ 28 .set reorder; \
27 PTR_LA a0,8f; \ 29 PTR_LA a0,8f; \
28 .set noat; \ 30 .set noat; \
@@ -31,17 +33,10 @@
319: b 9b; \ 339: b 9b; \
32 .set pop; \ 34 .set pop; \
33 TEXT(msg) 35 TEXT(msg)
36#endif
34 37
35 __INIT 38 __INIT
36 39
37NESTED(except_vec0_generic, 0, sp)
38 PANIC_PIC("Exception vector 0 called")
39 END(except_vec0_generic)
40
41NESTED(except_vec1_generic, 0, sp)
42 PANIC_PIC("Exception vector 1 called")
43 END(except_vec1_generic)
44
45/* 40/*
46 * General exception vector for all other CPUs. 41 * General exception vector for all other CPUs.
47 * 42 *
@@ -138,12 +133,19 @@ LEAF(r4k_wait)
138 nop 133 nop
139 nop 134 nop
140 nop 135 nop
136#ifdef CONFIG_CPU_MICROMIPS
137 nop
138 nop
139 nop
140 nop
141#endif
141 .set mips3 142 .set mips3
142 wait 143 wait
143 /* end of rollback region (the region size must be power of two) */ 144 /* end of rollback region (the region size must be power of two) */
144 .set pop
1451: 1451:
146 jr ra 146 jr ra
147 nop
148 .set pop
147 END(r4k_wait) 149 END(r4k_wait)
148 150
149 .macro BUILD_ROLLBACK_PROLOGUE handler 151 .macro BUILD_ROLLBACK_PROLOGUE handler
@@ -201,7 +203,11 @@ NESTED(handle_int, PT_SIZE, sp)
201 LONG_L s0, TI_REGS($28) 203 LONG_L s0, TI_REGS($28)
202 LONG_S sp, TI_REGS($28) 204 LONG_S sp, TI_REGS($28)
203 PTR_LA ra, ret_from_irq 205 PTR_LA ra, ret_from_irq
204 j plat_irq_dispatch 206 PTR_LA v0, plat_irq_dispatch
207 jr v0
208#ifdef CONFIG_CPU_MICROMIPS
209 nop
210#endif
205 END(handle_int) 211 END(handle_int)
206 212
207 __INIT 213 __INIT
@@ -222,11 +228,14 @@ NESTED(except_vec4, 0, sp)
222/* 228/*
223 * EJTAG debug exception handler. 229 * EJTAG debug exception handler.
224 * The EJTAG debug exception entry point is 0xbfc00480, which 230 * The EJTAG debug exception entry point is 0xbfc00480, which
225 * normally is in the boot PROM, so the boot PROM must do a 231 * normally is in the boot PROM, so the boot PROM must do an
226 * unconditional jump to this vector. 232 * unconditional jump to this vector.
227 */ 233 */
228NESTED(except_vec_ejtag_debug, 0, sp) 234NESTED(except_vec_ejtag_debug, 0, sp)
229 j ejtag_debug_handler 235 j ejtag_debug_handler
236#ifdef CONFIG_CPU_MICROMIPS
237 nop
238#endif
230 END(except_vec_ejtag_debug) 239 END(except_vec_ejtag_debug)
231 240
232 __FINIT 241 __FINIT
@@ -251,9 +260,10 @@ NESTED(except_vec_vi, 0, sp)
251FEXPORT(except_vec_vi_mori) 260FEXPORT(except_vec_vi_mori)
252 ori a0, $0, 0 261 ori a0, $0, 0
253#endif /* CONFIG_MIPS_MT_SMTC */ 262#endif /* CONFIG_MIPS_MT_SMTC */
263 PTR_LA v1, except_vec_vi_handler
254FEXPORT(except_vec_vi_lui) 264FEXPORT(except_vec_vi_lui)
255 lui v0, 0 /* Patched */ 265 lui v0, 0 /* Patched */
256 j except_vec_vi_handler 266 jr v1
257FEXPORT(except_vec_vi_ori) 267FEXPORT(except_vec_vi_ori)
258 ori v0, 0 /* Patched */ 268 ori v0, 0 /* Patched */
259 .set pop 269 .set pop
@@ -354,6 +364,9 @@ EXPORT(ejtag_debug_buffer)
354 */ 364 */
355NESTED(except_vec_nmi, 0, sp) 365NESTED(except_vec_nmi, 0, sp)
356 j nmi_handler 366 j nmi_handler
367#ifdef CONFIG_CPU_MICROMIPS
368 nop
369#endif
357 END(except_vec_nmi) 370 END(except_vec_nmi)
358 371
359 __FINIT 372 __FINIT
@@ -480,7 +493,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
480 .set noreorder 493 .set noreorder
481 /* check if TLB contains a entry for EPC */ 494 /* check if TLB contains a entry for EPC */
482 MFC0 k1, CP0_ENTRYHI 495 MFC0 k1, CP0_ENTRYHI
483 andi k1, 0xff /* ASID_MASK */ 496 andi k1, 0xff /* ASID_MASK patched at run-time!! */
484 MFC0 k0, CP0_EPC 497 MFC0 k0, CP0_EPC
485 PTR_SRL k0, _PAGE_SHIFT + 1 498 PTR_SRL k0, _PAGE_SHIFT + 1
486 PTR_SLL k0, _PAGE_SHIFT + 1 499 PTR_SLL k0, _PAGE_SHIFT + 1
@@ -500,13 +513,35 @@ NESTED(nmi_handler, PT_SIZE, sp)
500 .set push 513 .set push
501 .set noat 514 .set noat
502 .set noreorder 515 .set noreorder
503 /* 0x7c03e83b: rdhwr v1,$29 */ 516 /* MIPS32: 0x7c03e83b: rdhwr v1,$29 */
517 /* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
504 MFC0 k1, CP0_EPC 518 MFC0 k1, CP0_EPC
505 lui k0, 0x7c03 519#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
506 lw k1, (k1) 520 and k0, k1, 1
507 ori k0, 0xe83b 521 beqz k0, 1f
508 .set reorder 522 xor k1, k0
523 lhu k0, (k1)
524 lhu k1, 2(k1)
525 ins k1, k0, 16, 16
526 lui k0, 0x007d
527 b docheck
528 ori k0, 0x6b3c
5291:
530 lui k0, 0x7c03
531 lw k1, (k1)
532 ori k0, 0xe83b
533#else
534 andi k0, k1, 1
535 bnez k0, handle_ri
536 lui k0, 0x7c03
537 lw k1, (k1)
538 ori k0, 0xe83b
539#endif
540 .set reorder
541docheck:
509 bne k0, k1, handle_ri /* if not ours */ 542 bne k0, k1, handle_ri /* if not ours */
543
544isrdhwr:
510 /* The insn is rdhwr. No need to check CAUSE.BD here. */ 545 /* The insn is rdhwr. No need to check CAUSE.BD here. */
511 get_saved_sp /* k1 := current_thread_info */ 546 get_saved_sp /* k1 := current_thread_info */
512 .set noreorder 547 .set noreorder
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index 485e6a961b31..c01b307317a9 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -10,6 +10,7 @@
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/smp.h> 11#include <linux/smp.h>
12#include <linux/irq.h> 12#include <linux/irq.h>
13#include <linux/clocksource.h>
13 14
14#include <asm/io.h> 15#include <asm/io.h>
15#include <asm/gic.h> 16#include <asm/gic.h>
@@ -19,6 +20,8 @@
19#include <linux/hardirq.h> 20#include <linux/hardirq.h>
20#include <asm-generic/bitops/find.h> 21#include <asm-generic/bitops/find.h>
21 22
23unsigned int gic_frequency;
24unsigned int gic_present;
22unsigned long _gic_base; 25unsigned long _gic_base;
23unsigned int gic_irq_base; 26unsigned int gic_irq_base;
24unsigned int gic_irq_flags[GIC_NUM_INTRS]; 27unsigned int gic_irq_flags[GIC_NUM_INTRS];
@@ -30,6 +33,39 @@ static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
30static struct gic_pending_regs pending_regs[NR_CPUS]; 33static struct gic_pending_regs pending_regs[NR_CPUS];
31static struct gic_intrmask_regs intrmask_regs[NR_CPUS]; 34static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
32 35
36#if defined(CONFIG_CSRC_GIC) || defined(CONFIG_CEVT_GIC)
37cycle_t gic_read_count(void)
38{
39 unsigned int hi, hi2, lo;
40
41 do {
42 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi);
43 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo);
44 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2);
45 } while (hi2 != hi);
46
47 return (((cycle_t) hi) << 32) + lo;
48}
49
50void gic_write_compare(cycle_t cnt)
51{
52 GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
53 (int)(cnt >> 32));
54 GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
55 (int)(cnt & 0xffffffff));
56}
57
58cycle_t gic_read_compare(void)
59{
60 unsigned int hi, lo;
61
62 GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), hi);
63 GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), lo);
64
65 return (((cycle_t) hi) << 32) + lo;
66}
67#endif
68
33unsigned int gic_get_timer_pending(void) 69unsigned int gic_get_timer_pending(void)
34{ 70{
35 unsigned int vpe_pending; 71 unsigned int vpe_pending;
@@ -116,6 +152,17 @@ static void __init vpe_local_setup(unsigned int numvpes)
116 } 152 }
117} 153}
118 154
155unsigned int gic_compare_int(void)
156{
157 unsigned int pending;
158
159 GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_PEND), pending);
160 if (pending & GIC_VPE_PEND_CMP_MSK)
161 return 1;
162 else
163 return 0;
164}
165
119unsigned int gic_get_int(void) 166unsigned int gic_get_int(void)
120{ 167{
121 unsigned int i; 168 unsigned int i;
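
gic_read_count() above has to assemble a 64-bit value from two 32-bit GIC registers, hence the re-read of the high word: if the low word wraps between the two reads, the high word changes and the loop retries. A user-space model of the same pattern, with counter_hi/counter_lo as hypothetical stand-ins for the GIC_SH_COUNTER registers:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for GIC_SH_COUNTER_63_32 / GIC_SH_COUNTER_31_00. */
    static volatile uint32_t counter_hi = 0x00000001;
    static volatile uint32_t counter_lo = 0xfffffff0;

    static uint64_t read_counter64(void)
    {
            uint32_t hi, hi2, lo;

            /* Same idea as gic_read_count(): re-read the high half until it is
             * stable, so a low-half wrap between the reads cannot tear the value. */
            do {
                    hi  = counter_hi;
                    lo  = counter_lo;
                    hi2 = counter_hi;
            } while (hi2 != hi);

            return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
            printf("count = %#llx\n", (unsigned long long)read_counter64());
            return 0;
    }
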
diff --git a/arch/mips/kernel/mips_machine.c b/arch/mips/kernel/mips_machine.c
index 411a058d2c53..876097529697 100644
--- a/arch/mips/kernel/mips_machine.c
+++ b/arch/mips/kernel/mips_machine.c
@@ -11,9 +11,9 @@
11#include <linux/slab.h> 11#include <linux/slab.h>
12 12
13#include <asm/mips_machine.h> 13#include <asm/mips_machine.h>
14#include <asm/prom.h>
14 15
15static struct mips_machine *mips_machine __initdata; 16static struct mips_machine *mips_machine __initdata;
16static char *mips_machine_name = "Unknown";
17 17
18#define for_each_machine(mach) \ 18#define for_each_machine(mach) \
19 for ((mach) = (struct mips_machine *)&__mips_machines_start; \ 19 for ((mach) = (struct mips_machine *)&__mips_machines_start; \
@@ -21,25 +21,6 @@ static char *mips_machine_name = "Unknown";
21 (unsigned long)(mach) < (unsigned long)&__mips_machines_end; \ 21 (unsigned long)(mach) < (unsigned long)&__mips_machines_end; \
22 (mach)++) 22 (mach)++)
23 23
24__init void mips_set_machine_name(const char *name)
25{
26 char *p;
27
28 if (name == NULL)
29 return;
30
31 p = kstrdup(name, GFP_KERNEL);
32 if (!p)
33 pr_err("MIPS: no memory for machine_name\n");
34
35 mips_machine_name = p;
36}
37
38char *mips_get_machine_name(void)
39{
40 return mips_machine_name;
41}
42
43__init int mips_machtype_setup(char *id) 24__init int mips_machtype_setup(char *id)
44{ 25{
45 struct mips_machine *mach; 26 struct mips_machine *mach;
@@ -79,7 +60,6 @@ __init void mips_machine_setup(void)
79 return; 60 return;
80 61
81 mips_set_machine_name(mips_machine->mach_name); 62 mips_set_machine_name(mips_machine->mach_name);
82 pr_info("MIPS: machine is %s\n", mips_machine_name);
83 63
84 if (mips_machine->mach_setup) 64 if (mips_machine->mach_setup)
85 mips_machine->mach_setup(); 65 mips_machine->mach_setup();
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 7a54f74b7818..a3e461408b7e 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -12,7 +12,7 @@
12#include <asm/cpu-features.h> 12#include <asm/cpu-features.h>
13#include <asm/mipsregs.h> 13#include <asm/mipsregs.h>
14#include <asm/processor.h> 14#include <asm/processor.h>
15#include <asm/mips_machine.h> 15#include <asm/prom.h>
16 16
17unsigned int vced_count, vcei_count; 17unsigned int vced_count, vcei_count;
18 18
@@ -99,6 +99,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
99 if (cpu_has_vz) seq_printf(m, "%s", " vz"); 99 if (cpu_has_vz) seq_printf(m, "%s", " vz");
100 seq_printf(m, "\n"); 100 seq_printf(m, "\n");
101 101
102 if (cpu_has_mmips) {
103 seq_printf(m, "micromips kernel\t: %s\n",
104 (read_c0_config3() & MIPS_CONF3_ISA_OE) ? "yes" : "no");
105 }
102 seq_printf(m, "shadow register sets\t: %d\n", 106 seq_printf(m, "shadow register sets\t: %d\n",
103 cpu_data[n].srsets); 107 cpu_data[n].srsets);
104 seq_printf(m, "kscratch registers\t: %d\n", 108 seq_printf(m, "kscratch registers\t: %d\n",
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index cfc742d75b7f..eb902c1f0cad 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -7,6 +7,7 @@
7 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org) 7 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
9 * Copyright (C) 2004 Thiemo Seufer 9 * Copyright (C) 2004 Thiemo Seufer
10 * Copyright (C) 2013 Imagination Technologies Ltd.
10 */ 11 */
11#include <linux/errno.h> 12#include <linux/errno.h>
12#include <linux/sched.h> 13#include <linux/sched.h>
@@ -225,34 +226,115 @@ struct mips_frame_info {
225 226
226static inline int is_ra_save_ins(union mips_instruction *ip) 227static inline int is_ra_save_ins(union mips_instruction *ip)
227{ 228{
229#ifdef CONFIG_CPU_MICROMIPS
230 union mips_instruction mmi;
231
232 /*
233 * swsp ra,offset
234 * swm16 reglist,offset(sp)
235 * swm32 reglist,offset(sp)
236 * sw32 ra,offset(sp)
237	 * jraddiusp - NOT SUPPORTED
238 *
239 * microMIPS is way more fun...
240 */
241 if (mm_insn_16bit(ip->halfword[0])) {
242 mmi.word = (ip->halfword[0] << 16);
243 return ((mmi.mm16_r5_format.opcode == mm_swsp16_op &&
244 mmi.mm16_r5_format.rt == 31) ||
245 (mmi.mm16_m_format.opcode == mm_pool16c_op &&
246 mmi.mm16_m_format.func == mm_swm16_op));
247 }
248 else {
249 mmi.halfword[0] = ip->halfword[1];
250 mmi.halfword[1] = ip->halfword[0];
251 return ((mmi.mm_m_format.opcode == mm_pool32b_op &&
252 mmi.mm_m_format.rd > 9 &&
253 mmi.mm_m_format.base == 29 &&
254 mmi.mm_m_format.func == mm_swm32_func) ||
255 (mmi.i_format.opcode == mm_sw32_op &&
256 mmi.i_format.rs == 29 &&
257 mmi.i_format.rt == 31));
258 }
259#else
228 /* sw / sd $ra, offset($sp) */ 260 /* sw / sd $ra, offset($sp) */
229 return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) && 261 return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
230 ip->i_format.rs == 29 && 262 ip->i_format.rs == 29 &&
231 ip->i_format.rt == 31; 263 ip->i_format.rt == 31;
264#endif
232} 265}
233 266
234static inline int is_jal_jalr_jr_ins(union mips_instruction *ip) 267static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
235{ 268{
269#ifdef CONFIG_CPU_MICROMIPS
270 /*
271 * jr16,jrc,jalr16,jalr16
272 * jal
273 * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
274 * jraddiusp - NOT SUPPORTED
275 *
276 * microMIPS is kind of more fun...
277 */
278 union mips_instruction mmi;
279
280 mmi.word = (ip->halfword[0] << 16);
281
282 if ((mmi.mm16_r5_format.opcode == mm_pool16c_op &&
283 (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) ||
284 ip->j_format.opcode == mm_jal32_op)
285 return 1;
286 if (ip->r_format.opcode != mm_pool32a_op ||
287 ip->r_format.func != mm_pool32axf_op)
288 return 0;
289 return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op);
290#else
236 if (ip->j_format.opcode == jal_op) 291 if (ip->j_format.opcode == jal_op)
237 return 1; 292 return 1;
238 if (ip->r_format.opcode != spec_op) 293 if (ip->r_format.opcode != spec_op)
239 return 0; 294 return 0;
240 return ip->r_format.func == jalr_op || ip->r_format.func == jr_op; 295 return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
296#endif
241} 297}
242 298
243static inline int is_sp_move_ins(union mips_instruction *ip) 299static inline int is_sp_move_ins(union mips_instruction *ip)
244{ 300{
301#ifdef CONFIG_CPU_MICROMIPS
302 /*
303 * addiusp -imm
304 * addius5 sp,-imm
305 * addiu32 sp,sp,-imm
306	 * jraddiusp - NOT SUPPORTED
307 *
308 * microMIPS is not more fun...
309 */
310 if (mm_insn_16bit(ip->halfword[0])) {
311 union mips_instruction mmi;
312
313 mmi.word = (ip->halfword[0] << 16);
314 return ((mmi.mm16_r3_format.opcode == mm_pool16d_op &&
315 mmi.mm16_r3_format.simmediate && mm_addiusp_func) ||
316 (mmi.mm16_r5_format.opcode == mm_pool16d_op &&
317 mmi.mm16_r5_format.rt == 29));
318 }
319 return (ip->mm_i_format.opcode == mm_addiu32_op &&
320 ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29);
321#else
245 /* addiu/daddiu sp,sp,-imm */ 322 /* addiu/daddiu sp,sp,-imm */
246 if (ip->i_format.rs != 29 || ip->i_format.rt != 29) 323 if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
247 return 0; 324 return 0;
248 if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op) 325 if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
249 return 1; 326 return 1;
327#endif
250 return 0; 328 return 0;
251} 329}
252 330
253static int get_frame_info(struct mips_frame_info *info) 331static int get_frame_info(struct mips_frame_info *info)
254{ 332{
333#ifdef CONFIG_CPU_MICROMIPS
334 union mips_instruction *ip = (void *) (((char *) info->func) - 1);
335#else
255 union mips_instruction *ip = info->func; 336 union mips_instruction *ip = info->func;
337#endif
256 unsigned max_insns = info->func_size / sizeof(union mips_instruction); 338 unsigned max_insns = info->func_size / sizeof(union mips_instruction);
257 unsigned i; 339 unsigned i;
258 340
@@ -272,7 +354,26 @@ static int get_frame_info(struct mips_frame_info *info)
272 break; 354 break;
273 if (!info->frame_size) { 355 if (!info->frame_size) {
274 if (is_sp_move_ins(ip)) 356 if (is_sp_move_ins(ip))
357 {
358#ifdef CONFIG_CPU_MICROMIPS
359 if (mm_insn_16bit(ip->halfword[0]))
360 {
361 unsigned short tmp;
362
363 if (ip->halfword[0] & mm_addiusp_func)
364 {
365 tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2);
366 info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0));
367 } else {
368 tmp = (ip->halfword[0] >> 1);
369 info->frame_size = -(signed short)(tmp & 0xf);
370 }
371 ip = (void *) &ip->halfword[1];
372 ip--;
373 } else
374#endif
275 info->frame_size = - ip->i_format.simmediate; 375 info->frame_size = - ip->i_format.simmediate;
376 }
276 continue; 377 continue;
277 } 378 }
278 if (info->pc_offset == -1 && is_ra_save_ins(ip)) { 379 if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 028f6f837ef9..5712bb532245 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -23,6 +23,23 @@
23#include <asm/page.h> 23#include <asm/page.h>
24#include <asm/prom.h> 24#include <asm/prom.h>
25 25
26static char mips_machine_name[64] = "Unknown";
27
28__init void mips_set_machine_name(const char *name)
29{
30 if (name == NULL)
31 return;
32
33 strncpy(mips_machine_name, name, sizeof(mips_machine_name));
34 pr_info("MIPS: machine is %s\n", mips_get_machine_name());
35}
36
37char *mips_get_machine_name(void)
38{
39 return mips_machine_name;
40}
41
42#ifdef CONFIG_OF
26int __init early_init_dt_scan_memory_arch(unsigned long node, 43int __init early_init_dt_scan_memory_arch(unsigned long node,
27 const char *uname, int depth, 44 const char *uname, int depth,
28 void *data) 45 void *data)
@@ -50,6 +67,18 @@ void __init early_init_dt_setup_initrd_arch(unsigned long start,
50} 67}
51#endif 68#endif
52 69
70int __init early_init_dt_scan_model(unsigned long node, const char *uname,
71 int depth, void *data)
72{
73 if (!depth) {
74 char *model = of_get_flat_dt_prop(node, "model", NULL);
75
76 if (model)
77 mips_set_machine_name(model);
78 }
79 return 0;
80}
81
53void __init early_init_devtree(void *params) 82void __init early_init_devtree(void *params)
54{ 83{
55 /* Setup flat device-tree pointer */ 84 /* Setup flat device-tree pointer */
@@ -65,6 +94,9 @@ void __init early_init_devtree(void *params)
65 /* Scan memory nodes */ 94 /* Scan memory nodes */
66 of_scan_flat_dt(early_init_dt_scan_root, NULL); 95 of_scan_flat_dt(early_init_dt_scan_root, NULL);
67 of_scan_flat_dt(early_init_dt_scan_memory_arch, NULL); 96 of_scan_flat_dt(early_init_dt_scan_memory_arch, NULL);
97
98 /* try to load the mips machine name */
99 of_scan_flat_dt(early_init_dt_scan_model, NULL);
68} 100}
69 101
70void __init __dt_setup_arch(struct boot_param_header *bph) 102void __init __dt_setup_arch(struct boot_param_header *bph)
@@ -79,3 +111,4 @@ void __init __dt_setup_arch(struct boot_param_header *bph)
79 111
80 early_init_devtree(initial_boot_params); 112 early_init_devtree(initial_boot_params);
81} 113}
114#endif
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 9ea29649fc28..9b36424b03c5 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -138,9 +138,18 @@ stackargs:
1385: jr t1 1385: jr t1
139 sw t5, 16(sp) # argument #5 to ksp 139 sw t5, 16(sp) # argument #5 to ksp
140 140
141#ifdef CONFIG_CPU_MICROMIPS
141 sw t8, 28(sp) # argument #8 to ksp 142 sw t8, 28(sp) # argument #8 to ksp
143 nop
142 sw t7, 24(sp) # argument #7 to ksp 144 sw t7, 24(sp) # argument #7 to ksp
145 nop
143 sw t6, 20(sp) # argument #6 to ksp 146 sw t6, 20(sp) # argument #6 to ksp
147 nop
148#else
149 sw t8, 28(sp) # argument #8 to ksp
150 sw t7, 24(sp) # argument #7 to ksp
151 sw t6, 20(sp) # argument #6 to ksp
152#endif
1446: j stack_done # go back 1536: j stack_done # go back
145 nop 154 nop
146 .set pop 155 .set pop
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 4c774d5d5087..c7f90519e58c 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -23,6 +23,7 @@
23#include <linux/pfn.h> 23#include <linux/pfn.h>
24#include <linux/debugfs.h> 24#include <linux/debugfs.h>
25#include <linux/kexec.h> 25#include <linux/kexec.h>
26#include <linux/sizes.h>
26 27
27#include <asm/addrspace.h> 28#include <asm/addrspace.h>
28#include <asm/bootinfo.h> 29#include <asm/bootinfo.h>
@@ -77,6 +78,8 @@ EXPORT_SYMBOL(mips_io_port_base);
77static struct resource code_resource = { .name = "Kernel code", }; 78static struct resource code_resource = { .name = "Kernel code", };
78static struct resource data_resource = { .name = "Kernel data", }; 79static struct resource data_resource = { .name = "Kernel data", };
79 80
81static void *detect_magic __initdata = detect_memory_region;
82
80void __init add_memory_region(phys_t start, phys_t size, long type) 83void __init add_memory_region(phys_t start, phys_t size, long type)
81{ 84{
82 int x = boot_mem_map.nr_map; 85 int x = boot_mem_map.nr_map;
@@ -122,6 +125,25 @@ void __init add_memory_region(phys_t start, phys_t size, long type)
122 boot_mem_map.nr_map++; 125 boot_mem_map.nr_map++;
123} 126}
124 127
128void __init detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max)
129{
130 void *dm = &detect_magic;
131 phys_t size;
132
133 for (size = sz_min; size < sz_max; size <<= 1) {
134 if (!memcmp(dm, dm + size, sizeof(detect_magic)))
135 break;
136 }
137
138 pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
139 ((unsigned long long) size) / SZ_1M,
140 (unsigned long long) start,
141 ((unsigned long long) sz_min) / SZ_1M,
142 ((unsigned long long) sz_max) / SZ_1M);
143
144 add_memory_region(start, size, BOOT_MEM_RAM);
145}
146
125static void __init print_memory_map(void) 147static void __init print_memory_map(void)
126{ 148{
127 int i; 149 int i;
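
detect_memory_region() above sizes RAM by aliasing: it walks power-of-two sizes and stops as soon as the data at the base of RAM shows up again at base + size, which on the SoCs this helper targets means the address bus has wrapped. The kernel version compares the bytes of an existing pointer in .data rather than writing a marker, but the idea is the same; a scaled-down user-space model:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define WINDOW   64u    /* largest size we are willing to probe (sz_max) */
    #define REAL_RAM 16u    /* memory actually present - unknown to the code */

    static uint8_t ram[WINDOW];

    /* Addresses beyond the real RAM size wrap around, modelling the alias. */
    static uint8_t *phys(unsigned int off)
    {
            return &ram[off % REAL_RAM];
    }

    int main(void)
    {
            const uint8_t magic = 0xA5;
            unsigned int size;

            *phys(0) = magic;
            for (size = 1; size < WINDOW; size <<= 1)
                    if (!memcmp(phys(0), phys(size), sizeof(magic)))
                            break;

            printf("detected %u units of RAM\n", size);     /* prints 16 */
            return 0;
    }
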
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index b5e88fd83277..fd3ef2c2afbc 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -35,6 +35,7 @@
35#include <asm/war.h> 35#include <asm/war.h>
36#include <asm/vdso.h> 36#include <asm/vdso.h>
37#include <asm/dsp.h> 37#include <asm/dsp.h>
38#include <asm/inst.h>
38 39
39#include "signal-common.h" 40#include "signal-common.h"
40 41
@@ -480,7 +481,15 @@ static void handle_signal(unsigned long sig, siginfo_t *info,
480 sigset_t *oldset = sigmask_to_save(); 481 sigset_t *oldset = sigmask_to_save();
481 int ret; 482 int ret;
482 struct mips_abi *abi = current->thread.abi; 483 struct mips_abi *abi = current->thread.abi;
484#ifdef CONFIG_CPU_MICROMIPS
485 void *vdso;
486 unsigned int tmp = (unsigned int)current->mm->context.vdso;
487
488 set_isa16_mode(tmp);
489 vdso = (void *)tmp;
490#else
483 void *vdso = current->mm->context.vdso; 491 void *vdso = current->mm->context.vdso;
492#endif
484 493
485 if (regs->regs[0]) { 494 if (regs->regs[0]) {
486 switch(regs->regs[2]) { 495 switch(regs->regs[2]) {
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index bfede063d96a..3e5164c11cac 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -34,6 +34,7 @@
34#include <asm/mipsregs.h> 34#include <asm/mipsregs.h>
35#include <asm/mipsmtregs.h> 35#include <asm/mipsmtregs.h>
36#include <asm/mips_mt.h> 36#include <asm/mips_mt.h>
37#include <asm/gic.h>
37 38
38static void __init smvp_copy_vpe_config(void) 39static void __init smvp_copy_vpe_config(void)
39{ 40{
@@ -151,8 +152,6 @@ static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
151static void __cpuinit vsmp_init_secondary(void) 152static void __cpuinit vsmp_init_secondary(void)
152{ 153{
153#ifdef CONFIG_IRQ_GIC 154#ifdef CONFIG_IRQ_GIC
154 extern int gic_present;
155
156 /* This is Malta specific: IPI,performance and timer interrupts */ 155 /* This is Malta specific: IPI,performance and timer interrupts */
157 if (gic_present) 156 if (gic_present)
158 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | 157 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index aee04af213c5..c17619fe18e3 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -83,6 +83,7 @@ static inline void set_cpu_sibling_map(int cpu)
83} 83}
84 84
85struct plat_smp_ops *mp_ops; 85struct plat_smp_ops *mp_ops;
86EXPORT_SYMBOL(mp_ops);
86 87
87__cpuinit void register_smp_ops(struct plat_smp_ops *ops) 88__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
88{ 89{
diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S
index 76016ac0a9c8..2866863a39df 100644
--- a/arch/mips/kernel/smtc-asm.S
+++ b/arch/mips/kernel/smtc-asm.S
@@ -49,6 +49,9 @@ CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED??
49 .text 49 .text
50 .align 5 50 .align 5
51FEXPORT(__smtc_ipi_vector) 51FEXPORT(__smtc_ipi_vector)
52#ifdef CONFIG_CPU_MICROMIPS
53 nop
54#endif
52 .set noat 55 .set noat
53 /* Disable thread scheduling to make Status update atomic */ 56 /* Disable thread scheduling to make Status update atomic */
54 DMT 27 # dmt k1 57 DMT 27 # dmt k1
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 7186222dc5bb..31d22f3121c9 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -111,7 +111,7 @@ static int vpe0limit;
111static int ipibuffers; 111static int ipibuffers;
112static int nostlb; 112static int nostlb;
113static int asidmask; 113static int asidmask;
114unsigned long smtc_asid_mask = 0xff; 114unsigned int smtc_asid_mask = 0xff;
115 115
116static int __init vpe0tcs(char *str) 116static int __init vpe0tcs(char *str)
117{ 117{
@@ -1395,7 +1395,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1395 asid = asid_cache(cpu); 1395 asid = asid_cache(cpu);
1396 1396
1397 do { 1397 do {
1398 if (!((asid += ASID_INC) & ASID_MASK) ) { 1398 if (!ASID_MASK(ASID_INC(asid))) {
1399 if (cpu_has_vtag_icache) 1399 if (cpu_has_vtag_icache)
1400 flush_icache_all(); 1400 flush_icache_all();
1401 /* Traverse all online CPUs (hack requires contiguous range) */ 1401 /* Traverse all online CPUs (hack requires contiguous range) */
@@ -1414,7 +1414,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1414 mips_ihb(); 1414 mips_ihb();
1415 } 1415 }
1416 tcstat = read_tc_c0_tcstatus(); 1416 tcstat = read_tc_c0_tcstatus();
1417 smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i); 1417 smtc_live_asid[tlb][ASID_MASK(tcstat)] |= (asiduse)(0x1 << i);
1418 if (!prevhalt) 1418 if (!prevhalt)
1419 write_tc_c0_tchalt(0); 1419 write_tc_c0_tchalt(0);
1420 } 1420 }
@@ -1423,7 +1423,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1423 asid = ASID_FIRST_VERSION; 1423 asid = ASID_FIRST_VERSION;
1424 local_flush_tlb_all(); /* start new asid cycle */ 1424 local_flush_tlb_all(); /* start new asid cycle */
1425 } 1425 }
1426 } while (smtc_live_asid[tlb][(asid & ASID_MASK)]); 1426 } while (smtc_live_asid[tlb][ASID_MASK(asid)]);
1427 1427
1428 /* 1428 /*
1429 * SMTC shares the TLB within VPEs and possibly across all VPEs. 1429 * SMTC shares the TLB within VPEs and possibly across all VPEs.
@@ -1461,7 +1461,7 @@ void smtc_flush_tlb_asid(unsigned long asid)
1461 tlb_read(); 1461 tlb_read();
1462 ehb(); 1462 ehb();
1463 ehi = read_c0_entryhi(); 1463 ehi = read_c0_entryhi();
1464 if ((ehi & ASID_MASK) == asid) { 1464 if (ASID_MASK(ehi) == asid) {
1465 /* 1465 /*
1466 * Invalidate only entries with specified ASID, 1466 * Invalidate only entries with specified ASID,
1467	 * making sure all entries differ. 1467
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 25225515451f..77cff1f6d050 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -8,8 +8,8 @@
8 * Copyright (C) 1998 Ulf Carlsson 8 * Copyright (C) 1998 Ulf Carlsson
9 * Copyright (C) 1999 Silicon Graphics, Inc. 9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com 10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
11 * Copyright (C) 2000, 01 MIPS Technologies, Inc.
12 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki 11 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
12 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved.
13 */ 13 */
14#include <linux/bug.h> 14#include <linux/bug.h>
15#include <linux/compiler.h> 15#include <linux/compiler.h>
@@ -60,9 +60,9 @@ extern void check_wait(void);
60extern asmlinkage void r4k_wait(void); 60extern asmlinkage void r4k_wait(void);
61extern asmlinkage void rollback_handle_int(void); 61extern asmlinkage void rollback_handle_int(void);
62extern asmlinkage void handle_int(void); 62extern asmlinkage void handle_int(void);
63extern asmlinkage void handle_tlbm(void); 63extern u32 handle_tlbl[];
64extern asmlinkage void handle_tlbl(void); 64extern u32 handle_tlbs[];
65extern asmlinkage void handle_tlbs(void); 65extern u32 handle_tlbm[];
66extern asmlinkage void handle_adel(void); 66extern asmlinkage void handle_adel(void);
67extern asmlinkage void handle_ades(void); 67extern asmlinkage void handle_ades(void);
68extern asmlinkage void handle_ibe(void); 68extern asmlinkage void handle_ibe(void);
@@ -83,10 +83,6 @@ extern asmlinkage void handle_dsp(void);
83extern asmlinkage void handle_mcheck(void); 83extern asmlinkage void handle_mcheck(void);
84extern asmlinkage void handle_reserved(void); 84extern asmlinkage void handle_reserved(void);
85 85
86extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
87 struct mips_fpu_struct *ctx, int has_fpu,
88 void *__user *fault_addr);
89
90void (*board_be_init)(void); 86void (*board_be_init)(void);
91int (*board_be_handler)(struct pt_regs *regs, int is_fixup); 87int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
92void (*board_nmi_handler_setup)(void); 88void (*board_nmi_handler_setup)(void);
@@ -482,6 +478,12 @@ asmlinkage void do_be(struct pt_regs *regs)
482#define SYNC 0x0000000f 478#define SYNC 0x0000000f
483#define RDHWR 0x0000003b 479#define RDHWR 0x0000003b
484 480
481/* microMIPS definitions */
482#define MM_POOL32A_FUNC 0xfc00ffff
483#define MM_RDHWR 0x00006b3c
484#define MM_RS 0x001f0000
485#define MM_RT 0x03e00000
486
485/* 487/*
486 * The ll_bit is cleared by r*_switch.S 488 * The ll_bit is cleared by r*_switch.S
487 */ 489 */
@@ -596,42 +598,62 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
596 * Simulate trapping 'rdhwr' instructions to provide user accessible 598 * Simulate trapping 'rdhwr' instructions to provide user accessible
597 * registers not implemented in hardware. 599 * registers not implemented in hardware.
598 */ 600 */
599static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode) 601static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
600{ 602{
601 struct thread_info *ti = task_thread_info(current); 603 struct thread_info *ti = task_thread_info(current);
602 604
605 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
606 1, regs, 0);
607 switch (rd) {
608 case 0: /* CPU number */
609 regs->regs[rt] = smp_processor_id();
610 return 0;
611 case 1: /* SYNCI length */
612 regs->regs[rt] = min(current_cpu_data.dcache.linesz,
613 current_cpu_data.icache.linesz);
614 return 0;
615 case 2: /* Read count register */
616 regs->regs[rt] = read_c0_count();
617 return 0;
618 case 3: /* Count register resolution */
619 switch (current_cpu_data.cputype) {
620 case CPU_20KC:
621 case CPU_25KF:
622 regs->regs[rt] = 1;
623 break;
624 default:
625 regs->regs[rt] = 2;
626 }
627 return 0;
628 case 29:
629 regs->regs[rt] = ti->tp_value;
630 return 0;
631 default:
632 return -1;
633 }
634}
635
636static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
637{
603 if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) { 638 if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
604 int rd = (opcode & RD) >> 11; 639 int rd = (opcode & RD) >> 11;
605 int rt = (opcode & RT) >> 16; 640 int rt = (opcode & RT) >> 16;
606 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 641
607 1, regs, 0); 642 simulate_rdhwr(regs, rd, rt);
608 switch (rd) { 643 return 0;
609 case 0: /* CPU number */ 644 }
610 regs->regs[rt] = smp_processor_id(); 645
611 return 0; 646 /* Not ours. */
612 case 1: /* SYNCI length */ 647 return -1;
613 regs->regs[rt] = min(current_cpu_data.dcache.linesz, 648}
614 current_cpu_data.icache.linesz); 649
615 return 0; 650static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
616 case 2: /* Read count register */ 651{
617 regs->regs[rt] = read_c0_count(); 652 if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
618 return 0; 653 int rd = (opcode & MM_RS) >> 16;
619 case 3: /* Count register resolution */ 654 int rt = (opcode & MM_RT) >> 21;
620 switch (current_cpu_data.cputype) { 655 simulate_rdhwr(regs, rd, rt);
621 case CPU_20KC: 656 return 0;
622 case CPU_25KF:
623 regs->regs[rt] = 1;
624 break;
625 default:
626 regs->regs[rt] = 2;
627 }
628 return 0;
629 case 29:
630 regs->regs[rt] = ti->tp_value;
631 return 0;
632 default:
633 return -1;
634 }
635 } 657 }
636 658
637 /* Not ours. */ 659 /* Not ours. */
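
The split into simulate_rdhwr_normal()/simulate_rdhwr_mm() above exists because the microMIPS RDHWR encoding packs its fields differently. As a worked example, the word 0x007d6b3c quoted in the genex.S hunk earlier ("rdhwr v1,$29") decodes with exactly the masks added here; the tiny program below is illustrative only.

    #include <stdio.h>

    /* Masks from the hunk above. */
    #define MM_POOL32A_FUNC 0xfc00ffff
    #define MM_RDHWR        0x00006b3c
    #define MM_RS           0x001f0000
    #define MM_RT           0x03e00000

    int main(void)
    {
            unsigned int opcode = 0x007d6b3c;       /* microMIPS rdhwr v1,$29 */

            if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
                    int rd = (opcode & MM_RS) >> 16;   /* hardware register: 29      */
                    int rt = (opcode & MM_RT) >> 21;   /* destination GPR: 3 ($v1)   */
                    printf("rdhwr $%d, $hwr%d\n", rt, rd);
            }
            return 0;
    }
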
@@ -662,7 +684,7 @@ asmlinkage void do_ov(struct pt_regs *regs)
662 force_sig_info(SIGFPE, &info, current); 684 force_sig_info(SIGFPE, &info, current);
663} 685}
664 686
665static int process_fpemu_return(int sig, void __user *fault_addr) 687int process_fpemu_return(int sig, void __user *fault_addr)
666{ 688{
667 if (sig == SIGSEGV || sig == SIGBUS) { 689 if (sig == SIGSEGV || sig == SIGBUS) {
668 struct siginfo si = {0}; 690 struct siginfo si = {0};
@@ -813,9 +835,29 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
813asmlinkage void do_bp(struct pt_regs *regs) 835asmlinkage void do_bp(struct pt_regs *regs)
814{ 836{
815 unsigned int opcode, bcode; 837 unsigned int opcode, bcode;
816 838 unsigned long epc;
817 if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) 839 u16 instr[2];
818 goto out_sigsegv; 840
841 if (get_isa16_mode(regs->cp0_epc)) {
842 /* Calculate EPC. */
843 epc = exception_epc(regs);
844 if (cpu_has_mmips) {
845 if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
846 (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
847 goto out_sigsegv;
848 opcode = (instr[0] << 16) | instr[1];
849 } else {
850 /* MIPS16e mode */
851 if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)))
852 goto out_sigsegv;
853 bcode = (instr[0] >> 6) & 0x3f;
854 do_trap_or_bp(regs, bcode, "Break");
855 return;
856 }
857 } else {
858 if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
859 goto out_sigsegv;
860 }
819 861
820 /* 862 /*
821 * There is the ancient bug in the MIPS assemblers that the break 863 * There is the ancient bug in the MIPS assemblers that the break
@@ -856,13 +898,22 @@ out_sigsegv:
856asmlinkage void do_tr(struct pt_regs *regs) 898asmlinkage void do_tr(struct pt_regs *regs)
857{ 899{
858 unsigned int opcode, tcode = 0; 900 unsigned int opcode, tcode = 0;
901 u16 instr[2];
902 unsigned long epc = exception_epc(regs);
859 903
860 if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) 904 if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) ||
861 goto out_sigsegv; 905 (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))
906 goto out_sigsegv;
907 opcode = (instr[0] << 16) | instr[1];
862 908
863 /* Immediate versions don't provide a code. */ 909 /* Immediate versions don't provide a code. */
864 if (!(opcode & OPCODE)) 910 if (!(opcode & OPCODE)) {
865 tcode = ((opcode >> 6) & ((1 << 10) - 1)); 911 if (get_isa16_mode(regs->cp0_epc))
912 /* microMIPS */
913 tcode = (opcode >> 12) & 0x1f;
914 else
915 tcode = ((opcode >> 6) & ((1 << 10) - 1));
916 }
866 917
867 do_trap_or_bp(regs, tcode, "Trap"); 918 do_trap_or_bp(regs, tcode, "Trap");
868 return; 919 return;
@@ -875,6 +926,7 @@ asmlinkage void do_ri(struct pt_regs *regs)
875{ 926{
876 unsigned int __user *epc = (unsigned int __user *)exception_epc(regs); 927 unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
877 unsigned long old_epc = regs->cp0_epc; 928 unsigned long old_epc = regs->cp0_epc;
929 unsigned long old31 = regs->regs[31];
878 unsigned int opcode = 0; 930 unsigned int opcode = 0;
879 int status = -1; 931 int status = -1;
880 932
@@ -887,23 +939,37 @@ asmlinkage void do_ri(struct pt_regs *regs)
887 if (unlikely(compute_return_epc(regs) < 0)) 939 if (unlikely(compute_return_epc(regs) < 0))
888 return; 940 return;
889 941
890 if (unlikely(get_user(opcode, epc) < 0)) 942 if (get_isa16_mode(regs->cp0_epc)) {
891 status = SIGSEGV; 943 unsigned short mmop[2] = { 0 };
892 944
893 if (!cpu_has_llsc && status < 0) 945 if (unlikely(get_user(mmop[0], epc) < 0))
894 status = simulate_llsc(regs, opcode); 946 status = SIGSEGV;
947 if (unlikely(get_user(mmop[1], epc) < 0))
948 status = SIGSEGV;
949 opcode = (mmop[0] << 16) | mmop[1];
895 950
896 if (status < 0) 951 if (status < 0)
897 status = simulate_rdhwr(regs, opcode); 952 status = simulate_rdhwr_mm(regs, opcode);
953 } else {
954 if (unlikely(get_user(opcode, epc) < 0))
955 status = SIGSEGV;
898 956
899 if (status < 0) 957 if (!cpu_has_llsc && status < 0)
900 status = simulate_sync(regs, opcode); 958 status = simulate_llsc(regs, opcode);
959
960 if (status < 0)
961 status = simulate_rdhwr_normal(regs, opcode);
962
963 if (status < 0)
964 status = simulate_sync(regs, opcode);
965 }
901 966
902 if (status < 0) 967 if (status < 0)
903 status = SIGILL; 968 status = SIGILL;
904 969
905 if (unlikely(status > 0)) { 970 if (unlikely(status > 0)) {
906 regs->cp0_epc = old_epc; /* Undo skip-over. */ 971 regs->cp0_epc = old_epc; /* Undo skip-over. */
972 regs->regs[31] = old31;
907 force_sig(status, current); 973 force_sig(status, current);
908 } 974 }
909} 975}
@@ -973,7 +1039,7 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
973asmlinkage void do_cpu(struct pt_regs *regs) 1039asmlinkage void do_cpu(struct pt_regs *regs)
974{ 1040{
975 unsigned int __user *epc; 1041 unsigned int __user *epc;
976 unsigned long old_epc; 1042 unsigned long old_epc, old31;
977 unsigned int opcode; 1043 unsigned int opcode;
978 unsigned int cpid; 1044 unsigned int cpid;
979 int status; 1045 int status;
@@ -987,26 +1053,41 @@ asmlinkage void do_cpu(struct pt_regs *regs)
987 case 0: 1053 case 0:
988 epc = (unsigned int __user *)exception_epc(regs); 1054 epc = (unsigned int __user *)exception_epc(regs);
989 old_epc = regs->cp0_epc; 1055 old_epc = regs->cp0_epc;
1056 old31 = regs->regs[31];
990 opcode = 0; 1057 opcode = 0;
991 status = -1; 1058 status = -1;
992 1059
993 if (unlikely(compute_return_epc(regs) < 0)) 1060 if (unlikely(compute_return_epc(regs) < 0))
994 return; 1061 return;
995 1062
996 if (unlikely(get_user(opcode, epc) < 0)) 1063 if (get_isa16_mode(regs->cp0_epc)) {
997 status = SIGSEGV; 1064 unsigned short mmop[2] = { 0 };
998 1065
999 if (!cpu_has_llsc && status < 0) 1066 if (unlikely(get_user(mmop[0], epc) < 0))
1000 status = simulate_llsc(regs, opcode); 1067 status = SIGSEGV;
1068 if (unlikely(get_user(mmop[1], epc) < 0))
1069 status = SIGSEGV;
1070 opcode = (mmop[0] << 16) | mmop[1];
1001 1071
1002 if (status < 0) 1072 if (status < 0)
1003 status = simulate_rdhwr(regs, opcode); 1073 status = simulate_rdhwr_mm(regs, opcode);
1074 } else {
1075 if (unlikely(get_user(opcode, epc) < 0))
1076 status = SIGSEGV;
1077
1078 if (!cpu_has_llsc && status < 0)
1079 status = simulate_llsc(regs, opcode);
1080
1081 if (status < 0)
1082 status = simulate_rdhwr_normal(regs, opcode);
1083 }
1004 1084
1005 if (status < 0) 1085 if (status < 0)
1006 status = SIGILL; 1086 status = SIGILL;
1007 1087
1008 if (unlikely(status > 0)) { 1088 if (unlikely(status > 0)) {
1009 regs->cp0_epc = old_epc; /* Undo skip-over. */ 1089 regs->cp0_epc = old_epc; /* Undo skip-over. */
1090 regs->regs[31] = old31;
1010 force_sig(status, current); 1091 force_sig(status, current);
1011 } 1092 }
1012 1093
@@ -1320,7 +1401,7 @@ asmlinkage void cache_parity_error(void)
1320void ejtag_exception_handler(struct pt_regs *regs) 1401void ejtag_exception_handler(struct pt_regs *regs)
1321{ 1402{
1322 const int field = 2 * sizeof(unsigned long); 1403 const int field = 2 * sizeof(unsigned long);
1323 unsigned long depc, old_epc; 1404 unsigned long depc, old_epc, old_ra;
1324 unsigned int debug; 1405 unsigned int debug;
1325 1406
1326 printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n"); 1407 printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
@@ -1335,10 +1416,12 @@ void ejtag_exception_handler(struct pt_regs *regs)
1335 * calculation. 1416 * calculation.
1336 */ 1417 */
1337 old_epc = regs->cp0_epc; 1418 old_epc = regs->cp0_epc;
1419 old_ra = regs->regs[31];
1338 regs->cp0_epc = depc; 1420 regs->cp0_epc = depc;
1339 __compute_return_epc(regs); 1421 compute_return_epc(regs);
1340 depc = regs->cp0_epc; 1422 depc = regs->cp0_epc;
1341 regs->cp0_epc = old_epc; 1423 regs->cp0_epc = old_epc;
1424 regs->regs[31] = old_ra;
1342 } else 1425 } else
1343 depc += 4; 1426 depc += 4;
1344 write_c0_depc(depc); 1427 write_c0_depc(depc);
@@ -1377,11 +1460,27 @@ unsigned long vi_handlers[64];
1377void __init *set_except_vector(int n, void *addr) 1460void __init *set_except_vector(int n, void *addr)
1378{ 1461{
1379 unsigned long handler = (unsigned long) addr; 1462 unsigned long handler = (unsigned long) addr;
1380 unsigned long old_handler = exception_handlers[n]; 1463 unsigned long old_handler;
1464
1465#ifdef CONFIG_CPU_MICROMIPS
1466 /*
1467 * Only the TLB handlers are cache aligned with an even
1468 * address. All other handlers are on an odd address and
1469 * require no modification. Otherwise, MIPS32 mode will
1470 * be entered when handling any TLB exceptions. That
1471 * would be bad...since we must stay in microMIPS mode.
1472 */
1473 if (!(handler & 0x1))
1474 handler |= 1;
1475#endif
1476 old_handler = xchg(&exception_handlers[n], handler);
1381 1477
1382 exception_handlers[n] = handler;
1383 if (n == 0 && cpu_has_divec) { 1478 if (n == 0 && cpu_has_divec) {
1479#ifdef CONFIG_CPU_MICROMIPS
1480 unsigned long jump_mask = ~((1 << 27) - 1);
1481#else
1384 unsigned long jump_mask = ~((1 << 28) - 1); 1482 unsigned long jump_mask = ~((1 << 28) - 1);
1483#endif
1385 u32 *buf = (u32 *)(ebase + 0x200); 1484 u32 *buf = (u32 *)(ebase + 0x200);
1386 unsigned int k0 = 26; 1485 unsigned int k0 = 26;
1387 if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) { 1486 if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
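
The comment block in the hunk above is the important microMIPS detail in set_except_vector(): the generated TLB handlers land on even, cache-aligned addresses, so bit 0 (the ISA-mode bit) has to be forced on before the pointer is published, or the indirect jump into the handler would drop the CPU back into MIPS32 mode. A minimal model of that fixup, with a GCC builtin standing in for the kernel's xchg():

    #include <stdio.h>

    static unsigned long exception_handlers[32];

    static unsigned long set_handler_slot(int n, unsigned long handler)
    {
            if (!(handler & 1UL))
                    handler |= 1UL;         /* force the microMIPS ISA-mode bit */

            /* stand-in for the kernel's xchg(): publish new value, return old */
            return __atomic_exchange_n(&exception_handlers[n], handler,
                                       __ATOMIC_SEQ_CST);
    }

    int main(void)
    {
            unsigned long old = set_handler_slot(2, 0x80100400UL);  /* even, TLB-style */
            printf("old=%#lx new=%#lx\n", old, exception_handlers[2]);
            return 0;
    }
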
@@ -1397,7 +1496,7 @@ void __init *set_except_vector(int n, void *addr)
1397 return (void *)old_handler; 1496 return (void *)old_handler;
1398} 1497}
1399 1498
1400static asmlinkage void do_default_vi(void) 1499static void do_default_vi(void)
1401{ 1500{
1402 show_regs(get_irq_regs()); 1501 show_regs(get_irq_regs());
1403 panic("Caught unexpected vectored interrupt."); 1502 panic("Caught unexpected vectored interrupt.");
@@ -1408,17 +1507,18 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1408 unsigned long handler; 1507 unsigned long handler;
1409 unsigned long old_handler = vi_handlers[n]; 1508 unsigned long old_handler = vi_handlers[n];
1410 int srssets = current_cpu_data.srsets; 1509 int srssets = current_cpu_data.srsets;
1411 u32 *w; 1510 u16 *h;
1412 unsigned char *b; 1511 unsigned char *b;
1413 1512
1414 BUG_ON(!cpu_has_veic && !cpu_has_vint); 1513 BUG_ON(!cpu_has_veic && !cpu_has_vint);
 1514	BUG_ON((n < 0) || (n > 9));
1415 1515
1416 if (addr == NULL) { 1516 if (addr == NULL) {
1417 handler = (unsigned long) do_default_vi; 1517 handler = (unsigned long) do_default_vi;
1418 srs = 0; 1518 srs = 0;
1419 } else 1519 } else
1420 handler = (unsigned long) addr; 1520 handler = (unsigned long) addr;
1421 vi_handlers[n] = (unsigned long) addr; 1521 vi_handlers[n] = handler;
1422 1522
1423 b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING); 1523 b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
1424 1524
@@ -1437,9 +1537,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1437 if (srs == 0) { 1537 if (srs == 0) {
1438 /* 1538 /*
1439 * If no shadow set is selected then use the default handler 1539 * If no shadow set is selected then use the default handler
1440 * that does normal register saving and a standard interrupt exit 1540 * that does normal register saving and standard interrupt exit
1441 */ 1541 */
1442
1443 extern char except_vec_vi, except_vec_vi_lui; 1542 extern char except_vec_vi, except_vec_vi_lui;
1444 extern char except_vec_vi_ori, except_vec_vi_end; 1543 extern char except_vec_vi_ori, except_vec_vi_end;
1445 extern char rollback_except_vec_vi; 1544 extern char rollback_except_vec_vi;
@@ -1452,11 +1551,20 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1452 * Status.IM bit to be masked before going there. 1551 * Status.IM bit to be masked before going there.
1453 */ 1552 */
1454 extern char except_vec_vi_mori; 1553 extern char except_vec_vi_mori;
1554#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
1555 const int mori_offset = &except_vec_vi_mori - vec_start + 2;
1556#else
1455 const int mori_offset = &except_vec_vi_mori - vec_start; 1557 const int mori_offset = &except_vec_vi_mori - vec_start;
1558#endif
1456#endif /* CONFIG_MIPS_MT_SMTC */ 1559#endif /* CONFIG_MIPS_MT_SMTC */
1457 const int handler_len = &except_vec_vi_end - vec_start; 1560#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
1561 const int lui_offset = &except_vec_vi_lui - vec_start + 2;
1562 const int ori_offset = &except_vec_vi_ori - vec_start + 2;
1563#else
1458 const int lui_offset = &except_vec_vi_lui - vec_start; 1564 const int lui_offset = &except_vec_vi_lui - vec_start;
1459 const int ori_offset = &except_vec_vi_ori - vec_start; 1565 const int ori_offset = &except_vec_vi_ori - vec_start;
1566#endif
1567 const int handler_len = &except_vec_vi_end - vec_start;
1460 1568
1461 if (handler_len > VECTORSPACING) { 1569 if (handler_len > VECTORSPACING) {
1462 /* 1570 /*
@@ -1466,30 +1574,44 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1466 panic("VECTORSPACING too small"); 1574 panic("VECTORSPACING too small");
1467 } 1575 }
1468 1576
1469 memcpy(b, vec_start, handler_len); 1577 set_handler(((unsigned long)b - ebase), vec_start,
1578#ifdef CONFIG_CPU_MICROMIPS
1579 (handler_len - 1));
1580#else
1581 handler_len);
1582#endif
1470#ifdef CONFIG_MIPS_MT_SMTC 1583#ifdef CONFIG_MIPS_MT_SMTC
1471 BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */ 1584 BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */
1472 1585
1473 w = (u32 *)(b + mori_offset); 1586 h = (u16 *)(b + mori_offset);
1474 *w = (*w & 0xffff0000) | (0x100 << n); 1587 *h = (0x100 << n);
1475#endif /* CONFIG_MIPS_MT_SMTC */ 1588#endif /* CONFIG_MIPS_MT_SMTC */
1476 w = (u32 *)(b + lui_offset); 1589 h = (u16 *)(b + lui_offset);
1477 *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); 1590 *h = (handler >> 16) & 0xffff;
1478 w = (u32 *)(b + ori_offset); 1591 h = (u16 *)(b + ori_offset);
1479 *w = (*w & 0xffff0000) | ((u32)handler & 0xffff); 1592 *h = (handler & 0xffff);
1480 local_flush_icache_range((unsigned long)b, 1593 local_flush_icache_range((unsigned long)b,
1481 (unsigned long)(b+handler_len)); 1594 (unsigned long)(b+handler_len));
1482 } 1595 }
1483 else { 1596 else {
1484 /* 1597 /*
1485 * In other cases jump directly to the interrupt handler 1598 * In other cases jump directly to the interrupt handler. It
1486 * 1599 * is the handler's responsibility to save registers if required
1487 * It is the handlers responsibility to save registers if required 1600 * (eg hi/lo) and return from the exception using "eret".
1488 * (eg hi/lo) and return from the exception using "eret"
1489 */ 1601 */
1490 w = (u32 *)b; 1602 u32 insn;
1491 *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */ 1603
1492 *w = 0; 1604 h = (u16 *)b;
1605 /* j handler */
1606#ifdef CONFIG_CPU_MICROMIPS
1607 insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
1608#else
1609 insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
1610#endif
1611 h[0] = (insn >> 16) & 0xffff;
1612 h[1] = insn & 0xffff;
1613 h[2] = 0;
1614 h[3] = 0;
1493 local_flush_icache_range((unsigned long)b, 1615 local_flush_icache_range((unsigned long)b,
1494 (unsigned long)(b+8)); 1616 (unsigned long)(b+8));
1495 } 1617 }
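The direct-jump case above packs a "j handler" instruction by hand, as two halfword stores. A worked sketch of the two encodings it uses; the address value is chosen purely for illustration:

#include <stdint.h>

/* Classic MIPS32 "j": opcode 0x08000000, target = low 28 bits of the
 * destination shifted right by 2 (instructions are word aligned).
 * microMIPS 32-bit "j": the 0xd4000000 opcode pattern used above,
 * target = low 27 bits shifted right by 1 (halfword alignment).
 */
static uint32_t encode_j_mips32(uint32_t handler)
{
	return 0x08000000 | ((handler & 0x0fffffff) >> 2);
}

static uint32_t encode_j_micromips(uint32_t handler)
{
	return 0xd4000000 | ((handler & 0x07ffffff) >> 1);
}

/* Example, handler at 0x80100400 (illustrative):
 *   classic:   0x08000000 | (0x00100400 >> 2) = 0x08040100
 *   microMIPS: 0xd4000000 | (0x00100400 >> 1) = 0xd4080200
 */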
@@ -1534,6 +1656,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
1534 unsigned int cpu = smp_processor_id(); 1656 unsigned int cpu = smp_processor_id();
1535 unsigned int status_set = ST0_CU0; 1657 unsigned int status_set = ST0_CU0;
1536 unsigned int hwrena = cpu_hwrena_impl_bits; 1658 unsigned int hwrena = cpu_hwrena_impl_bits;
1659 unsigned long asid = 0;
1537#ifdef CONFIG_MIPS_MT_SMTC 1660#ifdef CONFIG_MIPS_MT_SMTC
1538 int secondaryTC = 0; 1661 int secondaryTC = 0;
1539 int bootTC = (cpu == 0); 1662 int bootTC = (cpu == 0);
@@ -1617,8 +1740,9 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
1617 } 1740 }
1618#endif /* CONFIG_MIPS_MT_SMTC */ 1741#endif /* CONFIG_MIPS_MT_SMTC */
1619 1742
1620 if (!cpu_data[cpu].asid_cache) 1743 asid = ASID_FIRST_VERSION;
1621 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; 1744 cpu_data[cpu].asid_cache = asid;
1745 TLBMISS_HANDLER_SETUP();
1622 1746
1623 atomic_inc(&init_mm.mm_count); 1747 atomic_inc(&init_mm.mm_count);
1624 current->active_mm = &init_mm; 1748 current->active_mm = &init_mm;
@@ -1648,7 +1772,11 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
1648/* Install CPU exception handler */ 1772/* Install CPU exception handler */
1649void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size) 1773void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
1650{ 1774{
1775#ifdef CONFIG_CPU_MICROMIPS
1776 memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
1777#else
1651 memcpy((void *)(ebase + offset), addr, size); 1778 memcpy((void *)(ebase + offset), addr, size);
1779#endif
1652 local_flush_icache_range(ebase + offset, ebase + offset + size); 1780 local_flush_icache_range(ebase + offset, ebase + offset + size);
1653} 1781}
1654 1782
@@ -1682,8 +1810,9 @@ __setup("rdhwr_noopt", set_rdhwr_noopt);
1682 1810
1683void __init trap_init(void) 1811void __init trap_init(void)
1684{ 1812{
1685 extern char except_vec3_generic, except_vec3_r4000; 1813 extern char except_vec3_generic;
1686 extern char except_vec4; 1814 extern char except_vec4;
1815 extern char except_vec3_r4000;
1687 unsigned long i; 1816 unsigned long i;
1688 int rollback; 1817 int rollback;
1689 1818
@@ -1700,7 +1829,12 @@ void __init trap_init(void)
1700 ebase = (unsigned long) 1829 ebase = (unsigned long)
1701 __alloc_bootmem(size, 1 << fls(size), 0); 1830 __alloc_bootmem(size, 1 << fls(size), 0);
1702 } else { 1831 } else {
1703 ebase = CKSEG0; 1832#ifdef CONFIG_KVM_GUEST
1833#define KVM_GUEST_KSEG0 0x40000000
1834 ebase = KVM_GUEST_KSEG0;
1835#else
1836 ebase = CKSEG0;
1837#endif
1704 if (cpu_has_mips_r2) 1838 if (cpu_has_mips_r2)
1705 ebase += (read_c0_ebase() & 0x3ffff000); 1839 ebase += (read_c0_ebase() & 0x3ffff000);
1706 } 1840 }
@@ -1816,11 +1950,11 @@ void __init trap_init(void)
1816 1950
1817 if (cpu_has_vce) 1951 if (cpu_has_vce)
1818 /* Special exception: R4[04]00 uses also the divec space. */ 1952 /* Special exception: R4[04]00 uses also the divec space. */
1819 memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100); 1953 set_handler(0x180, &except_vec3_r4000, 0x100);
1820 else if (cpu_has_4kex) 1954 else if (cpu_has_4kex)
1821 memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80); 1955 set_handler(0x180, &except_vec3_generic, 0x80);
1822 else 1956 else
1823 memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80); 1957 set_handler(0x080, &except_vec3_generic, 0x80);
1824 1958
1825 local_flush_icache_range(ebase, ebase + 0x400); 1959 local_flush_icache_range(ebase, ebase + 0x400);
1826 flush_tlb_handlers(); 1960 flush_tlb_handlers();
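Each vectored-interrupt stub installed above lands in a fixed slot computed from the vector number, and the stub must fit inside one slot or set_vi_srs_handler() panics. A small sketch of that layout arithmetic; VECTORSPACING is defined elsewhere in traps.c, and 0x100 is used here only as a plausible value for the worked numbers:

/* Sketch: where vector n's stub lives, per the code above.
 * EX_VECTORSPACING stands in for VECTORSPACING (assumed 0x100 here).
 */
#define EX_VECTORSPACING	0x100

static unsigned long vi_vector_addr(unsigned long ebase, int n)
{
	return ebase + 0x200 + n * EX_VECTORSPACING;
}

/* e.g. with ebase = 0x80000000: vector 0 -> 0x80000200, vector 3 -> 0x80000500.
 * A stub longer than one slot would overlap its neighbour, hence the
 * "VECTORSPACING too small" panic.
 */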
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 6087a54c86a0..203d8857070d 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -83,8 +83,12 @@
83#include <asm/branch.h> 83#include <asm/branch.h>
84#include <asm/byteorder.h> 84#include <asm/byteorder.h>
85#include <asm/cop2.h> 85#include <asm/cop2.h>
86#include <asm/fpu.h>
87#include <asm/fpu_emulator.h>
86#include <asm/inst.h> 88#include <asm/inst.h>
87#include <asm/uaccess.h> 89#include <asm/uaccess.h>
90#include <asm/fpu.h>
91#include <asm/fpu_emulator.h>
88 92
89#define STR(x) __STR(x) 93#define STR(x) __STR(x)
90#define __STR(x) #x 94#define __STR(x) #x
@@ -102,12 +106,332 @@ static u32 unaligned_action;
102#endif 106#endif
103extern void show_registers(struct pt_regs *regs); 107extern void show_registers(struct pt_regs *regs);
104 108
109#ifdef __BIG_ENDIAN
110#define LoadHW(addr, value, res) \
111 __asm__ __volatile__ (".set\tnoat\n" \
112 "1:\tlb\t%0, 0(%2)\n" \
113 "2:\tlbu\t$1, 1(%2)\n\t" \
114 "sll\t%0, 0x8\n\t" \
115 "or\t%0, $1\n\t" \
116 "li\t%1, 0\n" \
117 "3:\t.set\tat\n\t" \
118 ".insn\n\t" \
119 ".section\t.fixup,\"ax\"\n\t" \
120 "4:\tli\t%1, %3\n\t" \
121 "j\t3b\n\t" \
122 ".previous\n\t" \
123 ".section\t__ex_table,\"a\"\n\t" \
124 STR(PTR)"\t1b, 4b\n\t" \
125 STR(PTR)"\t2b, 4b\n\t" \
126 ".previous" \
127 : "=&r" (value), "=r" (res) \
128 : "r" (addr), "i" (-EFAULT));
129
130#define LoadW(addr, value, res) \
131 __asm__ __volatile__ ( \
132 "1:\tlwl\t%0, (%2)\n" \
133 "2:\tlwr\t%0, 3(%2)\n\t" \
134 "li\t%1, 0\n" \
135 "3:\n\t" \
136 ".insn\n\t" \
137 ".section\t.fixup,\"ax\"\n\t" \
138 "4:\tli\t%1, %3\n\t" \
139 "j\t3b\n\t" \
140 ".previous\n\t" \
141 ".section\t__ex_table,\"a\"\n\t" \
142 STR(PTR)"\t1b, 4b\n\t" \
143 STR(PTR)"\t2b, 4b\n\t" \
144 ".previous" \
145 : "=&r" (value), "=r" (res) \
146 : "r" (addr), "i" (-EFAULT));
147
148#define LoadHWU(addr, value, res) \
149 __asm__ __volatile__ ( \
150 ".set\tnoat\n" \
151 "1:\tlbu\t%0, 0(%2)\n" \
152 "2:\tlbu\t$1, 1(%2)\n\t" \
153 "sll\t%0, 0x8\n\t" \
154 "or\t%0, $1\n\t" \
155 "li\t%1, 0\n" \
156 "3:\n\t" \
157 ".insn\n\t" \
158 ".set\tat\n\t" \
159 ".section\t.fixup,\"ax\"\n\t" \
160 "4:\tli\t%1, %3\n\t" \
161 "j\t3b\n\t" \
162 ".previous\n\t" \
163 ".section\t__ex_table,\"a\"\n\t" \
164 STR(PTR)"\t1b, 4b\n\t" \
165 STR(PTR)"\t2b, 4b\n\t" \
166 ".previous" \
167 : "=&r" (value), "=r" (res) \
168 : "r" (addr), "i" (-EFAULT));
169
170#define LoadWU(addr, value, res) \
171 __asm__ __volatile__ ( \
172 "1:\tlwl\t%0, (%2)\n" \
173 "2:\tlwr\t%0, 3(%2)\n\t" \
174 "dsll\t%0, %0, 32\n\t" \
175 "dsrl\t%0, %0, 32\n\t" \
176 "li\t%1, 0\n" \
177 "3:\n\t" \
178 ".insn\n\t" \
179 "\t.section\t.fixup,\"ax\"\n\t" \
180 "4:\tli\t%1, %3\n\t" \
181 "j\t3b\n\t" \
182 ".previous\n\t" \
183 ".section\t__ex_table,\"a\"\n\t" \
184 STR(PTR)"\t1b, 4b\n\t" \
185 STR(PTR)"\t2b, 4b\n\t" \
186 ".previous" \
187 : "=&r" (value), "=r" (res) \
188 : "r" (addr), "i" (-EFAULT));
189
190#define LoadDW(addr, value, res) \
191 __asm__ __volatile__ ( \
192 "1:\tldl\t%0, (%2)\n" \
193 "2:\tldr\t%0, 7(%2)\n\t" \
194 "li\t%1, 0\n" \
195 "3:\n\t" \
196 ".insn\n\t" \
197 "\t.section\t.fixup,\"ax\"\n\t" \
198 "4:\tli\t%1, %3\n\t" \
199 "j\t3b\n\t" \
200 ".previous\n\t" \
201 ".section\t__ex_table,\"a\"\n\t" \
202 STR(PTR)"\t1b, 4b\n\t" \
203 STR(PTR)"\t2b, 4b\n\t" \
204 ".previous" \
205 : "=&r" (value), "=r" (res) \
206 : "r" (addr), "i" (-EFAULT));
207
208#define StoreHW(addr, value, res) \
209 __asm__ __volatile__ ( \
210 ".set\tnoat\n" \
211 "1:\tsb\t%1, 1(%2)\n\t" \
212 "srl\t$1, %1, 0x8\n" \
213 "2:\tsb\t$1, 0(%2)\n\t" \
214 ".set\tat\n\t" \
215 "li\t%0, 0\n" \
216 "3:\n\t" \
217 ".insn\n\t" \
218 ".section\t.fixup,\"ax\"\n\t" \
219 "4:\tli\t%0, %3\n\t" \
220 "j\t3b\n\t" \
221 ".previous\n\t" \
222 ".section\t__ex_table,\"a\"\n\t" \
223 STR(PTR)"\t1b, 4b\n\t" \
224 STR(PTR)"\t2b, 4b\n\t" \
225 ".previous" \
226 : "=r" (res) \
227 : "r" (value), "r" (addr), "i" (-EFAULT));
228
229#define StoreW(addr, value, res) \
230 __asm__ __volatile__ ( \
231 "1:\tswl\t%1,(%2)\n" \
232 "2:\tswr\t%1, 3(%2)\n\t" \
233 "li\t%0, 0\n" \
234 "3:\n\t" \
235 ".insn\n\t" \
236 ".section\t.fixup,\"ax\"\n\t" \
237 "4:\tli\t%0, %3\n\t" \
238 "j\t3b\n\t" \
239 ".previous\n\t" \
240 ".section\t__ex_table,\"a\"\n\t" \
241 STR(PTR)"\t1b, 4b\n\t" \
242 STR(PTR)"\t2b, 4b\n\t" \
243 ".previous" \
244 : "=r" (res) \
245 : "r" (value), "r" (addr), "i" (-EFAULT));
246
247#define StoreDW(addr, value, res) \
248 __asm__ __volatile__ ( \
249 "1:\tsdl\t%1,(%2)\n" \
250 "2:\tsdr\t%1, 7(%2)\n\t" \
251 "li\t%0, 0\n" \
252 "3:\n\t" \
253 ".insn\n\t" \
254 ".section\t.fixup,\"ax\"\n\t" \
255 "4:\tli\t%0, %3\n\t" \
256 "j\t3b\n\t" \
257 ".previous\n\t" \
258 ".section\t__ex_table,\"a\"\n\t" \
259 STR(PTR)"\t1b, 4b\n\t" \
260 STR(PTR)"\t2b, 4b\n\t" \
261 ".previous" \
262 : "=r" (res) \
263 : "r" (value), "r" (addr), "i" (-EFAULT));
264#endif
265
266#ifdef __LITTLE_ENDIAN
267#define LoadHW(addr, value, res) \
268 __asm__ __volatile__ (".set\tnoat\n" \
269 "1:\tlb\t%0, 1(%2)\n" \
270 "2:\tlbu\t$1, 0(%2)\n\t" \
271 "sll\t%0, 0x8\n\t" \
272 "or\t%0, $1\n\t" \
273 "li\t%1, 0\n" \
274 "3:\t.set\tat\n\t" \
275 ".insn\n\t" \
276 ".section\t.fixup,\"ax\"\n\t" \
277 "4:\tli\t%1, %3\n\t" \
278 "j\t3b\n\t" \
279 ".previous\n\t" \
280 ".section\t__ex_table,\"a\"\n\t" \
281 STR(PTR)"\t1b, 4b\n\t" \
282 STR(PTR)"\t2b, 4b\n\t" \
283 ".previous" \
284 : "=&r" (value), "=r" (res) \
285 : "r" (addr), "i" (-EFAULT));
286
287#define LoadW(addr, value, res) \
288 __asm__ __volatile__ ( \
289 "1:\tlwl\t%0, 3(%2)\n" \
290 "2:\tlwr\t%0, (%2)\n\t" \
291 "li\t%1, 0\n" \
292 "3:\n\t" \
293 ".insn\n\t" \
294 ".section\t.fixup,\"ax\"\n\t" \
295 "4:\tli\t%1, %3\n\t" \
296 "j\t3b\n\t" \
297 ".previous\n\t" \
298 ".section\t__ex_table,\"a\"\n\t" \
299 STR(PTR)"\t1b, 4b\n\t" \
300 STR(PTR)"\t2b, 4b\n\t" \
301 ".previous" \
302 : "=&r" (value), "=r" (res) \
303 : "r" (addr), "i" (-EFAULT));
304
305#define LoadHWU(addr, value, res) \
306 __asm__ __volatile__ ( \
307 ".set\tnoat\n" \
308 "1:\tlbu\t%0, 1(%2)\n" \
309 "2:\tlbu\t$1, 0(%2)\n\t" \
310 "sll\t%0, 0x8\n\t" \
311 "or\t%0, $1\n\t" \
312 "li\t%1, 0\n" \
313 "3:\n\t" \
314 ".insn\n\t" \
315 ".set\tat\n\t" \
316 ".section\t.fixup,\"ax\"\n\t" \
317 "4:\tli\t%1, %3\n\t" \
318 "j\t3b\n\t" \
319 ".previous\n\t" \
320 ".section\t__ex_table,\"a\"\n\t" \
321 STR(PTR)"\t1b, 4b\n\t" \
322 STR(PTR)"\t2b, 4b\n\t" \
323 ".previous" \
324 : "=&r" (value), "=r" (res) \
325 : "r" (addr), "i" (-EFAULT));
326
327#define LoadWU(addr, value, res) \
328 __asm__ __volatile__ ( \
329 "1:\tlwl\t%0, 3(%2)\n" \
330 "2:\tlwr\t%0, (%2)\n\t" \
331 "dsll\t%0, %0, 32\n\t" \
332 "dsrl\t%0, %0, 32\n\t" \
333 "li\t%1, 0\n" \
334 "3:\n\t" \
335 ".insn\n\t" \
336 "\t.section\t.fixup,\"ax\"\n\t" \
337 "4:\tli\t%1, %3\n\t" \
338 "j\t3b\n\t" \
339 ".previous\n\t" \
340 ".section\t__ex_table,\"a\"\n\t" \
341 STR(PTR)"\t1b, 4b\n\t" \
342 STR(PTR)"\t2b, 4b\n\t" \
343 ".previous" \
344 : "=&r" (value), "=r" (res) \
345 : "r" (addr), "i" (-EFAULT));
346
347#define LoadDW(addr, value, res) \
348 __asm__ __volatile__ ( \
349 "1:\tldl\t%0, 7(%2)\n" \
350 "2:\tldr\t%0, (%2)\n\t" \
351 "li\t%1, 0\n" \
352 "3:\n\t" \
353 ".insn\n\t" \
354 "\t.section\t.fixup,\"ax\"\n\t" \
355 "4:\tli\t%1, %3\n\t" \
356 "j\t3b\n\t" \
357 ".previous\n\t" \
358 ".section\t__ex_table,\"a\"\n\t" \
359 STR(PTR)"\t1b, 4b\n\t" \
360 STR(PTR)"\t2b, 4b\n\t" \
361 ".previous" \
362 : "=&r" (value), "=r" (res) \
363 : "r" (addr), "i" (-EFAULT));
364
365#define StoreHW(addr, value, res) \
366 __asm__ __volatile__ ( \
367 ".set\tnoat\n" \
368 "1:\tsb\t%1, 0(%2)\n\t" \
369 "srl\t$1,%1, 0x8\n" \
370 "2:\tsb\t$1, 1(%2)\n\t" \
371 ".set\tat\n\t" \
372 "li\t%0, 0\n" \
373 "3:\n\t" \
374 ".insn\n\t" \
375 ".section\t.fixup,\"ax\"\n\t" \
376 "4:\tli\t%0, %3\n\t" \
377 "j\t3b\n\t" \
378 ".previous\n\t" \
379 ".section\t__ex_table,\"a\"\n\t" \
380 STR(PTR)"\t1b, 4b\n\t" \
381 STR(PTR)"\t2b, 4b\n\t" \
382 ".previous" \
383 : "=r" (res) \
384 : "r" (value), "r" (addr), "i" (-EFAULT));
385
386#define StoreW(addr, value, res) \
387 __asm__ __volatile__ ( \
388 "1:\tswl\t%1, 3(%2)\n" \
389 "2:\tswr\t%1, (%2)\n\t" \
390 "li\t%0, 0\n" \
391 "3:\n\t" \
392 ".insn\n\t" \
393 ".section\t.fixup,\"ax\"\n\t" \
394 "4:\tli\t%0, %3\n\t" \
395 "j\t3b\n\t" \
396 ".previous\n\t" \
397 ".section\t__ex_table,\"a\"\n\t" \
398 STR(PTR)"\t1b, 4b\n\t" \
399 STR(PTR)"\t2b, 4b\n\t" \
400 ".previous" \
401 : "=r" (res) \
402 : "r" (value), "r" (addr), "i" (-EFAULT));
403
404#define StoreDW(addr, value, res) \
405 __asm__ __volatile__ ( \
406 "1:\tsdl\t%1, 7(%2)\n" \
407 "2:\tsdr\t%1, (%2)\n\t" \
408 "li\t%0, 0\n" \
409 "3:\n\t" \
410 ".insn\n\t" \
411 ".section\t.fixup,\"ax\"\n\t" \
412 "4:\tli\t%0, %3\n\t" \
413 "j\t3b\n\t" \
414 ".previous\n\t" \
415 ".section\t__ex_table,\"a\"\n\t" \
416 STR(PTR)"\t1b, 4b\n\t" \
417 STR(PTR)"\t2b, 4b\n\t" \
418 ".previous" \
419 : "=r" (res) \
420 : "r" (value), "r" (addr), "i" (-EFAULT));
421#endif
422
105static void emulate_load_store_insn(struct pt_regs *regs, 423static void emulate_load_store_insn(struct pt_regs *regs,
106 void __user *addr, unsigned int __user *pc) 424 void __user *addr, unsigned int __user *pc)
107{ 425{
108 union mips_instruction insn; 426 union mips_instruction insn;
109 unsigned long value; 427 unsigned long value;
110 unsigned int res; 428 unsigned int res;
429 unsigned long origpc;
430 unsigned long orig31;
431 void __user *fault_addr = NULL;
432
433 origpc = (unsigned long)pc;
434 orig31 = regs->regs[31];
111 435
112 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); 436 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
113 437
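On big-endian kernels the LoadW() macro above pairs lwl at offset 0 with lwr at offset 3; the two partial loads between them deposit all four bytes of an unaligned word. A freestanding model of the net effect (the helper name is made up for this sketch):

#include <stdint.h>

/* Net effect of the big-endian lwl/lwr pair in LoadW() above: a 32-bit
 * big-endian load from an address with no alignment guarantee,
 * assembled from byte accesses.
 */
static uint32_t model_loadw_be(const uint8_t *addr)
{
	return ((uint32_t)addr[0] << 24) |
	       ((uint32_t)addr[1] << 16) |
	       ((uint32_t)addr[2] <<  8) |
		(uint32_t)addr[3];
}

The .fixup/__ex_table plumbing in each macro only changes the error path: if either partial access faults, the fixup stores -EFAULT in res and execution resumes after the load, which is why every caller below checks res and jumps to its fault label.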
@@ -117,22 +441,22 @@ static void emulate_load_store_insn(struct pt_regs *regs,
117 __get_user(insn.word, pc); 441 __get_user(insn.word, pc);
118 442
119 switch (insn.i_format.opcode) { 443 switch (insn.i_format.opcode) {
120 /* 444 /*
121 * These are instructions that a compiler doesn't generate. We 445 * These are instructions that a compiler doesn't generate. We
122 * can assume therefore that the code is MIPS-aware and 446 * can assume therefore that the code is MIPS-aware and
123 * really buggy. Emulating these instructions would break the 447 * really buggy. Emulating these instructions would break the
124 * semantics anyway. 448 * semantics anyway.
125 */ 449 */
126 case ll_op: 450 case ll_op:
127 case lld_op: 451 case lld_op:
128 case sc_op: 452 case sc_op:
129 case scd_op: 453 case scd_op:
130 454
131 /* 455 /*
132 * For these instructions the only way to create an address 456 * For these instructions the only way to create an address
133 * error is an attempted access to kernel/supervisor address 457 * error is an attempted access to kernel/supervisor address
134 * space. 458 * space.
135 */ 459 */
136 case ldl_op: 460 case ldl_op:
137 case ldr_op: 461 case ldr_op:
138 case lwl_op: 462 case lwl_op:
@@ -146,36 +470,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
146 case sb_op: 470 case sb_op:
147 goto sigbus; 471 goto sigbus;
148 472
149 /* 473 /*
150 * The remaining opcodes are the ones that are really of interest. 474 * The remaining opcodes are the ones that are really of
151 */ 475 * interest.
476 */
152 case lh_op: 477 case lh_op:
153 if (!access_ok(VERIFY_READ, addr, 2)) 478 if (!access_ok(VERIFY_READ, addr, 2))
154 goto sigbus; 479 goto sigbus;
155 480
156 __asm__ __volatile__ (".set\tnoat\n" 481 LoadHW(addr, value, res);
157#ifdef __BIG_ENDIAN
158 "1:\tlb\t%0, 0(%2)\n"
159 "2:\tlbu\t$1, 1(%2)\n\t"
160#endif
161#ifdef __LITTLE_ENDIAN
162 "1:\tlb\t%0, 1(%2)\n"
163 "2:\tlbu\t$1, 0(%2)\n\t"
164#endif
165 "sll\t%0, 0x8\n\t"
166 "or\t%0, $1\n\t"
167 "li\t%1, 0\n"
168 "3:\t.set\tat\n\t"
169 ".section\t.fixup,\"ax\"\n\t"
170 "4:\tli\t%1, %3\n\t"
171 "j\t3b\n\t"
172 ".previous\n\t"
173 ".section\t__ex_table,\"a\"\n\t"
174 STR(PTR)"\t1b, 4b\n\t"
175 STR(PTR)"\t2b, 4b\n\t"
176 ".previous"
177 : "=&r" (value), "=r" (res)
178 : "r" (addr), "i" (-EFAULT));
179 if (res) 482 if (res)
180 goto fault; 483 goto fault;
181 compute_return_epc(regs); 484 compute_return_epc(regs);
@@ -186,26 +489,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
186 if (!access_ok(VERIFY_READ, addr, 4)) 489 if (!access_ok(VERIFY_READ, addr, 4))
187 goto sigbus; 490 goto sigbus;
188 491
189 __asm__ __volatile__ ( 492 LoadW(addr, value, res);
190#ifdef __BIG_ENDIAN
191 "1:\tlwl\t%0, (%2)\n"
192 "2:\tlwr\t%0, 3(%2)\n\t"
193#endif
194#ifdef __LITTLE_ENDIAN
195 "1:\tlwl\t%0, 3(%2)\n"
196 "2:\tlwr\t%0, (%2)\n\t"
197#endif
198 "li\t%1, 0\n"
199 "3:\t.section\t.fixup,\"ax\"\n\t"
200 "4:\tli\t%1, %3\n\t"
201 "j\t3b\n\t"
202 ".previous\n\t"
203 ".section\t__ex_table,\"a\"\n\t"
204 STR(PTR)"\t1b, 4b\n\t"
205 STR(PTR)"\t2b, 4b\n\t"
206 ".previous"
207 : "=&r" (value), "=r" (res)
208 : "r" (addr), "i" (-EFAULT));
209 if (res) 493 if (res)
210 goto fault; 494 goto fault;
211 compute_return_epc(regs); 495 compute_return_epc(regs);
@@ -216,30 +500,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
216 if (!access_ok(VERIFY_READ, addr, 2)) 500 if (!access_ok(VERIFY_READ, addr, 2))
217 goto sigbus; 501 goto sigbus;
218 502
219 __asm__ __volatile__ ( 503 LoadHWU(addr, value, res);
220 ".set\tnoat\n"
221#ifdef __BIG_ENDIAN
222 "1:\tlbu\t%0, 0(%2)\n"
223 "2:\tlbu\t$1, 1(%2)\n\t"
224#endif
225#ifdef __LITTLE_ENDIAN
226 "1:\tlbu\t%0, 1(%2)\n"
227 "2:\tlbu\t$1, 0(%2)\n\t"
228#endif
229 "sll\t%0, 0x8\n\t"
230 "or\t%0, $1\n\t"
231 "li\t%1, 0\n"
232 "3:\t.set\tat\n\t"
233 ".section\t.fixup,\"ax\"\n\t"
234 "4:\tli\t%1, %3\n\t"
235 "j\t3b\n\t"
236 ".previous\n\t"
237 ".section\t__ex_table,\"a\"\n\t"
238 STR(PTR)"\t1b, 4b\n\t"
239 STR(PTR)"\t2b, 4b\n\t"
240 ".previous"
241 : "=&r" (value), "=r" (res)
242 : "r" (addr), "i" (-EFAULT));
243 if (res) 504 if (res)
244 goto fault; 505 goto fault;
245 compute_return_epc(regs); 506 compute_return_epc(regs);
@@ -258,28 +519,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
258 if (!access_ok(VERIFY_READ, addr, 4)) 519 if (!access_ok(VERIFY_READ, addr, 4))
259 goto sigbus; 520 goto sigbus;
260 521
261 __asm__ __volatile__ ( 522 LoadWU(addr, value, res);
262#ifdef __BIG_ENDIAN
263 "1:\tlwl\t%0, (%2)\n"
264 "2:\tlwr\t%0, 3(%2)\n\t"
265#endif
266#ifdef __LITTLE_ENDIAN
267 "1:\tlwl\t%0, 3(%2)\n"
268 "2:\tlwr\t%0, (%2)\n\t"
269#endif
270 "dsll\t%0, %0, 32\n\t"
271 "dsrl\t%0, %0, 32\n\t"
272 "li\t%1, 0\n"
273 "3:\t.section\t.fixup,\"ax\"\n\t"
274 "4:\tli\t%1, %3\n\t"
275 "j\t3b\n\t"
276 ".previous\n\t"
277 ".section\t__ex_table,\"a\"\n\t"
278 STR(PTR)"\t1b, 4b\n\t"
279 STR(PTR)"\t2b, 4b\n\t"
280 ".previous"
281 : "=&r" (value), "=r" (res)
282 : "r" (addr), "i" (-EFAULT));
283 if (res) 523 if (res)
284 goto fault; 524 goto fault;
285 compute_return_epc(regs); 525 compute_return_epc(regs);
@@ -302,26 +542,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
302 if (!access_ok(VERIFY_READ, addr, 8)) 542 if (!access_ok(VERIFY_READ, addr, 8))
303 goto sigbus; 543 goto sigbus;
304 544
305 __asm__ __volatile__ ( 545 LoadDW(addr, value, res);
306#ifdef __BIG_ENDIAN
307 "1:\tldl\t%0, (%2)\n"
308 "2:\tldr\t%0, 7(%2)\n\t"
309#endif
310#ifdef __LITTLE_ENDIAN
311 "1:\tldl\t%0, 7(%2)\n"
312 "2:\tldr\t%0, (%2)\n\t"
313#endif
314 "li\t%1, 0\n"
315 "3:\t.section\t.fixup,\"ax\"\n\t"
316 "4:\tli\t%1, %3\n\t"
317 "j\t3b\n\t"
318 ".previous\n\t"
319 ".section\t__ex_table,\"a\"\n\t"
320 STR(PTR)"\t1b, 4b\n\t"
321 STR(PTR)"\t2b, 4b\n\t"
322 ".previous"
323 : "=&r" (value), "=r" (res)
324 : "r" (addr), "i" (-EFAULT));
325 if (res) 546 if (res)
326 goto fault; 547 goto fault;
327 compute_return_epc(regs); 548 compute_return_epc(regs);
@@ -336,68 +557,22 @@ static void emulate_load_store_insn(struct pt_regs *regs,
336 if (!access_ok(VERIFY_WRITE, addr, 2)) 557 if (!access_ok(VERIFY_WRITE, addr, 2))
337 goto sigbus; 558 goto sigbus;
338 559
560 compute_return_epc(regs);
339 value = regs->regs[insn.i_format.rt]; 561 value = regs->regs[insn.i_format.rt];
340 __asm__ __volatile__ ( 562 StoreHW(addr, value, res);
341#ifdef __BIG_ENDIAN
342 ".set\tnoat\n"
343 "1:\tsb\t%1, 1(%2)\n\t"
344 "srl\t$1, %1, 0x8\n"
345 "2:\tsb\t$1, 0(%2)\n\t"
346 ".set\tat\n\t"
347#endif
348#ifdef __LITTLE_ENDIAN
349 ".set\tnoat\n"
350 "1:\tsb\t%1, 0(%2)\n\t"
351 "srl\t$1,%1, 0x8\n"
352 "2:\tsb\t$1, 1(%2)\n\t"
353 ".set\tat\n\t"
354#endif
355 "li\t%0, 0\n"
356 "3:\n\t"
357 ".section\t.fixup,\"ax\"\n\t"
358 "4:\tli\t%0, %3\n\t"
359 "j\t3b\n\t"
360 ".previous\n\t"
361 ".section\t__ex_table,\"a\"\n\t"
362 STR(PTR)"\t1b, 4b\n\t"
363 STR(PTR)"\t2b, 4b\n\t"
364 ".previous"
365 : "=r" (res)
366 : "r" (value), "r" (addr), "i" (-EFAULT));
367 if (res) 563 if (res)
368 goto fault; 564 goto fault;
369 compute_return_epc(regs);
370 break; 565 break;
371 566
372 case sw_op: 567 case sw_op:
373 if (!access_ok(VERIFY_WRITE, addr, 4)) 568 if (!access_ok(VERIFY_WRITE, addr, 4))
374 goto sigbus; 569 goto sigbus;
375 570
571 compute_return_epc(regs);
376 value = regs->regs[insn.i_format.rt]; 572 value = regs->regs[insn.i_format.rt];
377 __asm__ __volatile__ ( 573 StoreW(addr, value, res);
378#ifdef __BIG_ENDIAN
379 "1:\tswl\t%1,(%2)\n"
380 "2:\tswr\t%1, 3(%2)\n\t"
381#endif
382#ifdef __LITTLE_ENDIAN
383 "1:\tswl\t%1, 3(%2)\n"
384 "2:\tswr\t%1, (%2)\n\t"
385#endif
386 "li\t%0, 0\n"
387 "3:\n\t"
388 ".section\t.fixup,\"ax\"\n\t"
389 "4:\tli\t%0, %3\n\t"
390 "j\t3b\n\t"
391 ".previous\n\t"
392 ".section\t__ex_table,\"a\"\n\t"
393 STR(PTR)"\t1b, 4b\n\t"
394 STR(PTR)"\t2b, 4b\n\t"
395 ".previous"
396 : "=r" (res)
397 : "r" (value), "r" (addr), "i" (-EFAULT));
398 if (res) 574 if (res)
399 goto fault; 575 goto fault;
400 compute_return_epc(regs);
401 break; 576 break;
402 577
403 case sd_op: 578 case sd_op:
@@ -412,31 +587,11 @@ static void emulate_load_store_insn(struct pt_regs *regs,
412 if (!access_ok(VERIFY_WRITE, addr, 8)) 587 if (!access_ok(VERIFY_WRITE, addr, 8))
413 goto sigbus; 588 goto sigbus;
414 589
590 compute_return_epc(regs);
415 value = regs->regs[insn.i_format.rt]; 591 value = regs->regs[insn.i_format.rt];
416 __asm__ __volatile__ ( 592 StoreDW(addr, value, res);
417#ifdef __BIG_ENDIAN
418 "1:\tsdl\t%1,(%2)\n"
419 "2:\tsdr\t%1, 7(%2)\n\t"
420#endif
421#ifdef __LITTLE_ENDIAN
422 "1:\tsdl\t%1, 7(%2)\n"
423 "2:\tsdr\t%1, (%2)\n\t"
424#endif
425 "li\t%0, 0\n"
426 "3:\n\t"
427 ".section\t.fixup,\"ax\"\n\t"
428 "4:\tli\t%0, %3\n\t"
429 "j\t3b\n\t"
430 ".previous\n\t"
431 ".section\t__ex_table,\"a\"\n\t"
432 STR(PTR)"\t1b, 4b\n\t"
433 STR(PTR)"\t2b, 4b\n\t"
434 ".previous"
435 : "=r" (res)
436 : "r" (value), "r" (addr), "i" (-EFAULT));
437 if (res) 593 if (res)
438 goto fault; 594 goto fault;
439 compute_return_epc(regs);
440 break; 595 break;
441#endif /* CONFIG_64BIT */ 596#endif /* CONFIG_64BIT */
442 597
@@ -447,10 +602,21 @@ static void emulate_load_store_insn(struct pt_regs *regs,
447 case ldc1_op: 602 case ldc1_op:
448 case swc1_op: 603 case swc1_op:
449 case sdc1_op: 604 case sdc1_op:
450 /* 605 die_if_kernel("Unaligned FP access in kernel code", regs);
451 * I herewith declare: this does not happen. So send SIGBUS. 606 BUG_ON(!used_math());
452 */ 607 BUG_ON(!is_fpu_owner());
453 goto sigbus; 608
609 lose_fpu(1); /* Save FPU state for the emulator. */
610 res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
611 &fault_addr);
612 own_fpu(1); /* Restore FPU state. */
613
614 /* Signal if something went wrong. */
615 process_fpemu_return(res, fault_addr);
616
617 if (res == 0)
618 break;
619 return;
454 620
455 /* 621 /*
456 * COP2 is available to implementor for application specific use. 622 * COP2 is available to implementor for application specific use.
@@ -488,6 +654,9 @@ static void emulate_load_store_insn(struct pt_regs *regs,
488 return; 654 return;
489 655
490fault: 656fault:
657 /* roll back jump/branch */
658 regs->cp0_epc = origpc;
659 regs->regs[31] = orig31;
491 /* Did we have an exception handler installed? */ 660 /* Did we have an exception handler installed? */
492 if (fixup_exception(regs)) 661 if (fixup_exception(regs))
493 return; 662 return;
@@ -504,10 +673,881 @@ sigbus:
504 return; 673 return;
505 674
506sigill: 675sigill:
507 die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs); 676 die_if_kernel
677 ("Unhandled kernel unaligned access or invalid instruction", regs);
508 force_sig(SIGILL, current); 678 force_sig(SIGILL, current);
509} 679}
510 680
681/* Recode table from 16-bit register notation to 32-bit GPR. */
682const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
683
684/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
685const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
686
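The two tables widen the 3-bit register fields used by the compressed encodings into full GPR numbers; the store table differs only in slot 0, which maps to $zero. A couple of example lookups, mirroring how the decode code below uses them:

/* Example lookups (o32 register names):
 *   reg16to32[0]   -> 16 ($s0)     reg16to32[2]   -> 2 ($v0)
 *   reg16to32st[0] -> 0  ($zero)   indices 1..7 are identical in both.
 * A decoded 3-bit rt field is widened like this:
 */
unsigned int gpr = reg16to32[insn.mm16_rb_format.rt];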
687void emulate_load_store_microMIPS(struct pt_regs *regs, void __user * addr)
688{
689 unsigned long value;
690 unsigned int res;
691 int i;
692 unsigned int reg = 0, rvar;
693 unsigned long orig31;
694 u16 __user *pc16;
695 u16 halfword;
696 unsigned int word;
697 unsigned long origpc, contpc;
698 union mips_instruction insn;
699 struct mm_decoded_insn mminsn;
700 void __user *fault_addr = NULL;
701
702 origpc = regs->cp0_epc;
703 orig31 = regs->regs[31];
704
705 mminsn.micro_mips_mode = 1;
706
707 /*
708 * This load never faults.
709 */
710 pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
711 __get_user(halfword, pc16);
712 pc16++;
713 contpc = regs->cp0_epc + 2;
714 word = ((unsigned int)halfword << 16);
715 mminsn.pc_inc = 2;
716
717 if (!mm_insn_16bit(halfword)) {
718 __get_user(halfword, pc16);
719 pc16++;
720 contpc = regs->cp0_epc + 4;
721 mminsn.pc_inc = 4;
722 word |= halfword;
723 }
724 mminsn.insn = word;
725
726 if (get_user(halfword, pc16))
727 goto fault;
728 mminsn.next_pc_inc = 2;
729 word = ((unsigned int)halfword << 16);
730
731 if (!mm_insn_16bit(halfword)) {
732 pc16++;
733 if (get_user(halfword, pc16))
734 goto fault;
735 mminsn.next_pc_inc = 4;
736 word |= halfword;
737 }
738 mminsn.next_insn = word;
739
740 insn = (union mips_instruction)(mminsn.insn);
741 if (mm_isBranchInstr(regs, mminsn, &contpc))
742 insn = (union mips_instruction)(mminsn.next_insn);
743
744 /* Parse instruction to find what to do */
745
746 switch (insn.mm_i_format.opcode) {
747
748 case mm_pool32a_op:
749 switch (insn.mm_x_format.func) {
750 case mm_lwxs_op:
751 reg = insn.mm_x_format.rd;
752 goto loadW;
753 }
754
755 goto sigbus;
756
757 case mm_pool32b_op:
758 switch (insn.mm_m_format.func) {
759 case mm_lwp_func:
760 reg = insn.mm_m_format.rd;
761 if (reg == 31)
762 goto sigbus;
763
764 if (!access_ok(VERIFY_READ, addr, 8))
765 goto sigbus;
766
767 LoadW(addr, value, res);
768 if (res)
769 goto fault;
770 regs->regs[reg] = value;
771 addr += 4;
772 LoadW(addr, value, res);
773 if (res)
774 goto fault;
775 regs->regs[reg + 1] = value;
776 goto success;
777
778 case mm_swp_func:
779 reg = insn.mm_m_format.rd;
780 if (reg == 31)
781 goto sigbus;
782
783 if (!access_ok(VERIFY_WRITE, addr, 8))
784 goto sigbus;
785
786 value = regs->regs[reg];
787 StoreW(addr, value, res);
788 if (res)
789 goto fault;
790 addr += 4;
791 value = regs->regs[reg + 1];
792 StoreW(addr, value, res);
793 if (res)
794 goto fault;
795 goto success;
796
797 case mm_ldp_func:
798#ifdef CONFIG_64BIT
799 reg = insn.mm_m_format.rd;
800 if (reg == 31)
801 goto sigbus;
802
803 if (!access_ok(VERIFY_READ, addr, 16))
804 goto sigbus;
805
806 LoadDW(addr, value, res);
807 if (res)
808 goto fault;
809 regs->regs[reg] = value;
810 addr += 8;
811 LoadDW(addr, value, res);
812 if (res)
813 goto fault;
814 regs->regs[reg + 1] = value;
815 goto success;
816#endif /* CONFIG_64BIT */
817
818 goto sigill;
819
820 case mm_sdp_func:
821#ifdef CONFIG_64BIT
822 reg = insn.mm_m_format.rd;
823 if (reg == 31)
824 goto sigbus;
825
826 if (!access_ok(VERIFY_WRITE, addr, 16))
827 goto sigbus;
828
829 value = regs->regs[reg];
830 StoreDW(addr, value, res);
831 if (res)
832 goto fault;
833 addr += 8;
834 value = regs->regs[reg + 1];
835 StoreDW(addr, value, res);
836 if (res)
837 goto fault;
838 goto success;
839#endif /* CONFIG_64BIT */
840
841 goto sigill;
842
843 case mm_lwm32_func:
844 reg = insn.mm_m_format.rd;
845 rvar = reg & 0xf;
846 if ((rvar > 9) || !reg)
847 goto sigill;
848 if (reg & 0x10) {
849 if (!access_ok
850 (VERIFY_READ, addr, 4 * (rvar + 1)))
851 goto sigbus;
852 } else {
853 if (!access_ok(VERIFY_READ, addr, 4 * rvar))
854 goto sigbus;
855 }
856 if (rvar == 9)
857 rvar = 8;
858 for (i = 16; rvar; rvar--, i++) {
859 LoadW(addr, value, res);
860 if (res)
861 goto fault;
862 addr += 4;
863 regs->regs[i] = value;
864 }
865 if ((reg & 0xf) == 9) {
866 LoadW(addr, value, res);
867 if (res)
868 goto fault;
869 addr += 4;
870 regs->regs[30] = value;
871 }
872 if (reg & 0x10) {
873 LoadW(addr, value, res);
874 if (res)
875 goto fault;
876 regs->regs[31] = value;
877 }
878 goto success;
879
880 case mm_swm32_func:
881 reg = insn.mm_m_format.rd;
882 rvar = reg & 0xf;
883 if ((rvar > 9) || !reg)
884 goto sigill;
885 if (reg & 0x10) {
886 if (!access_ok
887 (VERIFY_WRITE, addr, 4 * (rvar + 1)))
888 goto sigbus;
889 } else {
890 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
891 goto sigbus;
892 }
893 if (rvar == 9)
894 rvar = 8;
895 for (i = 16; rvar; rvar--, i++) {
896 value = regs->regs[i];
897 StoreW(addr, value, res);
898 if (res)
899 goto fault;
900 addr += 4;
901 }
902 if ((reg & 0xf) == 9) {
903 value = regs->regs[30];
904 StoreW(addr, value, res);
905 if (res)
906 goto fault;
907 addr += 4;
908 }
909 if (reg & 0x10) {
910 value = regs->regs[31];
911 StoreW(addr, value, res);
912 if (res)
913 goto fault;
914 }
915 goto success;
916
917 case mm_ldm_func:
918#ifdef CONFIG_64BIT
919 reg = insn.mm_m_format.rd;
920 rvar = reg & 0xf;
921 if ((rvar > 9) || !reg)
922 goto sigill;
923 if (reg & 0x10) {
924 if (!access_ok
925 (VERIFY_READ, addr, 8 * (rvar + 1)))
926 goto sigbus;
927 } else {
928 if (!access_ok(VERIFY_READ, addr, 8 * rvar))
929 goto sigbus;
930 }
931 if (rvar == 9)
932 rvar = 8;
933
934 for (i = 16; rvar; rvar--, i++) {
935 LoadDW(addr, value, res);
936 if (res)
937 goto fault;
938 addr += 4;
939 regs->regs[i] = value;
940 }
941 if ((reg & 0xf) == 9) {
942 LoadDW(addr, value, res);
943 if (res)
944 goto fault;
945 addr += 8;
946 regs->regs[30] = value;
947 }
948 if (reg & 0x10) {
949 LoadDW(addr, value, res);
950 if (res)
951 goto fault;
952 regs->regs[31] = value;
953 }
954 goto success;
955#endif /* CONFIG_64BIT */
956
957 goto sigill;
958
959 case mm_sdm_func:
960#ifdef CONFIG_64BIT
961 reg = insn.mm_m_format.rd;
962 rvar = reg & 0xf;
963 if ((rvar > 9) || !reg)
964 goto sigill;
965 if (reg & 0x10) {
966 if (!access_ok
967 (VERIFY_WRITE, addr, 8 * (rvar + 1)))
968 goto sigbus;
969 } else {
970 if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
971 goto sigbus;
972 }
973 if (rvar == 9)
974 rvar = 8;
975
976 for (i = 16; rvar; rvar--, i++) {
977 value = regs->regs[i];
978 StoreDW(addr, value, res);
979 if (res)
980 goto fault;
981 addr += 8;
982 }
983 if ((reg & 0xf) == 9) {
984 value = regs->regs[30];
985 StoreDW(addr, value, res);
986 if (res)
987 goto fault;
988 addr += 8;
989 }
990 if (reg & 0x10) {
991 value = regs->regs[31];
992 StoreDW(addr, value, res);
993 if (res)
994 goto fault;
995 }
996 goto success;
997#endif /* CONFIG_64BIT */
998
999 goto sigill;
1000
1001 /* LWC2, SWC2, LDC2, SDC2 are not serviced */
1002 }
1003
1004 goto sigbus;
1005
1006 case mm_pool32c_op:
1007 switch (insn.mm_m_format.func) {
1008 case mm_lwu_func:
1009 reg = insn.mm_m_format.rd;
1010 goto loadWU;
1011 }
1012
1013 /* LL,SC,LLD,SCD are not serviced */
1014 goto sigbus;
1015
1016 case mm_pool32f_op:
1017 switch (insn.mm_x_format.func) {
1018 case mm_lwxc1_func:
1019 case mm_swxc1_func:
1020 case mm_ldxc1_func:
1021 case mm_sdxc1_func:
1022 goto fpu_emul;
1023 }
1024
1025 goto sigbus;
1026
1027 case mm_ldc132_op:
1028 case mm_sdc132_op:
1029 case mm_lwc132_op:
1030 case mm_swc132_op:
1031fpu_emul:
1032 /* roll back jump/branch */
1033 regs->cp0_epc = origpc;
1034 regs->regs[31] = orig31;
1035
1036 die_if_kernel("Unaligned FP access in kernel code", regs);
1037 BUG_ON(!used_math());
1038 BUG_ON(!is_fpu_owner());
1039
1040 lose_fpu(1); /* save the FPU state for the emulator */
1041 res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
1042 &fault_addr);
1043 own_fpu(1); /* restore FPU state */
1044
1045 /* If something went wrong, signal */
1046 process_fpemu_return(res, fault_addr);
1047
1048 if (res == 0)
1049 goto success;
1050 return;
1051
1052 case mm_lh32_op:
1053 reg = insn.mm_i_format.rt;
1054 goto loadHW;
1055
1056 case mm_lhu32_op:
1057 reg = insn.mm_i_format.rt;
1058 goto loadHWU;
1059
1060 case mm_lw32_op:
1061 reg = insn.mm_i_format.rt;
1062 goto loadW;
1063
1064 case mm_sh32_op:
1065 reg = insn.mm_i_format.rt;
1066 goto storeHW;
1067
1068 case mm_sw32_op:
1069 reg = insn.mm_i_format.rt;
1070 goto storeW;
1071
1072 case mm_ld32_op:
1073 reg = insn.mm_i_format.rt;
1074 goto loadDW;
1075
1076 case mm_sd32_op:
1077 reg = insn.mm_i_format.rt;
1078 goto storeDW;
1079
1080 case mm_pool16c_op:
1081 switch (insn.mm16_m_format.func) {
1082 case mm_lwm16_op:
1083 reg = insn.mm16_m_format.rlist;
1084 rvar = reg + 1;
1085 if (!access_ok(VERIFY_READ, addr, 4 * rvar))
1086 goto sigbus;
1087
1088 for (i = 16; rvar; rvar--, i++) {
1089 LoadW(addr, value, res);
1090 if (res)
1091 goto fault;
1092 addr += 4;
1093 regs->regs[i] = value;
1094 }
1095 LoadW(addr, value, res);
1096 if (res)
1097 goto fault;
1098 regs->regs[31] = value;
1099
1100 goto success;
1101
1102 case mm_swm16_op:
1103 reg = insn.mm16_m_format.rlist;
1104 rvar = reg + 1;
1105 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
1106 goto sigbus;
1107
1108 for (i = 16; rvar; rvar--, i++) {
1109 value = regs->regs[i];
1110 StoreW(addr, value, res);
1111 if (res)
1112 goto fault;
1113 addr += 4;
1114 }
1115 value = regs->regs[31];
1116 StoreW(addr, value, res);
1117 if (res)
1118 goto fault;
1119
1120 goto success;
1121
1122 }
1123
1124 goto sigbus;
1125
1126 case mm_lhu16_op:
1127 reg = reg16to32[insn.mm16_rb_format.rt];
1128 goto loadHWU;
1129
1130 case mm_lw16_op:
1131 reg = reg16to32[insn.mm16_rb_format.rt];
1132 goto loadW;
1133
1134 case mm_sh16_op:
1135 reg = reg16to32st[insn.mm16_rb_format.rt];
1136 goto storeHW;
1137
1138 case mm_sw16_op:
1139 reg = reg16to32st[insn.mm16_rb_format.rt];
1140 goto storeW;
1141
1142 case mm_lwsp16_op:
1143 reg = insn.mm16_r5_format.rt;
1144 goto loadW;
1145
1146 case mm_swsp16_op:
1147 reg = insn.mm16_r5_format.rt;
1148 goto storeW;
1149
1150 case mm_lwgp16_op:
1151 reg = reg16to32[insn.mm16_r3_format.rt];
1152 goto loadW;
1153
1154 default:
1155 goto sigill;
1156 }
1157
1158loadHW:
1159 if (!access_ok(VERIFY_READ, addr, 2))
1160 goto sigbus;
1161
1162 LoadHW(addr, value, res);
1163 if (res)
1164 goto fault;
1165 regs->regs[reg] = value;
1166 goto success;
1167
1168loadHWU:
1169 if (!access_ok(VERIFY_READ, addr, 2))
1170 goto sigbus;
1171
1172 LoadHWU(addr, value, res);
1173 if (res)
1174 goto fault;
1175 regs->regs[reg] = value;
1176 goto success;
1177
1178loadW:
1179 if (!access_ok(VERIFY_READ, addr, 4))
1180 goto sigbus;
1181
1182 LoadW(addr, value, res);
1183 if (res)
1184 goto fault;
1185 regs->regs[reg] = value;
1186 goto success;
1187
1188loadWU:
1189#ifdef CONFIG_64BIT
1190 /*
1191 * A 32-bit kernel might be running on a 64-bit processor. But
1192 * if we're on a 32-bit processor and an i-cache incoherency
1193 * or race makes us see a 64-bit instruction here the sdl/sdr
1194 * would blow up, so for now we don't handle unaligned 64-bit
1195 * instructions on 32-bit kernels.
1196 */
1197 if (!access_ok(VERIFY_READ, addr, 4))
1198 goto sigbus;
1199
1200 LoadWU(addr, value, res);
1201 if (res)
1202 goto fault;
1203 regs->regs[reg] = value;
1204 goto success;
1205#endif /* CONFIG_64BIT */
1206
1207 /* Cannot handle 64-bit instructions in 32-bit kernel */
1208 goto sigill;
1209
1210loadDW:
1211#ifdef CONFIG_64BIT
1212 /*
1213 * A 32-bit kernel might be running on a 64-bit processor. But
1214 * if we're on a 32-bit processor and an i-cache incoherency
1215 * or race makes us see a 64-bit instruction here the sdl/sdr
1216 * would blow up, so for now we don't handle unaligned 64-bit
1217 * instructions on 32-bit kernels.
1218 */
1219 if (!access_ok(VERIFY_READ, addr, 8))
1220 goto sigbus;
1221
1222 LoadDW(addr, value, res);
1223 if (res)
1224 goto fault;
1225 regs->regs[reg] = value;
1226 goto success;
1227#endif /* CONFIG_64BIT */
1228
1229 /* Cannot handle 64-bit instructions in 32-bit kernel */
1230 goto sigill;
1231
1232storeHW:
1233 if (!access_ok(VERIFY_WRITE, addr, 2))
1234 goto sigbus;
1235
1236 value = regs->regs[reg];
1237 StoreHW(addr, value, res);
1238 if (res)
1239 goto fault;
1240 goto success;
1241
1242storeW:
1243 if (!access_ok(VERIFY_WRITE, addr, 4))
1244 goto sigbus;
1245
1246 value = regs->regs[reg];
1247 StoreW(addr, value, res);
1248 if (res)
1249 goto fault;
1250 goto success;
1251
1252storeDW:
1253#ifdef CONFIG_64BIT
1254 /*
1255 * A 32-bit kernel might be running on a 64-bit processor. But
1256 * if we're on a 32-bit processor and an i-cache incoherency
1257 * or race makes us see a 64-bit instruction here the sdl/sdr
1258 * would blow up, so for now we don't handle unaligned 64-bit
1259 * instructions on 32-bit kernels.
1260 */
1261 if (!access_ok(VERIFY_WRITE, addr, 8))
1262 goto sigbus;
1263
1264 value = regs->regs[reg];
1265 StoreDW(addr, value, res);
1266 if (res)
1267 goto fault;
1268 goto success;
1269#endif /* CONFIG_64BIT */
1270
1271 /* Cannot handle 64-bit instructions in 32-bit kernel */
1272 goto sigill;
1273
1274success:
1275 regs->cp0_epc = contpc; /* advance or branch */
1276
1277#ifdef CONFIG_DEBUG_FS
1278 unaligned_instructions++;
1279#endif
1280 return;
1281
1282fault:
1283 /* roll back jump/branch */
1284 regs->cp0_epc = origpc;
1285 regs->regs[31] = orig31;
1286 /* Did we have an exception handler installed? */
1287 if (fixup_exception(regs))
1288 return;
1289
1290 die_if_kernel("Unhandled kernel unaligned access", regs);
1291 force_sig(SIGSEGV, current);
1292
1293 return;
1294
1295sigbus:
1296 die_if_kernel("Unhandled kernel unaligned access", regs);
1297 force_sig(SIGBUS, current);
1298
1299 return;
1300
1301sigill:
1302 die_if_kernel
1303 ("Unhandled kernel unaligned access or invalid instruction", regs);
1304 force_sig(SIGILL, current);
1305}
1306
1307static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
1308{
1309 unsigned long value;
1310 unsigned int res;
1311 int reg;
1312 unsigned long orig31;
1313 u16 __user *pc16;
1314 unsigned long origpc;
1315 union mips16e_instruction mips16inst, oldinst;
1316
1317 origpc = regs->cp0_epc;
1318 orig31 = regs->regs[31];
1319 pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
1320 /*
1321 * This load never faults.
1322 */
1323 __get_user(mips16inst.full, pc16);
1324 oldinst = mips16inst;
1325
1326 /* skip EXTEND instruction */
1327 if (mips16inst.ri.opcode == MIPS16e_extend_op) {
1328 pc16++;
1329 __get_user(mips16inst.full, pc16);
1330 } else if (delay_slot(regs)) {
1331 /* skip jump instructions */
1332 /* JAL/JALX are 32 bits but have OPCODE in first short int */
1333 if (mips16inst.ri.opcode == MIPS16e_jal_op)
1334 pc16++;
1335 pc16++;
1336 if (get_user(mips16inst.full, pc16))
1337 goto sigbus;
1338 }
1339
1340 switch (mips16inst.ri.opcode) {
1341 case MIPS16e_i64_op: /* I64 or RI64 instruction */
1342 switch (mips16inst.i64.func) { /* I64/RI64 func field check */
1343 case MIPS16e_ldpc_func:
1344 case MIPS16e_ldsp_func:
1345 reg = reg16to32[mips16inst.ri64.ry];
1346 goto loadDW;
1347
1348 case MIPS16e_sdsp_func:
1349 reg = reg16to32[mips16inst.ri64.ry];
1350 goto writeDW;
1351
1352 case MIPS16e_sdrasp_func:
1353 reg = 29; /* GPRSP */
1354 goto writeDW;
1355 }
1356
1357 goto sigbus;
1358
1359 case MIPS16e_swsp_op:
1360 case MIPS16e_lwpc_op:
1361 case MIPS16e_lwsp_op:
1362 reg = reg16to32[mips16inst.ri.rx];
1363 break;
1364
1365 case MIPS16e_i8_op:
1366 if (mips16inst.i8.func != MIPS16e_swrasp_func)
1367 goto sigbus;
1368 reg = 29; /* GPRSP */
1369 break;
1370
1371 default:
1372 reg = reg16to32[mips16inst.rri.ry];
1373 break;
1374 }
1375
1376 switch (mips16inst.ri.opcode) {
1377
1378 case MIPS16e_lb_op:
1379 case MIPS16e_lbu_op:
1380 case MIPS16e_sb_op:
1381 goto sigbus;
1382
1383 case MIPS16e_lh_op:
1384 if (!access_ok(VERIFY_READ, addr, 2))
1385 goto sigbus;
1386
1387 LoadHW(addr, value, res);
1388 if (res)
1389 goto fault;
1390 MIPS16e_compute_return_epc(regs, &oldinst);
1391 regs->regs[reg] = value;
1392 break;
1393
1394 case MIPS16e_lhu_op:
1395 if (!access_ok(VERIFY_READ, addr, 2))
1396 goto sigbus;
1397
1398 LoadHWU(addr, value, res);
1399 if (res)
1400 goto fault;
1401 MIPS16e_compute_return_epc(regs, &oldinst);
1402 regs->regs[reg] = value;
1403 break;
1404
1405 case MIPS16e_lw_op:
1406 case MIPS16e_lwpc_op:
1407 case MIPS16e_lwsp_op:
1408 if (!access_ok(VERIFY_READ, addr, 4))
1409 goto sigbus;
1410
1411 LoadW(addr, value, res);
1412 if (res)
1413 goto fault;
1414 MIPS16e_compute_return_epc(regs, &oldinst);
1415 regs->regs[reg] = value;
1416 break;
1417
1418 case MIPS16e_lwu_op:
1419#ifdef CONFIG_64BIT
1420 /*
1421 * A 32-bit kernel might be running on a 64-bit processor. But
1422 * if we're on a 32-bit processor and an i-cache incoherency
1423 * or race makes us see a 64-bit instruction here the sdl/sdr
1424 * would blow up, so for now we don't handle unaligned 64-bit
1425 * instructions on 32-bit kernels.
1426 */
1427 if (!access_ok(VERIFY_READ, addr, 4))
1428 goto sigbus;
1429
1430 LoadWU(addr, value, res);
1431 if (res)
1432 goto fault;
1433 MIPS16e_compute_return_epc(regs, &oldinst);
1434 regs->regs[reg] = value;
1435 break;
1436#endif /* CONFIG_64BIT */
1437
1438 /* Cannot handle 64-bit instructions in 32-bit kernel */
1439 goto sigill;
1440
1441 case MIPS16e_ld_op:
1442loadDW:
1443#ifdef CONFIG_64BIT
1444 /*
1445 * A 32-bit kernel might be running on a 64-bit processor. But
1446 * if we're on a 32-bit processor and an i-cache incoherency
1447 * or race makes us see a 64-bit instruction here the sdl/sdr
1448 * would blow up, so for now we don't handle unaligned 64-bit
1449 * instructions on 32-bit kernels.
1450 */
1451 if (!access_ok(VERIFY_READ, addr, 8))
1452 goto sigbus;
1453
1454 LoadDW(addr, value, res);
1455 if (res)
1456 goto fault;
1457 MIPS16e_compute_return_epc(regs, &oldinst);
1458 regs->regs[reg] = value;
1459 break;
1460#endif /* CONFIG_64BIT */
1461
1462 /* Cannot handle 64-bit instructions in 32-bit kernel */
1463 goto sigill;
1464
1465 case MIPS16e_sh_op:
1466 if (!access_ok(VERIFY_WRITE, addr, 2))
1467 goto sigbus;
1468
1469 MIPS16e_compute_return_epc(regs, &oldinst);
1470 value = regs->regs[reg];
1471 StoreHW(addr, value, res);
1472 if (res)
1473 goto fault;
1474 break;
1475
1476 case MIPS16e_sw_op:
1477 case MIPS16e_swsp_op:
1478 case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */
1479 if (!access_ok(VERIFY_WRITE, addr, 4))
1480 goto sigbus;
1481
1482 MIPS16e_compute_return_epc(regs, &oldinst);
1483 value = regs->regs[reg];
1484 StoreW(addr, value, res);
1485 if (res)
1486 goto fault;
1487 break;
1488
1489 case MIPS16e_sd_op:
1490writeDW:
1491#ifdef CONFIG_64BIT
1492 /*
1493 * A 32-bit kernel might be running on a 64-bit processor. But
1494 * if we're on a 32-bit processor and an i-cache incoherency
1495 * or race makes us see a 64-bit instruction here the sdl/sdr
1496 * would blow up, so for now we don't handle unaligned 64-bit
1497 * instructions on 32-bit kernels.
1498 */
1499 if (!access_ok(VERIFY_WRITE, addr, 8))
1500 goto sigbus;
1501
1502 MIPS16e_compute_return_epc(regs, &oldinst);
1503 value = regs->regs[reg];
1504 StoreDW(addr, value, res);
1505 if (res)
1506 goto fault;
1507 break;
1508#endif /* CONFIG_64BIT */
1509
1510 /* Cannot handle 64-bit instructions in 32-bit kernel */
1511 goto sigill;
1512
1513 default:
1514 /*
 1515	 * Pheeee...  We encountered an as yet unknown instruction or
1516 * cache coherence problem. Die sucker, die ...
1517 */
1518 goto sigill;
1519 }
1520
1521#ifdef CONFIG_DEBUG_FS
1522 unaligned_instructions++;
1523#endif
1524
1525 return;
1526
1527fault:
1528 /* roll back jump/branch */
1529 regs->cp0_epc = origpc;
1530 regs->regs[31] = orig31;
1531 /* Did we have an exception handler installed? */
1532 if (fixup_exception(regs))
1533 return;
1534
1535 die_if_kernel("Unhandled kernel unaligned access", regs);
1536 force_sig(SIGSEGV, current);
1537
1538 return;
1539
1540sigbus:
1541 die_if_kernel("Unhandled kernel unaligned access", regs);
1542 force_sig(SIGBUS, current);
1543
1544 return;
1545
1546sigill:
1547 die_if_kernel
1548 ("Unhandled kernel unaligned access or invalid instruction", regs);
1549 force_sig(SIGILL, current);
1550}
511asmlinkage void do_ade(struct pt_regs *regs) 1551asmlinkage void do_ade(struct pt_regs *regs)
512{ 1552{
513 unsigned int __user *pc; 1553 unsigned int __user *pc;
@@ -517,23 +1557,62 @@ asmlinkage void do_ade(struct pt_regs *regs)
517 1, regs, regs->cp0_badvaddr); 1557 1, regs, regs->cp0_badvaddr);
518 /* 1558 /*
519 * Did we catch a fault trying to load an instruction? 1559 * Did we catch a fault trying to load an instruction?
520 * Or are we running in MIPS16 mode?
521 */ 1560 */
522 if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1)) 1561 if (regs->cp0_badvaddr == regs->cp0_epc)
523 goto sigbus; 1562 goto sigbus;
524 1563
525 pc = (unsigned int __user *) exception_epc(regs);
526 if (user_mode(regs) && !test_thread_flag(TIF_FIXADE)) 1564 if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
527 goto sigbus; 1565 goto sigbus;
528 if (unaligned_action == UNALIGNED_ACTION_SIGNAL) 1566 if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
529 goto sigbus; 1567 goto sigbus;
530 else if (unaligned_action == UNALIGNED_ACTION_SHOW)
531 show_registers(regs);
532 1568
533 /* 1569 /*
534 * Do branch emulation only if we didn't forward the exception. 1570 * Do branch emulation only if we didn't forward the exception.
535 * This is all so but ugly ... 1571 * This is all so but ugly ...
536 */ 1572 */
1573
1574 /*
1575 * Are we running in microMIPS mode?
1576 */
1577 if (get_isa16_mode(regs->cp0_epc)) {
1578 /*
1579 * Did we catch a fault trying to load an instruction in
1580 * 16-bit mode?
1581 */
1582 if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
1583 goto sigbus;
1584 if (unaligned_action == UNALIGNED_ACTION_SHOW)
1585 show_registers(regs);
1586
1587 if (cpu_has_mmips) {
1588 seg = get_fs();
1589 if (!user_mode(regs))
1590 set_fs(KERNEL_DS);
1591 emulate_load_store_microMIPS(regs,
1592 (void __user *)regs->cp0_badvaddr);
1593 set_fs(seg);
1594
1595 return;
1596 }
1597
1598 if (cpu_has_mips16) {
1599 seg = get_fs();
1600 if (!user_mode(regs))
1601 set_fs(KERNEL_DS);
1602 emulate_load_store_MIPS16e(regs,
1603 (void __user *)regs->cp0_badvaddr);
1604 set_fs(seg);
1605
1606 return;
1607 }
1608
1609 goto sigbus;
1610 }
1611
1612 if (unaligned_action == UNALIGNED_ACTION_SHOW)
1613 show_registers(regs);
1614 pc = (unsigned int __user *)exception_epc(regs);
1615
537 seg = get_fs(); 1616 seg = get_fs();
538 if (!user_mode(regs)) 1617 if (!user_mode(regs))
539 set_fs(KERNEL_DS); 1618 set_fs(KERNEL_DS);
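All of the emulation above is driven from do_ade(); from user space the classic way to reach it is a plain misaligned pointer access. A tiny user-space sketch of such a trigger (illustrative only; an optimising compiler may legally turn this into byte accesses instead):

#include <stdint.h>

/* Dereferencing a pointer with no alignment guarantee; on MIPS this can
 * raise an address-error exception and land in do_ade() above, which
 * either fixes the access up or delivers a signal, depending on
 * TIF_FIXADE and the unaligned_action setting.
 */
uint32_t read_u32_misaligned(const uint8_t *buf)
{
	return *(const uint32_t *)(buf + 1);
}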
diff --git a/arch/mips/kvm/00README.txt b/arch/mips/kvm/00README.txt
new file mode 100644
index 000000000000..51617e481aa3
--- /dev/null
+++ b/arch/mips/kvm/00README.txt
@@ -0,0 +1,31 @@
1KVM/MIPS Trap & Emulate Release Notes
2=====================================
3
4(1) KVM/MIPS should support MIPS32R2 and beyond. It has been tested on the following platforms:
5 Malta Board with FPGA based 34K
6 Sigma Designs TangoX board with a 24K based 8654 SoC.
7 Malta Board with 74K @ 1GHz
8
9(2) Both Guest kernel and Guest Userspace execute in UM.
10 Guest User address space: 0x00000000 -> 0x40000000
11 Guest Kernel Unmapped: 0x40000000 -> 0x60000000
12 Guest Kernel Mapped: 0x60000000 -> 0x80000000
13
14 Guest Usermode virtual memory is limited to 1GB.
15
 16(3) 16K Page Sizes: Both Host Kernel and Guest Kernel should have the same page size, currently at least 16K.
 17 Note that due to cache aliasing issues, 4K page sizes are NOT supported.
 18
 19(4) No HugeTLB Support
 20 Both the host kernel and Guest kernel should have the page size set to 16K.
 21 This will be implemented in a future release.
 22
 23(5) KVM/MIPS does not have support for SMP Guests
 24 A Linux-3.7-rc2 based SMP guest hangs due to the following code sequence in the generated TLB handlers:
 25 LL/TLBP/SC. Since the TLBP instruction causes a trap, the reservation gets cleared
 26 when we ERET back to the guest. This causes the guest to hang in an infinite loop.
 27 This will be fixed in a future release.
 28
 29(6) Use Host FPU
 30 Currently KVM/MIPS emulates a 24K CPU without an FPU.
 31 This will be fixed in a future release.
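The guest address map in item (2) can be summarised as three fixed windows. A sketch of the boundaries as constants; only the 0x40000000 value appears in the patch itself (as KVM_GUEST_KSEG0 in trap_init above), the other names are illustrative:

/* Guest virtual address map per the notes above (illustrative names). */
#define GUEST_USER_BASE		0x00000000UL	/* guest user space       */
#define GUEST_KSEG0_BASE	0x40000000UL	/* guest kernel, unmapped */
#define GUEST_KSEG2_BASE	0x60000000UL	/* guest kernel, mapped   */
#define GUEST_TOP		0x80000000UL

static int guest_addr_is_kernel(unsigned long gva)
{
	return gva >= GUEST_KSEG0_BASE && gva < GUEST_TOP;
}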
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
new file mode 100644
index 000000000000..2c15590e55f7
--- /dev/null
+++ b/arch/mips/kvm/Kconfig
@@ -0,0 +1,49 @@
1#
2# KVM configuration
3#
4source "virt/kvm/Kconfig"
5
6menuconfig VIRTUALIZATION
7 bool "Virtualization"
8 depends on HAVE_KVM
9 ---help---
10 Say Y here to get to see options for using your Linux host to run
11 other operating systems inside virtual machines (guests).
12 This option alone does not add any kernel code.
13
14 If you say N, all options in this submenu will be skipped and disabled.
15
16if VIRTUALIZATION
17
18config KVM
19 tristate "Kernel-based Virtual Machine (KVM) support"
20 depends on HAVE_KVM
21 select PREEMPT_NOTIFIERS
22 select ANON_INODES
23 select KVM_MMIO
24 ---help---
25 Support for hosting Guest kernels.
26 Currently supported on MIPS32 processors.
27
28config KVM_MIPS_DYN_TRANS
29 bool "KVM/MIPS: Dynamic binary translation to reduce traps"
30 depends on KVM
31 ---help---
 32	  When running in Trap & Emulate mode, patch privileged
33 instructions to reduce the number of traps.
34
35 If unsure, say Y.
36
37config KVM_MIPS_DEBUG_COP0_COUNTERS
38 bool "Maintain counters for COP0 accesses"
39 depends on KVM
40 ---help---
41 Maintain statistics for Guest COP0 accesses.
42 A histogram of COP0 accesses is printed when the VM is
 43	  shut down.
44
45 If unsure, say N.
46
47source drivers/vhost/Kconfig
48
49endif # VIRTUALIZATION
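These symbols are consumed from C in the usual kernel way, via CONFIG_* preprocessor guards. A sketch of how code conditional on the COP0-counters option might look; kvm_mips_count_cop0_access() is a hypothetical helper invented for the illustration, not something defined by this patch:

/* Sketch: conditional compilation on the Kconfig option defined above.
 * kvm_mips_count_cop0_access() is a hypothetical helper.
 */
static inline void note_cop0_access(struct kvm_vcpu *vcpu, int reg)
{
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
	kvm_mips_count_cop0_access(vcpu, reg);
#else
	(void)vcpu;
	(void)reg;
#endif
}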
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
new file mode 100644
index 000000000000..78d87bbc99db
--- /dev/null
+++ b/arch/mips/kvm/Makefile
@@ -0,0 +1,13 @@
1# Makefile for KVM support for MIPS
2#
3
4common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
5
6EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
7
8kvm-objs := $(common-objs) kvm_mips.o kvm_mips_emul.o kvm_locore.o \
9 kvm_mips_int.o kvm_mips_stats.o kvm_mips_commpage.o \
10 kvm_mips_dyntrans.o kvm_trap_emul.o
11
12obj-$(CONFIG_KVM) += kvm.o
13obj-y += kvm_cb.o kvm_tlb.o
diff --git a/arch/mips/kvm/kvm_cb.c b/arch/mips/kvm/kvm_cb.c
new file mode 100644
index 000000000000..313c2e37b978
--- /dev/null
+++ b/arch/mips/kvm/kvm_cb.c
@@ -0,0 +1,14 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7 * Authors: Yann Le Du <ledu@kymasys.com>
8 */
9
10#include <linux/export.h>
11#include <linux/kvm_host.h>
12
13struct kvm_mips_callbacks *kvm_mips_callbacks;
14EXPORT_SYMBOL(kvm_mips_callbacks);
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
new file mode 100644
index 000000000000..dca2aa665993
--- /dev/null
+++ b/arch/mips/kvm/kvm_locore.S
@@ -0,0 +1,650 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* Main entry point for the guest, exception handling.
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <asm/asm.h>
13#include <asm/asmmacro.h>
14#include <asm/regdef.h>
15#include <asm/mipsregs.h>
16#include <asm/stackframe.h>
17#include <asm/asm-offsets.h>
18
19
20#define _C_LABEL(x) x
21#define MIPSX(name) mips32_ ## name
22#define CALLFRAME_SIZ 32
23
24/*
25 * VECTOR
26 * exception vector entrypoint
27 */
28#define VECTOR(x, regmask) \
29 .ent _C_LABEL(x),0; \
30 EXPORT(x);
31
32#define VECTOR_END(x) \
33 EXPORT(x);
34
35/* Overload, Danger Will Robinson!! */
36#define PT_HOST_ASID PT_BVADDR
37#define PT_HOST_USERLOCAL PT_EPC
38
39#define CP0_DDATA_LO $28,3
40#define CP0_EBASE $15,1
41
42#define CP0_INTCTL $12,1
43#define CP0_SRSCTL $12,2
44#define CP0_SRSMAP $12,3
45#define CP0_HWRENA $7,0
46
47/* Resume Flags */
48#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
49
50#define RESUME_GUEST 0
51#define RESUME_HOST RESUME_FLAG_HOST
52
53/*
54 * __kvm_mips_vcpu_run: entry point to the guest
55 * a0: run
56 * a1: vcpu
57 */
58
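The a0/a1 note above describes the calling convention as seen from C; since MIPS passes the first two arguments in a0 and a1, the C-side prototype is presumably along these lines (a reconstruction, not quoted from this patch):

/* Reconstructed prototype matching the a0 = run, a1 = vcpu note above. */
int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);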
59FEXPORT(__kvm_mips_vcpu_run)
60 .set push
61 .set noreorder
62 .set noat
63
64 /* k0/k1 not being used in host kernel context */
65 addiu k1,sp, -PT_SIZE
66 LONG_S $0, PT_R0(k1)
67 LONG_S $1, PT_R1(k1)
68 LONG_S $2, PT_R2(k1)
69 LONG_S $3, PT_R3(k1)
70
71 LONG_S $4, PT_R4(k1)
72 LONG_S $5, PT_R5(k1)
73 LONG_S $6, PT_R6(k1)
74 LONG_S $7, PT_R7(k1)
75
76 LONG_S $8, PT_R8(k1)
77 LONG_S $9, PT_R9(k1)
78 LONG_S $10, PT_R10(k1)
79 LONG_S $11, PT_R11(k1)
80 LONG_S $12, PT_R12(k1)
81 LONG_S $13, PT_R13(k1)
82 LONG_S $14, PT_R14(k1)
83 LONG_S $15, PT_R15(k1)
84 LONG_S $16, PT_R16(k1)
85 LONG_S $17, PT_R17(k1)
86
87 LONG_S $18, PT_R18(k1)
88 LONG_S $19, PT_R19(k1)
89 LONG_S $20, PT_R20(k1)
90 LONG_S $21, PT_R21(k1)
91 LONG_S $22, PT_R22(k1)
92 LONG_S $23, PT_R23(k1)
93 LONG_S $24, PT_R24(k1)
94 LONG_S $25, PT_R25(k1)
95
96 /* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */
97
98 LONG_S $28, PT_R28(k1)
99 LONG_S $29, PT_R29(k1)
100 LONG_S $30, PT_R30(k1)
101 LONG_S $31, PT_R31(k1)
102
103 /* Save hi/lo */
104 mflo v0
105 LONG_S v0, PT_LO(k1)
106 mfhi v1
107 LONG_S v1, PT_HI(k1)
108
109 /* Save host status */
110 mfc0 v0, CP0_STATUS
111 LONG_S v0, PT_STATUS(k1)
112
113 /* Save host ASID, shove it into the BVADDR location */
114 mfc0 v1,CP0_ENTRYHI
115 andi v1, 0xff
116 LONG_S v1, PT_HOST_ASID(k1)
117
118 /* Save DDATA_LO, will be used to store pointer to vcpu */
119 mfc0 v1, CP0_DDATA_LO
120 LONG_S v1, PT_HOST_USERLOCAL(k1)
121
122 /* DDATA_LO has pointer to vcpu */
123 mtc0 a1,CP0_DDATA_LO
124
125 /* Offset into vcpu->arch */
126 addiu k1, a1, VCPU_HOST_ARCH
127
128 /* Save the host stack to VCPU, used for exception processing when we exit from the Guest */
129 LONG_S sp, VCPU_HOST_STACK(k1)
130
131 /* Save the kernel gp as well */
132 LONG_S gp, VCPU_HOST_GP(k1)
133
134 /* Set up the status register for running the guest in UM; interrupts are disabled */
135 li k0,(ST0_EXL | KSU_USER| ST0_BEV)
136 mtc0 k0,CP0_STATUS
137 ehb
138
139 /* load up the new EBASE */
140 LONG_L k0, VCPU_GUEST_EBASE(k1)
141 mtc0 k0,CP0_EBASE
142
143 /* Now that the new EBASE has been loaded, unset BEV, set interrupt mask as it was
144 * but make sure that timer interrupts are enabled
145 */
146 li k0,(ST0_EXL | KSU_USER | ST0_IE)
147 andi v0, v0, ST0_IM
148 or k0, k0, v0
149 mtc0 k0,CP0_STATUS
150 ehb
151
152
153 /* Set Guest EPC */
154 LONG_L t0, VCPU_PC(k1)
155 mtc0 t0, CP0_EPC
156
157FEXPORT(__kvm_mips_load_asid)
158 /* Set the ASID for the Guest Kernel */
159 sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
160 /* addresses shift to 0x80000000 */
161 bltz t0, 1f /* If kernel */
162 addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
163 addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */
1641:
165 /* t1: contains the base of the ASID array, need to get the cpu id */
166 LONG_L t2, TI_CPU($28) /* smp_processor_id */
167 sll t2, t2, 2 /* x4 */
168 addu t3, t1, t2
169 LONG_L k0, (t3)
170 andi k0, k0, 0xff
171 mtc0 k0,CP0_ENTRYHI
172 ehb
173
174 /* Disable RDHWR access */
175 mtc0 zero, CP0_HWRENA
176
177 /* Now load up the Guest Context from VCPU */
178 LONG_L $1, VCPU_R1(k1)
179 LONG_L $2, VCPU_R2(k1)
180 LONG_L $3, VCPU_R3(k1)
181
182 LONG_L $4, VCPU_R4(k1)
183 LONG_L $5, VCPU_R5(k1)
184 LONG_L $6, VCPU_R6(k1)
185 LONG_L $7, VCPU_R7(k1)
186
187 LONG_L $8, VCPU_R8(k1)
188 LONG_L $9, VCPU_R9(k1)
189 LONG_L $10, VCPU_R10(k1)
190 LONG_L $11, VCPU_R11(k1)
191 LONG_L $12, VCPU_R12(k1)
192 LONG_L $13, VCPU_R13(k1)
193 LONG_L $14, VCPU_R14(k1)
194 LONG_L $15, VCPU_R15(k1)
195 LONG_L $16, VCPU_R16(k1)
196 LONG_L $17, VCPU_R17(k1)
197 LONG_L $18, VCPU_R18(k1)
198 LONG_L $19, VCPU_R19(k1)
199 LONG_L $20, VCPU_R20(k1)
200 LONG_L $21, VCPU_R21(k1)
201 LONG_L $22, VCPU_R22(k1)
202 LONG_L $23, VCPU_R23(k1)
203 LONG_L $24, VCPU_R24(k1)
204 LONG_L $25, VCPU_R25(k1)
205
206 /* k0/k1 loaded up later */
207
208 LONG_L $28, VCPU_R28(k1)
209 LONG_L $29, VCPU_R29(k1)
210 LONG_L $30, VCPU_R30(k1)
211 LONG_L $31, VCPU_R31(k1)
212
213 /* Restore hi/lo */
214 LONG_L k0, VCPU_LO(k1)
215 mtlo k0
216
217 LONG_L k0, VCPU_HI(k1)
218 mthi k0
219
220FEXPORT(__kvm_mips_load_k0k1)
221 /* Restore the guest's k0/k1 registers */
222 LONG_L k0, VCPU_R26(k1)
223 LONG_L k1, VCPU_R27(k1)
224
225 /* Jump to guest */
226 eret
227 .set pop
228
229VECTOR(MIPSX(exception), unknown)
230/*
231 * Find out what mode we came from and jump to the proper handler.
232 */
233 .set push
234 .set noat
235 .set noreorder
236 mtc0 k0, CP0_ERROREPC #01: Save guest k0
237 ehb #02:
238
239 mfc0 k0, CP0_EBASE #02: Get EBASE
240 srl k0, k0, 10 #03: Get rid of CPUNum
241 sll k0, k0, 10 #04
242 LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000
243 addiu k0, k0, 0x2000 #06: Exception handler is installed @ offset 0x2000
244 j k0 #07: jump to the function
245 nop #08: branch delay slot
246 .set pop
247VECTOR_END(MIPSX(exceptionEnd))
248.end MIPSX(exception)
249
250/*
251 * Generic Guest exception handler. We end up here when the guest
252 * does something that causes a trap to kernel mode.
253 *
254 */
255NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
256 .set push
257 .set noat
258 .set noreorder
259
260 /* Get the VCPU pointer from DDATA_LO */
261 mfc0 k1, CP0_DDATA_LO
262 addiu k1, k1, VCPU_HOST_ARCH
263
264 /* Start saving Guest context to VCPU */
265 LONG_S $0, VCPU_R0(k1)
266 LONG_S $1, VCPU_R1(k1)
267 LONG_S $2, VCPU_R2(k1)
268 LONG_S $3, VCPU_R3(k1)
269 LONG_S $4, VCPU_R4(k1)
270 LONG_S $5, VCPU_R5(k1)
271 LONG_S $6, VCPU_R6(k1)
272 LONG_S $7, VCPU_R7(k1)
273 LONG_S $8, VCPU_R8(k1)
274 LONG_S $9, VCPU_R9(k1)
275 LONG_S $10, VCPU_R10(k1)
276 LONG_S $11, VCPU_R11(k1)
277 LONG_S $12, VCPU_R12(k1)
278 LONG_S $13, VCPU_R13(k1)
279 LONG_S $14, VCPU_R14(k1)
280 LONG_S $15, VCPU_R15(k1)
281 LONG_S $16, VCPU_R16(k1)
282 LONG_S $17, VCPU_R17(k1)
283 LONG_S $18, VCPU_R18(k1)
284 LONG_S $19, VCPU_R19(k1)
285 LONG_S $20, VCPU_R20(k1)
286 LONG_S $21, VCPU_R21(k1)
287 LONG_S $22, VCPU_R22(k1)
288 LONG_S $23, VCPU_R23(k1)
289 LONG_S $24, VCPU_R24(k1)
290 LONG_S $25, VCPU_R25(k1)
291
292 /* Guest k0/k1 saved later */
293
294 LONG_S $28, VCPU_R28(k1)
295 LONG_S $29, VCPU_R29(k1)
296 LONG_S $30, VCPU_R30(k1)
297 LONG_S $31, VCPU_R31(k1)
298
299 /* We need to save hi/lo and restore them on
300 * the way out
301 */
302 mfhi t0
303 LONG_S t0, VCPU_HI(k1)
304
305 mflo t0
306 LONG_S t0, VCPU_LO(k1)
307
308 /* Finally save guest k0/k1 to VCPU */
309 mfc0 t0, CP0_ERROREPC
310 LONG_S t0, VCPU_R26(k1)
311
312 /* Get GUEST k1 and save it in VCPU */
313 la t1, ~0x2ff
314 mfc0 t0, CP0_EBASE
315 and t0, t0, t1
316 LONG_L t0, 0x3000(t0)
317 LONG_S t0, VCPU_R27(k1)
318
319 /* Now that context has been saved, we can use other registers */
320
321 /* Restore vcpu */
322 mfc0 a1, CP0_DDATA_LO
323 move s1, a1
324
325 /* Restore run (vcpu->run) */
326 LONG_L a0, VCPU_RUN(a1)
327 /* Save pointer to run in s0; s0 is callee-saved, so it survives the call into C */
328 move s0, a0
329
330
331 /* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process the exception */
332 mfc0 k0,CP0_EPC
333 LONG_S k0, VCPU_PC(k1)
334
335 mfc0 k0, CP0_BADVADDR
336 LONG_S k0, VCPU_HOST_CP0_BADVADDR(k1)
337
338 mfc0 k0, CP0_CAUSE
339 LONG_S k0, VCPU_HOST_CP0_CAUSE(k1)
340
341 mfc0 k0, CP0_ENTRYHI
342 LONG_S k0, VCPU_HOST_ENTRYHI(k1)
343
344 /* Now restore the host state just enough to run the handlers */
345
346 /* Switch EBASE to the one used by Linux */
347 /* load up the host EBASE */
348 mfc0 v0, CP0_STATUS
349
350 .set at
351 or k0, v0, ST0_BEV
352 .set noat
353
354 mtc0 k0, CP0_STATUS
355 ehb
356
357 LONG_L k0, VCPU_HOST_EBASE(k1)
358 mtc0 k0,CP0_EBASE
359
360
361 /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
362 .set at
363 and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
364 or v0, v0, ST0_CU0
365 .set noat
366 mtc0 v0, CP0_STATUS
367 ehb
368
369 /* Load up host GP */
370 LONG_L gp, VCPU_HOST_GP(k1)
371
372 /* Need a stack before we can jump to "C" */
373 LONG_L sp, VCPU_HOST_STACK(k1)
374
375 /* Point sp at the host state saved on entry (in __kvm_mips_vcpu_run) */
376 addiu sp,sp, -PT_SIZE
377
378 /* XXXKYMA: do we need to load the host ASID? Maybe not, because the
379 * kernel entries are marked GLOBAL; need to verify.
380 */
381
382 /* Restore host DDATA_LO */
383 LONG_L k0, PT_HOST_USERLOCAL(sp)
384 mtc0 k0, CP0_DDATA_LO
385
386 /* Restore RDHWR access */
387 la k0, 0x2000000F
388 mtc0 k0, CP0_HWRENA
389
390 /* Jump to handler */
391FEXPORT(__kvm_mips_jump_to_handler)
392 /* XXXKYMA: not sure if this is safe, how large is the stack?? */
393 /* Now jump to kvm_mips_handle_exit() to see if we can deal with this in the kernel */
394 la t9,kvm_mips_handle_exit
395 jalr.hb t9
396 addiu sp,sp, -CALLFRAME_SIZ /* BD Slot */
397
398 /* Return from handler; make sure interrupts are disabled */
399 di
400 ehb
401
402 /* XXXKYMA: k0/k1 could have been blown away if we processed an exception
403 * while we were handling the exception from the guest, reload k1
404 */
405 move k1, s1
406 addiu k1, k1, VCPU_HOST_ARCH
407
408 /* Check return value, should tell us if we are returning to the host (handle I/O etc)
409 * or resuming the guest
410 */
411 andi t0, v0, RESUME_HOST
412 bnez t0, __kvm_mips_return_to_host
413 nop
414
415__kvm_mips_return_to_guest:
416 /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
417 mtc0 s1, CP0_DDATA_LO
418
419 /* Load up the Guest EBASE to minimize the window where BEV is set */
420 LONG_L t0, VCPU_GUEST_EBASE(k1)
421
422 /* Switch EBASE back to the one used by KVM */
423 mfc0 v1, CP0_STATUS
424 .set at
425 or k0, v1, ST0_BEV
426 .set noat
427 mtc0 k0, CP0_STATUS
428 ehb
429 mtc0 t0,CP0_EBASE
430
431 /* Set up the status register for running the guest in UM */
432 .set at
433 or v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
434 and v1, v1, ~ST0_CU0
435 .set noat
436 mtc0 v1, CP0_STATUS
437 ehb
438
439
440 /* Set Guest EPC */
441 LONG_L t0, VCPU_PC(k1)
442 mtc0 t0, CP0_EPC
443
444 /* Set the ASID for the Guest Kernel */
445 sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
446 /* addresses shift to 0x80000000 */
447 bltz t0, 1f /* If kernel */
448 addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
449 addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */
4501:
451 /* t1: contains the base of the ASID array, need to get the cpu id */
452 LONG_L t2, TI_CPU($28) /* smp_processor_id */
453 sll t2, t2, 2 /* x4 */
454 addu t3, t1, t2
455 LONG_L k0, (t3)
456 andi k0, k0, 0xff
457 mtc0 k0,CP0_ENTRYHI
458 ehb
459
460 /* Disable RDHWR access */
461 mtc0 zero, CP0_HWRENA
462
463 /* load the guest context from VCPU and return */
464 LONG_L $0, VCPU_R0(k1)
465 LONG_L $1, VCPU_R1(k1)
466 LONG_L $2, VCPU_R2(k1)
467 LONG_L $3, VCPU_R3(k1)
468 LONG_L $4, VCPU_R4(k1)
469 LONG_L $5, VCPU_R5(k1)
470 LONG_L $6, VCPU_R6(k1)
471 LONG_L $7, VCPU_R7(k1)
472 LONG_L $8, VCPU_R8(k1)
473 LONG_L $9, VCPU_R9(k1)
474 LONG_L $10, VCPU_R10(k1)
475 LONG_L $11, VCPU_R11(k1)
476 LONG_L $12, VCPU_R12(k1)
477 LONG_L $13, VCPU_R13(k1)
478 LONG_L $14, VCPU_R14(k1)
479 LONG_L $15, VCPU_R15(k1)
480 LONG_L $16, VCPU_R16(k1)
481 LONG_L $17, VCPU_R17(k1)
482 LONG_L $18, VCPU_R18(k1)
483 LONG_L $19, VCPU_R19(k1)
484 LONG_L $20, VCPU_R20(k1)
485 LONG_L $21, VCPU_R21(k1)
486 LONG_L $22, VCPU_R22(k1)
487 LONG_L $23, VCPU_R23(k1)
488 LONG_L $24, VCPU_R24(k1)
489 LONG_L $25, VCPU_R25(k1)
490
491 /* k0/k1 loaded later */
492 LONG_L $28, VCPU_R28(k1)
493 LONG_L $29, VCPU_R29(k1)
494 LONG_L $30, VCPU_R30(k1)
495 LONG_L $31, VCPU_R31(k1)
496
497FEXPORT(__kvm_mips_skip_guest_restore)
498 LONG_L k0, VCPU_HI(k1)
499 mthi k0
500
501 LONG_L k0, VCPU_LO(k1)
502 mtlo k0
503
504 LONG_L k0, VCPU_R26(k1)
505 LONG_L k1, VCPU_R27(k1)
506
507 eret
508
509__kvm_mips_return_to_host:
510 /* EBASE is already pointing to Linux */
511 LONG_L k1, VCPU_HOST_STACK(k1)
512 addiu k1,k1, -PT_SIZE
513
514 /* Restore host DDATA_LO */
515 LONG_L k0, PT_HOST_USERLOCAL(k1)
516 mtc0 k0, CP0_DDATA_LO
517
518 /* Restore host ASID */
519 LONG_L k0, PT_HOST_ASID(sp)
520 andi k0, 0xff
521 mtc0 k0,CP0_ENTRYHI
522 ehb
523
524 /* Load context saved on the host stack */
525 LONG_L $0, PT_R0(k1)
526 LONG_L $1, PT_R1(k1)
527
528 /* r2/v0 is the return code, shift it down by 2 (arithmetic) to recover the err code */
529 sra k0, v0, 2
530 move $2, k0
531
532 LONG_L $3, PT_R3(k1)
533 LONG_L $4, PT_R4(k1)
534 LONG_L $5, PT_R5(k1)
535 LONG_L $6, PT_R6(k1)
536 LONG_L $7, PT_R7(k1)
537 LONG_L $8, PT_R8(k1)
538 LONG_L $9, PT_R9(k1)
539 LONG_L $10, PT_R10(k1)
540 LONG_L $11, PT_R11(k1)
541 LONG_L $12, PT_R12(k1)
542 LONG_L $13, PT_R13(k1)
543 LONG_L $14, PT_R14(k1)
544 LONG_L $15, PT_R15(k1)
545 LONG_L $16, PT_R16(k1)
546 LONG_L $17, PT_R17(k1)
547 LONG_L $18, PT_R18(k1)
548 LONG_L $19, PT_R19(k1)
549 LONG_L $20, PT_R20(k1)
550 LONG_L $21, PT_R21(k1)
551 LONG_L $22, PT_R22(k1)
552 LONG_L $23, PT_R23(k1)
553 LONG_L $24, PT_R24(k1)
554 LONG_L $25, PT_R25(k1)
555
556 /* Host k0/k1 were not saved */
557
558 LONG_L $28, PT_R28(k1)
559 LONG_L $29, PT_R29(k1)
560 LONG_L $30, PT_R30(k1)
561
562 LONG_L k0, PT_HI(k1)
563 mthi k0
564
565 LONG_L k0, PT_LO(k1)
566 mtlo k0
567
568 /* Restore RDHWR access */
569 la k0, 0x2000000F
570 mtc0 k0, CP0_HWRENA
571
572
573 /* Restore RA, which is the address we will return to */
574 LONG_L ra, PT_R31(k1)
575 j ra
576 nop
577
578 .set pop
579VECTOR_END(MIPSX(GuestExceptionEnd))
580.end MIPSX(GuestException)
581
582MIPSX(exceptions):
583 ####
584 ##### The exception handlers.
585 #####
586 .word _C_LABEL(MIPSX(GuestException)) # 0
587 .word _C_LABEL(MIPSX(GuestException)) # 1
588 .word _C_LABEL(MIPSX(GuestException)) # 2
589 .word _C_LABEL(MIPSX(GuestException)) # 3
590 .word _C_LABEL(MIPSX(GuestException)) # 4
591 .word _C_LABEL(MIPSX(GuestException)) # 5
592 .word _C_LABEL(MIPSX(GuestException)) # 6
593 .word _C_LABEL(MIPSX(GuestException)) # 7
594 .word _C_LABEL(MIPSX(GuestException)) # 8
595 .word _C_LABEL(MIPSX(GuestException)) # 9
596 .word _C_LABEL(MIPSX(GuestException)) # 10
597 .word _C_LABEL(MIPSX(GuestException)) # 11
598 .word _C_LABEL(MIPSX(GuestException)) # 12
599 .word _C_LABEL(MIPSX(GuestException)) # 13
600 .word _C_LABEL(MIPSX(GuestException)) # 14
601 .word _C_LABEL(MIPSX(GuestException)) # 15
602 .word _C_LABEL(MIPSX(GuestException)) # 16
603 .word _C_LABEL(MIPSX(GuestException)) # 17
604 .word _C_LABEL(MIPSX(GuestException)) # 18
605 .word _C_LABEL(MIPSX(GuestException)) # 19
606 .word _C_LABEL(MIPSX(GuestException)) # 20
607 .word _C_LABEL(MIPSX(GuestException)) # 21
608 .word _C_LABEL(MIPSX(GuestException)) # 22
609 .word _C_LABEL(MIPSX(GuestException)) # 23
610 .word _C_LABEL(MIPSX(GuestException)) # 24
611 .word _C_LABEL(MIPSX(GuestException)) # 25
612 .word _C_LABEL(MIPSX(GuestException)) # 26
613 .word _C_LABEL(MIPSX(GuestException)) # 27
614 .word _C_LABEL(MIPSX(GuestException)) # 28
615 .word _C_LABEL(MIPSX(GuestException)) # 29
616 .word _C_LABEL(MIPSX(GuestException)) # 30
617 .word _C_LABEL(MIPSX(GuestException)) # 31
618
619
620/* This routine makes changes to the instruction stream effective to the hardware.
621 * It should be called after the instruction stream is written.
622 * On return, the new instructions are effective.
623 * Inputs:
624 * a0 = Start address of new instruction stream
625 * a1 = Size, in bytes, of new instruction stream
626 */
627
628#define HW_SYNCI_Step $1
629LEAF(MIPSX(SyncICache))
630 .set push
631 .set mips32r2
632 beq a1, zero, 20f
633 nop
634 addu a1, a0, a1
635 rdhwr v0, HW_SYNCI_Step
636 beq v0, zero, 20f
637 nop
638
63910:
640 synci 0(a0)
641 addu a0, a0, v0
642 sltu v1, a0, a1
643 bne v1, zero, 10b
644 nop
645 sync
64620:
647 jr.hb ra
648 nop
649 .set pop
650END(MIPSX(SyncICache))
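The routine above is invoked from the C code added later in this patch (kvm_arch_vcpu_create() in kvm_mips.c and the translators in kvm_mips_dyntrans.c) after instruction memory has been rewritten. A sketch of that usage, with the prototype shape inferred from those call sites rather than quoted from a header:

    /* a0 = start address, a1 = size in bytes, per the comment block above */
    void mips32_SyncICache(unsigned long start, unsigned long size);

    /* e.g. after copying the exception handlers into gebase: */
    mips32_SyncICache((unsigned long)gebase, ALIGN(size, PAGE_SIZE));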
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
new file mode 100644
index 000000000000..e0dad0289797
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips.c
@@ -0,0 +1,958 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * KVM/MIPS: MIPS specific KVM APIs
7 *
8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/module.h>
15#include <linux/vmalloc.h>
16#include <linux/fs.h>
17#include <linux/bootmem.h>
18#include <asm/page.h>
19#include <asm/cacheflush.h>
20#include <asm/mmu_context.h>
21
22#include <linux/kvm_host.h>
23
24#include "kvm_mips_int.h"
25#include "kvm_mips_comm.h"
26
27#define CREATE_TRACE_POINTS
28#include "trace.h"
29
30#ifndef VECTORSPACING
31#define VECTORSPACING 0x100 /* for EI/VI mode */
32#endif
33
34#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
35struct kvm_stats_debugfs_item debugfs_entries[] = {
36 { "wait", VCPU_STAT(wait_exits) },
37 { "cache", VCPU_STAT(cache_exits) },
38 { "signal", VCPU_STAT(signal_exits) },
39 { "interrupt", VCPU_STAT(int_exits) },
40 { "cop_unsuable", VCPU_STAT(cop_unusable_exits) },
41 { "tlbmod", VCPU_STAT(tlbmod_exits) },
42 { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
43 { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
44 { "addrerr_st", VCPU_STAT(addrerr_st_exits) },
45 { "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
46 { "syscall", VCPU_STAT(syscall_exits) },
47 { "resvd_inst", VCPU_STAT(resvd_inst_exits) },
48 { "break_inst", VCPU_STAT(break_inst_exits) },
49 { "flush_dcache", VCPU_STAT(flush_dcache_exits) },
50 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
51 {NULL}
52};
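/*
 * These entries are consumed by the generic KVM debugfs support, so the
 * per-vcpu exit counters above normally show up as files under the kvm
 * debugfs directory (typically /sys/kernel/debug/kvm/) once KVM is loaded.
 */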
53
54static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
55{
56 int i;
57 for_each_possible_cpu(i) {
58 vcpu->arch.guest_kernel_asid[i] = 0;
59 vcpu->arch.guest_user_asid[i] = 0;
60 }
61 return 0;
62}
63
64gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
65{
66 return gfn;
67}
68
69/* XXXKYMA: We are simulating a processor that has the WII bit set in Config7, so we
70 * are "runnable" if interrupts are pending
71 */
72int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
73{
74 return !!(vcpu->arch.pending_exceptions);
75}
76
77int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
78{
79 return 1;
80}
81
82int kvm_arch_hardware_enable(void *garbage)
83{
84 return 0;
85}
86
87void kvm_arch_hardware_disable(void *garbage)
88{
89}
90
91int kvm_arch_hardware_setup(void)
92{
93 return 0;
94}
95
96void kvm_arch_hardware_unsetup(void)
97{
98}
99
100void kvm_arch_check_processor_compat(void *rtn)
101{
102 int *r = (int *)rtn;
103 *r = 0;
104 return;
105}
106
107static void kvm_mips_init_tlbs(struct kvm *kvm)
108{
109 unsigned long wired;
110
111 /* Add a wired entry to the TLB; it is used to map the commpage to the Guest kernel */
112 wired = read_c0_wired();
113 write_c0_wired(wired + 1);
114 mtc0_tlbw_hazard();
115 kvm->arch.commpage_tlb = wired;
116
117 kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
118 kvm->arch.commpage_tlb);
119}
120
121static void kvm_mips_init_vm_percpu(void *arg)
122{
123 struct kvm *kvm = (struct kvm *)arg;
124
125 kvm_mips_init_tlbs(kvm);
126 kvm_mips_callbacks->vm_init(kvm);
127
128}
129
130int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
131{
132 if (atomic_inc_return(&kvm_mips_instance) == 1) {
133 kvm_info("%s: 1st KVM instance, setup host TLB parameters\n",
134 __func__);
135 on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
136 }
137
138
139 return 0;
140}
141
142void kvm_mips_free_vcpus(struct kvm *kvm)
143{
144 unsigned int i;
145 struct kvm_vcpu *vcpu;
146
147 /* Put the pages we reserved for the guest pmap */
148 for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
149 if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
150 kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
151 }
152
153 if (kvm->arch.guest_pmap)
154 kfree(kvm->arch.guest_pmap);
155
156 kvm_for_each_vcpu(i, vcpu, kvm) {
157 kvm_arch_vcpu_free(vcpu);
158 }
159
160 mutex_lock(&kvm->lock);
161
162 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
163 kvm->vcpus[i] = NULL;
164
165 atomic_set(&kvm->online_vcpus, 0);
166
167 mutex_unlock(&kvm->lock);
168}
169
170void kvm_arch_sync_events(struct kvm *kvm)
171{
172}
173
174static void kvm_mips_uninit_tlbs(void *arg)
175{
176 /* Restore wired count */
177 write_c0_wired(0);
178 mtc0_tlbw_hazard();
179 /* Clear out all the TLBs */
180 kvm_local_flush_tlb_all();
181}
182
183void kvm_arch_destroy_vm(struct kvm *kvm)
184{
185 kvm_mips_free_vcpus(kvm);
186
187 /* If this is the last instance, restore wired count */
188 if (atomic_dec_return(&kvm_mips_instance) == 0) {
189 kvm_info("%s: last KVM instance, restoring TLB parameters\n",
190 __func__);
191 on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
192 }
193}
194
195long
196kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
197{
198 return -EINVAL;
199}
200
201void kvm_arch_free_memslot(struct kvm_memory_slot *free,
202 struct kvm_memory_slot *dont)
203{
204}
205
206int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
207{
208 return 0;
209}
210
211int kvm_arch_prepare_memory_region(struct kvm *kvm,
212 struct kvm_memory_slot *memslot,
213 struct kvm_userspace_memory_region *mem,
214 enum kvm_mr_change change)
215{
216 return 0;
217}
218
219void kvm_arch_commit_memory_region(struct kvm *kvm,
220 struct kvm_userspace_memory_region *mem,
221 const struct kvm_memory_slot *old,
222 enum kvm_mr_change change)
223{
224 unsigned long npages = 0;
225 int i, err = 0;
226
227 kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
228 __func__, kvm, mem->slot, mem->guest_phys_addr,
229 mem->memory_size, mem->userspace_addr);
230
231 /* Setup Guest PMAP table */
232 if (!kvm->arch.guest_pmap) {
233 if (mem->slot == 0)
234 npages = mem->memory_size >> PAGE_SHIFT;
235
236 if (npages) {
237 kvm->arch.guest_pmap_npages = npages;
238 kvm->arch.guest_pmap =
239 kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
240
241 if (!kvm->arch.guest_pmap) {
242 kvm_err("Failed to allocate guest PMAP");
243 err = -ENOMEM;
244 goto out;
245 }
246
247 kvm_info
248 ("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
249 npages, kvm->arch.guest_pmap);
250
251 /* Now setup the page table */
252 for (i = 0; i < npages; i++) {
253 kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
254 }
255 }
256 }
257out:
258 return;
259}
260
261void kvm_arch_flush_shadow_all(struct kvm *kvm)
262{
263}
264
265void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
266 struct kvm_memory_slot *slot)
267{
268}
269
270void kvm_arch_flush_shadow(struct kvm *kvm)
271{
272}
273
274struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
275{
276 extern char mips32_exception[], mips32_exceptionEnd[];
277 extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
278 int err, size, offset;
279 void *gebase;
280 int i;
281
282 struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
283
284 if (!vcpu) {
285 err = -ENOMEM;
286 goto out;
287 }
288
289 err = kvm_vcpu_init(vcpu, kvm, id);
290
291 if (err)
292 goto out_free_cpu;
293
294 kvm_info("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
295
296 /* Allocate space for host mode exception handlers that handle
297 * guest mode exits
298 */
299 if (cpu_has_veic || cpu_has_vint) {
300 size = 0x200 + VECTORSPACING * 64;
301 } else {
302 size = 0x200;
303 }
304
305 /* Save Linux EBASE */
306 vcpu->arch.host_ebase = (void *)read_c0_ebase();
307
308 gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
309
310 if (!gebase) {
311 err = -ENOMEM;
312 goto out_free_cpu;
313 }
314 kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n",
315 ALIGN(size, PAGE_SIZE), gebase);
316
317 /* Save new ebase */
318 vcpu->arch.guest_ebase = gebase;
319
320 /* Copy L1 Guest Exception handler to correct offset */
321
322 /* TLB Refill, EXL = 0 */
323 memcpy(gebase, mips32_exception,
324 mips32_exceptionEnd - mips32_exception);
325
326 /* General Exception Entry point */
327 memcpy(gebase + 0x180, mips32_exception,
328 mips32_exceptionEnd - mips32_exception);
329
330 /* For vectored interrupts poke the exception code @ all offsets 0-7 */
331 for (i = 0; i < 8; i++) {
332 kvm_debug("L1 Vectored handler @ %p\n",
333 gebase + 0x200 + (i * VECTORSPACING));
334 memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
335 mips32_exceptionEnd - mips32_exception);
336 }
337
338 /* General handler, relocate to unmapped space for sanity's sake */
339 offset = 0x2000;
340 kvm_info("Installing KVM Exception handlers @ %p, %#x bytes\n",
341 gebase + offset,
342 mips32_GuestExceptionEnd - mips32_GuestException);
343
344 memcpy(gebase + offset, mips32_GuestException,
345 mips32_GuestExceptionEnd - mips32_GuestException);
346
347 /* Invalidate the icache for these ranges */
348 mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));
349
350 /* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */
351 vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
352
353 if (!vcpu->arch.kseg0_commpage) {
354 err = -ENOMEM;
355 goto out_free_gebase;
356 }
357
358 kvm_info("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
359 kvm_mips_commpage_init(vcpu);
360
361 /* Init */
362 vcpu->arch.last_sched_cpu = -1;
363
364 /* Start off the timer */
365 kvm_mips_emulate_count(vcpu);
366
367 return vcpu;
368
369out_free_gebase:
370 kfree(gebase);
371
372out_free_cpu:
373 kfree(vcpu);
374
375out:
376 return ERR_PTR(err);
377}
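/*
 * Resulting layout of the guest exception base set up above (offsets taken
 * from the memcpy()s in this function and from kvm_locore.S):
 *
 *   gebase + 0x0000 : TLB refill entry          (copy of mips32_exception)
 *   gebase + 0x0180 : general exception entry   (copy of mips32_exception)
 *   gebase + 0x0200 : vectored interrupts 0..7, VECTORSPACING (0x100) apart
 *   gebase + 0x2000 : relocated mips32_GuestException handler
 *   gebase + 0x3000 : scratch word where the stub saves the guest's k1
 */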
378
379void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
380{
381 hrtimer_cancel(&vcpu->arch.comparecount_timer);
382
383 kvm_vcpu_uninit(vcpu);
384
385 kvm_mips_dump_stats(vcpu);
386
387 if (vcpu->arch.guest_ebase)
388 kfree(vcpu->arch.guest_ebase);
389
390 if (vcpu->arch.kseg0_commpage)
391 kfree(vcpu->arch.kseg0_commpage);
392
393}
394
395void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
396{
397 kvm_arch_vcpu_free(vcpu);
398}
399
400int
401kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
402 struct kvm_guest_debug *dbg)
403{
404 return -EINVAL;
405}
406
407int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
408{
409 int r = 0;
410 sigset_t sigsaved;
411
412 if (vcpu->sigset_active)
413 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
414
415 if (vcpu->mmio_needed) {
416 if (!vcpu->mmio_is_write)
417 kvm_mips_complete_mmio_load(vcpu, run);
418 vcpu->mmio_needed = 0;
419 }
420
421 /* Check if we have any exceptions/interrupts pending */
422 kvm_mips_deliver_interrupts(vcpu,
423 kvm_read_c0_guest_cause(vcpu->arch.cop0));
424
425 local_irq_disable();
426 kvm_guest_enter();
427
428 r = __kvm_mips_vcpu_run(run, vcpu);
429
430 kvm_guest_exit();
431 local_irq_enable();
432
433 if (vcpu->sigset_active)
434 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
435
436 return r;
437}
438
439int
440kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
441{
442 int intr = (int)irq->irq;
443 struct kvm_vcpu *dvcpu = NULL;
444
445 if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
446 kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
447 (int)intr);
448
449 if (irq->cpu == -1)
450 dvcpu = vcpu;
451 else
452 dvcpu = vcpu->kvm->vcpus[irq->cpu];
453
454 if (intr == 2 || intr == 3 || intr == 4) {
455 kvm_mips_callbacks->queue_io_int(dvcpu, irq);
456
457 } else if (intr == -2 || intr == -3 || intr == -4) {
458 kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
459 } else {
460 kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
461 irq->cpu, irq->irq);
462 return -EINVAL;
463 }
464
465 dvcpu->arch.wait = 0;
466
467 if (waitqueue_active(&dvcpu->wq)) {
468 wake_up_interruptible(&dvcpu->wq);
469 }
470
471 return 0;
472}
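/*
 * Illustrative userspace usage of the interrupt convention handled above
 * (vcpu_fd is assumed to come from KVM_CREATE_VCPU; error handling omitted):
 *
 *   struct kvm_mips_interrupt irq = { .cpu = -1, .irq = 4 };
 *   ioctl(vcpu_fd, KVM_INTERRUPT, &irq);     // queue IRQ 4 on this vcpu
 *   // irq.irq = -4 would dequeue the same interrupt
 */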
473
474int
475kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
476 struct kvm_mp_state *mp_state)
477{
478 return -EINVAL;
479}
480
481int
482kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
483 struct kvm_mp_state *mp_state)
484{
485 return -EINVAL;
486}
487
488long
489kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
490{
491 struct kvm_vcpu *vcpu = filp->private_data;
492 void __user *argp = (void __user *)arg;
493 long r;
494 int intr;
495
496 switch (ioctl) {
497 case KVM_NMI:
498 /* Treat the NMI as a CPU reset */
499 r = kvm_mips_reset_vcpu(vcpu);
500 break;
501 case KVM_INTERRUPT:
502 {
503 struct kvm_mips_interrupt irq;
504 r = -EFAULT;
505 if (copy_from_user(&irq, argp, sizeof(irq)))
506 goto out;
507
508 intr = (int)irq.irq;
509
510 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
511 irq.irq);
512
513 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
514 break;
515 }
516 default:
517 r = -EINVAL;
518 }
519
520out:
521 return r;
522}
523
524/*
525 * Get (and clear) the dirty memory log for a memory slot.
526 */
527int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
528{
529 struct kvm_memory_slot *memslot;
530 unsigned long ga, ga_end;
531 int is_dirty = 0;
532 int r;
533 unsigned long n;
534
535 mutex_lock(&kvm->slots_lock);
536
537 r = kvm_get_dirty_log(kvm, log, &is_dirty);
538 if (r)
539 goto out;
540
541 /* If nothing is dirty, don't bother messing with page tables. */
542 if (is_dirty) {
543 memslot = &kvm->memslots->memslots[log->slot];
544
545 ga = memslot->base_gfn << PAGE_SHIFT;
546 ga_end = ga + (memslot->npages << PAGE_SHIFT);
547
548 printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
549 ga_end);
550
551 n = kvm_dirty_bitmap_bytes(memslot);
552 memset(memslot->dirty_bitmap, 0, n);
553 }
554
555 r = 0;
556out:
557 mutex_unlock(&kvm->slots_lock);
558 return r;
559
560}
561
562long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
563{
564 long r;
565
566 switch (ioctl) {
567 default:
568 r = -EINVAL;
569 }
570
571 return r;
572}
573
574int kvm_arch_init(void *opaque)
575{
576 int ret;
577
578 if (kvm_mips_callbacks) {
579 kvm_err("kvm: module already exists\n");
580 return -EEXIST;
581 }
582
583 ret = kvm_mips_emulation_init(&kvm_mips_callbacks);
584
585 return ret;
586}
587
588void kvm_arch_exit(void)
589{
590 kvm_mips_callbacks = NULL;
591}
592
593int
594kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
595{
596 return -ENOTSUPP;
597}
598
599int
600kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
601{
602 return -ENOTSUPP;
603}
604
605int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
606{
607 return 0;
608}
609
610int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
611{
612 return -ENOTSUPP;
613}
614
615int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
616{
617 return -ENOTSUPP;
618}
619
620int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
621{
622 return VM_FAULT_SIGBUS;
623}
624
625int kvm_dev_ioctl_check_extension(long ext)
626{
627 int r;
628
629 switch (ext) {
630 case KVM_CAP_COALESCED_MMIO:
631 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
632 break;
633 default:
634 r = 0;
635 break;
636 }
637 return r;
638
639}
640
641int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
642{
643 return kvm_mips_pending_timer(vcpu);
644}
645
646int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
647{
648 int i;
649 struct mips_coproc *cop0;
650
651 if (!vcpu)
652 return -1;
653
654 printk("VCPU Register Dump:\n");
655 printk("\tpc = 0x%08lx\n", vcpu->arch.pc);;
656 printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
657
658 for (i = 0; i < 32; i += 4) {
659 printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
660 vcpu->arch.gprs[i],
661 vcpu->arch.gprs[i + 1],
662 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
663 }
664 printk("\thi: 0x%08lx\n", vcpu->arch.hi);
665 printk("\tlo: 0x%08lx\n", vcpu->arch.lo);
666
667 cop0 = vcpu->arch.cop0;
668 printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
669 kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0));
670
671 printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
672
673 return 0;
674}
675
676int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
677{
678 int i;
679
680 for (i = 0; i < 32; i++)
681 vcpu->arch.gprs[i] = regs->gprs[i];
682
683 vcpu->arch.hi = regs->hi;
684 vcpu->arch.lo = regs->lo;
685 vcpu->arch.pc = regs->pc;
686
687 return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs);
688}
689
690int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
691{
692 int i;
693
694 for (i = 0; i < 32; i++)
695 regs->gprs[i] = vcpu->arch.gprs[i];
696
697 regs->hi = vcpu->arch.hi;
698 regs->lo = vcpu->arch.lo;
699 regs->pc = vcpu->arch.pc;
700
701 return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs);
702}
703
704void kvm_mips_comparecount_func(unsigned long data)
705{
706 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
707
708 kvm_mips_callbacks->queue_timer_int(vcpu);
709
710 vcpu->arch.wait = 0;
711 if (waitqueue_active(&vcpu->wq)) {
712 wake_up_interruptible(&vcpu->wq);
713 }
714}
715
716/*
717 * low level hrtimer wake routine.
718 */
719enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
720{
721 struct kvm_vcpu *vcpu;
722
723 vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
724 kvm_mips_comparecount_func((unsigned long) vcpu);
725 hrtimer_forward_now(&vcpu->arch.comparecount_timer,
726 ktime_set(0, MS_TO_NS(10)));
727 return HRTIMER_RESTART;
728}
729
730int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
731{
732 kvm_mips_callbacks->vcpu_init(vcpu);
733 hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
734 HRTIMER_MODE_REL);
735 vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
736 kvm_mips_init_shadow_tlb(vcpu);
737 return 0;
738}
739
740void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
741{
742 return;
743}
744
745int
746kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
747{
748 return 0;
749}
750
751/* Initial guest state */
752int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
753{
754 return kvm_mips_callbacks->vcpu_setup(vcpu);
755}
756
757static
758void kvm_mips_set_c0_status(void)
759{
760 uint32_t status = read_c0_status();
761
762 if (cpu_has_fpu)
763 status |= (ST0_CU1);
764
765 if (cpu_has_dsp)
766 status |= (ST0_MX);
767
768 write_c0_status(status);
769 ehb();
770}
771
772/*
773 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
774 */
775int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
776{
777 uint32_t cause = vcpu->arch.host_cp0_cause;
778 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
779 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
780 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
781 enum emulation_result er = EMULATE_DONE;
782 int ret = RESUME_GUEST;
783
784 /* Set a default exit reason */
785 run->exit_reason = KVM_EXIT_UNKNOWN;
786 run->ready_for_interrupt_injection = 1;
787
788 /* Set the appropriate status bits based on host CPU features, before we hit the scheduler */
789 kvm_mips_set_c0_status();
790
791 local_irq_enable();
792
793 kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
794 cause, opc, run, vcpu);
795
796 /* Do a privilege check; if in UM, most of these exit conditions end up
797 * causing an exception to be delivered to the Guest Kernel
798 */
799 er = kvm_mips_check_privilege(cause, opc, run, vcpu);
800 if (er == EMULATE_PRIV_FAIL) {
801 goto skip_emul;
802 } else if (er == EMULATE_FAIL) {
803 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
804 ret = RESUME_HOST;
805 goto skip_emul;
806 }
807
808 switch (exccode) {
809 case T_INT:
810 kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);
811
812 ++vcpu->stat.int_exits;
813 trace_kvm_exit(vcpu, INT_EXITS);
814
815 if (need_resched()) {
816 cond_resched();
817 }
818
819 ret = RESUME_GUEST;
820 break;
821
822 case T_COP_UNUSABLE:
823 kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);
824
825 ++vcpu->stat.cop_unusable_exits;
826 trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
827 ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
828 /* XXXKYMA: Might need to return to user space */
829 if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) {
830 ret = RESUME_HOST;
831 }
832 break;
833
834 case T_TLB_MOD:
835 ++vcpu->stat.tlbmod_exits;
836 trace_kvm_exit(vcpu, TLBMOD_EXITS);
837 ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
838 break;
839
840 case T_TLB_ST_MISS:
841 kvm_debug
842 ("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
843 cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
844 badvaddr);
845
846 ++vcpu->stat.tlbmiss_st_exits;
847 trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
848 ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
849 break;
850
851 case T_TLB_LD_MISS:
852 kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
853 cause, opc, badvaddr);
854
855 ++vcpu->stat.tlbmiss_ld_exits;
856 trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
857 ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
858 break;
859
860 case T_ADDR_ERR_ST:
861 ++vcpu->stat.addrerr_st_exits;
862 trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
863 ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
864 break;
865
866 case T_ADDR_ERR_LD:
867 ++vcpu->stat.addrerr_ld_exits;
868 trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
869 ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
870 break;
871
872 case T_SYSCALL:
873 ++vcpu->stat.syscall_exits;
874 trace_kvm_exit(vcpu, SYSCALL_EXITS);
875 ret = kvm_mips_callbacks->handle_syscall(vcpu);
876 break;
877
878 case T_RES_INST:
879 ++vcpu->stat.resvd_inst_exits;
880 trace_kvm_exit(vcpu, RESVD_INST_EXITS);
881 ret = kvm_mips_callbacks->handle_res_inst(vcpu);
882 break;
883
884 case T_BREAK:
885 ++vcpu->stat.break_inst_exits;
886 trace_kvm_exit(vcpu, BREAK_INST_EXITS);
887 ret = kvm_mips_callbacks->handle_break(vcpu);
888 break;
889
890 default:
891 kvm_err
892 ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
893 exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
894 kvm_read_c0_guest_status(vcpu->arch.cop0));
895 kvm_arch_vcpu_dump_regs(vcpu);
896 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
897 ret = RESUME_HOST;
898 break;
899
900 }
901
902skip_emul:
903 local_irq_disable();
904
905 if (er == EMULATE_DONE && !(ret & RESUME_HOST))
906 kvm_mips_deliver_interrupts(vcpu, cause);
907
908 if (!(ret & RESUME_HOST)) {
909 /* Only check for signals if not already exiting to userspace */
910 if (signal_pending(current)) {
911 run->exit_reason = KVM_EXIT_INTR;
912 ret = (-EINTR << 2) | RESUME_HOST;
913 ++vcpu->stat.signal_exits;
914 trace_kvm_exit(vcpu, SIGNAL_EXITS);
915 }
916 }
917
918 return ret;
919}
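/*
 * The return value assembled above uses the (errcode << 2 | RESUME_FLAG_*)
 * packing described before kvm_mips_handle_exit(); the locore return path
 * unpacks it with `sra k0, v0, 2` and `andi t0, v0, RESUME_HOST`.  In C
 * terms (illustrative only):
 *
 *   ret = (-EINTR << 2) | RESUME_HOST;   // produce: exit to userspace
 *   errcode = ret >> 2;                  // consume: recovers -EINTR
 *   to_host = ret & RESUME_HOST;         // nonzero => return to the host
 */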
920
921int __init kvm_mips_init(void)
922{
923 int ret;
924
925 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
926
927 if (ret)
928 return ret;
929
930 /* On MIPS, kernel modules are executed from "mapped space", which requires TLBs.
931 * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c)
932 * to avoid the possibility of double faulting. The issue is that the TLB code
933 * references routines that are part of the KVM module,
934 * which are only available once the module is loaded.
935 */
936 kvm_mips_gfn_to_pfn = gfn_to_pfn;
937 kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
938 kvm_mips_is_error_pfn = is_error_pfn;
939
940 pr_info("KVM/MIPS Initialized\n");
941 return 0;
942}
943
944void __exit kvm_mips_exit(void)
945{
946 kvm_exit();
947
948 kvm_mips_gfn_to_pfn = NULL;
949 kvm_mips_release_pfn_clean = NULL;
950 kvm_mips_is_error_pfn = NULL;
951
952 pr_info("KVM/MIPS unloaded\n");
953}
954
955module_init(kvm_mips_init);
956module_exit(kvm_mips_exit);
957
958EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
diff --git a/arch/mips/kvm/kvm_mips_comm.h b/arch/mips/kvm/kvm_mips_comm.h
new file mode 100644
index 000000000000..a4a8c85cc8f7
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_comm.h
@@ -0,0 +1,23 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: commpage: mapped into guest kernel space
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#ifndef __KVM_MIPS_COMMPAGE_H__
13#define __KVM_MIPS_COMMPAGE_H__
14
15struct kvm_mips_commpage {
16 struct mips_coproc cop0; /* COP0 state is mapped into Guest kernel via commpage */
17};
18
19#define KVM_MIPS_COMM_EIDI_OFFSET 0x0
20
21extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
22
23#endif /* __KVM_MIPS_COMMPAGE_H__ */
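The dynamic-translation code in kvm_mips_dyntrans.c (added later in this patch) addresses individual guest COP0 registers by their byte offset inside this structure, roughly as:

    /* byte offset of guest CP0 register (rd, sel) within the commpage */
    offsetof(struct kvm_mips_commpage, cop0) +
            offsetof(struct mips_coproc, reg[rd][sel])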
diff --git a/arch/mips/kvm/kvm_mips_commpage.c b/arch/mips/kvm/kvm_mips_commpage.c
new file mode 100644
index 000000000000..3873b1ecc40f
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_commpage.c
@@ -0,0 +1,37 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* commpage, currently used for Virtual COP0 registers.
7* Mapped into the guest kernel @ 0x0.
8*
9* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
10* Authors: Sanjay Lal <sanjayl@kymasys.com>
11*/
12
13#include <linux/errno.h>
14#include <linux/err.h>
15#include <linux/module.h>
16#include <linux/vmalloc.h>
17#include <linux/fs.h>
18#include <linux/bootmem.h>
19#include <asm/page.h>
20#include <asm/cacheflush.h>
21#include <asm/mmu_context.h>
22
23#include <linux/kvm_host.h>
24
25#include "kvm_mips_comm.h"
26
27void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
28{
29 struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
30 memset(page, 0, sizeof(struct kvm_mips_commpage));
31
32 /* Specific init values for fields */
33 vcpu->arch.cop0 = &page->cop0;
34 memset(vcpu->arch.cop0, 0, sizeof(struct mips_coproc));
35
36 return;
37}
diff --git a/arch/mips/kvm/kvm_mips_dyntrans.c b/arch/mips/kvm/kvm_mips_dyntrans.c
new file mode 100644
index 000000000000..96528e2d1ea6
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_dyntrans.c
@@ -0,0 +1,149 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/kvm_host.h>
15#include <linux/module.h>
16#include <linux/vmalloc.h>
17#include <linux/fs.h>
18#include <linux/bootmem.h>
19
20#include "kvm_mips_comm.h"
21
22#define SYNCI_TEMPLATE 0x041f0000
23#define SYNCI_BASE(x) (((x) >> 21) & 0x1f)
24#define SYNCI_OFFSET(x) ((x) & 0xffff)
25
26#define LW_TEMPLATE 0x8c000000
27#define CLEAR_TEMPLATE 0x00000020
28#define SW_TEMPLATE 0xac000000
29
30int
31kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
32 struct kvm_vcpu *vcpu)
33{
34 int result = 0;
35 unsigned long kseg0_opc;
36 uint32_t synci_inst = 0x0;
37
38 /* Replace the CACHE instruction with a NOP */
39 kseg0_opc =
40 CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
41 (vcpu, (unsigned long) opc));
42 memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
43 mips32_SyncICache(kseg0_opc, 32);
44
45 return result;
46}
47
48/*
49 * Address-based CACHE instructions are transformed into synci(s). A little heavy
50 * for just D-cache invalidates, but avoids an expensive trap
51 */
52int
53kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
54 struct kvm_vcpu *vcpu)
55{
56 int result = 0;
57 unsigned long kseg0_opc;
58 uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;
59
60 base = (inst >> 21) & 0x1f;
61 offset = inst & 0xffff;
62 synci_inst |= (base << 21);
63 synci_inst |= offset;
64
65 kseg0_opc =
66 CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
67 (vcpu, (unsigned long) opc));
68 memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
69 mips32_SyncICache(kseg0_opc, 32);
70
71 return result;
72}
73
74int
75kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
76{
77 int32_t rt, rd, sel;
78 uint32_t mfc0_inst;
79 unsigned long kseg0_opc, flags;
80
81 rt = (inst >> 16) & 0x1f;
82 rd = (inst >> 11) & 0x1f;
83 sel = inst & 0x7;
84
85 if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
86 mfc0_inst = CLEAR_TEMPLATE;
87 mfc0_inst |= ((rt & 0x1f) << 16);
88 } else {
89 mfc0_inst = LW_TEMPLATE;
90 mfc0_inst |= ((rt & 0x1f) << 16);
91 mfc0_inst |=
92 offsetof(struct mips_coproc,
93 reg[rd][sel]) + offsetof(struct kvm_mips_commpage,
94 cop0);
95 }
96
97 if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
98 kseg0_opc =
99 CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
100 (vcpu, (unsigned long) opc));
101 memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
102 mips32_SyncICache(kseg0_opc, 32);
103 } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
104 local_irq_save(flags);
105 memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
106 mips32_SyncICache((unsigned long) opc, 32);
107 local_irq_restore(flags);
108 } else {
109 kvm_err("%s: Invalid address: %p\n", __func__, opc);
110 return -EFAULT;
111 }
112
113 return 0;
114}
115
116int
117kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
118{
119 int32_t rt, rd, sel;
120 uint32_t mtc0_inst = SW_TEMPLATE;
121 unsigned long kseg0_opc, flags;
122
123 rt = (inst >> 16) & 0x1f;
124 rd = (inst >> 11) & 0x1f;
125 sel = inst & 0x7;
126
127 mtc0_inst |= ((rt & 0x1f) << 16);
128 mtc0_inst |=
129 offsetof(struct mips_coproc,
130 reg[rd][sel]) + offsetof(struct kvm_mips_commpage, cop0);
131
132 if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
133 kseg0_opc =
134 CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
135 (vcpu, (unsigned long) opc));
136 memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
137 mips32_SyncICache(kseg0_opc, 32);
138 } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
139 local_irq_save(flags);
140 memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
141 mips32_SyncICache((unsigned long) opc, 32);
142 local_irq_restore(flags);
143 } else {
144 kvm_err("%s: Invalid address: %p\n", __func__, opc);
145 return -EFAULT;
146 }
147
148 return 0;
149}
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
new file mode 100644
index 000000000000..2b2bac9a40aa
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_emul.c
@@ -0,0 +1,1826 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: Instruction/Exception emulation
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/kvm_host.h>
15#include <linux/module.h>
16#include <linux/vmalloc.h>
17#include <linux/fs.h>
18#include <linux/bootmem.h>
19#include <linux/random.h>
20#include <asm/page.h>
21#include <asm/cacheflush.h>
22#include <asm/cpu-info.h>
23#include <asm/mmu_context.h>
24#include <asm/tlbflush.h>
25#include <asm/inst.h>
26
27#undef CONFIG_MIPS_MT
28#include <asm/r4kcache.h>
29#define CONFIG_MIPS_MT
30
31#include "kvm_mips_opcode.h"
32#include "kvm_mips_int.h"
33#include "kvm_mips_comm.h"
34
35#include "trace.h"
36
37/*
38 * Compute the return address and emulate the branch, if required.
39 * This function should be called only when the faulting instruction is in a branch delay slot.
40 */
41unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
42 unsigned long instpc)
43{
44 unsigned int dspcontrol;
45 union mips_instruction insn;
46 struct kvm_vcpu_arch *arch = &vcpu->arch;
47 long epc = instpc;
48 long nextpc = KVM_INVALID_INST;
49
50 if (epc & 3)
51 goto unaligned;
52
53 /*
54 * Read the instruction
55 */
56 insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
57
58 if (insn.word == KVM_INVALID_INST)
59 return KVM_INVALID_INST;
60
61 switch (insn.i_format.opcode) {
62 /*
63 * jr and jalr are in r_format format.
64 */
65 case spec_op:
66 switch (insn.r_format.func) {
67 case jalr_op:
68 arch->gprs[insn.r_format.rd] = epc + 8;
69 /* Fall through */
70 case jr_op:
71 nextpc = arch->gprs[insn.r_format.rs];
72 break;
73 }
74 break;
75
76 /*
77 * This group contains:
78 * bltz_op, bgez_op, bltzl_op, bgezl_op,
79 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
80 */
81 case bcond_op:
82 switch (insn.i_format.rt) {
83 case bltz_op:
84 case bltzl_op:
85 if ((long)arch->gprs[insn.i_format.rs] < 0)
86 epc = epc + 4 + (insn.i_format.simmediate << 2);
87 else
88 epc += 8;
89 nextpc = epc;
90 break;
91
92 case bgez_op:
93 case bgezl_op:
94 if ((long)arch->gprs[insn.i_format.rs] >= 0)
95 epc = epc + 4 + (insn.i_format.simmediate << 2);
96 else
97 epc += 8;
98 nextpc = epc;
99 break;
100
101 case bltzal_op:
102 case bltzall_op:
103 arch->gprs[31] = epc + 8;
104 if ((long)arch->gprs[insn.i_format.rs] < 0)
105 epc = epc + 4 + (insn.i_format.simmediate << 2);
106 else
107 epc += 8;
108 nextpc = epc;
109 break;
110
111 case bgezal_op:
112 case bgezall_op:
113 arch->gprs[31] = epc + 8;
114 if ((long)arch->gprs[insn.i_format.rs] >= 0)
115 epc = epc + 4 + (insn.i_format.simmediate << 2);
116 else
117 epc += 8;
118 nextpc = epc;
119 break;
120 case bposge32_op:
121 if (!cpu_has_dsp)
122 goto sigill;
123
124 dspcontrol = rddsp(0x01);
125
126 if (dspcontrol >= 32) {
127 epc = epc + 4 + (insn.i_format.simmediate << 2);
128 } else
129 epc += 8;
130 nextpc = epc;
131 break;
132 }
133 break;
134
135 /*
136 * These are unconditional and in j_format.
137 */
138 case jal_op:
139 arch->gprs[31] = instpc + 8;
140 case j_op:
141 epc += 4;
142 epc >>= 28;
143 epc <<= 28;
144 epc |= (insn.j_format.target << 2);
145 nextpc = epc;
146 break;
147
148 /*
149 * These are conditional and in i_format.
150 */
151 case beq_op:
152 case beql_op:
153 if (arch->gprs[insn.i_format.rs] ==
154 arch->gprs[insn.i_format.rt])
155 epc = epc + 4 + (insn.i_format.simmediate << 2);
156 else
157 epc += 8;
158 nextpc = epc;
159 break;
160
161 case bne_op:
162 case bnel_op:
163 if (arch->gprs[insn.i_format.rs] !=
164 arch->gprs[insn.i_format.rt])
165 epc = epc + 4 + (insn.i_format.simmediate << 2);
166 else
167 epc += 8;
168 nextpc = epc;
169 break;
170
171 case blez_op: /* not really i_format */
172 case blezl_op:
173 /* rt field assumed to be zero */
174 if ((long)arch->gprs[insn.i_format.rs] <= 0)
175 epc = epc + 4 + (insn.i_format.simmediate << 2);
176 else
177 epc += 8;
178 nextpc = epc;
179 break;
180
181 case bgtz_op:
182 case bgtzl_op:
183 /* rt field assumed to be zero */
184 if ((long)arch->gprs[insn.i_format.rs] > 0)
185 epc = epc + 4 + (insn.i_format.simmediate << 2);
186 else
187 epc += 8;
188 nextpc = epc;
189 break;
190
191 /*
192 * And now the FPA/cp1 branch instructions.
193 */
194 case cop1_op:
195 printk("%s: unsupported cop1_op\n", __func__);
196 break;
197 }
198
199 return nextpc;
200
201unaligned:
202 printk("%s: unaligned epc\n", __func__);
203 return nextpc;
204
205sigill:
206 printk("%s: DSP branch but not DSP ASE\n", __func__);
207 return nextpc;
208}
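/*
 * Worked example of the taken-branch arithmetic above (values illustrative):
 * a beq at guest PC 0x80001000 with a signed 16-bit offset of +3 resolves to
 *
 *   nextpc = 0x80001000 + 4 + (3 << 2) = 0x80001010
 *
 * while a not-taken (or nullified branch-likely) case resumes at epc + 8,
 * i.e. past both the branch and its delay slot.
 */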
209
210enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
211{
212 unsigned long branch_pc;
213 enum emulation_result er = EMULATE_DONE;
214
215 if (cause & CAUSEF_BD) {
216 branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
217 if (branch_pc == KVM_INVALID_INST) {
218 er = EMULATE_FAIL;
219 } else {
220 vcpu->arch.pc = branch_pc;
221 kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc);
222 }
223 } else
224 vcpu->arch.pc += 4;
225
226 kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
227
228 return er;
229}
230
231/* Every time the compare register is written to, we need to decide when to fire
232 * the timer that represents timer ticks to the GUEST.
233 *
234 */
235enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu)
236{
237 struct mips_coproc *cop0 = vcpu->arch.cop0;
238 enum emulation_result er = EMULATE_DONE;
239
240 /* If COUNT is enabled */
241 if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) {
242 hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
243 hrtimer_start(&vcpu->arch.comparecount_timer,
244 ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL);
245 } else {
246 hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
247 }
248
249 return er;
250}
251
252enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
253{
254 struct mips_coproc *cop0 = vcpu->arch.cop0;
255 enum emulation_result er = EMULATE_DONE;
256
257 if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
258 kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
259 kvm_read_c0_guest_epc(cop0));
260 kvm_clear_c0_guest_status(cop0, ST0_EXL);
261 vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
262
263 } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
264 kvm_clear_c0_guest_status(cop0, ST0_ERL);
265 vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
266 } else {
267 printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
268 vcpu->arch.pc);
269 er = EMULATE_FAIL;
270 }
271
272 return er;
273}
274
275enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
276{
277 enum emulation_result er = EMULATE_DONE;
278
279 kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
280 vcpu->arch.pending_exceptions);
281
282 ++vcpu->stat.wait_exits;
283 trace_kvm_exit(vcpu, WAIT_EXITS);
284 if (!vcpu->arch.pending_exceptions) {
285 vcpu->arch.wait = 1;
286 kvm_vcpu_block(vcpu);
287
288 /* If we are runnable, then definitely go off to user space to check if any
289 * I/O interrupts are pending.
290 */
291 if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
292 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
293 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
294 }
295 }
296
297 return er;
298}
299
300/* XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that we can catch
301 * this, if things ever change
302 */
303enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
304{
305 struct mips_coproc *cop0 = vcpu->arch.cop0;
306 enum emulation_result er = EMULATE_FAIL;
307 uint32_t pc = vcpu->arch.pc;
308
309 printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
310 return er;
311}
312
313/* Write Guest TLB Entry @ Index */
314enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
315{
316 struct mips_coproc *cop0 = vcpu->arch.cop0;
317 int index = kvm_read_c0_guest_index(cop0);
318 enum emulation_result er = EMULATE_DONE;
319 struct kvm_mips_tlb *tlb = NULL;
320 uint32_t pc = vcpu->arch.pc;
321
322 if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
323 printk("%s: illegal index: %d\n", __func__, index);
324 printk
325 ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
326 pc, index, kvm_read_c0_guest_entryhi(cop0),
327 kvm_read_c0_guest_entrylo0(cop0),
328 kvm_read_c0_guest_entrylo1(cop0),
329 kvm_read_c0_guest_pagemask(cop0));
330 index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
331 }
332
333 tlb = &vcpu->arch.guest_tlb[index];
334#if 1
335 /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
336 kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
337#endif
338
339 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
340 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
341 tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
342 tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
343
344 kvm_debug
345 ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
346 pc, index, kvm_read_c0_guest_entryhi(cop0),
347 kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
348 kvm_read_c0_guest_pagemask(cop0));
349
350 return er;
351}
352
353/* Write Guest TLB Entry @ Random Index */
354enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
355{
356 struct mips_coproc *cop0 = vcpu->arch.cop0;
357 enum emulation_result er = EMULATE_DONE;
358 struct kvm_mips_tlb *tlb = NULL;
359 uint32_t pc = vcpu->arch.pc;
360 int index;
361
362#if 1
363 get_random_bytes(&index, sizeof(index));
364 index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
365#else
366 index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
367#endif
368
369 if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
370 printk("%s: illegal index: %d\n", __func__, index);
371 return EMULATE_FAIL;
372 }
373
374 tlb = &vcpu->arch.guest_tlb[index];
375
376#if 1
377 /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
378 kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
379#endif
380
381 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
382 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
383 tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
384 tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
385
386 kvm_debug
387 ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
388 pc, index, kvm_read_c0_guest_entryhi(cop0),
389 kvm_read_c0_guest_entrylo0(cop0),
390 kvm_read_c0_guest_entrylo1(cop0));
391
392 return er;
393}
394
395enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
396{
397 struct mips_coproc *cop0 = vcpu->arch.cop0;
398 long entryhi = kvm_read_c0_guest_entryhi(cop0);
399 enum emulation_result er = EMULATE_DONE;
400 uint32_t pc = vcpu->arch.pc;
401 int index = -1;
402
403 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
404
405 kvm_write_c0_guest_index(cop0, index);
406
407 kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
408 index);
409
410 return er;
411}
412
413enum emulation_result
414kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
415 struct kvm_run *run, struct kvm_vcpu *vcpu)
416{
417 struct mips_coproc *cop0 = vcpu->arch.cop0;
418 enum emulation_result er = EMULATE_DONE;
419 int32_t rt, rd, copz, sel, co_bit, op;
420 uint32_t pc = vcpu->arch.pc;
421 unsigned long curr_pc;
422
423 /*
424 * Update PC and hold onto current PC in case there is
425 * an error and we want to rollback the PC
426 */
427 curr_pc = vcpu->arch.pc;
428 er = update_pc(vcpu, cause);
429 if (er == EMULATE_FAIL) {
430 return er;
431 }
432
433 copz = (inst >> 21) & 0x1f;
434 rt = (inst >> 16) & 0x1f;
435 rd = (inst >> 11) & 0x1f;
436 sel = inst & 0x7;
437 co_bit = (inst >> 25) & 1;
438
439 /* Verify that the register is valid */
440 if (rd > MIPS_CP0_DESAVE) {
441 printk("Invalid rd: %d\n", rd);
442 er = EMULATE_FAIL;
443 goto done;
444 }
445
446 if (co_bit) {
447 op = (inst) & 0xff;
448
449 switch (op) {
450 case tlbr_op: /* Read indexed TLB entry */
451 er = kvm_mips_emul_tlbr(vcpu);
452 break;
453 case tlbwi_op: /* Write indexed */
454 er = kvm_mips_emul_tlbwi(vcpu);
455 break;
456 case tlbwr_op: /* Write random */
457 er = kvm_mips_emul_tlbwr(vcpu);
458 break;
459 case tlbp_op: /* TLB Probe */
460 er = kvm_mips_emul_tlbp(vcpu);
461 break;
462 case rfe_op:
463 printk("!!!COP0_RFE!!!\n");
464 break;
465 case eret_op:
466 er = kvm_mips_emul_eret(vcpu);
467 goto dont_update_pc;
468 break;
469 case wait_op:
470 er = kvm_mips_emul_wait(vcpu);
471 break;
472 }
473 } else {
474 switch (copz) {
475 case mfc_op:
476#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
477 cop0->stat[rd][sel]++;
478#endif
479 /* Get reg */
480 if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
481 /* XXXKYMA: Run the Guest count register @ 1/4 the rate of the host */
482 vcpu->arch.gprs[rt] = (read_c0_count() >> 2);
483 } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
484 vcpu->arch.gprs[rt] = 0x0;
485#ifdef CONFIG_KVM_MIPS_DYN_TRANS
486 kvm_mips_trans_mfc0(inst, opc, vcpu);
487#endif
488 }
489 else {
490 vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
491
492#ifdef CONFIG_KVM_MIPS_DYN_TRANS
493 kvm_mips_trans_mfc0(inst, opc, vcpu);
494#endif
495 }
496
497 kvm_debug
498 ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
499 pc, rd, sel, rt, vcpu->arch.gprs[rt]);
500
501 break;
502
503 case dmfc_op:
504 vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
505 break;
506
507 case mtc_op:
508#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
509 cop0->stat[rd][sel]++;
510#endif
511 if ((rd == MIPS_CP0_TLB_INDEX)
512 && (vcpu->arch.gprs[rt] >=
513 KVM_MIPS_GUEST_TLB_SIZE)) {
514 printk("Invalid TLB Index: %ld",
515 vcpu->arch.gprs[rt]);
516 er = EMULATE_FAIL;
517 break;
518 }
519#define C0_EBASE_CORE_MASK 0xff
520 if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
521 /* Preserve CORE number */
522 kvm_change_c0_guest_ebase(cop0,
523 ~(C0_EBASE_CORE_MASK),
524 vcpu->arch.gprs[rt]);
525 printk("MTCz, cop0->reg[EBASE]: %#lx\n",
526 kvm_read_c0_guest_ebase(cop0));
527 } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
528 uint32_t nasid = ASID_MASK(vcpu->arch.gprs[rt]);
529 if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
530 &&
531 (ASID_MASK(kvm_read_c0_guest_entryhi(cop0))
532 != nasid)) {
533
534 kvm_debug
535 ("MTCz, change ASID from %#lx to %#lx\n",
536 ASID_MASK(kvm_read_c0_guest_entryhi(cop0)),
537 ASID_MASK(vcpu->arch.gprs[rt]));
538
539 /* Blow away the shadow host TLBs */
540 kvm_mips_flush_host_tlb(1);
541 }
542 kvm_write_c0_guest_entryhi(cop0,
543 vcpu->arch.gprs[rt]);
544 }
545 /* Are we writing to COUNT */
546 else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
547				/* Linux doesn't seem to write into COUNT;
548				 * silently ignore any write to COUNT for now
549				 */
550 /*er = EMULATE_FAIL; */
551 goto done;
552 } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
553 kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
554 pc, kvm_read_c0_guest_compare(cop0),
555 vcpu->arch.gprs[rt]);
556
557 /* If we are writing to COMPARE */
558 /* Clear pending timer interrupt, if any */
559 kvm_mips_callbacks->dequeue_timer_int(vcpu);
560 kvm_write_c0_guest_compare(cop0,
561 vcpu->arch.gprs[rt]);
562 } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
563 kvm_write_c0_guest_status(cop0,
564 vcpu->arch.gprs[rt]);
565 /* Make sure that CU1 and NMI bits are never set */
566 kvm_clear_c0_guest_status(cop0,
567 (ST0_CU1 | ST0_NMI));
568
569#ifdef CONFIG_KVM_MIPS_DYN_TRANS
570 kvm_mips_trans_mtc0(inst, opc, vcpu);
571#endif
572 } else {
573 cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
574#ifdef CONFIG_KVM_MIPS_DYN_TRANS
575 kvm_mips_trans_mtc0(inst, opc, vcpu);
576#endif
577 }
578
579 kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
580 rd, sel, cop0->reg[rd][sel]);
581 break;
582
583 case dmtc_op:
584 printk
585 ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
586 vcpu->arch.pc, rt, rd, sel);
587 er = EMULATE_FAIL;
588 break;
589
590 case mfmcz_op:
591#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
592 cop0->stat[MIPS_CP0_STATUS][0]++;
593#endif
594 if (rt != 0) {
595 vcpu->arch.gprs[rt] =
596 kvm_read_c0_guest_status(cop0);
597 }
598 /* EI */
599 if (inst & 0x20) {
600 kvm_debug("[%#lx] mfmcz_op: EI\n",
601 vcpu->arch.pc);
602 kvm_set_c0_guest_status(cop0, ST0_IE);
603 } else {
604 kvm_debug("[%#lx] mfmcz_op: DI\n",
605 vcpu->arch.pc);
606 kvm_clear_c0_guest_status(cop0, ST0_IE);
607 }
608
609 break;
610
611 case wrpgpr_op:
612 {
613 uint32_t css =
614 cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
615 uint32_t pss =
616 (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
617				/* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] == 0 */
618 if (css || pss) {
619 er = EMULATE_FAIL;
620 break;
621 }
622 kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
623 vcpu->arch.gprs[rt]);
624 vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
625 }
626 break;
627 default:
628 printk
629 ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
630 vcpu->arch.pc, copz);
631 er = EMULATE_FAIL;
632 break;
633 }
634 }
635
636done:
637 /*
638 * Rollback PC only if emulation was unsuccessful
639 */
640 if (er == EMULATE_FAIL) {
641 vcpu->arch.pc = curr_pc;
642 }
643
644dont_update_pc:
645 /*
646 * This is for special instructions whose emulation
647 * updates the PC, so do not overwrite the PC under
648 * any circumstances
649 */
650
651 return er;
652}
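
The CP0 emulation above pulls the coprocessor-0 fields out of the instruction word with fixed shifts and masks: copz in bits 25:21, rt in 20:16, rd in 15:11, sel in 2:0, and the CO bit in bit 25. A minimal standalone sketch of that decode, written as ordinary user-space C purely for illustration (the struct and function names here are invented, not part of these sources):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical holder for the fields kvm_mips_emulate_CP0() extracts. */
struct cp0_fields {
	uint32_t copz;	/* bits 25:21 - mfc0/mtc0/... selector        */
	uint32_t rt;	/* bits 20:16 - general purpose register      */
	uint32_t rd;	/* bits 15:11 - coprocessor 0 register        */
	uint32_t sel;	/* bits  2:0  - register select               */
	uint32_t co;	/* bit  25    - CO: tlbr/tlbwi/tlbp/eret/wait */
};

static void decode_cp0(uint32_t inst, struct cp0_fields *f)
{
	f->copz = (inst >> 21) & 0x1f;
	f->rt   = (inst >> 16) & 0x1f;
	f->rd   = (inst >> 11) & 0x1f;
	f->sel  = inst & 0x7;
	f->co   = (inst >> 25) & 1;
}

int main(void)
{
	struct cp0_fields f;

	/* mfc0 $2, $12, 0 (read Status into $v0) encodes as 0x40026000 */
	decode_cp0(0x40026000, &f);
	printf("copz=%u rt=%u rd=%u sel=%u co=%u\n",
	       f.copz, f.rt, f.rd, f.sel, f.co);
	return 0;
}
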
653
654enum emulation_result
655kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
656 struct kvm_run *run, struct kvm_vcpu *vcpu)
657{
658 enum emulation_result er = EMULATE_DO_MMIO;
659 int32_t op, base, rt, offset;
660 uint32_t bytes;
661 void *data = run->mmio.data;
662 unsigned long curr_pc;
663
664 /*
665 * Update PC and hold onto current PC in case there is
666 * an error and we want to rollback the PC
667 */
668 curr_pc = vcpu->arch.pc;
669 er = update_pc(vcpu, cause);
670 if (er == EMULATE_FAIL)
671 return er;
672
673 rt = (inst >> 16) & 0x1f;
674 base = (inst >> 21) & 0x1f;
675 offset = inst & 0xffff;
676 op = (inst >> 26) & 0x3f;
677
678 switch (op) {
679 case sb_op:
680 bytes = 1;
681 if (bytes > sizeof(run->mmio.data)) {
682 kvm_err("%s: bad MMIO length: %d\n", __func__,
683 run->mmio.len);
684 }
685 run->mmio.phys_addr =
686 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
687 host_cp0_badvaddr);
688 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
689 er = EMULATE_FAIL;
690 break;
691 }
692 run->mmio.len = bytes;
693 run->mmio.is_write = 1;
694 vcpu->mmio_needed = 1;
695 vcpu->mmio_is_write = 1;
696 *(u8 *) data = vcpu->arch.gprs[rt];
697 kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
698 vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
699 *(uint8_t *) data);
700
701 break;
702
703 case sw_op:
704 bytes = 4;
705 if (bytes > sizeof(run->mmio.data)) {
706 kvm_err("%s: bad MMIO length: %d\n", __func__,
707 run->mmio.len);
708 }
709 run->mmio.phys_addr =
710 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
711 host_cp0_badvaddr);
712 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
713 er = EMULATE_FAIL;
714 break;
715 }
716
717 run->mmio.len = bytes;
718 run->mmio.is_write = 1;
719 vcpu->mmio_needed = 1;
720 vcpu->mmio_is_write = 1;
721 *(uint32_t *) data = vcpu->arch.gprs[rt];
722
723 kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
724 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
725 vcpu->arch.gprs[rt], *(uint32_t *) data);
726 break;
727
728 case sh_op:
729 bytes = 2;
730 if (bytes > sizeof(run->mmio.data)) {
731 kvm_err("%s: bad MMIO length: %d\n", __func__,
732 run->mmio.len);
733 }
734 run->mmio.phys_addr =
735 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
736 host_cp0_badvaddr);
737 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
738 er = EMULATE_FAIL;
739 break;
740 }
741
742 run->mmio.len = bytes;
743 run->mmio.is_write = 1;
744 vcpu->mmio_needed = 1;
745 vcpu->mmio_is_write = 1;
746 *(uint16_t *) data = vcpu->arch.gprs[rt];
747
748 kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
749 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
750 vcpu->arch.gprs[rt], *(uint32_t *) data);
751 break;
752
753 default:
754 printk("Store not yet supported");
755 er = EMULATE_FAIL;
756 break;
757 }
758
759 /*
760 * Rollback PC if emulation was unsuccessful
761 */
762 if (er == EMULATE_FAIL) {
763 vcpu->arch.pc = curr_pc;
764 }
765
766 return er;
767}
768
769enum emulation_result
770kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
771 struct kvm_run *run, struct kvm_vcpu *vcpu)
772{
773 enum emulation_result er = EMULATE_DO_MMIO;
774 int32_t op, base, rt, offset;
775 uint32_t bytes;
776
777 rt = (inst >> 16) & 0x1f;
778 base = (inst >> 21) & 0x1f;
779 offset = inst & 0xffff;
780 op = (inst >> 26) & 0x3f;
781
782 vcpu->arch.pending_load_cause = cause;
783 vcpu->arch.io_gpr = rt;
784
785 switch (op) {
786 case lw_op:
787 bytes = 4;
788 if (bytes > sizeof(run->mmio.data)) {
789 kvm_err("%s: bad MMIO length: %d\n", __func__,
790 run->mmio.len);
791 er = EMULATE_FAIL;
792 break;
793 }
794 run->mmio.phys_addr =
795 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
796 host_cp0_badvaddr);
797 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
798 er = EMULATE_FAIL;
799 break;
800 }
801
802 run->mmio.len = bytes;
803 run->mmio.is_write = 0;
804 vcpu->mmio_needed = 1;
805 vcpu->mmio_is_write = 0;
806 break;
807
808 case lh_op:
809 case lhu_op:
810 bytes = 2;
811 if (bytes > sizeof(run->mmio.data)) {
812 kvm_err("%s: bad MMIO length: %d\n", __func__,
813 run->mmio.len);
814 er = EMULATE_FAIL;
815 break;
816 }
817 run->mmio.phys_addr =
818 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
819 host_cp0_badvaddr);
820 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
821 er = EMULATE_FAIL;
822 break;
823 }
824
825 run->mmio.len = bytes;
826 run->mmio.is_write = 0;
827 vcpu->mmio_needed = 1;
828 vcpu->mmio_is_write = 0;
829
830 if (op == lh_op)
831 vcpu->mmio_needed = 2;
832 else
833 vcpu->mmio_needed = 1;
834
835 break;
836
837 case lbu_op:
838 case lb_op:
839 bytes = 1;
840 if (bytes > sizeof(run->mmio.data)) {
841 kvm_err("%s: bad MMIO length: %d\n", __func__,
842 run->mmio.len);
843 er = EMULATE_FAIL;
844 break;
845 }
846 run->mmio.phys_addr =
847 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
848 host_cp0_badvaddr);
849 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
850 er = EMULATE_FAIL;
851 break;
852 }
853
854 run->mmio.len = bytes;
855 run->mmio.is_write = 0;
856 vcpu->mmio_is_write = 0;
857
858 if (op == lb_op)
859 vcpu->mmio_needed = 2;
860 else
861 vcpu->mmio_needed = 1;
862
863 break;
864
865 default:
866 printk("Load not yet supported");
867 er = EMULATE_FAIL;
868 break;
869 }
870
871 return er;
872}
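
Both the store and load paths above return EMULATE_DO_MMIO after filling in run->mmio.phys_addr, len, is_write and (for stores) data, so the access is completed by the user-space VMM on a KVM_EXIT_MMIO exit. A rough sketch of the consuming side, using only the standard KVM UAPI; the device model here (a single dummy register) and the helper name are invented for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <linux/kvm.h>

static uint32_t dummy_reg;	/* toy MMIO-backed device register */

/* Called from the vcpu loop when ioctl(vcpu_fd, KVM_RUN, 0) returns
 * with run->exit_reason == KVM_EXIT_MMIO.
 */
static void handle_mmio_exit(struct kvm_run *run)
{
	size_t n = run->mmio.len < sizeof(dummy_reg) ?
		   run->mmio.len : sizeof(dummy_reg);

	if (run->mmio.is_write) {
		/* Guest store: data[] already holds the bytes to write. */
		memcpy(&dummy_reg, run->mmio.data, n);
	} else {
		/* Guest load: fill data[]; the kernel finishes the load in
		 * kvm_mips_complete_mmio_load() on the next KVM_RUN. */
		memcpy(run->mmio.data, &dummy_reg, n);
	}
	printf("MMIO %s %u bytes @ %#llx\n",
	       run->mmio.is_write ? "write" : "read", run->mmio.len,
	       (unsigned long long)run->mmio.phys_addr);
}

int main(void)
{
	struct kvm_run run = { 0 };	/* stand-in; normally mmap()ed */

	run.exit_reason = KVM_EXIT_MMIO;
	run.mmio.phys_addr = 0x1f000500;
	run.mmio.len = 4;
	run.mmio.is_write = 0;
	handle_mmio_exit(&run);
	return 0;
}
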
873
874int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
875{
876 unsigned long offset = (va & ~PAGE_MASK);
877 struct kvm *kvm = vcpu->kvm;
878 unsigned long pa;
879 gfn_t gfn;
880 pfn_t pfn;
881
882 gfn = va >> PAGE_SHIFT;
883
884 if (gfn >= kvm->arch.guest_pmap_npages) {
885 printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
886 kvm_mips_dump_host_tlbs();
887 kvm_arch_vcpu_dump_regs(vcpu);
888 return -1;
889 }
890 pfn = kvm->arch.guest_pmap[gfn];
891 pa = (pfn << PAGE_SHIFT) | offset;
892
893 printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));
894
895 mips32_SyncICache(CKSEG0ADDR(pa), 32);
896 return 0;
897}
898
899#define MIPS_CACHE_OP_INDEX_INV 0x0
900#define MIPS_CACHE_OP_INDEX_LD_TAG 0x1
901#define MIPS_CACHE_OP_INDEX_ST_TAG 0x2
902#define MIPS_CACHE_OP_IMP 0x3
903#define MIPS_CACHE_OP_HIT_INV 0x4
904#define MIPS_CACHE_OP_FILL_WB_INV 0x5
905#define MIPS_CACHE_OP_HIT_HB 0x6
906#define MIPS_CACHE_OP_FETCH_LOCK 0x7
907
908#define MIPS_CACHE_ICACHE 0x0
909#define MIPS_CACHE_DCACHE 0x1
910#define MIPS_CACHE_SEC 0x3
911
912enum emulation_result
913kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
914 struct kvm_run *run, struct kvm_vcpu *vcpu)
915{
916 struct mips_coproc *cop0 = vcpu->arch.cop0;
917 extern void (*r4k_blast_dcache) (void);
918 extern void (*r4k_blast_icache) (void);
919 enum emulation_result er = EMULATE_DONE;
920 int32_t offset, cache, op_inst, op, base;
921 struct kvm_vcpu_arch *arch = &vcpu->arch;
922 unsigned long va;
923 unsigned long curr_pc;
924
925 /*
926 * Update PC and hold onto current PC in case there is
927 * an error and we want to rollback the PC
928 */
929 curr_pc = vcpu->arch.pc;
930 er = update_pc(vcpu, cause);
931 if (er == EMULATE_FAIL)
932 return er;
933
934 base = (inst >> 21) & 0x1f;
935 op_inst = (inst >> 16) & 0x1f;
936 offset = inst & 0xffff;
937 cache = (inst >> 16) & 0x3;
938 op = (inst >> 18) & 0x7;
939
940 va = arch->gprs[base] + offset;
941
942 kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
943 cache, op, base, arch->gprs[base], offset);
944
945	/* Treat INDEX_INV as a no-op; Linux issues it at startup to invalidate
946	 * the caches entirely by stepping through all the ways/indexes
947	 */
948 if (op == MIPS_CACHE_OP_INDEX_INV) {
949 kvm_debug
950 ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
951 vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
952 arch->gprs[base], offset);
953
954 if (cache == MIPS_CACHE_DCACHE)
955 r4k_blast_dcache();
956 else if (cache == MIPS_CACHE_ICACHE)
957 r4k_blast_icache();
958 else {
959 printk("%s: unsupported CACHE INDEX operation\n",
960 __func__);
961 return EMULATE_FAIL;
962 }
963
964#ifdef CONFIG_KVM_MIPS_DYN_TRANS
965 kvm_mips_trans_cache_index(inst, opc, vcpu);
966#endif
967 goto done;
968 }
969
970 preempt_disable();
971 if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
972
973 if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
974 kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
975 }
976 } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
977 KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
978 int index;
979
980 /* If an entry already exists then skip */
981 if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) {
982 goto skip_fault;
983 }
984
985		/* If the address is not in the guest TLB, give the guest a fault;
986		 * the resulting handler will do the right thing
987		 */
988 index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
989 ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
990
991 if (index < 0) {
992 vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
993 vcpu->arch.host_cp0_badvaddr = va;
994 er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
995 vcpu);
996 preempt_enable();
997 goto dont_update_pc;
998 } else {
999 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
1000 /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
1001 if (!TLB_IS_VALID(*tlb, va)) {
1002 er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
1003 run, vcpu);
1004 preempt_enable();
1005 goto dont_update_pc;
1006 } else {
1007 /* We fault an entry from the guest tlb to the shadow host TLB */
1008 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
1009 NULL,
1010 NULL);
1011 }
1012 }
1013 } else {
1014 printk
1015 ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1016 cache, op, base, arch->gprs[base], offset);
1017 er = EMULATE_FAIL;
1018 preempt_enable();
1019 goto dont_update_pc;
1020
1021 }
1022
1023skip_fault:
1024	/* XXXKYMA: Only the subset of cache ops used by Linux is supported */
1025 if (cache == MIPS_CACHE_DCACHE
1026 && (op == MIPS_CACHE_OP_FILL_WB_INV
1027 || op == MIPS_CACHE_OP_HIT_INV)) {
1028 flush_dcache_line(va);
1029
1030#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1031		/* Replace the CACHE instruction with a SYNCI; not the same, but it avoids a trap */
1032 kvm_mips_trans_cache_va(inst, opc, vcpu);
1033#endif
1034 } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
1035 flush_dcache_line(va);
1036 flush_icache_line(va);
1037
1038#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1039 /* Replace the CACHE instruction, with a SYNCI */
1040 kvm_mips_trans_cache_va(inst, opc, vcpu);
1041#endif
1042 } else {
1043 printk
1044 ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1045 cache, op, base, arch->gprs[base], offset);
1046 er = EMULATE_FAIL;
1047 preempt_enable();
1048 goto dont_update_pc;
1049 }
1050
1051 preempt_enable();
1052
1053 dont_update_pc:
1054 /*
1055 * Rollback PC
1056 */
1057 vcpu->arch.pc = curr_pc;
1058 done:
1059 return er;
1060}
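
The decode at the top of kvm_mips_emulate_cache() splits the CACHE instruction's 5-bit op field into a 2-bit cache selector (instruction bits 17:16) and a 3-bit operation (bits 20:18), which is what the MIPS32 encoding specifies. A standalone decode using the same shifts, with an example instruction chosen for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* cache 0x15, 0($4): op field 0b10101 = Hit Writeback Invalidate (D) */
	uint32_t inst = (0x2fu << 26) | (4u << 21) | (0x15u << 16);

	uint32_t base   = (inst >> 21) & 0x1f; /* address base register     */
	uint32_t cache  = (inst >> 16) & 0x3;  /* 0 = I-cache, 1 = D-cache  */
	uint32_t op     = (inst >> 18) & 0x7;  /* e.g. 0 = Index Invalidate */
	int32_t  offset = (int16_t)(inst & 0xffff);

	printf("base=$%u cache=%u op=%u offset=%d\n", base, cache, op, offset);
	return 0;
}
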
1061
1062enum emulation_result
1063kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
1064 struct kvm_run *run, struct kvm_vcpu *vcpu)
1065{
1066 enum emulation_result er = EMULATE_DONE;
1067 uint32_t inst;
1068
1069 /*
1070 * Fetch the instruction.
1071 */
1072 if (cause & CAUSEF_BD) {
1073 opc += 1;
1074 }
1075
1076 inst = kvm_get_inst(opc, vcpu);
1077
1078 switch (((union mips_instruction)inst).r_format.opcode) {
1079 case cop0_op:
1080 er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
1081 break;
1082 case sb_op:
1083 case sh_op:
1084 case sw_op:
1085 er = kvm_mips_emulate_store(inst, cause, run, vcpu);
1086 break;
1087 case lb_op:
1088 case lbu_op:
1089 case lhu_op:
1090 case lh_op:
1091 case lw_op:
1092 er = kvm_mips_emulate_load(inst, cause, run, vcpu);
1093 break;
1094
1095 case cache_op:
1096 ++vcpu->stat.cache_exits;
1097 trace_kvm_exit(vcpu, CACHE_EXITS);
1098 er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
1099 break;
1100
1101 default:
1102 printk("Instruction emulation not supported (%p/%#x)\n", opc,
1103 inst);
1104 kvm_arch_vcpu_dump_regs(vcpu);
1105 er = EMULATE_FAIL;
1106 break;
1107 }
1108
1109 return er;
1110}
1111
1112enum emulation_result
1113kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
1114 struct kvm_run *run, struct kvm_vcpu *vcpu)
1115{
1116 struct mips_coproc *cop0 = vcpu->arch.cop0;
1117 struct kvm_vcpu_arch *arch = &vcpu->arch;
1118 enum emulation_result er = EMULATE_DONE;
1119
1120 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1121 /* save old pc */
1122 kvm_write_c0_guest_epc(cop0, arch->pc);
1123 kvm_set_c0_guest_status(cop0, ST0_EXL);
1124
1125 if (cause & CAUSEF_BD)
1126 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1127 else
1128 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1129
1130 kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
1131
1132 kvm_change_c0_guest_cause(cop0, (0xff),
1133 (T_SYSCALL << CAUSEB_EXCCODE));
1134
1135 /* Set PC to the exception entry point */
1136 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1137
1138 } else {
1139 printk("Trying to deliver SYSCALL when EXL is already set\n");
1140 er = EMULATE_FAIL;
1141 }
1142
1143 return er;
1144}
1145
1146enum emulation_result
1147kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
1148 struct kvm_run *run, struct kvm_vcpu *vcpu)
1149{
1150 struct mips_coproc *cop0 = vcpu->arch.cop0;
1151 struct kvm_vcpu_arch *arch = &vcpu->arch;
1152 enum emulation_result er = EMULATE_DONE;
1153	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1154 ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
1155
1156 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1157 /* save old pc */
1158 kvm_write_c0_guest_epc(cop0, arch->pc);
1159 kvm_set_c0_guest_status(cop0, ST0_EXL);
1160
1161 if (cause & CAUSEF_BD)
1162 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1163 else
1164 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1165
1166 kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
1167 arch->pc);
1168
1169 /* set pc to the exception entry point */
1170 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1171
1172 } else {
1173 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1174 arch->pc);
1175
1176 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1177 }
1178
1179 kvm_change_c0_guest_cause(cop0, (0xff),
1180 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
1181
1182 /* setup badvaddr, context and entryhi registers for the guest */
1183 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1184 /* XXXKYMA: is the context register used by linux??? */
1185 kvm_write_c0_guest_entryhi(cop0, entryhi);
1186 /* Blow away the shadow host TLBs */
1187 kvm_mips_flush_host_tlb(1);
1188
1189 return er;
1190}
1191
1192enum emulation_result
1193kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
1194 struct kvm_run *run, struct kvm_vcpu *vcpu)
1195{
1196 struct mips_coproc *cop0 = vcpu->arch.cop0;
1197 struct kvm_vcpu_arch *arch = &vcpu->arch;
1198 enum emulation_result er = EMULATE_DONE;
1199 unsigned long entryhi =
1200 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1201 ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
1202
1203 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1204 /* save old pc */
1205 kvm_write_c0_guest_epc(cop0, arch->pc);
1206 kvm_set_c0_guest_status(cop0, ST0_EXL);
1207
1208 if (cause & CAUSEF_BD)
1209 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1210 else
1211 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1212
1213 kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
1214 arch->pc);
1215
1216 /* set pc to the exception entry point */
1217 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1218
1219 } else {
1220 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1221 arch->pc);
1222 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1223 }
1224
1225 kvm_change_c0_guest_cause(cop0, (0xff),
1226 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
1227
1228 /* setup badvaddr, context and entryhi registers for the guest */
1229 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1230 /* XXXKYMA: is the context register used by linux??? */
1231 kvm_write_c0_guest_entryhi(cop0, entryhi);
1232 /* Blow away the shadow host TLBs */
1233 kvm_mips_flush_host_tlb(1);
1234
1235 return er;
1236}
1237
1238enum emulation_result
1239kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
1240 struct kvm_run *run, struct kvm_vcpu *vcpu)
1241{
1242 struct mips_coproc *cop0 = vcpu->arch.cop0;
1243 struct kvm_vcpu_arch *arch = &vcpu->arch;
1244 enum emulation_result er = EMULATE_DONE;
1245 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1246 ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
1247
1248 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1249 /* save old pc */
1250 kvm_write_c0_guest_epc(cop0, arch->pc);
1251 kvm_set_c0_guest_status(cop0, ST0_EXL);
1252
1253 if (cause & CAUSEF_BD)
1254 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1255 else
1256 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1257
1258 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1259 arch->pc);
1260
1261 /* Set PC to the exception entry point */
1262 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1263 } else {
1264 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
1265 arch->pc);
1266 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1267 }
1268
1269 kvm_change_c0_guest_cause(cop0, (0xff),
1270 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
1271
1272 /* setup badvaddr, context and entryhi registers for the guest */
1273 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1274 /* XXXKYMA: is the context register used by linux??? */
1275 kvm_write_c0_guest_entryhi(cop0, entryhi);
1276 /* Blow away the shadow host TLBs */
1277 kvm_mips_flush_host_tlb(1);
1278
1279 return er;
1280}
1281
1282enum emulation_result
1283kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
1284 struct kvm_run *run, struct kvm_vcpu *vcpu)
1285{
1286 struct mips_coproc *cop0 = vcpu->arch.cop0;
1287 struct kvm_vcpu_arch *arch = &vcpu->arch;
1288 enum emulation_result er = EMULATE_DONE;
1289 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1290 ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
1291
1292 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1293 /* save old pc */
1294 kvm_write_c0_guest_epc(cop0, arch->pc);
1295 kvm_set_c0_guest_status(cop0, ST0_EXL);
1296
1297 if (cause & CAUSEF_BD)
1298 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1299 else
1300 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1301
1302 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1303 arch->pc);
1304
1305 /* Set PC to the exception entry point */
1306 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1307 } else {
1308 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
1309 arch->pc);
1310 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1311 }
1312
1313 kvm_change_c0_guest_cause(cop0, (0xff),
1314 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
1315
1316 /* setup badvaddr, context and entryhi registers for the guest */
1317 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1318 /* XXXKYMA: is the context register used by linux??? */
1319 kvm_write_c0_guest_entryhi(cop0, entryhi);
1320 /* Blow away the shadow host TLBs */
1321 kvm_mips_flush_host_tlb(1);
1322
1323 return er;
1324}
1325
1326/* TLBMOD: store into address matching TLB with Dirty bit off */
1327enum emulation_result
1328kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
1329 struct kvm_run *run, struct kvm_vcpu *vcpu)
1330{
1331 enum emulation_result er = EMULATE_DONE;
1332
1333#ifdef DEBUG
1334 /*
1335 * If address not in the guest TLB, then we are in trouble
1336 */
1337 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
1338 if (index < 0) {
1339 /* XXXKYMA Invalidate and retry */
1340 kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
1341 kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
1342 __func__, entryhi);
1343 kvm_mips_dump_guest_tlbs(vcpu);
1344 kvm_mips_dump_host_tlbs();
1345 return EMULATE_FAIL;
1346 }
1347#endif
1348
1349 er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
1350 return er;
1351}
1352
1353enum emulation_result
1354kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
1355 struct kvm_run *run, struct kvm_vcpu *vcpu)
1356{
1357 struct mips_coproc *cop0 = vcpu->arch.cop0;
1358 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1359 ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
1360 struct kvm_vcpu_arch *arch = &vcpu->arch;
1361 enum emulation_result er = EMULATE_DONE;
1362
1363 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1364 /* save old pc */
1365 kvm_write_c0_guest_epc(cop0, arch->pc);
1366 kvm_set_c0_guest_status(cop0, ST0_EXL);
1367
1368 if (cause & CAUSEF_BD)
1369 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1370 else
1371 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1372
1373 kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
1374 arch->pc);
1375
1376 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1377 } else {
1378 kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
1379 arch->pc);
1380 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1381 }
1382
1383 kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
1384
1385 /* setup badvaddr, context and entryhi registers for the guest */
1386 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1387 /* XXXKYMA: is the context register used by linux??? */
1388 kvm_write_c0_guest_entryhi(cop0, entryhi);
1389 /* Blow away the shadow host TLBs */
1390 kvm_mips_flush_host_tlb(1);
1391
1392 return er;
1393}
1394
1395enum emulation_result
1396kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
1397 struct kvm_run *run, struct kvm_vcpu *vcpu)
1398{
1399 struct mips_coproc *cop0 = vcpu->arch.cop0;
1400 struct kvm_vcpu_arch *arch = &vcpu->arch;
1401 enum emulation_result er = EMULATE_DONE;
1402
1403 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1404 /* save old pc */
1405 kvm_write_c0_guest_epc(cop0, arch->pc);
1406 kvm_set_c0_guest_status(cop0, ST0_EXL);
1407
1408 if (cause & CAUSEF_BD)
1409 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1410 else
1411 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1412
1413 }
1414
1415 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1416
1417 kvm_change_c0_guest_cause(cop0, (0xff),
1418 (T_COP_UNUSABLE << CAUSEB_EXCCODE));
1419 kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
1420
1421 return er;
1422}
1423
1424enum emulation_result
1425kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
1426 struct kvm_run *run, struct kvm_vcpu *vcpu)
1427{
1428 struct mips_coproc *cop0 = vcpu->arch.cop0;
1429 struct kvm_vcpu_arch *arch = &vcpu->arch;
1430 enum emulation_result er = EMULATE_DONE;
1431
1432 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1433 /* save old pc */
1434 kvm_write_c0_guest_epc(cop0, arch->pc);
1435 kvm_set_c0_guest_status(cop0, ST0_EXL);
1436
1437 if (cause & CAUSEF_BD)
1438 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1439 else
1440 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1441
1442 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
1443
1444 kvm_change_c0_guest_cause(cop0, (0xff),
1445 (T_RES_INST << CAUSEB_EXCCODE));
1446
1447 /* Set PC to the exception entry point */
1448 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1449
1450 } else {
1451 kvm_err("Trying to deliver RI when EXL is already set\n");
1452 er = EMULATE_FAIL;
1453 }
1454
1455 return er;
1456}
1457
1458enum emulation_result
1459kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
1460 struct kvm_run *run, struct kvm_vcpu *vcpu)
1461{
1462 struct mips_coproc *cop0 = vcpu->arch.cop0;
1463 struct kvm_vcpu_arch *arch = &vcpu->arch;
1464 enum emulation_result er = EMULATE_DONE;
1465
1466 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1467 /* save old pc */
1468 kvm_write_c0_guest_epc(cop0, arch->pc);
1469 kvm_set_c0_guest_status(cop0, ST0_EXL);
1470
1471 if (cause & CAUSEF_BD)
1472 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1473 else
1474 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1475
1476 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
1477
1478 kvm_change_c0_guest_cause(cop0, (0xff),
1479 (T_BREAK << CAUSEB_EXCCODE));
1480
1481 /* Set PC to the exception entry point */
1482 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1483
1484 } else {
1485 printk("Trying to deliver BP when EXL is already set\n");
1486 er = EMULATE_FAIL;
1487 }
1488
1489 return er;
1490}
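
Each of the kvm_mips_emulate_*() injectors above repeats the same delivery sequence: save the old PC into guest EPC, set Status.EXL, mirror Cause.BD from the host cause, write the ExcCode field, and point the guest PC at the exception vector (offset 0x180 above the guest KSEG0 base, which these patches appear to place at 0x40000000). A condensed sketch of that shared pattern with a toy CP0 structure; the names below are invented and only stand in for the kvm_*_c0_guest_*() accessors:

#include <stdint.h>
#include <stdio.h>

#define ST0_EXL		0x00000002u
#define CAUSEF_BD	0x80000000u
#define CAUSEB_EXCCODE	2

struct toy_guest {			/* stand-in for struct mips_coproc */
	uint32_t status, cause, epc, pc;
};

static void deliver_guest_exception(struct toy_guest *g, uint32_t host_cause,
				    uint32_t exccode, uint32_t vector)
{
	if (!(g->status & ST0_EXL)) {
		g->epc = g->pc;			/* save old pc              */
		g->status |= ST0_EXL;		/* enter exception level    */
		if (host_cause & CAUSEF_BD)	/* faulted in a delay slot? */
			g->cause |= CAUSEF_BD;
		else
			g->cause &= ~CAUSEF_BD;
	}
	g->cause = (g->cause & ~0xffu) | (exccode << CAUSEB_EXCCODE);
	g->pc = vector;				/* e.g. guest KSEG0 + 0x180 */
}

int main(void)
{
	struct toy_guest g = { .pc = 0x80012340 };

	deliver_guest_exception(&g, 0, 8 /* T_SYSCALL */, 0x40000180);
	printf("epc=%#x pc=%#x status=%#x cause=%#x\n",
	       g.epc, g.pc, g.status, g.cause);
	return 0;
}
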
1491
1492/*
1493 * ll/sc, rdhwr, sync emulation
1494 */
1495
1496#define OPCODE 0xfc000000
1497#define BASE 0x03e00000
1498#define RT 0x001f0000
1499#define OFFSET 0x0000ffff
1500#define LL 0xc0000000
1501#define SC 0xe0000000
1502#define SPEC0 0x00000000
1503#define SPEC3 0x7c000000
1504#define RD 0x0000f800
1505#define FUNC 0x0000003f
1506#define SYNC 0x0000000f
1507#define RDHWR 0x0000003b
1508
1509enum emulation_result
1510kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
1511 struct kvm_run *run, struct kvm_vcpu *vcpu)
1512{
1513 struct mips_coproc *cop0 = vcpu->arch.cop0;
1514 struct kvm_vcpu_arch *arch = &vcpu->arch;
1515 enum emulation_result er = EMULATE_DONE;
1516 unsigned long curr_pc;
1517 uint32_t inst;
1518
1519 /*
1520 * Update PC and hold onto current PC in case there is
1521 * an error and we want to rollback the PC
1522 */
1523 curr_pc = vcpu->arch.pc;
1524 er = update_pc(vcpu, cause);
1525 if (er == EMULATE_FAIL)
1526 return er;
1527
1528 /*
1529 * Fetch the instruction.
1530 */
1531 if (cause & CAUSEF_BD)
1532 opc += 1;
1533
1534 inst = kvm_get_inst(opc, vcpu);
1535
1536 if (inst == KVM_INVALID_INST) {
1537 printk("%s: Cannot get inst @ %p\n", __func__, opc);
1538 return EMULATE_FAIL;
1539 }
1540
1541 if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
1542 int rd = (inst & RD) >> 11;
1543 int rt = (inst & RT) >> 16;
1544 switch (rd) {
1545 case 0: /* CPU number */
1546 arch->gprs[rt] = 0;
1547 break;
1548 case 1: /* SYNCI length */
1549 arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
1550 current_cpu_data.icache.linesz);
1551 break;
1552 case 2: /* Read count register */
1553 printk("RDHWR: Cont register\n");
1554 arch->gprs[rt] = kvm_read_c0_guest_count(cop0);
1555 break;
1556 case 3: /* Count register resolution */
1557 switch (current_cpu_data.cputype) {
1558 case CPU_20KC:
1559 case CPU_25KF:
1560 arch->gprs[rt] = 1;
1561 break;
1562 default:
1563 arch->gprs[rt] = 2;
1564 }
1565 break;
1566 case 29:
1567#if 1
1568 arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
1569#else
1570 /* UserLocal not implemented */
1571 er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
1572#endif
1573 break;
1574
1575 default:
1576 printk("RDHWR not supported\n");
1577 er = EMULATE_FAIL;
1578 break;
1579 }
1580 } else {
1581 printk("Emulate RI not supported @ %p: %#x\n", opc, inst);
1582 er = EMULATE_FAIL;
1583 }
1584
1585 /*
1586 * Rollback PC only if emulation was unsuccessful
1587 */
1588 if (er == EMULATE_FAIL) {
1589 vcpu->arch.pc = curr_pc;
1590 }
1591 return er;
1592}
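
kvm_mips_handle_ri() above recognizes RDHWR by matching the SPEC3 major opcode together with function code 0x3b, then dispatches on the rd field (0 = CPU number, 1 = SYNCI step, 2 = Count, 3 = Count resolution, 29 = UserLocal). The same match and field extraction, shown standalone with the masks defined earlier; the example encoding is the usual rdhwr $3, $29 TLS sequence:

#include <stdint.h>
#include <stdio.h>

#define OPCODE	0xfc000000
#define SPEC3	0x7c000000
#define FUNC	0x0000003f
#define RDHWR	0x0000003b
#define RD	0x0000f800
#define RT	0x001f0000

int main(void)
{
	uint32_t inst = 0x7c03e83b;	/* rdhwr $3, $29 (UserLocal -> $v1) */

	if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
		int rd = (inst & RD) >> 11;	/* hardware register number */
		int rt = (inst & RT) >> 16;	/* destination GPR          */

		printf("rdhwr: hw reg %d -> $%d\n", rd, rt);
	}
	return 0;
}
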
1593
1594enum emulation_result
1595kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
1596{
1597 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
1598 enum emulation_result er = EMULATE_DONE;
1599 unsigned long curr_pc;
1600
1601 if (run->mmio.len > sizeof(*gpr)) {
1602 printk("Bad MMIO length: %d", run->mmio.len);
1603 er = EMULATE_FAIL;
1604 goto done;
1605 }
1606
1607 /*
1608 * Update PC and hold onto current PC in case there is
1609 * an error and we want to rollback the PC
1610 */
1611 curr_pc = vcpu->arch.pc;
1612 er = update_pc(vcpu, vcpu->arch.pending_load_cause);
1613 if (er == EMULATE_FAIL)
1614 return er;
1615
1616 switch (run->mmio.len) {
1617 case 4:
1618 *gpr = *(int32_t *) run->mmio.data;
1619 break;
1620
1621 case 2:
1622 if (vcpu->mmio_needed == 2)
1623 *gpr = *(int16_t *) run->mmio.data;
1624 else
1625			*gpr = *(uint16_t *) run->mmio.data;	/* lhu: zero-extend */
1626
1627 break;
1628 case 1:
1629 if (vcpu->mmio_needed == 2)
1630 *gpr = *(int8_t *) run->mmio.data;
1631 else
1632 *gpr = *(u8 *) run->mmio.data;
1633 break;
1634 }
1635
1636 if (vcpu->arch.pending_load_cause & CAUSEF_BD)
1637 kvm_debug
1638 ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
1639 vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
1640 vcpu->mmio_needed);
1641
1642done:
1643 return er;
1644}
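
The completion path above picks the access width from run->mmio.len and the signedness from vcpu->mmio_needed (2 for the signed lh/lb forms, 1 for lhu/lbu). The same extension rules expressed as one standalone helper for clarity; this is only a sketch, not kernel code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Extend 'len' bytes of MMIO data to register width; 'sign_extend'
 * mirrors vcpu->mmio_needed == 2 in the code above.
 */
static unsigned long mmio_extend(const void *data, unsigned int len,
				 int sign_extend)
{
	switch (len) {
	case 4: { int32_t  v; memcpy(&v, data, 4); return (unsigned long)v; }
	case 2: { uint16_t v; memcpy(&v, data, 2);
		  return sign_extend ? (unsigned long)(int16_t)v : v; }
	case 1: { uint8_t  v; memcpy(&v, data, 1);
		  return sign_extend ? (unsigned long)(int8_t)v : v; }
	}
	return 0;
}

int main(void)
{
	uint8_t buf[2] = { 0xff, 0xff };

	printf("lh  -> %#lx\n", mmio_extend(buf, 2, 1)); /* sign-extended */
	printf("lhu -> %#lx\n", mmio_extend(buf, 2, 0)); /* 0xffff        */
	return 0;
}
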
1645
1646static enum emulation_result
1647kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
1648 struct kvm_run *run, struct kvm_vcpu *vcpu)
1649{
1650 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1651 struct mips_coproc *cop0 = vcpu->arch.cop0;
1652 struct kvm_vcpu_arch *arch = &vcpu->arch;
1653 enum emulation_result er = EMULATE_DONE;
1654
1655 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1656 /* save old pc */
1657 kvm_write_c0_guest_epc(cop0, arch->pc);
1658 kvm_set_c0_guest_status(cop0, ST0_EXL);
1659
1660 if (cause & CAUSEF_BD)
1661 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1662 else
1663 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1664
1665 kvm_change_c0_guest_cause(cop0, (0xff),
1666 (exccode << CAUSEB_EXCCODE));
1667
1668 /* Set PC to the exception entry point */
1669 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1670 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1671
1672 kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
1673 exccode, kvm_read_c0_guest_epc(cop0),
1674 kvm_read_c0_guest_badvaddr(cop0));
1675 } else {
1676 printk("Trying to deliver EXC when EXL is already set\n");
1677 er = EMULATE_FAIL;
1678 }
1679
1680 return er;
1681}
1682
1683enum emulation_result
1684kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
1685 struct kvm_run *run, struct kvm_vcpu *vcpu)
1686{
1687 enum emulation_result er = EMULATE_DONE;
1688 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1689 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
1690
1691 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
1692
1693 if (usermode) {
1694 switch (exccode) {
1695 case T_INT:
1696 case T_SYSCALL:
1697 case T_BREAK:
1698 case T_RES_INST:
1699 break;
1700
1701 case T_COP_UNUSABLE:
1702 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
1703 er = EMULATE_PRIV_FAIL;
1704 break;
1705
1706 case T_TLB_MOD:
1707 break;
1708
1709 case T_TLB_LD_MISS:
1710			/* If we are accessing Guest kernel space, send an address error exception to the guest */
1711 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
1712 printk("%s: LD MISS @ %#lx\n", __func__,
1713 badvaddr);
1714 cause &= ~0xff;
1715 cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
1716 er = EMULATE_PRIV_FAIL;
1717 }
1718 break;
1719
1720 case T_TLB_ST_MISS:
1721			/* If we are accessing Guest kernel space, send an address error exception to the guest */
1722 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
1723 printk("%s: ST MISS @ %#lx\n", __func__,
1724 badvaddr);
1725 cause &= ~0xff;
1726 cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
1727 er = EMULATE_PRIV_FAIL;
1728 }
1729 break;
1730
1731 case T_ADDR_ERR_ST:
1732 printk("%s: address error ST @ %#lx\n", __func__,
1733 badvaddr);
1734 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
1735 cause &= ~0xff;
1736 cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
1737 }
1738 er = EMULATE_PRIV_FAIL;
1739 break;
1740 case T_ADDR_ERR_LD:
1741 printk("%s: address error LD @ %#lx\n", __func__,
1742 badvaddr);
1743 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
1744 cause &= ~0xff;
1745 cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
1746 }
1747 er = EMULATE_PRIV_FAIL;
1748 break;
1749 default:
1750 er = EMULATE_PRIV_FAIL;
1751 break;
1752 }
1753 }
1754
1755 if (er == EMULATE_PRIV_FAIL) {
1756 kvm_mips_emulate_exc(cause, opc, run, vcpu);
1757 }
1758 return er;
1759}
1760
1761/* User Address (UA) fault; this can happen if:
1762 * (1) the TLB entry is not present/valid in either the Guest or the shadow host
1763 *     TLB, in which case we pass the fault on to the guest kernel to handle, or
1764 * (2) the TLB entry is present in the Guest TLB but not in the shadow host TLB,
1765 *     in which case we inject the entry from the Guest TLB into the shadow host TLB
1766 */
1767enum emulation_result
1768kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
1769 struct kvm_run *run, struct kvm_vcpu *vcpu)
1770{
1771 enum emulation_result er = EMULATE_DONE;
1772 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1773 unsigned long va = vcpu->arch.host_cp0_badvaddr;
1774 int index;
1775
1776 kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
1777 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
1778
1779	/* KVM would not have got the exception if this entry was valid in the
1780	 * shadow host TLB. Check the Guest TLB; if the entry is not there, send
1781	 * the guest an exception. The guest exception handler should then inject
1782	 * an entry into the guest TLB.
1783	 */
1784 index = kvm_mips_guest_tlb_lookup(vcpu,
1785 (va & VPN2_MASK) |
1786 ASID_MASK(kvm_read_c0_guest_entryhi
1787 (vcpu->arch.cop0)));
1788 if (index < 0) {
1789 if (exccode == T_TLB_LD_MISS) {
1790 er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
1791 } else if (exccode == T_TLB_ST_MISS) {
1792 er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
1793 } else {
1794 printk("%s: invalid exc code: %d\n", __func__, exccode);
1795 er = EMULATE_FAIL;
1796 }
1797 } else {
1798 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
1799
1800 /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
1801 if (!TLB_IS_VALID(*tlb, va)) {
1802 if (exccode == T_TLB_LD_MISS) {
1803 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
1804 vcpu);
1805 } else if (exccode == T_TLB_ST_MISS) {
1806 er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
1807 vcpu);
1808 } else {
1809 printk("%s: invalid exc code: %d\n", __func__,
1810 exccode);
1811 er = EMULATE_FAIL;
1812 }
1813 } else {
1814#ifdef DEBUG
1815 kvm_debug
1816 ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
1817 tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
1818#endif
1819 /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
1820 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
1821 NULL);
1822 }
1823 }
1824
1825 return er;
1826}
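
The miss handler above first asks kvm_mips_guest_tlb_lookup() whether the guest's own TLB maps (VPN2 | ASID), then tests the valid bit of the matching half to choose between a guest TLB-refill and a TLB-invalid exception. A toy lookup over a small array illustrates the match rule; the types, masks and 4K page size here are assumptions made for the sketch (the real code also honours the global bit and per-entry page mask):

#include <stdio.h>

#define VPN2_MASK_	0xffffe000UL	/* assuming 4K pages */
#define ASID_MASK_	0x000000ffUL
#define PG_V		0x2UL		/* EntryLo valid bit */

struct toy_tlb { unsigned long hi, lo0, lo1; };

/* Return the index whose EntryHi matches (VPN2 | ASID), or -1. */
static int toy_tlb_lookup(const struct toy_tlb *tlb, int n, unsigned long entryhi)
{
	int i;

	for (i = 0; i < n; i++)
		if ((tlb[i].hi & (VPN2_MASK_ | ASID_MASK_)) == entryhi)
			return i;
	return -1;
}

int main(void)
{
	struct toy_tlb tlb[2] = {
		{ .hi = 0x00400000 | 0x05, .lo0 = PG_V, .lo1 = 0    },
		{ .hi = 0x7f3fe000 | 0x05, .lo0 = 0,    .lo1 = PG_V },
	};
	unsigned long va = 0x00400abc, asid = 0x05;
	int idx = toy_tlb_lookup(tlb, 2, (va & VPN2_MASK_) | asid);

	if (idx < 0)
		printf("miss -> deliver TLB refill to guest\n");
	else if (!(((va & 0x1000) ? tlb[idx].lo1 : tlb[idx].lo0) & PG_V))
		printf("hit %d but invalid -> deliver TLB invalid to guest\n", idx);
	else
		printf("hit %d and valid -> fill shadow host TLB\n", idx);
	return 0;
}
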
diff --git a/arch/mips/kvm/kvm_mips_int.c b/arch/mips/kvm/kvm_mips_int.c
new file mode 100644
index 000000000000..1e5de16afe29
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_int.c
@@ -0,0 +1,243 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: Interrupt delivery
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/module.h>
15#include <linux/vmalloc.h>
16#include <linux/fs.h>
17#include <linux/bootmem.h>
18#include <asm/page.h>
19#include <asm/cacheflush.h>
20
21#include <linux/kvm_host.h>
22
23#include "kvm_mips_int.h"
24
25void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
26{
27 set_bit(priority, &vcpu->arch.pending_exceptions);
28}
29
30void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
31{
32 clear_bit(priority, &vcpu->arch.pending_exceptions);
33}
34
35void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
36{
37	/* Set the Cause bits to reflect the pending timer interrupt;
38	 * the EXC code will be set when we are actually
39	 * delivering the interrupt:
40	 */
41 kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
42
43 /* Queue up an INT exception for the core */
44 kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
45
46}
47
48void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
49{
50 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
51 kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
52}
53
54void
55kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
56{
57 int intr = (int)irq->irq;
58
59	/* Set the Cause bits to reflect the pending IO interrupt;
60	 * the EXC code will be set when we are actually
61	 * delivering the interrupt:
62	 */
63 switch (intr) {
64 case 2:
65 kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
66 /* Queue up an INT exception for the core */
67 kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);
68 break;
69
70 case 3:
71 kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
72 kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
73 break;
74
75 case 4:
76 kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
77 kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
78 break;
79
80 default:
81 break;
82 }
83
84}
85
86void
87kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
88 struct kvm_mips_interrupt *irq)
89{
90 int intr = (int)irq->irq;
91 switch (intr) {
92 case -2:
93 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
94 kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
95 break;
96
97 case -3:
98 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
99 kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
100 break;
101
102 case -4:
103 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
104 kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
105 break;
106
107 default:
108 break;
109 }
110
111}
112
113/* Deliver the interrupt of the corresponding priority, if possible. */
114int
115kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
116 uint32_t cause)
117{
118 int allowed = 0;
119 uint32_t exccode;
120
121 struct kvm_vcpu_arch *arch = &vcpu->arch;
122 struct mips_coproc *cop0 = vcpu->arch.cop0;
123
124 switch (priority) {
125 case MIPS_EXC_INT_TIMER:
126 if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
127 && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
128 && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) {
129 allowed = 1;
130 exccode = T_INT;
131 }
132 break;
133
134 case MIPS_EXC_INT_IO:
135 if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
136 && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
137 && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) {
138 allowed = 1;
139 exccode = T_INT;
140 }
141 break;
142
143 case MIPS_EXC_INT_IPI_1:
144 if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
145 && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
146 && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) {
147 allowed = 1;
148 exccode = T_INT;
149 }
150 break;
151
152 case MIPS_EXC_INT_IPI_2:
153 if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
154 && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
155 && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) {
156 allowed = 1;
157 exccode = T_INT;
158 }
159 break;
160
161 default:
162 break;
163 }
164
165 /* Are we allowed to deliver the interrupt ??? */
166 if (allowed) {
167
168 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
169 /* save old pc */
170 kvm_write_c0_guest_epc(cop0, arch->pc);
171 kvm_set_c0_guest_status(cop0, ST0_EXL);
172
173 if (cause & CAUSEF_BD)
174 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
175 else
176 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
177
178 kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);
179
180 } else
181 kvm_err("Trying to deliver interrupt when EXL is already set\n");
182
183 kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE,
184 (exccode << CAUSEB_EXCCODE));
185
186 /* XXXSL Set PC to the interrupt exception entry point */
187 if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV)
188 arch->pc = KVM_GUEST_KSEG0 + 0x200;
189 else
190 arch->pc = KVM_GUEST_KSEG0 + 0x180;
191
192 clear_bit(priority, &vcpu->arch.pending_exceptions);
193 }
194
195 return allowed;
196}
197
198int
199kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
200 uint32_t cause)
201{
202 return 1;
203}
204
205void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause)
206{
207 unsigned long *pending = &vcpu->arch.pending_exceptions;
208 unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
209 unsigned int priority;
210
211 if (!(*pending) && !(*pending_clr))
212 return;
213
214 priority = __ffs(*pending_clr);
215 while (priority <= MIPS_EXC_MAX) {
216 if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
217 if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE)
218 break;
219 }
220
221 priority = find_next_bit(pending_clr,
222 BITS_PER_BYTE * sizeof(*pending_clr),
223 priority + 1);
224 }
225
226 priority = __ffs(*pending);
227 while (priority <= MIPS_EXC_MAX) {
228 if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
229 if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE)
230 break;
231 }
232
233 priority = find_next_bit(pending,
234 BITS_PER_BYTE * sizeof(*pending),
235 priority + 1);
236 }
237
238}
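
kvm_mips_deliver_interrupts() above walks the pending bitmaps from the lowest set bit upward, calling the per-priority callback and, depending on the *_ALL_AT_ONCE settings, stopping after the first successful delivery. The same scan in plain user-space C, with a compiler builtin replacing the kernel's __ffs()/find_next_bit(); the callback here is a placeholder:

#include <stdio.h>

#define MIPS_EXC_MAX 12

static int deliver(unsigned int prio)	/* pretend irq_deliver callback */
{
	printf("delivering priority %u\n", prio);
	return 1;
}

static void deliver_all(unsigned long pending, int all_at_once)
{
	while (pending) {
		unsigned int prio = __builtin_ctzl(pending); /* lowest set bit */

		if (prio > MIPS_EXC_MAX)
			break;
		if (deliver(prio) && !all_at_once)
			break;
		pending &= pending - 1;		/* clear that bit and rescan */
	}
}

int main(void)
{
	/* timer (7) and IO (8) pending; deliver everything */
	deliver_all((1UL << 7) | (1UL << 8), 1);
	return 0;
}
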
239
240int kvm_mips_pending_timer(struct kvm_vcpu *vcpu)
241{
242 return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions);
243}
diff --git a/arch/mips/kvm/kvm_mips_int.h b/arch/mips/kvm/kvm_mips_int.h
new file mode 100644
index 000000000000..20da7d29eede
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_int.h
@@ -0,0 +1,49 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: Interrupts
7* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
8* Authors: Sanjay Lal <sanjayl@kymasys.com>
9*/
10
11/* MIPS exception priorities: exceptions (including interrupts) are queued up
12 * for the guest in the order specified by their priorities
13 */
14
15#define MIPS_EXC_RESET 0
16#define MIPS_EXC_SRESET 1
17#define MIPS_EXC_DEBUG_ST 2
18#define MIPS_EXC_DEBUG 3
19#define MIPS_EXC_DDB 4
20#define MIPS_EXC_NMI 5
21#define MIPS_EXC_MCHK 6
22#define MIPS_EXC_INT_TIMER 7
23#define MIPS_EXC_INT_IO 8
24#define MIPS_EXC_EXECUTE 9
25#define MIPS_EXC_INT_IPI_1 10
26#define MIPS_EXC_INT_IPI_2 11
27#define MIPS_EXC_MAX 12
28/* XXXSL More to follow */
29
30#define C_TI (_ULCAST_(1) << 30)
31
32#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
33#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0)
34
35void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
36void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
37int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
38
39void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
40void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu);
41void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
42 struct kvm_mips_interrupt *irq);
43void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
44 struct kvm_mips_interrupt *irq);
45int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
46 uint32_t cause);
47int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
48 uint32_t cause);
49void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause);
diff --git a/arch/mips/kvm/kvm_mips_opcode.h b/arch/mips/kvm/kvm_mips_opcode.h
new file mode 100644
index 000000000000..86d3b4cc348b
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_opcode.h
@@ -0,0 +1,24 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7* Authors: Sanjay Lal <sanjayl@kymasys.com>
8*/
9
10/*
11 * Define opcode values not defined in <asm/inst.h>
12 */
13
14#ifndef __KVM_MIPS_OPCODE_H__
15#define __KVM_MIPS_OPCODE_H__
16
17/* COP0 Ops */
18#define mfmcz_op 0x0b /* 01011 */
19#define wrpgpr_op 0x0e /* 01110 */
20
21/* COP0 opcodes (only if COP0 and CO=1): */
22#define wait_op 0x20 /* 100000 */
23
24#endif /* __KVM_MIPS_OPCODE_H__ */
diff --git a/arch/mips/kvm/kvm_mips_stats.c b/arch/mips/kvm/kvm_mips_stats.c
new file mode 100644
index 000000000000..075904bcac1b
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_stats.c
@@ -0,0 +1,82 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: COP0 access histogram
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/kvm_host.h>
13
14char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
15 "WAIT",
16 "CACHE",
17 "Signal",
18 "Interrupt",
19 "COP0/1 Unusable",
20 "TLB Mod",
21 "TLB Miss (LD)",
22 "TLB Miss (ST)",
23 "Address Err (ST)",
24 "Address Error (LD)",
25 "System Call",
26 "Reserved Inst",
27 "Break Inst",
28 "D-Cache Flushes",
29};
30
31char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
32 "Index",
33 "Random",
34 "EntryLo0",
35 "EntryLo1",
36 "Context",
37 "PG Mask",
38 "Wired",
39 "HWREna",
40 "BadVAddr",
41 "Count",
42 "EntryHI",
43 "Compare",
44 "Status",
45 "Cause",
46 "EXC PC",
47 "PRID",
48 "Config",
49 "LLAddr",
50 "Watch Lo",
51 "Watch Hi",
52 "X Context",
53 "Reserved",
54 "Impl Dep",
55 "Debug",
56 "DEPC",
57 "PerfCnt",
58 "ErrCtl",
59 "CacheErr",
60 "TagLo",
61 "TagHi",
62 "ErrorEPC",
63 "DESAVE"
64};
65
66int kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
67{
68#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
69 int i, j;
70
71 printk("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
72 for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
73 for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
74 if (vcpu->arch.cop0->stat[i][j])
75 printk("%s[%d]: %lu\n", kvm_cop0_str[i], j,
76 vcpu->arch.cop0->stat[i][j]);
77 }
78 }
79#endif
80
81 return 0;
82}
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
new file mode 100644
index 000000000000..89511a9258d3
--- /dev/null
+++ b/arch/mips/kvm/kvm_tlb.c
@@ -0,0 +1,928 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS TLB handling; this file is part of the Linux host kernel so that
7* TLB handlers run from KSEG0
8*
9* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
10* Authors: Sanjay Lal <sanjayl@kymasys.com>
11*/
12
13#include <linux/init.h>
14#include <linux/sched.h>
15#include <linux/smp.h>
16#include <linux/mm.h>
17#include <linux/delay.h>
18#include <linux/module.h>
19#include <linux/kvm_host.h>
20
21#include <asm/cpu.h>
22#include <asm/bootinfo.h>
23#include <asm/mmu_context.h>
24#include <asm/pgtable.h>
25#include <asm/cacheflush.h>
26
27#undef CONFIG_MIPS_MT
28#include <asm/r4kcache.h>
29#define CONFIG_MIPS_MT
30
31#define KVM_GUEST_PC_TLB 0
32#define KVM_GUEST_SP_TLB 1
33
34#define PRIx64 "llx"
35
36/* Invalidate a TLB entry by loading a unique (otherwise unused) EntryHi value per index */
37#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
38
39atomic_t kvm_mips_instance;
40EXPORT_SYMBOL(kvm_mips_instance);
41
42/* These function pointers are initialized once the KVM module is loaded */
43pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
44EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
45
46void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
47EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
48
49bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
50EXPORT_SYMBOL(kvm_mips_is_error_pfn);
51
52uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
53{
54 return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]);
55}
56
57
58uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
59{
60 return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]);
61}
62
63inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu)
64{
65 return vcpu->kvm->arch.commpage_tlb;
66}
67
68
69/*
70 * Structure defining a TLB entry data set.
71 */
72
73void kvm_mips_dump_host_tlbs(void)
74{
75 unsigned long old_entryhi;
76 unsigned long old_pagemask;
77 struct kvm_mips_tlb tlb;
78 unsigned long flags;
79 int i;
80
81 local_irq_save(flags);
82
83 old_entryhi = read_c0_entryhi();
84 old_pagemask = read_c0_pagemask();
85
86 printk("HOST TLBs:\n");
87 printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi()));
88
89 for (i = 0; i < current_cpu_data.tlbsize; i++) {
90 write_c0_index(i);
91 mtc0_tlbw_hazard();
92
93 tlb_read();
94 tlbw_use_hazard();
95
96 tlb.tlb_hi = read_c0_entryhi();
97 tlb.tlb_lo0 = read_c0_entrylo0();
98 tlb.tlb_lo1 = read_c0_entrylo1();
99 tlb.tlb_mask = read_c0_pagemask();
100
101 printk("TLB%c%3d Hi 0x%08lx ",
102 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
103 i, tlb.tlb_hi);
104 printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
105 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
106 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
107 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
108 (tlb.tlb_lo0 >> 3) & 7);
109 printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
110 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
111 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
112 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
113 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
114 }
115 write_c0_entryhi(old_entryhi);
116 write_c0_pagemask(old_pagemask);
117 mtc0_tlbw_hazard();
118 local_irq_restore(flags);
119}
120
121void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
122{
123 struct mips_coproc *cop0 = vcpu->arch.cop0;
124 struct kvm_mips_tlb tlb;
125 int i;
126
127 printk("Guest TLBs:\n");
128 printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
129
130 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
131 tlb = vcpu->arch.guest_tlb[i];
132 printk("TLB%c%3d Hi 0x%08lx ",
133 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
134 i, tlb.tlb_hi);
135 printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
136 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
137 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
138 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
139 (tlb.tlb_lo0 >> 3) & 7);
140 printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
141 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
142 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
143 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
144 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
145 }
146}
147
148void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
149{
150 int i;
151 volatile struct kvm_mips_tlb tlb;
152
153 printk("Shadow TLBs:\n");
154 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
155 tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i];
156 printk("TLB%c%3d Hi 0x%08lx ",
157 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
158 i, tlb.tlb_hi);
159 printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
160 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
161 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
162 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
163 (tlb.tlb_lo0 >> 3) & 7);
164 printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
165 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
166 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
167 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
168 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
169 }
170}
171
172static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
173{
174 pfn_t pfn;
175
176 if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
177 return;
178
179 pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
180
181 if (kvm_mips_is_error_pfn(pfn)) {
182 panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
183 }
184
185 kvm->arch.guest_pmap[gfn] = pfn;
186 return;
187}
188
189/* Translate guest KSEG0 addresses to Host PA */
190unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
191 unsigned long gva)
192{
193 gfn_t gfn;
194 uint32_t offset = gva & ~PAGE_MASK;
195 struct kvm *kvm = vcpu->kvm;
196
197 if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
198 kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
199 __builtin_return_address(0), gva);
200 return KVM_INVALID_PAGE;
201 }
202
203 gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
204
205 if (gfn >= kvm->arch.guest_pmap_npages) {
206 kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
207 gva);
208 return KVM_INVALID_PAGE;
209 }
210 kvm_mips_map_page(vcpu->kvm, gfn);
211 return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
212}
213
214/* XXXKYMA: Must be called with interrupts disabled */
215/* set flush_dcache_mask == 0 if no dcache flush required */
216int
217kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
218 unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask)
219{
220 unsigned long flags;
221 unsigned long old_entryhi;
222 volatile int idx;
223
224 local_irq_save(flags);
225
226
227 old_entryhi = read_c0_entryhi();
228 write_c0_entryhi(entryhi);
229 mtc0_tlbw_hazard();
230
231 tlb_probe();
232 tlb_probe_hazard();
233 idx = read_c0_index();
234
235 if (idx > current_cpu_data.tlbsize) {
236 kvm_err("%s: Invalid Index: %d\n", __func__, idx);
237 kvm_mips_dump_host_tlbs();
238 return -1;
239 }
240
241 if (idx < 0) {
242 idx = read_c0_random() % current_cpu_data.tlbsize;
243 write_c0_index(idx);
244 mtc0_tlbw_hazard();
245 }
246 write_c0_entrylo0(entrylo0);
247 write_c0_entrylo1(entrylo1);
248 mtc0_tlbw_hazard();
249
250 tlb_write_indexed();
251 tlbw_use_hazard();
252
253#ifdef DEBUG
254 if (debug) {
255 kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] "
256 "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
257 vcpu->arch.pc, idx, read_c0_entryhi(),
258 read_c0_entrylo0(), read_c0_entrylo1());
259 }
260#endif
261
262 /* Flush D-cache */
263 if (flush_dcache_mask) {
264 if (entrylo0 & MIPS3_PG_V) {
265 ++vcpu->stat.flush_dcache_exits;
266 flush_data_cache_page((entryhi & VPN2_MASK) & ~flush_dcache_mask);
267 }
268 if (entrylo1 & MIPS3_PG_V) {
269 ++vcpu->stat.flush_dcache_exits;
270 flush_data_cache_page(((entryhi & VPN2_MASK) & ~flush_dcache_mask) |
271 (0x1 << PAGE_SHIFT));
272 }
273 }
274
275 /* Restore old ASID */
276 write_c0_entryhi(old_entryhi);
277 mtc0_tlbw_hazard();
278 tlbw_use_hazard();
279 local_irq_restore(flags);
280 return 0;
281}
282
283
284/* XXXKYMA: Must be called with interrupts disabled */
285int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
286 struct kvm_vcpu *vcpu)
287{
288 gfn_t gfn;
289 pfn_t pfn0, pfn1;
290 unsigned long vaddr = 0;
291 unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
292 int even;
293 struct kvm *kvm = vcpu->kvm;
294 const int flush_dcache_mask = 0;
295
296
297 if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
298 kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
299 kvm_mips_dump_host_tlbs();
300 return -1;
301 }
302
303 gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
304 if (gfn >= kvm->arch.guest_pmap_npages) {
305 kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
306 gfn, badvaddr);
307 kvm_mips_dump_host_tlbs();
308 return -1;
309 }
310 even = !(gfn & 0x1);
311 vaddr = badvaddr & (PAGE_MASK << 1);
312
313 kvm_mips_map_page(vcpu->kvm, gfn);
314 kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1);
315
316 if (even) {
317 pfn0 = kvm->arch.guest_pmap[gfn];
318 pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
319 } else {
320 pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
321 pfn1 = kvm->arch.guest_pmap[gfn];
322 }
323
324 entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
325 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
326 (0x1 << 1);
327 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
328 (0x1 << 1);
329
330 return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
331 flush_dcache_mask);
332}
333
334int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
335 struct kvm_vcpu *vcpu)
336{
337 pfn_t pfn0, pfn1;
338 unsigned long flags, old_entryhi = 0, vaddr = 0;
339 unsigned long entrylo0 = 0, entrylo1 = 0;
340
341
342 pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
343 pfn1 = 0;
344 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
345 (0x1 << 1);
346 entrylo1 = 0;
347
348 local_irq_save(flags);
349
350 old_entryhi = read_c0_entryhi();
351 vaddr = badvaddr & (PAGE_MASK << 1);
352 write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
353 mtc0_tlbw_hazard();
354 write_c0_entrylo0(entrylo0);
355 mtc0_tlbw_hazard();
356 write_c0_entrylo1(entrylo1);
357 mtc0_tlbw_hazard();
358 write_c0_index(kvm_mips_get_commpage_asid(vcpu));
359 mtc0_tlbw_hazard();
360 tlb_write_indexed();
361 mtc0_tlbw_hazard();
362 tlbw_use_hazard();
363
364#ifdef DEBUG
365 kvm_debug ("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
366 vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
367 read_c0_entrylo0(), read_c0_entrylo1());
368#endif
369
370 /* Restore old ASID */
371 write_c0_entryhi(old_entryhi);
372 mtc0_tlbw_hazard();
373 tlbw_use_hazard();
374 local_irq_restore(flags);
375
376 return 0;
377}
378
379int
380kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
381 struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1)
382{
383 unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
384 struct kvm *kvm = vcpu->kvm;
385 pfn_t pfn0, pfn1;
386
387
388 if ((tlb->tlb_hi & VPN2_MASK) == 0) {
389 pfn0 = 0;
390 pfn1 = 0;
391 } else {
392 kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT);
393 kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT);
394
395 pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
396 pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
397 }
398
399 if (hpa0)
400 *hpa0 = pfn0 << PAGE_SHIFT;
401
402 if (hpa1)
403 *hpa1 = pfn1 << PAGE_SHIFT;
404
405 /* Get attributes from the Guest TLB */
406 entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
407 kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu));
408 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
409 (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
410 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
411 (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
412
413#ifdef DEBUG
414 kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
415 tlb->tlb_lo0, tlb->tlb_lo1);
416#endif
417
418 return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
419 tlb->tlb_mask);
420}
421
422int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
423{
424 int i;
425 int index = -1;
426 struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
427
428
429 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
430 if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
431 (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) {
432 index = i;
433 break;
434 }
435 }
436
437#ifdef DEBUG
438 kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
439 __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);
440#endif
441
442 return index;
443}
444
445int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
446{
447 unsigned long old_entryhi, flags;
448 volatile int idx;
449
450
451 local_irq_save(flags);
452
453 old_entryhi = read_c0_entryhi();
454
455 if (KVM_GUEST_KERNEL_MODE(vcpu))
456 write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu));
457 else {
458 write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
459 }
460
461 mtc0_tlbw_hazard();
462
463 tlb_probe();
464 tlb_probe_hazard();
465 idx = read_c0_index();
466
467 /* Restore old ASID */
468 write_c0_entryhi(old_entryhi);
469 mtc0_tlbw_hazard();
470 tlbw_use_hazard();
471
472 local_irq_restore(flags);
473
474#ifdef DEBUG
475 kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
476#endif
477
478 return idx;
479}
480
481int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
482{
483 int idx;
484 unsigned long flags, old_entryhi;
485
486 local_irq_save(flags);
487
488
489 old_entryhi = read_c0_entryhi();
490
491 write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
492 mtc0_tlbw_hazard();
493
494 tlb_probe();
495 tlb_probe_hazard();
496 idx = read_c0_index();
497
498 if (idx >= current_cpu_data.tlbsize)
499 BUG();
500
501 if (idx > 0) {
502 write_c0_entryhi(UNIQUE_ENTRYHI(idx));
503 mtc0_tlbw_hazard();
504
505 write_c0_entrylo0(0);
506 mtc0_tlbw_hazard();
507
508 write_c0_entrylo1(0);
509 mtc0_tlbw_hazard();
510
511 tlb_write_indexed();
512 mtc0_tlbw_hazard();
513 }
514
515 write_c0_entryhi(old_entryhi);
516 mtc0_tlbw_hazard();
517 tlbw_use_hazard();
518
519 local_irq_restore(flags);
520
521#ifdef DEBUG
522 if (idx > 0) {
523 kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
524 (va & VPN2_MASK) | (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK), idx);
525 }
526#endif
527
528 return 0;
529}
530
531/* XXXKYMA: Fix: guest USER/KERNEL no longer share the same ASID */
532int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
533{
534 unsigned long flags, old_entryhi;
535
536 if (index >= current_cpu_data.tlbsize)
537 BUG();
538
539 local_irq_save(flags);
540
541
542 old_entryhi = read_c0_entryhi();
543
544 write_c0_entryhi(UNIQUE_ENTRYHI(index));
545 mtc0_tlbw_hazard();
546
547 write_c0_index(index);
548 mtc0_tlbw_hazard();
549
550 write_c0_entrylo0(0);
551 mtc0_tlbw_hazard();
552
553 write_c0_entrylo1(0);
554 mtc0_tlbw_hazard();
555
556 tlb_write_indexed();
557 mtc0_tlbw_hazard();
558 tlbw_use_hazard();
559
560 write_c0_entryhi(old_entryhi);
561 mtc0_tlbw_hazard();
562 tlbw_use_hazard();
563
564 local_irq_restore(flags);
565
566 return 0;
567}
568
569void kvm_mips_flush_host_tlb(int skip_kseg0)
570{
571 unsigned long flags;
572 unsigned long old_entryhi, entryhi;
573 unsigned long old_pagemask;
574 int entry = 0;
575 int maxentry = current_cpu_data.tlbsize;
576
577
578 local_irq_save(flags);
579
580 old_entryhi = read_c0_entryhi();
581 old_pagemask = read_c0_pagemask();
582
583 /* Blast 'em all away. */
584 for (entry = 0; entry < maxentry; entry++) {
585
586 write_c0_index(entry);
587 mtc0_tlbw_hazard();
588
589 if (skip_kseg0) {
590 tlb_read();
591 tlbw_use_hazard();
592
593 entryhi = read_c0_entryhi();
594
595 /* Don't blow away guest kernel entries */
596 if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) {
597 continue;
598 }
599 }
600
601 /* Make sure all entries differ. */
602 write_c0_entryhi(UNIQUE_ENTRYHI(entry));
603 mtc0_tlbw_hazard();
604 write_c0_entrylo0(0);
605 mtc0_tlbw_hazard();
606 write_c0_entrylo1(0);
607 mtc0_tlbw_hazard();
608
609 tlb_write_indexed();
610 mtc0_tlbw_hazard();
611 }
612
613 tlbw_use_hazard();
614
615 write_c0_entryhi(old_entryhi);
616 write_c0_pagemask(old_pagemask);
617 mtc0_tlbw_hazard();
618 tlbw_use_hazard();
619
620 local_irq_restore(flags);
621}
622
623void
624kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
625 struct kvm_vcpu *vcpu)
626{
627 unsigned long asid = asid_cache(cpu);
628
629 if (!(ASID_MASK(ASID_INC(asid)))) {
630 if (cpu_has_vtag_icache) {
631 flush_icache_all();
632 }
633
634 kvm_local_flush_tlb_all(); /* start new asid cycle */
635
636 if (!asid) /* fix version if needed */
637 asid = ASID_FIRST_VERSION;
638 }
639
640 cpu_context(cpu, mm) = asid_cache(cpu) = asid;
641}
642
643void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu)
644{
645 unsigned long flags;
646 unsigned long old_entryhi;
647 unsigned long old_pagemask;
648 int entry = 0;
649 int cpu = smp_processor_id();
650
651 local_irq_save(flags);
652
653 old_entryhi = read_c0_entryhi();
654 old_pagemask = read_c0_pagemask();
655
656 for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
657 write_c0_index(entry);
658 mtc0_tlbw_hazard();
659 tlb_read();
660 tlbw_use_hazard();
661
662 vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi();
663 vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0();
664 vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1();
665 vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask();
666 }
667
668 write_c0_entryhi(old_entryhi);
669 write_c0_pagemask(old_pagemask);
670 mtc0_tlbw_hazard();
671
672 local_irq_restore(flags);
673
674}
675
676void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu)
677{
678 unsigned long flags;
679 unsigned long old_ctx;
680 int entry;
681 int cpu = smp_processor_id();
682
683 local_irq_save(flags);
684
685 old_ctx = read_c0_entryhi();
686
687 for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
688 write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi);
689 mtc0_tlbw_hazard();
690 write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0);
691 write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
692
693 write_c0_index(entry);
694 mtc0_tlbw_hazard();
695
696 tlb_write_indexed();
697 tlbw_use_hazard();
698 }
699
700 tlbw_use_hazard();
701 write_c0_entryhi(old_ctx);
702 mtc0_tlbw_hazard();
703 local_irq_restore(flags);
704}
705
706
707void kvm_local_flush_tlb_all(void)
708{
709 unsigned long flags;
710 unsigned long old_ctx;
711 int entry = 0;
712
713 local_irq_save(flags);
714 /* Save old context and create impossible VPN2 value */
715 old_ctx = read_c0_entryhi();
716 write_c0_entrylo0(0);
717 write_c0_entrylo1(0);
718
719 /* Blast 'em all away. */
720 while (entry < current_cpu_data.tlbsize) {
721 /* Make sure all entries differ. */
722 write_c0_entryhi(UNIQUE_ENTRYHI(entry));
723 write_c0_index(entry);
724 mtc0_tlbw_hazard();
725 tlb_write_indexed();
726 entry++;
727 }
728 tlbw_use_hazard();
729 write_c0_entryhi(old_ctx);
730 mtc0_tlbw_hazard();
731
732 local_irq_restore(flags);
733}
734
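The flush above relies on UNIQUE_ENTRYHI() giving each index its own, never-matching VPN2. A rough standalone sketch of that idea (not the kernel's asm/tlb.h definition; 4 KB pages and the usual CKSEG0 base are assumptions here):

#include <stdio.h>

#define PAGE_SHIFT 12
#define CKSEG0 0x80000000UL
/* hypothetical stand-in for the kernel macro: one distinct VPN2 per index */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((unsigned long)(idx) << (PAGE_SHIFT + 1)))

int main(void)
{
	int idx;

	for (idx = 0; idx < 4; idx++)
		printf("entry %d -> EntryHi %#lx\n", idx, UNIQUE_ENTRYHI(idx));
	return 0;
}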
735void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu)
736{
737 int cpu, entry;
738
739 for_each_possible_cpu(cpu) {
740 for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
741 vcpu->arch.shadow_tlb[cpu][entry].tlb_hi =
742 UNIQUE_ENTRYHI(entry);
743 vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0;
744 vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0;
745 vcpu->arch.shadow_tlb[cpu][entry].tlb_mask =
746 read_c0_pagemask();
747#ifdef DEBUG
748 kvm_debug
749 ("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n",
750 cpu, entry,
751 vcpu->arch.shadow_tlb[cpu][entry].tlb_hi,
752 vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0,
753 vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
754#endif
755 }
756 }
757}
758
759/* Restore ASID once we are scheduled back after preemption */
760void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
761{
762 unsigned long flags;
763 int newasid = 0;
764
765#ifdef DEBUG
766 kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
767#endif
768
769 /* Allocate new kernel and user ASIDs if needed */
770
771 local_irq_save(flags);
772
773 if (((vcpu->arch.
774 guest_kernel_asid[cpu] ^ asid_cache(cpu)) & ASID_VERSION_MASK)) {
775 kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
776 vcpu->arch.guest_kernel_asid[cpu] =
777 vcpu->arch.guest_kernel_mm.context.asid[cpu];
778 kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
779 vcpu->arch.guest_user_asid[cpu] =
780 vcpu->arch.guest_user_mm.context.asid[cpu];
781 newasid++;
782
783 kvm_info("[%d]: cpu_context: %#lx\n", cpu,
784 cpu_context(cpu, current->mm));
785 kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
786 cpu, vcpu->arch.guest_kernel_asid[cpu]);
787 kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
788 vcpu->arch.guest_user_asid[cpu]);
789 }
790
791 if (vcpu->arch.last_sched_cpu != cpu) {
792 kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
793 vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
794 }
795
796 /* Only reload shadow host TLB if new ASIDs haven't been allocated */
797#if 0
798 if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) {
799 kvm_mips_flush_host_tlb(0);
800 kvm_shadow_tlb_load(vcpu);
801 }
802#endif
803
804 if (!newasid) {
805 /* If we preempted while the guest was executing, then reload the pre-empted ASID */
806 if (current->flags & PF_VCPU) {
807 write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi));
808 ehb();
809 }
810 } else {
811 /* New ASIDs were allocated for the VM */
812
813 /* Were we in guest context? If so, the pre-empted ASID is no longer
814 * valid; set it to what it should be based on the mode of
815 * the guest (kernel/user).
816 */
817 if (current->flags & PF_VCPU) {
818 if (KVM_GUEST_KERNEL_MODE(vcpu))
819 write_c0_entryhi(ASID_MASK(vcpu->arch.
820 guest_kernel_asid[cpu]));
821 else
822 write_c0_entryhi(ASID_MASK(vcpu->arch.
823 guest_user_asid[cpu]));
824 ehb();
825 }
826 }
827
828 local_irq_restore(flags);
829
830}
831
832/* ASID can change if another task is scheduled during preemption */
833void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
834{
835 unsigned long flags;
836 uint32_t cpu;
837
838 local_irq_save(flags);
839
840 cpu = smp_processor_id();
841
842
843 vcpu->arch.preempt_entryhi = read_c0_entryhi();
844 vcpu->arch.last_sched_cpu = cpu;
845
846#if 0
847 if ((atomic_read(&kvm_mips_instance) > 1)) {
848 kvm_shadow_tlb_put(vcpu);
849 }
850#endif
851
852 if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
853 ASID_VERSION_MASK)) {
854 kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
855 cpu_context(cpu, current->mm));
856 drop_mmu_context(current->mm, cpu);
857 }
858 write_c0_entryhi(cpu_asid(cpu, current->mm));
859 ehb();
860
861 local_irq_restore(flags);
862}
863
864uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
865{
866 struct mips_coproc *cop0 = vcpu->arch.cop0;
867 unsigned long paddr, flags;
868 uint32_t inst;
869 int index;
870
871 if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
872 KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
873 local_irq_save(flags);
874 index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
875 if (index >= 0) {
876 inst = *(opc);
877 } else {
878 index =
879 kvm_mips_guest_tlb_lookup(vcpu,
880 ((unsigned long) opc & VPN2_MASK)
881 |
882 ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
883 if (index < 0) {
884 kvm_err
885 ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
886 __func__, opc, vcpu, read_c0_entryhi());
887 kvm_mips_dump_host_tlbs();
888 local_irq_restore(flags);
889 return KVM_INVALID_INST;
890 }
891 kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
892 &vcpu->arch.
893 guest_tlb[index],
894 NULL, NULL);
895 inst = *(opc);
896 }
897 local_irq_restore(flags);
898 } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
899 paddr =
900 kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
901 (unsigned long) opc);
902 inst = *(uint32_t *) CKSEG0ADDR(paddr);
903 } else {
904 kvm_err("%s: illegal address: %p\n", __func__, opc);
905 return KVM_INVALID_INST;
906 }
907
908 return inst;
909}
910
911EXPORT_SYMBOL(kvm_local_flush_tlb_all);
912EXPORT_SYMBOL(kvm_shadow_tlb_put);
913EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
914EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
915EXPORT_SYMBOL(kvm_mips_init_shadow_tlb);
916EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
917EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
918EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
919EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
920EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
921EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
922EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
923EXPORT_SYMBOL(kvm_shadow_tlb_load);
924EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs);
925EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
926EXPORT_SYMBOL(kvm_get_inst);
927EXPORT_SYMBOL(kvm_arch_vcpu_load);
928EXPORT_SYMBOL(kvm_arch_vcpu_put);
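For reference, the KSEG0 translation in kvm_mips_translate_guest_kseg0_to_hpa() boils down to simple page arithmetic. A minimal standalone sketch, assuming the guest's KSEG0 is based at 0x40000000 and 4 KB pages (both assumptions for illustration, not taken from this file), with a made-up pfn standing in for guest_pmap[gfn]:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define GUEST_KSEG0 0x40000000UL	/* assumed guest KSEG0 base */

int main(void)
{
	unsigned long gva = 0x40123abcUL;		/* guest KSEG0 address */
	unsigned long gpa = gva - GUEST_KSEG0;		/* strip the segment bits */
	unsigned long gfn = gpa >> PAGE_SHIFT;		/* index into guest_pmap[] */
	unsigned long pfn = 0x5678;			/* pretend guest_pmap[gfn] */
	unsigned long hpa = (pfn << PAGE_SHIFT) + (gva & ~PAGE_MASK);

	printf("gva %#lx -> gfn %#lx -> hpa %#lx\n", gva, gfn, hpa);
	return 0;
}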
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c
new file mode 100644
index 000000000000..466aeef044bd
--- /dev/null
+++ b/arch/mips/kvm/kvm_trap_emul.c
@@ -0,0 +1,482 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/module.h>
15#include <linux/vmalloc.h>
16
17#include <linux/kvm_host.h>
18
19#include "kvm_mips_opcode.h"
20#include "kvm_mips_int.h"
21
22static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
23{
24 gpa_t gpa;
25 uint32_t kseg = KSEGX(gva);
26
27 if ((kseg == CKSEG0) || (kseg == CKSEG1))
28 gpa = CPHYSADDR(gva);
29 else {
30 printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
31 kvm_mips_dump_host_tlbs();
32 gpa = KVM_INVALID_ADDR;
33 }
34
35#ifdef DEBUG
36 kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
37#endif
38
39 return gpa;
40}
41
42
43static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
44{
45 struct kvm_run *run = vcpu->run;
46 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
47 unsigned long cause = vcpu->arch.host_cp0_cause;
48 enum emulation_result er = EMULATE_DONE;
49 int ret = RESUME_GUEST;
50
51 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
52 er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
53 } else
54 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
55
56 switch (er) {
57 case EMULATE_DONE:
58 ret = RESUME_GUEST;
59 break;
60
61 case EMULATE_FAIL:
62 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
63 ret = RESUME_HOST;
64 break;
65
66 case EMULATE_WAIT:
67 run->exit_reason = KVM_EXIT_INTR;
68 ret = RESUME_HOST;
69 break;
70
71 default:
72 BUG();
73 }
74 return ret;
75}
76
77static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
78{
79 struct kvm_run *run = vcpu->run;
80 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
81 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
82 unsigned long cause = vcpu->arch.host_cp0_cause;
83 enum emulation_result er = EMULATE_DONE;
84 int ret = RESUME_GUEST;
85
86 if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
87 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
88#ifdef DEBUG
89 kvm_debug
90 ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
91 cause, opc, badvaddr);
92#endif
93 er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
94
95 if (er == EMULATE_DONE)
96 ret = RESUME_GUEST;
97 else {
98 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
99 ret = RESUME_HOST;
100 }
101 } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
102 /* XXXKYMA: The guest kernel does not expect to get this fault when we are not
103 * using HIGHMEM. Need to address this in a HIGHMEM kernel
104 */
105 printk
106 ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
107 cause, opc, badvaddr);
108 kvm_mips_dump_host_tlbs();
109 kvm_arch_vcpu_dump_regs(vcpu);
110 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
111 ret = RESUME_HOST;
112 } else {
113 printk
114 ("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
115 cause, opc, badvaddr);
116 kvm_mips_dump_host_tlbs();
117 kvm_arch_vcpu_dump_regs(vcpu);
118 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
119 ret = RESUME_HOST;
120 }
121 return ret;
122}
123
124static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
125{
126 struct kvm_run *run = vcpu->run;
127 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
128 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
129 unsigned long cause = vcpu->arch.host_cp0_cause;
130 enum emulation_result er = EMULATE_DONE;
131 int ret = RESUME_GUEST;
132
133 if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
134 && KVM_GUEST_KERNEL_MODE(vcpu)) {
135 if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
136 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
137 ret = RESUME_HOST;
138 }
139 } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
140 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
141#ifdef DEBUG
142 kvm_debug
143 ("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
144 cause, opc, badvaddr);
145#endif
146 er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
147 if (er == EMULATE_DONE)
148 ret = RESUME_GUEST;
149 else {
150 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
151 ret = RESUME_HOST;
152 }
153 } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
154 /* All KSEG0 faults are handled by KVM, as the guest kernel does not
155 * expect to ever get them
156 */
157 if (kvm_mips_handle_kseg0_tlb_fault
158 (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
159 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
160 ret = RESUME_HOST;
161 }
162 } else {
163 kvm_err
164 ("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
165 cause, opc, badvaddr);
166 kvm_mips_dump_host_tlbs();
167 kvm_arch_vcpu_dump_regs(vcpu);
168 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
169 ret = RESUME_HOST;
170 }
171 return ret;
172}
173
174static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
175{
176 struct kvm_run *run = vcpu->run;
177 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
178 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
179 unsigned long cause = vcpu->arch.host_cp0_cause;
180 enum emulation_result er = EMULATE_DONE;
181 int ret = RESUME_GUEST;
182
183 if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
184 && KVM_GUEST_KERNEL_MODE(vcpu)) {
185 if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
186 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
187 ret = RESUME_HOST;
188 }
189 } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
190 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
191#ifdef DEBUG
192 kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
193 vcpu->arch.pc, badvaddr);
194#endif
195
196 /* User Address (UA) fault; this could happen if:
197 * (1) the TLB entry is not present/valid in both the Guest and shadow host TLBs, in
198 * which case we pass the fault on to the guest kernel and let it handle it;
199 * (2) the TLB entry is present in the Guest TLB but not in the shadow host TLB, in
200 * which case we inject the entry from the Guest TLB into the shadow host TLB.
201 */
202
203 er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
204 if (er == EMULATE_DONE)
205 ret = RESUME_GUEST;
206 else {
207 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
208 ret = RESUME_HOST;
209 }
210 } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
211 if (kvm_mips_handle_kseg0_tlb_fault
212 (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
213 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
214 ret = RESUME_HOST;
215 }
216 } else {
217 printk
218 ("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
219 cause, opc, badvaddr);
220 kvm_mips_dump_host_tlbs();
221 kvm_arch_vcpu_dump_regs(vcpu);
222 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
223 ret = RESUME_HOST;
224 }
225 return ret;
226}
227
228static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
229{
230 struct kvm_run *run = vcpu->run;
231 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
232 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
233 unsigned long cause = vcpu->arch.host_cp0_cause;
234 enum emulation_result er = EMULATE_DONE;
235 int ret = RESUME_GUEST;
236
237 if (KVM_GUEST_KERNEL_MODE(vcpu)
238 && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
239#ifdef DEBUG
240 kvm_debug("Emulate Store to MMIO space\n");
241#endif
242 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
243 if (er == EMULATE_FAIL) {
244 printk("Emulate Store to MMIO space failed\n");
245 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
246 ret = RESUME_HOST;
247 } else {
248 run->exit_reason = KVM_EXIT_MMIO;
249 ret = RESUME_HOST;
250 }
251 } else {
252 printk
253 ("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
254 cause, opc, badvaddr);
255 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
256 ret = RESUME_HOST;
257 }
258 return ret;
259}
260
261static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
262{
263 struct kvm_run *run = vcpu->run;
264 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
265 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
266 unsigned long cause = vcpu->arch.host_cp0_cause;
267 enum emulation_result er = EMULATE_DONE;
268 int ret = RESUME_GUEST;
269
270 if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
271#ifdef DEBUG
272 kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
273#endif
274 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
275 if (er == EMULATE_FAIL) {
276 printk("Emulate Load from MMIO space failed\n");
277 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
278 ret = RESUME_HOST;
279 } else {
280 run->exit_reason = KVM_EXIT_MMIO;
281 ret = RESUME_HOST;
282 }
283 } else {
284 printk
285 ("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
286 cause, opc, badvaddr);
287 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
288 ret = RESUME_HOST;
289 er = EMULATE_FAIL;
290 }
291 return ret;
292}
293
294static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
295{
296 struct kvm_run *run = vcpu->run;
297 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
298 unsigned long cause = vcpu->arch.host_cp0_cause;
299 enum emulation_result er = EMULATE_DONE;
300 int ret = RESUME_GUEST;
301
302 er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
303 if (er == EMULATE_DONE)
304 ret = RESUME_GUEST;
305 else {
306 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
307 ret = RESUME_HOST;
308 }
309 return ret;
310}
311
312static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
313{
314 struct kvm_run *run = vcpu->run;
315 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
316 unsigned long cause = vcpu->arch.host_cp0_cause;
317 enum emulation_result er = EMULATE_DONE;
318 int ret = RESUME_GUEST;
319
320 er = kvm_mips_handle_ri(cause, opc, run, vcpu);
321 if (er == EMULATE_DONE)
322 ret = RESUME_GUEST;
323 else {
324 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
325 ret = RESUME_HOST;
326 }
327 return ret;
328}
329
330static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
331{
332 struct kvm_run *run = vcpu->run;
333 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
334 unsigned long cause = vcpu->arch.host_cp0_cause;
335 enum emulation_result er = EMULATE_DONE;
336 int ret = RESUME_GUEST;
337
338 er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
339 if (er == EMULATE_DONE)
340 ret = RESUME_GUEST;
341 else {
342 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
343 ret = RESUME_HOST;
344 }
345 return ret;
346}
347
348static int
349kvm_trap_emul_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
350{
351 struct mips_coproc *cop0 = vcpu->arch.cop0;
352
353 kvm_write_c0_guest_index(cop0, regs->cp0reg[MIPS_CP0_TLB_INDEX][0]);
354 kvm_write_c0_guest_context(cop0, regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0]);
355 kvm_write_c0_guest_badvaddr(cop0, regs->cp0reg[MIPS_CP0_BAD_VADDR][0]);
356 kvm_write_c0_guest_entryhi(cop0, regs->cp0reg[MIPS_CP0_TLB_HI][0]);
357 kvm_write_c0_guest_epc(cop0, regs->cp0reg[MIPS_CP0_EXC_PC][0]);
358
359 kvm_write_c0_guest_status(cop0, regs->cp0reg[MIPS_CP0_STATUS][0]);
360 kvm_write_c0_guest_cause(cop0, regs->cp0reg[MIPS_CP0_CAUSE][0]);
361 kvm_write_c0_guest_pagemask(cop0,
362 regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0]);
363 kvm_write_c0_guest_wired(cop0, regs->cp0reg[MIPS_CP0_TLB_WIRED][0]);
364 kvm_write_c0_guest_errorepc(cop0, regs->cp0reg[MIPS_CP0_ERROR_PC][0]);
365
366 return 0;
367}
368
369static int
370kvm_trap_emul_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
371{
372 struct mips_coproc *cop0 = vcpu->arch.cop0;
373
374 regs->cp0reg[MIPS_CP0_TLB_INDEX][0] = kvm_read_c0_guest_index(cop0);
375 regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0] = kvm_read_c0_guest_context(cop0);
376 regs->cp0reg[MIPS_CP0_BAD_VADDR][0] = kvm_read_c0_guest_badvaddr(cop0);
377 regs->cp0reg[MIPS_CP0_TLB_HI][0] = kvm_read_c0_guest_entryhi(cop0);
378 regs->cp0reg[MIPS_CP0_EXC_PC][0] = kvm_read_c0_guest_epc(cop0);
379
380 regs->cp0reg[MIPS_CP0_STATUS][0] = kvm_read_c0_guest_status(cop0);
381 regs->cp0reg[MIPS_CP0_CAUSE][0] = kvm_read_c0_guest_cause(cop0);
382 regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0] =
383 kvm_read_c0_guest_pagemask(cop0);
384 regs->cp0reg[MIPS_CP0_TLB_WIRED][0] = kvm_read_c0_guest_wired(cop0);
385 regs->cp0reg[MIPS_CP0_ERROR_PC][0] = kvm_read_c0_guest_errorepc(cop0);
386
387 regs->cp0reg[MIPS_CP0_CONFIG][0] = kvm_read_c0_guest_config(cop0);
388 regs->cp0reg[MIPS_CP0_CONFIG][1] = kvm_read_c0_guest_config1(cop0);
389 regs->cp0reg[MIPS_CP0_CONFIG][2] = kvm_read_c0_guest_config2(cop0);
390 regs->cp0reg[MIPS_CP0_CONFIG][3] = kvm_read_c0_guest_config3(cop0);
391 regs->cp0reg[MIPS_CP0_CONFIG][7] = kvm_read_c0_guest_config7(cop0);
392
393 return 0;
394}
395
396static int kvm_trap_emul_vm_init(struct kvm *kvm)
397{
398 return 0;
399}
400
401static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
402{
403 return 0;
404}
405
406static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
407{
408 struct mips_coproc *cop0 = vcpu->arch.cop0;
409 uint32_t config1;
410 int vcpu_id = vcpu->vcpu_id;
411
412 /* Arch specific stuff: set up the config registers so that the
413 * guest comes up as expected. For now we simulate a
414 * MIPS 24Kc.
415 */
416 kvm_write_c0_guest_prid(cop0, 0x00019300);
417 kvm_write_c0_guest_config(cop0,
418 MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
419 (MMU_TYPE_R4000 << CP0C0_MT));
420
421 /* Read the cache characteristics from the host Config1 Register */
422 config1 = (read_c0_config1() & ~0x7f);
423
424 /* Set up MMU size */
425 config1 &= ~(0x3f << 25);
426 config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
427
428 /* We unset some bits that we aren't emulating */
429 config1 &=
430 ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
431 (1 << CP0C1_WR) | (1 << CP0C1_CA));
432 kvm_write_c0_guest_config1(cop0, config1);
433
434 kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
435 /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
436 kvm_write_c0_guest_config3(cop0,
437 MIPS_CONFIG3 | (0 << CP0C3_VInt) | (1 <<
438 CP0C3_ULRI));
439
440 /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
441 kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
442
443 /* Set up IntCtl defaults: compatibility mode for timer interrupts (HW5) */
444 kvm_write_c0_guest_intctl(cop0, 0xFC000000);
445
446 /* Put the vcpu id into the CPUNum field of the EBase register to handle SMP guests */
447 kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));
448
449 return 0;
450}
451
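The Config1 arithmetic in the setup above encodes the guest TLB size in bits 30..25 as "entries - 1". A standalone sketch of that encoding (the 64-entry size is an assumption used only for illustration):

#include <stdio.h>

#define GUEST_TLB_SIZE 64	/* illustrative; mirrors KVM_MIPS_GUEST_TLB_SIZE */

int main(void)
{
	unsigned int config1 = 0;

	config1 &= ~(0x3f << 25);			/* clear the MMU size field */
	config1 |= ((GUEST_TLB_SIZE - 1) << 25);	/* 64 entries encoded as 63 */

	printf("Config1 MMU size field = %u\n", (config1 >> 25) & 0x3f);
	return 0;
}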
452static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
453 /* exit handlers */
454 .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
455 .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
456 .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
457 .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
458 .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
459 .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
460 .handle_syscall = kvm_trap_emul_handle_syscall,
461 .handle_res_inst = kvm_trap_emul_handle_res_inst,
462 .handle_break = kvm_trap_emul_handle_break,
463
464 .vm_init = kvm_trap_emul_vm_init,
465 .vcpu_init = kvm_trap_emul_vcpu_init,
466 .vcpu_setup = kvm_trap_emul_vcpu_setup,
467 .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
468 .queue_timer_int = kvm_mips_queue_timer_int_cb,
469 .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
470 .queue_io_int = kvm_mips_queue_io_int_cb,
471 .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
472 .irq_deliver = kvm_mips_irq_deliver_cb,
473 .irq_clear = kvm_mips_irq_clear_cb,
474 .vcpu_ioctl_get_regs = kvm_trap_emul_ioctl_get_regs,
475 .vcpu_ioctl_set_regs = kvm_trap_emul_ioctl_set_regs,
476};
477
478int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
479{
480 *install_callbacks = &kvm_trap_emul_callbacks;
481 return 0;
482}
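A hedged sketch of how a caller might install these callbacks through kvm_mips_emulation_init(); the pointer and wrapper names below are illustrative, not taken from kvm_mips.c:

/* minimal sketch only; names other than kvm_mips_emulation_init() are hypothetical */
static struct kvm_mips_callbacks *mips_kvm_cbs;

static int install_trap_emul_callbacks(void)
{
	int ret = kvm_mips_emulation_init(&mips_kvm_cbs);

	if (ret)
		return ret;

	/* from here on, exits would be dispatched through the hooks, e.g.
	 * ret = mips_kvm_cbs->handle_syscall(vcpu);
	 */
	return 0;
}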
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
new file mode 100644
index 000000000000..bc9e0f406c08
--- /dev/null
+++ b/arch/mips/kvm/trace.h
@@ -0,0 +1,46 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7* Authors: Sanjay Lal <sanjayl@kymasys.com>
8*/
9
10#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
11#define _TRACE_KVM_H
12
13#include <linux/tracepoint.h>
14
15#undef TRACE_SYSTEM
16#define TRACE_SYSTEM kvm
17#define TRACE_INCLUDE_PATH .
18#define TRACE_INCLUDE_FILE trace
19
20/*
21 * Tracepoints for VM exits
22 */
23extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES];
24
25TRACE_EVENT(kvm_exit,
26 TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
27 TP_ARGS(vcpu, reason),
28 TP_STRUCT__entry(
29 __field(struct kvm_vcpu *, vcpu)
30 __field(unsigned int, reason)
31 ),
32
33 TP_fast_assign(
34 __entry->vcpu = vcpu;
35 __entry->reason = reason;
36 ),
37
38 TP_printk("[%s]PC: 0x%08lx",
39 kvm_mips_exit_types_str[__entry->reason],
40 __entry->vcpu->arch.pc)
41);
42
43#endif /* _TRACE_KVM_H */
44
45/* This part must be outside protection */
46#include <trace/define_trace.h>
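As with any TRACE_EVENT() header, exactly one compilation unit defines CREATE_TRACE_POINTS before including this file, and the exit path then calls the generated trace_kvm_exit() helper. A hedged sketch (the call site shown is illustrative):

#include <linux/kvm_host.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

/* hypothetical helper: record an exit so it shows up as "[<type>]PC: 0x..." */
static void report_exit(struct kvm_vcpu *vcpu, unsigned int exit_reason)
{
	trace_kvm_exit(vcpu, exit_reason);
}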
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
index a64daee740ee..3b2a1e78a543 100644
--- a/arch/mips/lib/bitops.c
+++ b/arch/mips/lib/bitops.c
@@ -19,7 +19,7 @@
  */
 void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
- volatile unsigned long *a = addr;
+ unsigned long *a = (unsigned long *)addr;
  unsigned bit = nr & SZLONG_MASK;
  unsigned long mask;
  unsigned long flags;
@@ -41,7 +41,7 @@ EXPORT_SYMBOL(__mips_set_bit);
  */
 void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
- volatile unsigned long *a = addr;
+ unsigned long *a = (unsigned long *)addr;
  unsigned bit = nr & SZLONG_MASK;
  unsigned long mask;
  unsigned long flags;
@@ -63,7 +63,7 @@ EXPORT_SYMBOL(__mips_clear_bit);
  */
 void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
- volatile unsigned long *a = addr;
+ unsigned long *a = (unsigned long *)addr;
  unsigned bit = nr & SZLONG_MASK;
  unsigned long mask;
  unsigned long flags;
@@ -86,7 +86,7 @@ EXPORT_SYMBOL(__mips_change_bit);
 int __mips_test_and_set_bit(unsigned long nr,
  volatile unsigned long *addr)
 {
- volatile unsigned long *a = addr;
+ unsigned long *a = (unsigned long *)addr;
  unsigned bit = nr & SZLONG_MASK;
  unsigned long mask;
  unsigned long flags;
@@ -112,7 +112,7 @@ EXPORT_SYMBOL(__mips_test_and_set_bit);
 int __mips_test_and_set_bit_lock(unsigned long nr,
  volatile unsigned long *addr)
 {
- volatile unsigned long *a = addr;
+ unsigned long *a = (unsigned long *)addr;
  unsigned bit = nr & SZLONG_MASK;
  unsigned long mask;
  unsigned long flags;
@@ -137,7 +137,7 @@ EXPORT_SYMBOL(__mips_test_and_set_bit_lock);
  */
 int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
- volatile unsigned long *a = addr;
+ unsigned long *a = (unsigned long *)addr;
  unsigned bit = nr & SZLONG_MASK;
  unsigned long mask;
  unsigned long flags;
@@ -162,7 +162,7 @@ EXPORT_SYMBOL(__mips_test_and_clear_bit);
  */
 int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
- volatile unsigned long *a = addr;
+ unsigned long *a = (unsigned long *)addr;
  unsigned bit = nr & SZLONG_MASK;
  unsigned long mask;
  unsigned long flags;
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 32b9f21bfd85..8a12d00908e0 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -11,6 +11,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbdebug.h>
+#include <asm/mmu_context.h>
 
 static inline const char *msk2str(unsigned int mask)
 {
@@ -55,7 +56,7 @@ static void dump_tlb(int first, int last)
  s_pagemask = read_c0_pagemask();
  s_entryhi = read_c0_entryhi();
  s_index = read_c0_index();
- asid = s_entryhi & 0xff;
+ asid = ASID_MASK(s_entryhi);
 
  for (i = first; i <= last; i++) {
  write_c0_index(i);
@@ -85,7 +86,7 @@ static void dump_tlb(int first, int last)
 
  printk("va=%0*lx asid=%02lx\n",
  width, (entryhi & ~0x1fffUL),
- entryhi & 0xff);
+ ASID_MASK(entryhi));
  printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ",
  width,
  (entrylo0 << 6) & PAGE_MASK, c0,
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 053d3b0b0317..0580194e7402 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -5,7 +5,8 @@
  *
  * Copyright (C) 1998, 1999, 2000 by Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 2007 Maciej W. Rozycki
+ * Copyright (C) 2007 by Maciej W. Rozycki
+ * Copyright (C) 2011, 2012 MIPS Technologies, Inc.
  */
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
@@ -19,6 +20,20 @@
 #define LONG_S_R sdr
 #endif
 
+#ifdef CONFIG_CPU_MICROMIPS
+#define STORSIZE (LONGSIZE * 2)
+#define STORMASK (STORSIZE - 1)
+#define FILL64RG t8
+#define FILLPTRG t7
+#undef LONG_S
+#define LONG_S LONG_SP
+#else
+#define STORSIZE LONGSIZE
+#define STORMASK LONGMASK
+#define FILL64RG a1
+#define FILLPTRG t0
+#endif
+
 #define EX(insn,reg,addr,handler) \
 9: insn reg, addr; \
  .section __ex_table,"a"; \
@@ -26,23 +41,25 @@
  .previous
 
  .macro f_fill64 dst, offset, val, fixup
- EX(LONG_S, \val, (\offset + 0 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 1 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 2 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 3 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 4 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 5 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 6 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 7 * LONGSIZE)(\dst), \fixup)
-#if LONGSIZE == 4
- EX(LONG_S, \val, (\offset + 8 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 9 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 10 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 11 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 12 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 13 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 14 * LONGSIZE)(\dst), \fixup)
- EX(LONG_S, \val, (\offset + 15 * LONGSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 0 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 1 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 2 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 3 * STORSIZE)(\dst), \fixup)
+#if ((defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4)) || !defined(CONFIG_CPU_MICROMIPS))
+ EX(LONG_S, \val, (\offset + 4 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 5 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 6 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 7 * STORSIZE)(\dst), \fixup)
+#endif
+#if (!defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4))
+ EX(LONG_S, \val, (\offset + 8 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 9 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 10 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 11 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 12 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 13 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 14 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 15 * STORSIZE)(\dst), \fixup)
 #endif
  .endm
 
@@ -71,16 +88,20 @@ LEAF(memset)
 1:
 
 FEXPORT(__bzero)
- sltiu t0, a2, LONGSIZE /* very small region? */
+ sltiu t0, a2, STORSIZE /* very small region? */
  bnez t0, .Lsmall_memset
- andi t0, a0, LONGMASK /* aligned? */
+ andi t0, a0, STORMASK /* aligned? */
 
+#ifdef CONFIG_CPU_MICROMIPS
+ move t8, a1 /* used by 'swp' instruction */
+ move t9, a1
+#endif
 #ifndef CONFIG_CPU_DADDI_WORKAROUNDS
  beqz t0, 1f
- PTR_SUBU t0, LONGSIZE /* alignment in bytes */
+ PTR_SUBU t0, STORSIZE /* alignment in bytes */
 #else
  .set noat
- li AT, LONGSIZE
+ li AT, STORSIZE
  beqz t0, 1f
  PTR_SUBU t0, AT /* alignment in bytes */
  .set at
@@ -99,24 +120,27 @@ FEXPORT(__bzero)
 1: ori t1, a2, 0x3f /* # of full blocks */
  xori t1, 0x3f
  beqz t1, .Lmemset_partial /* no block to fill */
- andi t0, a2, 0x40-LONGSIZE
+ andi t0, a2, 0x40-STORSIZE
 
  PTR_ADDU t1, a0 /* end address */
  .set reorder
 1: PTR_ADDIU a0, 64
  R10KCBARRIER(0(ra))
- f_fill64 a0, -64, a1, .Lfwd_fixup
+ f_fill64 a0, -64, FILL64RG, .Lfwd_fixup
  bne t1, a0, 1b
  .set noreorder
 
 .Lmemset_partial:
  R10KCBARRIER(0(ra))
  PTR_LA t1, 2f /* where to start */
+#ifdef CONFIG_CPU_MICROMIPS
+ LONG_SRL t7, t0, 1
+#endif
 #if LONGSIZE == 4
- PTR_SUBU t1, t0
+ PTR_SUBU t1, FILLPTRG
 #else
  .set noat
- LONG_SRL AT, t0, 1
+ LONG_SRL AT, FILLPTRG, 1
  PTR_SUBU t1, AT
  .set at
 #endif
@@ -126,9 +150,9 @@ FEXPORT(__bzero)
  .set push
  .set noreorder
  .set nomacro
- f_fill64 a0, -64, a1, .Lpartial_fixup /* ... but first do longs ... */
+ f_fill64 a0, -64, FILL64RG, .Lpartial_fixup /* ... but first do longs ... */
 2: .set pop
- andi a2, LONGMASK /* At most one long to go */
+ andi a2, STORMASK /* At most one long to go */
 
  beqz a2, 1f
  PTR_ADDU a0, a2 /* What's left */
@@ -169,7 +193,7 @@ FEXPORT(__bzero)
 
 .Lpartial_fixup:
  PTR_L t0, TI_TASK($28)
- andi a2, LONGMASK
+ andi a2, STORMASK
  LONG_L t0, THREAD_BUADDR(t0)
  LONG_ADDU a2, t1
  jr ra
@@ -177,4 +201,4 @@ FEXPORT(__bzero)
 
 .Llast_fixup:
  jr ra
- andi v1, a2, LONGMASK
+ andi v1, a2, STORMASK
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
index cd160be3ce4d..6807f7172eaf 100644
--- a/arch/mips/lib/mips-atomic.c
+++ b/arch/mips/lib/mips-atomic.c
@@ -13,6 +13,7 @@
 #include <linux/compiler.h>
 #include <linux/preempt.h>
 #include <linux/export.h>
+#include <linux/stringify.h>
 
 #if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
 
@@ -34,8 +35,11 @@
  *
  * Workaround: mask EXL bit of the result or place a nop before mfc0.
  */
-__asm__(
- " .macro arch_local_irq_disable\n"
+notrace void arch_local_irq_disable(void)
+{
+ preempt_disable();
+
+ __asm__ __volatile__(
  " .set push \n"
  " .set noat \n"
 #ifdef CONFIG_MIPS_MT_SMTC
@@ -52,108 +56,98 @@ __asm__(
  " .set noreorder \n"
  " mtc0 $1,$12 \n"
 #endif
- " irq_disable_hazard \n"
+ " " __stringify(__irq_disable_hazard) " \n"
  " .set pop \n"
- " .endm \n");
+ : /* no outputs */
+ : /* no inputs */
+ : "memory");
 
-notrace void arch_local_irq_disable(void)
-{
- preempt_disable();
- __asm__ __volatile__(
- "arch_local_irq_disable"
- : /* no outputs */
- : /* no inputs */
- : "memory");
  preempt_enable();
 }
 EXPORT_SYMBOL(arch_local_irq_disable);
 
 
-__asm__(
- " .macro arch_local_irq_save result \n"
+notrace unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+
+ preempt_disable();
+
+ __asm__ __volatile__(
  " .set push \n"
  " .set reorder \n"
  " .set noat \n"
 #ifdef CONFIG_MIPS_MT_SMTC
- " mfc0 \\result, $2, 1 \n"
- " ori $1, \\result, 0x400 \n"
+ " mfc0 %[flags], $2, 1 \n"
+ " ori $1, %[flags], 0x400 \n"
  " .set noreorder \n"
  " mtc0 $1, $2, 1 \n"
- " andi \\result, \\result, 0x400 \n"
+ " andi %[flags], %[flags], 0x400 \n"
 #elif defined(CONFIG_CPU_MIPSR2)
  /* see irqflags.h for inline function */
 #else
- " mfc0 \\result, $12 \n"
- " ori $1, \\result, 0x1f \n"
+ " mfc0 %[flags], $12 \n"
+ " ori $1, %[flags], 0x1f \n"
  " xori $1, 0x1f \n"
  " .set noreorder \n"
  " mtc0 $1, $12 \n"
 #endif
- " irq_disable_hazard \n"
+ " " __stringify(__irq_disable_hazard) " \n"
  " .set pop \n"
- " .endm \n");
+ : [flags] "=r" (flags)
+ : /* no inputs */
+ : "memory");
 
-notrace unsigned long arch_local_irq_save(void)
-{
- unsigned long flags;
- preempt_disable();
- asm volatile("arch_local_irq_save\t%0"
- : "=r" (flags)
- : /* no inputs */
- : "memory");
  preempt_enable();
+
  return flags;
 }
 EXPORT_SYMBOL(arch_local_irq_save);
 
+notrace void arch_local_irq_restore(unsigned long flags)
+{
+ unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+ /*
+ * SMTC kernel needs to do a software replay of queued
+ * IPIs, at the cost of branch and call overhead on each
+ * local_irq_restore()
+ */
+ if (unlikely(!(flags & 0x0400)))
+ smtc_ipi_replay();
+#endif
+ preempt_disable();
 
-__asm__(
- " .macro arch_local_irq_restore flags \n"
+ __asm__ __volatile__(
  " .set push \n"
  " .set noreorder \n"
  " .set noat \n"
 #ifdef CONFIG_MIPS_MT_SMTC
- "mfc0 $1, $2, 1 \n"
- "andi \\flags, 0x400 \n"
- "ori $1, 0x400 \n"
- "xori $1, 0x400 \n"
- "or \\flags, $1 \n"
- "mtc0 \\flags, $2, 1 \n"
+ " mfc0 $1, $2, 1 \n"
+ " andi %[flags], 0x400 \n"
+ " ori $1, 0x400 \n"
+ " xori $1, 0x400 \n"
+ " or %[flags], $1 \n"
+ " mtc0 %[flags], $2, 1 \n"
 #elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
  /* see irqflags.h for inline function */
 #elif defined(CONFIG_CPU_MIPSR2)
  /* see irqflags.h for inline function */
 #else
  " mfc0 $1, $12 \n"
- " andi \\flags, 1 \n"
+ " andi %[flags], 1 \n"
  " ori $1, 0x1f \n"
  " xori $1, 0x1f \n"
- " or \\flags, $1 \n"
- " mtc0 \\flags, $12 \n"
+ " or %[flags], $1 \n"
+ " mtc0 %[flags], $12 \n"
 #endif
- " irq_disable_hazard \n"
+ " " __stringify(__irq_disable_hazard) " \n"
  " .set pop \n"
- " .endm \n");
+ : [flags] "=r" (__tmp1)
+ : "0" (flags)
+ : "memory");
 
-notrace void arch_local_irq_restore(unsigned long flags)
-{
- unsigned long __tmp1;
-
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * SMTC kernel needs to do a software replay of queued
- * IPIs, at the cost of branch and call overhead on each
- * local_irq_restore()
- */
- if (unlikely(!(flags & 0x0400)))
- smtc_ipi_replay();
-#endif
- preempt_disable();
- __asm__ __volatile__(
- "arch_local_irq_restore\t%0"
- : "=r" (__tmp1)
- : "0" (flags)
- : "memory");
  preempt_enable();
 }
 EXPORT_SYMBOL(arch_local_irq_restore);
@@ -164,11 +158,36 @@ notrace void __arch_local_irq_restore(unsigned long flags)
  unsigned long __tmp1;
 
  preempt_disable();
+
  __asm__ __volatile__(
- "arch_local_irq_restore\t%0"
- : "=r" (__tmp1)
- : "0" (flags)
- : "memory");
+ " .set push \n"
+ " .set noreorder \n"
+ " .set noat \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+ " mfc0 $1, $2, 1 \n"
+ " andi %[flags], 0x400 \n"
+ " ori $1, 0x400 \n"
+ " xori $1, 0x400 \n"
+ " or %[flags], $1 \n"
+ " mtc0 %[flags], $2, 1 \n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+ /* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+ /* see irqflags.h for inline function */
+#else
+ " mfc0 $1, $12 \n"
+ " andi %[flags], 1 \n"
+ " ori $1, 0x1f \n"
+ " xori $1, 0x1f \n"
+ " or %[flags], $1 \n"
+ " mtc0 %[flags], $12 \n"
+#endif
+ " " __stringify(__irq_disable_hazard) " \n"
+ " .set pop \n"
+ : [flags] "=r" (__tmp1)
+ : "0" (flags)
+ : "memory");
+
  preempt_enable();
 }
 EXPORT_SYMBOL(__arch_local_irq_restore);
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 91615c2ef0cf..8327698b9937 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -9,6 +9,7 @@
9#include <linux/mm.h> 9#include <linux/mm.h>
10 10
11#include <asm/mipsregs.h> 11#include <asm/mipsregs.h>
12#include <asm/mmu_context.h>
12#include <asm/page.h> 13#include <asm/page.h>
13#include <asm/pgtable.h> 14#include <asm/pgtable.h>
14#include <asm/tlbdebug.h> 15#include <asm/tlbdebug.h>
@@ -21,7 +22,7 @@ static void dump_tlb(int first, int last)
21 unsigned int asid; 22 unsigned int asid;
22 unsigned long entryhi, entrylo0; 23 unsigned long entryhi, entrylo0;
23 24
24 asid = read_c0_entryhi() & 0xfc0; 25 asid = ASID_MASK(read_c0_entryhi());
25 26
26 for (i = first; i <= last; i++) { 27 for (i = first; i <= last; i++) {
27 write_c0_index(i<<8); 28 write_c0_index(i<<8);
@@ -35,7 +36,7 @@ static void dump_tlb(int first, int last)
35 36
36 /* Unused entries have a virtual address of KSEG0. */ 37 /* Unused entries have a virtual address of KSEG0. */
37 if ((entryhi & 0xffffe000) != 0x80000000 38 if ((entryhi & 0xffffe000) != 0x80000000
38 && (entryhi & 0xfc0) == asid) { 39 && (ASID_MASK(entryhi) == asid)) {
39 /* 40 /*
40 * Only print entries in use 41 * Only print entries in use
41 */ 42 */
@@ -44,7 +45,7 @@ static void dump_tlb(int first, int last)
44 printk("va=%08lx asid=%08lx" 45 printk("va=%08lx asid=%08lx"
45 " [pa=%06lx n=%d d=%d v=%d g=%d]", 46 " [pa=%06lx n=%d d=%d v=%d g=%d]",
46 (entryhi & 0xffffe000), 47 (entryhi & 0xffffe000),
47 entryhi & 0xfc0, 48 ASID_MASK(entryhi),
48 entrylo0 & PAGE_MASK, 49 entrylo0 & PAGE_MASK,
49 (entrylo0 & (1 << 11)) ? 1 : 0, 50 (entrylo0 & (1 << 11)) ? 1 : 0,
50 (entrylo0 & (1 << 10)) ? 1 : 0, 51 (entrylo0 & (1 << 10)) ? 1 : 0,
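From here on the series replaces the open-coded ASID masks (the 0xfc0 constants above) with a function-style ASID_MASK() macro pulled in via the new <asm/mmu_context.h> include; in this series the real mask is settled at boot time (see the setup_asid() hunk in tlbex.c further down). A minimal standalone sketch, assuming the classic R3000 layout with the ASID in EntryHi bits 11:6 (the macro body below is an illustration, not the kernel's definition):

/*
 * Sketch only: the real ASID_MASK() comes from <asm/mmu_context.h> and,
 * after this series, uses a mask patched in at boot by setup_asid().
 * The field layout below is the fixed R3000 one that 0xfc0 encodes.
 */
#include <stdio.h>

#define R3K_ASID_SHIFT	6
#define R3K_ASID_BITS	6
#define ASID_MASK(x)	((x) & (((1u << R3K_ASID_BITS) - 1) << R3K_ASID_SHIFT))

int main(void)
{
	unsigned int entryhi = 0x80000fc5;	/* hypothetical EntryHi value */

	/* 0xfc0 is exactly the old open-coded mask being replaced. */
	printf("asid = 0x%03x\n", ASID_MASK(entryhi));	/* prints 0xfc0 */
	return 0;
}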
diff --git a/arch/mips/lib/strlen_user.S b/arch/mips/lib/strlen_user.S
index fdbb970f670d..e362dcdc69d1 100644
--- a/arch/mips/lib/strlen_user.S
+++ b/arch/mips/lib/strlen_user.S
@@ -3,8 +3,9 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (c) 1996, 1998, 1999, 2004 by Ralf Baechle 6 * Copyright (C) 1996, 1998, 1999, 2004 by Ralf Baechle
7 * Copyright (c) 1999 Silicon Graphics, Inc. 7 * Copyright (C) 1999 Silicon Graphics, Inc.
8 * Copyright (C) 2011 MIPS Technologies, Inc.
8 */ 9 */
9#include <asm/asm.h> 10#include <asm/asm.h>
10#include <asm/asm-offsets.h> 11#include <asm/asm-offsets.h>
@@ -28,9 +29,9 @@ LEAF(__strlen_user_asm)
28 29
29FEXPORT(__strlen_user_nocheck_asm) 30FEXPORT(__strlen_user_nocheck_asm)
30 move v0, a0 31 move v0, a0
311: EX(lb, t0, (v0), .Lfault) 321: EX(lbu, v1, (v0), .Lfault)
32 PTR_ADDIU v0, 1 33 PTR_ADDIU v0, 1
33 bnez t0, 1b 34 bnez v1, 1b
34 PTR_SUBU v0, a0 35 PTR_SUBU v0, a0
35 jr ra 36 jr ra
36 END(__strlen_user_asm) 37 END(__strlen_user_asm)
diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S
index bad539487503..92870b6b53ea 100644
--- a/arch/mips/lib/strncpy_user.S
+++ b/arch/mips/lib/strncpy_user.S
@@ -3,7 +3,8 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (c) 1996, 1999 by Ralf Baechle 6 * Copyright (C) 1996, 1999 by Ralf Baechle
7 * Copyright (C) 2011 MIPS Technologies, Inc.
7 */ 8 */
8#include <linux/errno.h> 9#include <linux/errno.h>
9#include <asm/asm.h> 10#include <asm/asm.h>
@@ -33,26 +34,27 @@ LEAF(__strncpy_from_user_asm)
33 bnez v0, .Lfault 34 bnez v0, .Lfault
34 35
35FEXPORT(__strncpy_from_user_nocheck_asm) 36FEXPORT(__strncpy_from_user_nocheck_asm)
36 move v0, zero
37 move v1, a1
38 .set noreorder 37 .set noreorder
391: EX(lbu, t0, (v1), .Lfault) 38 move t0, zero
39 move v1, a1
401: EX(lbu, v0, (v1), .Lfault)
40 PTR_ADDIU v1, 1 41 PTR_ADDIU v1, 1
41 R10KCBARRIER(0(ra)) 42 R10KCBARRIER(0(ra))
42 beqz t0, 2f 43 beqz v0, 2f
43 sb t0, (a0) 44 sb v0, (a0)
44 PTR_ADDIU v0, 1 45 PTR_ADDIU t0, 1
45 .set reorder 46 bne t0, a2, 1b
46 PTR_ADDIU a0, 1 47 PTR_ADDIU a0, 1
47 bne v0, a2, 1b 482: PTR_ADDU v0, a1, t0
482: PTR_ADDU t0, a1, v0 49 xor v0, a1
49 xor t0, a1 50 bltz v0, .Lfault
50 bltz t0, .Lfault 51 nop
51 jr ra # return n 52 jr ra # return n
53 move v0, t0
52 END(__strncpy_from_user_asm) 54 END(__strncpy_from_user_asm)
53 55
54.Lfault: li v0, -EFAULT 56.Lfault: jr ra
55 jr ra 57 li v0, -EFAULT
56 58
57 .section __ex_table,"a" 59 .section __ex_table,"a"
58 PTR 1b, .Lfault 60 PTR 1b, .Lfault
diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S
index beea03c8c0ce..fcacea5e61f1 100644
--- a/arch/mips/lib/strnlen_user.S
+++ b/arch/mips/lib/strnlen_user.S
@@ -35,7 +35,7 @@ FEXPORT(__strnlen_user_nocheck_asm)
35 PTR_ADDU a1, a0 # stop pointer 35 PTR_ADDU a1, a0 # stop pointer
361: beq v0, a1, 1f # limit reached? 361: beq v0, a1, 1f # limit reached?
37 EX(lb, t0, (v0), .Lfault) 37 EX(lb, t0, (v0), .Lfault)
38 PTR_ADDU v0, 1 38 PTR_ADDIU v0, 1
39 bnez t0, 1b 39 bnez t0, 1b
401: PTR_SUBU v0, a0 401: PTR_SUBU v0, a0
41 jr ra 41 jr ra
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index afb5a0bcf7a5..f03771900813 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -45,6 +45,7 @@
45#include <asm/signal.h> 45#include <asm/signal.h>
46#include <asm/mipsregs.h> 46#include <asm/mipsregs.h>
47#include <asm/fpu_emulator.h> 47#include <asm/fpu_emulator.h>
48#include <asm/fpu.h>
48#include <asm/uaccess.h> 49#include <asm/uaccess.h>
49#include <asm/branch.h> 50#include <asm/branch.h>
50 51
@@ -81,6 +82,11 @@ DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
81/* Determine rounding mode from the RM bits of the FCSR */ 82/* Determine rounding mode from the RM bits of the FCSR */
82#define modeindex(v) ((v) & FPU_CSR_RM) 83#define modeindex(v) ((v) & FPU_CSR_RM)
83 84
85/* microMIPS bitfields */
86#define MM_POOL32A_MINOR_MASK 0x3f
87#define MM_POOL32A_MINOR_SHIFT 0x6
88#define MM_MIPS32_COND_FC 0x30
89
84/* Convert Mips rounding mode (0..3) to IEEE library modes. */ 90/* Convert Mips rounding mode (0..3) to IEEE library modes. */
85static const unsigned char ieee_rm[4] = { 91static const unsigned char ieee_rm[4] = {
86 [FPU_CSR_RN] = IEEE754_RN, 92 [FPU_CSR_RN] = IEEE754_RN,
@@ -110,6 +116,556 @@ static const unsigned int fpucondbit[8] = {
110}; 116};
111#endif 117#endif
112 118
119/* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. */
120static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7};
121
122/* (microMIPS) Convert certain microMIPS instructions to MIPS32 format. */
123static const int sd_format[] = {16, 17, 0, 0, 0, 0, 0, 0};
124static const int sdps_format[] = {16, 17, 22, 0, 0, 0, 0, 0};
125static const int dwl_format[] = {17, 20, 21, 0, 0, 0, 0, 0};
126static const int swl_format[] = {16, 20, 21, 0, 0, 0, 0, 0};
127
128/*
 129 * This function translates a 32-bit microMIPS instruction
130 * into a 32-bit MIPS32 instruction. Returns 0 on success
131 * and SIGILL otherwise.
132 */
133static int microMIPS32_to_MIPS32(union mips_instruction *insn_ptr)
134{
135 union mips_instruction insn = *insn_ptr;
136 union mips_instruction mips32_insn = insn;
137 int func, fmt, op;
138
139 switch (insn.mm_i_format.opcode) {
140 case mm_ldc132_op:
141 mips32_insn.mm_i_format.opcode = ldc1_op;
142 mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
143 mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
144 break;
145 case mm_lwc132_op:
146 mips32_insn.mm_i_format.opcode = lwc1_op;
147 mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
148 mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
149 break;
150 case mm_sdc132_op:
151 mips32_insn.mm_i_format.opcode = sdc1_op;
152 mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
153 mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
154 break;
155 case mm_swc132_op:
156 mips32_insn.mm_i_format.opcode = swc1_op;
157 mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
158 mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
159 break;
160 case mm_pool32i_op:
161 /* NOTE: offset is << by 1 if in microMIPS mode. */
162 if ((insn.mm_i_format.rt == mm_bc1f_op) ||
163 (insn.mm_i_format.rt == mm_bc1t_op)) {
164 mips32_insn.fb_format.opcode = cop1_op;
165 mips32_insn.fb_format.bc = bc_op;
166 mips32_insn.fb_format.flag =
167 (insn.mm_i_format.rt == mm_bc1t_op) ? 1 : 0;
168 } else
169 return SIGILL;
170 break;
171 case mm_pool32f_op:
172 switch (insn.mm_fp0_format.func) {
173 case mm_32f_01_op:
174 case mm_32f_11_op:
175 case mm_32f_02_op:
176 case mm_32f_12_op:
177 case mm_32f_41_op:
178 case mm_32f_51_op:
179 case mm_32f_42_op:
180 case mm_32f_52_op:
181 op = insn.mm_fp0_format.func;
182 if (op == mm_32f_01_op)
183 func = madd_s_op;
184 else if (op == mm_32f_11_op)
185 func = madd_d_op;
186 else if (op == mm_32f_02_op)
187 func = nmadd_s_op;
188 else if (op == mm_32f_12_op)
189 func = nmadd_d_op;
190 else if (op == mm_32f_41_op)
191 func = msub_s_op;
192 else if (op == mm_32f_51_op)
193 func = msub_d_op;
194 else if (op == mm_32f_42_op)
195 func = nmsub_s_op;
196 else
197 func = nmsub_d_op;
198 mips32_insn.fp6_format.opcode = cop1x_op;
199 mips32_insn.fp6_format.fr = insn.mm_fp6_format.fr;
200 mips32_insn.fp6_format.ft = insn.mm_fp6_format.ft;
201 mips32_insn.fp6_format.fs = insn.mm_fp6_format.fs;
202 mips32_insn.fp6_format.fd = insn.mm_fp6_format.fd;
203 mips32_insn.fp6_format.func = func;
204 break;
205 case mm_32f_10_op:
206 func = -1; /* Invalid */
207 op = insn.mm_fp5_format.op & 0x7;
208 if (op == mm_ldxc1_op)
209 func = ldxc1_op;
210 else if (op == mm_sdxc1_op)
211 func = sdxc1_op;
212 else if (op == mm_lwxc1_op)
213 func = lwxc1_op;
214 else if (op == mm_swxc1_op)
215 func = swxc1_op;
216
217 if (func != -1) {
218 mips32_insn.r_format.opcode = cop1x_op;
219 mips32_insn.r_format.rs =
220 insn.mm_fp5_format.base;
221 mips32_insn.r_format.rt =
222 insn.mm_fp5_format.index;
223 mips32_insn.r_format.rd = 0;
224 mips32_insn.r_format.re = insn.mm_fp5_format.fd;
225 mips32_insn.r_format.func = func;
226 } else
227 return SIGILL;
228 break;
229 case mm_32f_40_op:
230 op = -1; /* Invalid */
231 if (insn.mm_fp2_format.op == mm_fmovt_op)
232 op = 1;
233 else if (insn.mm_fp2_format.op == mm_fmovf_op)
234 op = 0;
235 if (op != -1) {
236 mips32_insn.fp0_format.opcode = cop1_op;
237 mips32_insn.fp0_format.fmt =
238 sdps_format[insn.mm_fp2_format.fmt];
239 mips32_insn.fp0_format.ft =
240 (insn.mm_fp2_format.cc<<2) + op;
241 mips32_insn.fp0_format.fs =
242 insn.mm_fp2_format.fs;
243 mips32_insn.fp0_format.fd =
244 insn.mm_fp2_format.fd;
245 mips32_insn.fp0_format.func = fmovc_op;
246 } else
247 return SIGILL;
248 break;
249 case mm_32f_60_op:
250 func = -1; /* Invalid */
251 if (insn.mm_fp0_format.op == mm_fadd_op)
252 func = fadd_op;
253 else if (insn.mm_fp0_format.op == mm_fsub_op)
254 func = fsub_op;
255 else if (insn.mm_fp0_format.op == mm_fmul_op)
256 func = fmul_op;
257 else if (insn.mm_fp0_format.op == mm_fdiv_op)
258 func = fdiv_op;
259 if (func != -1) {
260 mips32_insn.fp0_format.opcode = cop1_op;
261 mips32_insn.fp0_format.fmt =
262 sdps_format[insn.mm_fp0_format.fmt];
263 mips32_insn.fp0_format.ft =
264 insn.mm_fp0_format.ft;
265 mips32_insn.fp0_format.fs =
266 insn.mm_fp0_format.fs;
267 mips32_insn.fp0_format.fd =
268 insn.mm_fp0_format.fd;
269 mips32_insn.fp0_format.func = func;
270 } else
271 return SIGILL;
272 break;
273 case mm_32f_70_op:
274 func = -1; /* Invalid */
275 if (insn.mm_fp0_format.op == mm_fmovn_op)
276 func = fmovn_op;
277 else if (insn.mm_fp0_format.op == mm_fmovz_op)
278 func = fmovz_op;
279 if (func != -1) {
280 mips32_insn.fp0_format.opcode = cop1_op;
281 mips32_insn.fp0_format.fmt =
282 sdps_format[insn.mm_fp0_format.fmt];
283 mips32_insn.fp0_format.ft =
284 insn.mm_fp0_format.ft;
285 mips32_insn.fp0_format.fs =
286 insn.mm_fp0_format.fs;
287 mips32_insn.fp0_format.fd =
288 insn.mm_fp0_format.fd;
289 mips32_insn.fp0_format.func = func;
290 } else
291 return SIGILL;
292 break;
293 case mm_32f_73_op: /* POOL32FXF */
294 switch (insn.mm_fp1_format.op) {
295 case mm_movf0_op:
296 case mm_movf1_op:
297 case mm_movt0_op:
298 case mm_movt1_op:
299 if ((insn.mm_fp1_format.op & 0x7f) ==
300 mm_movf0_op)
301 op = 0;
302 else
303 op = 1;
304 mips32_insn.r_format.opcode = spec_op;
305 mips32_insn.r_format.rs = insn.mm_fp4_format.fs;
306 mips32_insn.r_format.rt =
307 (insn.mm_fp4_format.cc << 2) + op;
308 mips32_insn.r_format.rd = insn.mm_fp4_format.rt;
309 mips32_insn.r_format.re = 0;
310 mips32_insn.r_format.func = movc_op;
311 break;
312 case mm_fcvtd0_op:
313 case mm_fcvtd1_op:
314 case mm_fcvts0_op:
315 case mm_fcvts1_op:
316 if ((insn.mm_fp1_format.op & 0x7f) ==
317 mm_fcvtd0_op) {
318 func = fcvtd_op;
319 fmt = swl_format[insn.mm_fp3_format.fmt];
320 } else {
321 func = fcvts_op;
322 fmt = dwl_format[insn.mm_fp3_format.fmt];
323 }
324 mips32_insn.fp0_format.opcode = cop1_op;
325 mips32_insn.fp0_format.fmt = fmt;
326 mips32_insn.fp0_format.ft = 0;
327 mips32_insn.fp0_format.fs =
328 insn.mm_fp3_format.fs;
329 mips32_insn.fp0_format.fd =
330 insn.mm_fp3_format.rt;
331 mips32_insn.fp0_format.func = func;
332 break;
333 case mm_fmov0_op:
334 case mm_fmov1_op:
335 case mm_fabs0_op:
336 case mm_fabs1_op:
337 case mm_fneg0_op:
338 case mm_fneg1_op:
339 if ((insn.mm_fp1_format.op & 0x7f) ==
340 mm_fmov0_op)
341 func = fmov_op;
342 else if ((insn.mm_fp1_format.op & 0x7f) ==
343 mm_fabs0_op)
344 func = fabs_op;
345 else
346 func = fneg_op;
347 mips32_insn.fp0_format.opcode = cop1_op;
348 mips32_insn.fp0_format.fmt =
349 sdps_format[insn.mm_fp3_format.fmt];
350 mips32_insn.fp0_format.ft = 0;
351 mips32_insn.fp0_format.fs =
352 insn.mm_fp3_format.fs;
353 mips32_insn.fp0_format.fd =
354 insn.mm_fp3_format.rt;
355 mips32_insn.fp0_format.func = func;
356 break;
357 case mm_ffloorl_op:
358 case mm_ffloorw_op:
359 case mm_fceill_op:
360 case mm_fceilw_op:
361 case mm_ftruncl_op:
362 case mm_ftruncw_op:
363 case mm_froundl_op:
364 case mm_froundw_op:
365 case mm_fcvtl_op:
366 case mm_fcvtw_op:
367 if (insn.mm_fp1_format.op == mm_ffloorl_op)
368 func = ffloorl_op;
369 else if (insn.mm_fp1_format.op == mm_ffloorw_op)
370 func = ffloor_op;
371 else if (insn.mm_fp1_format.op == mm_fceill_op)
372 func = fceill_op;
373 else if (insn.mm_fp1_format.op == mm_fceilw_op)
374 func = fceil_op;
375 else if (insn.mm_fp1_format.op == mm_ftruncl_op)
376 func = ftruncl_op;
377 else if (insn.mm_fp1_format.op == mm_ftruncw_op)
378 func = ftrunc_op;
379 else if (insn.mm_fp1_format.op == mm_froundl_op)
380 func = froundl_op;
381 else if (insn.mm_fp1_format.op == mm_froundw_op)
382 func = fround_op;
383 else if (insn.mm_fp1_format.op == mm_fcvtl_op)
384 func = fcvtl_op;
385 else
386 func = fcvtw_op;
387 mips32_insn.fp0_format.opcode = cop1_op;
388 mips32_insn.fp0_format.fmt =
389 sd_format[insn.mm_fp1_format.fmt];
390 mips32_insn.fp0_format.ft = 0;
391 mips32_insn.fp0_format.fs =
392 insn.mm_fp1_format.fs;
393 mips32_insn.fp0_format.fd =
394 insn.mm_fp1_format.rt;
395 mips32_insn.fp0_format.func = func;
396 break;
397 case mm_frsqrt_op:
398 case mm_fsqrt_op:
399 case mm_frecip_op:
400 if (insn.mm_fp1_format.op == mm_frsqrt_op)
401 func = frsqrt_op;
402 else if (insn.mm_fp1_format.op == mm_fsqrt_op)
403 func = fsqrt_op;
404 else
405 func = frecip_op;
406 mips32_insn.fp0_format.opcode = cop1_op;
407 mips32_insn.fp0_format.fmt =
408 sdps_format[insn.mm_fp1_format.fmt];
409 mips32_insn.fp0_format.ft = 0;
410 mips32_insn.fp0_format.fs =
411 insn.mm_fp1_format.fs;
412 mips32_insn.fp0_format.fd =
413 insn.mm_fp1_format.rt;
414 mips32_insn.fp0_format.func = func;
415 break;
416 case mm_mfc1_op:
417 case mm_mtc1_op:
418 case mm_cfc1_op:
419 case mm_ctc1_op:
420 if (insn.mm_fp1_format.op == mm_mfc1_op)
421 op = mfc_op;
422 else if (insn.mm_fp1_format.op == mm_mtc1_op)
423 op = mtc_op;
424 else if (insn.mm_fp1_format.op == mm_cfc1_op)
425 op = cfc_op;
426 else
427 op = ctc_op;
428 mips32_insn.fp1_format.opcode = cop1_op;
429 mips32_insn.fp1_format.op = op;
430 mips32_insn.fp1_format.rt =
431 insn.mm_fp1_format.rt;
432 mips32_insn.fp1_format.fs =
433 insn.mm_fp1_format.fs;
434 mips32_insn.fp1_format.fd = 0;
435 mips32_insn.fp1_format.func = 0;
436 break;
437 default:
438 return SIGILL;
439 break;
440 }
441 break;
442 case mm_32f_74_op: /* c.cond.fmt */
443 mips32_insn.fp0_format.opcode = cop1_op;
444 mips32_insn.fp0_format.fmt =
445 sdps_format[insn.mm_fp4_format.fmt];
446 mips32_insn.fp0_format.ft = insn.mm_fp4_format.rt;
447 mips32_insn.fp0_format.fs = insn.mm_fp4_format.fs;
448 mips32_insn.fp0_format.fd = insn.mm_fp4_format.cc << 2;
449 mips32_insn.fp0_format.func =
450 insn.mm_fp4_format.cond | MM_MIPS32_COND_FC;
451 break;
452 default:
453 return SIGILL;
454 break;
455 }
456 break;
457 default:
458 return SIGILL;
459 break;
460 }
461
462 *insn_ptr = mips32_insn;
463 return 0;
464}
465
466int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
467 unsigned long *contpc)
468{
469 union mips_instruction insn = (union mips_instruction)dec_insn.insn;
470 int bc_false = 0;
471 unsigned int fcr31;
472 unsigned int bit;
473
474 switch (insn.mm_i_format.opcode) {
475 case mm_pool32a_op:
476 if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) ==
477 mm_pool32axf_op) {
478 switch (insn.mm_i_format.simmediate >>
479 MM_POOL32A_MINOR_SHIFT) {
480 case mm_jalr_op:
481 case mm_jalrhb_op:
482 case mm_jalrs_op:
483 case mm_jalrshb_op:
484 if (insn.mm_i_format.rt != 0) /* Not mm_jr */
485 regs->regs[insn.mm_i_format.rt] =
486 regs->cp0_epc +
487 dec_insn.pc_inc +
488 dec_insn.next_pc_inc;
489 *contpc = regs->regs[insn.mm_i_format.rs];
490 return 1;
491 break;
492 }
493 }
494 break;
495 case mm_pool32i_op:
496 switch (insn.mm_i_format.rt) {
497 case mm_bltzals_op:
498 case mm_bltzal_op:
499 regs->regs[31] = regs->cp0_epc +
500 dec_insn.pc_inc +
501 dec_insn.next_pc_inc;
502 /* Fall through */
503 case mm_bltz_op:
504 if ((long)regs->regs[insn.mm_i_format.rs] < 0)
505 *contpc = regs->cp0_epc +
506 dec_insn.pc_inc +
507 (insn.mm_i_format.simmediate << 1);
508 else
509 *contpc = regs->cp0_epc +
510 dec_insn.pc_inc +
511 dec_insn.next_pc_inc;
512 return 1;
513 break;
514 case mm_bgezals_op:
515 case mm_bgezal_op:
516 regs->regs[31] = regs->cp0_epc +
517 dec_insn.pc_inc +
518 dec_insn.next_pc_inc;
519 /* Fall through */
520 case mm_bgez_op:
521 if ((long)regs->regs[insn.mm_i_format.rs] >= 0)
522 *contpc = regs->cp0_epc +
523 dec_insn.pc_inc +
524 (insn.mm_i_format.simmediate << 1);
525 else
526 *contpc = regs->cp0_epc +
527 dec_insn.pc_inc +
528 dec_insn.next_pc_inc;
529 return 1;
530 break;
531 case mm_blez_op:
532 if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
533 *contpc = regs->cp0_epc +
534 dec_insn.pc_inc +
535 (insn.mm_i_format.simmediate << 1);
536 else
537 *contpc = regs->cp0_epc +
538 dec_insn.pc_inc +
539 dec_insn.next_pc_inc;
540 return 1;
541 break;
542 case mm_bgtz_op:
 543 if ((long)regs->regs[insn.mm_i_format.rs] > 0)
544 *contpc = regs->cp0_epc +
545 dec_insn.pc_inc +
546 (insn.mm_i_format.simmediate << 1);
547 else
548 *contpc = regs->cp0_epc +
549 dec_insn.pc_inc +
550 dec_insn.next_pc_inc;
551 return 1;
552 break;
553 case mm_bc2f_op:
554 case mm_bc1f_op:
555 bc_false = 1;
556 /* Fall through */
557 case mm_bc2t_op:
558 case mm_bc1t_op:
559 preempt_disable();
560 if (is_fpu_owner())
561 asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
562 else
563 fcr31 = current->thread.fpu.fcr31;
564 preempt_enable();
565
566 if (bc_false)
567 fcr31 = ~fcr31;
568
569 bit = (insn.mm_i_format.rs >> 2);
570 bit += (bit != 0);
571 bit += 23;
572 if (fcr31 & (1 << bit))
573 *contpc = regs->cp0_epc +
574 dec_insn.pc_inc +
575 (insn.mm_i_format.simmediate << 1);
576 else
577 *contpc = regs->cp0_epc +
578 dec_insn.pc_inc + dec_insn.next_pc_inc;
579 return 1;
580 break;
581 }
582 break;
583 case mm_pool16c_op:
584 switch (insn.mm_i_format.rt) {
585 case mm_jalr16_op:
586 case mm_jalrs16_op:
587 regs->regs[31] = regs->cp0_epc +
588 dec_insn.pc_inc + dec_insn.next_pc_inc;
589 /* Fall through */
590 case mm_jr16_op:
591 *contpc = regs->regs[insn.mm_i_format.rs];
592 return 1;
593 break;
594 }
595 break;
596 case mm_beqz16_op:
597 if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0)
598 *contpc = regs->cp0_epc +
599 dec_insn.pc_inc +
600 (insn.mm_b1_format.simmediate << 1);
601 else
602 *contpc = regs->cp0_epc +
603 dec_insn.pc_inc + dec_insn.next_pc_inc;
604 return 1;
605 break;
606 case mm_bnez16_op:
607 if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0)
608 *contpc = regs->cp0_epc +
609 dec_insn.pc_inc +
610 (insn.mm_b1_format.simmediate << 1);
611 else
612 *contpc = regs->cp0_epc +
613 dec_insn.pc_inc + dec_insn.next_pc_inc;
614 return 1;
615 break;
616 case mm_b16_op:
617 *contpc = regs->cp0_epc + dec_insn.pc_inc +
618 (insn.mm_b0_format.simmediate << 1);
619 return 1;
620 break;
621 case mm_beq32_op:
622 if (regs->regs[insn.mm_i_format.rs] ==
623 regs->regs[insn.mm_i_format.rt])
624 *contpc = regs->cp0_epc +
625 dec_insn.pc_inc +
626 (insn.mm_i_format.simmediate << 1);
627 else
628 *contpc = regs->cp0_epc +
629 dec_insn.pc_inc +
630 dec_insn.next_pc_inc;
631 return 1;
632 break;
633 case mm_bne32_op:
634 if (regs->regs[insn.mm_i_format.rs] !=
635 regs->regs[insn.mm_i_format.rt])
636 *contpc = regs->cp0_epc +
637 dec_insn.pc_inc +
638 (insn.mm_i_format.simmediate << 1);
639 else
640 *contpc = regs->cp0_epc +
641 dec_insn.pc_inc + dec_insn.next_pc_inc;
642 return 1;
643 break;
644 case mm_jalx32_op:
645 regs->regs[31] = regs->cp0_epc +
646 dec_insn.pc_inc + dec_insn.next_pc_inc;
647 *contpc = regs->cp0_epc + dec_insn.pc_inc;
648 *contpc >>= 28;
649 *contpc <<= 28;
650 *contpc |= (insn.j_format.target << 2);
651 return 1;
652 break;
653 case mm_jals32_op:
654 case mm_jal32_op:
655 regs->regs[31] = regs->cp0_epc +
656 dec_insn.pc_inc + dec_insn.next_pc_inc;
657 /* Fall through */
658 case mm_j32_op:
659 *contpc = regs->cp0_epc + dec_insn.pc_inc;
660 *contpc >>= 27;
661 *contpc <<= 27;
662 *contpc |= (insn.j_format.target << 1);
663 set_isa16_mode(*contpc);
664 return 1;
665 break;
666 }
667 return 0;
668}
113 669
114/* 670/*
115 * Redundant with logic already in kernel/branch.c, 671 * Redundant with logic already in kernel/branch.c,
@@ -117,53 +673,177 @@ static const unsigned int fpucondbit[8] = {
117 * a single subroutine should be used across both 673 * a single subroutine should be used across both
118 * modules. 674 * modules.
119 */ 675 */
120static int isBranchInstr(mips_instruction * i) 676static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
677 unsigned long *contpc)
121{ 678{
122 switch (MIPSInst_OPCODE(*i)) { 679 union mips_instruction insn = (union mips_instruction)dec_insn.insn;
680 unsigned int fcr31;
681 unsigned int bit = 0;
682
683 switch (insn.i_format.opcode) {
123 case spec_op: 684 case spec_op:
124 switch (MIPSInst_FUNC(*i)) { 685 switch (insn.r_format.func) {
125 case jalr_op: 686 case jalr_op:
687 regs->regs[insn.r_format.rd] =
688 regs->cp0_epc + dec_insn.pc_inc +
689 dec_insn.next_pc_inc;
690 /* Fall through */
126 case jr_op: 691 case jr_op:
692 *contpc = regs->regs[insn.r_format.rs];
127 return 1; 693 return 1;
694 break;
128 } 695 }
129 break; 696 break;
130
131 case bcond_op: 697 case bcond_op:
132 switch (MIPSInst_RT(*i)) { 698 switch (insn.i_format.rt) {
699 case bltzal_op:
700 case bltzall_op:
701 regs->regs[31] = regs->cp0_epc +
702 dec_insn.pc_inc +
703 dec_insn.next_pc_inc;
704 /* Fall through */
133 case bltz_op: 705 case bltz_op:
134 case bgez_op:
135 case bltzl_op: 706 case bltzl_op:
136 case bgezl_op: 707 if ((long)regs->regs[insn.i_format.rs] < 0)
137 case bltzal_op: 708 *contpc = regs->cp0_epc +
709 dec_insn.pc_inc +
710 (insn.i_format.simmediate << 2);
711 else
712 *contpc = regs->cp0_epc +
713 dec_insn.pc_inc +
714 dec_insn.next_pc_inc;
715 return 1;
716 break;
138 case bgezal_op: 717 case bgezal_op:
139 case bltzall_op:
140 case bgezall_op: 718 case bgezall_op:
719 regs->regs[31] = regs->cp0_epc +
720 dec_insn.pc_inc +
721 dec_insn.next_pc_inc;
722 /* Fall through */
723 case bgez_op:
724 case bgezl_op:
725 if ((long)regs->regs[insn.i_format.rs] >= 0)
726 *contpc = regs->cp0_epc +
727 dec_insn.pc_inc +
728 (insn.i_format.simmediate << 2);
729 else
730 *contpc = regs->cp0_epc +
731 dec_insn.pc_inc +
732 dec_insn.next_pc_inc;
141 return 1; 733 return 1;
734 break;
142 } 735 }
143 break; 736 break;
144
145 case j_op:
146 case jal_op:
147 case jalx_op: 737 case jalx_op:
738 set_isa16_mode(bit);
739 case jal_op:
740 regs->regs[31] = regs->cp0_epc +
741 dec_insn.pc_inc +
742 dec_insn.next_pc_inc;
743 /* Fall through */
744 case j_op:
745 *contpc = regs->cp0_epc + dec_insn.pc_inc;
746 *contpc >>= 28;
747 *contpc <<= 28;
748 *contpc |= (insn.j_format.target << 2);
749 /* Set microMIPS mode bit: XOR for jalx. */
750 *contpc ^= bit;
751 return 1;
752 break;
148 case beq_op: 753 case beq_op:
149 case bne_op:
150 case blez_op:
151 case bgtz_op:
152 case beql_op: 754 case beql_op:
755 if (regs->regs[insn.i_format.rs] ==
756 regs->regs[insn.i_format.rt])
757 *contpc = regs->cp0_epc +
758 dec_insn.pc_inc +
759 (insn.i_format.simmediate << 2);
760 else
761 *contpc = regs->cp0_epc +
762 dec_insn.pc_inc +
763 dec_insn.next_pc_inc;
764 return 1;
765 break;
766 case bne_op:
153 case bnel_op: 767 case bnel_op:
768 if (regs->regs[insn.i_format.rs] !=
769 regs->regs[insn.i_format.rt])
770 *contpc = regs->cp0_epc +
771 dec_insn.pc_inc +
772 (insn.i_format.simmediate << 2);
773 else
774 *contpc = regs->cp0_epc +
775 dec_insn.pc_inc +
776 dec_insn.next_pc_inc;
777 return 1;
778 break;
779 case blez_op:
154 case blezl_op: 780 case blezl_op:
781 if ((long)regs->regs[insn.i_format.rs] <= 0)
782 *contpc = regs->cp0_epc +
783 dec_insn.pc_inc +
784 (insn.i_format.simmediate << 2);
785 else
786 *contpc = regs->cp0_epc +
787 dec_insn.pc_inc +
788 dec_insn.next_pc_inc;
789 return 1;
790 break;
791 case bgtz_op:
155 case bgtzl_op: 792 case bgtzl_op:
793 if ((long)regs->regs[insn.i_format.rs] > 0)
794 *contpc = regs->cp0_epc +
795 dec_insn.pc_inc +
796 (insn.i_format.simmediate << 2);
797 else
798 *contpc = regs->cp0_epc +
799 dec_insn.pc_inc +
800 dec_insn.next_pc_inc;
156 return 1; 801 return 1;
157 802 break;
158 case cop0_op: 803 case cop0_op:
159 case cop1_op: 804 case cop1_op:
160 case cop2_op: 805 case cop2_op:
161 case cop1x_op: 806 case cop1x_op:
162 if (MIPSInst_RS(*i) == bc_op) 807 if (insn.i_format.rs == bc_op) {
163 return 1; 808 preempt_disable();
809 if (is_fpu_owner())
810 asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
811 else
812 fcr31 = current->thread.fpu.fcr31;
813 preempt_enable();
814
815 bit = (insn.i_format.rt >> 2);
816 bit += (bit != 0);
817 bit += 23;
818 switch (insn.i_format.rt & 3) {
819 case 0: /* bc1f */
820 case 2: /* bc1fl */
821 if (~fcr31 & (1 << bit))
822 *contpc = regs->cp0_epc +
823 dec_insn.pc_inc +
824 (insn.i_format.simmediate << 2);
825 else
826 *contpc = regs->cp0_epc +
827 dec_insn.pc_inc +
828 dec_insn.next_pc_inc;
829 return 1;
830 break;
831 case 1: /* bc1t */
832 case 3: /* bc1tl */
833 if (fcr31 & (1 << bit))
834 *contpc = regs->cp0_epc +
835 dec_insn.pc_inc +
836 (insn.i_format.simmediate << 2);
837 else
838 *contpc = regs->cp0_epc +
839 dec_insn.pc_inc +
840 dec_insn.next_pc_inc;
841 return 1;
842 break;
843 }
844 }
164 break; 845 break;
165 } 846 }
166
167 return 0; 847 return 0;
168} 848}
169 849
@@ -210,26 +890,23 @@ static inline int cop1_64bit(struct pt_regs *xcp)
210 */ 890 */
211 891
212static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, 892static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
213 void *__user *fault_addr) 893 struct mm_decoded_insn dec_insn, void *__user *fault_addr)
214{ 894{
215 mips_instruction ir; 895 mips_instruction ir;
216 unsigned long emulpc, contpc; 896 unsigned long contpc = xcp->cp0_epc + dec_insn.pc_inc;
217 unsigned int cond; 897 unsigned int cond;
218 898 int pc_inc;
219 if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) {
220 MIPS_FPU_EMU_INC_STATS(errors);
221 *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
222 return SIGBUS;
223 }
224 if (__get_user(ir, (mips_instruction __user *) xcp->cp0_epc)) {
225 MIPS_FPU_EMU_INC_STATS(errors);
226 *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
227 return SIGSEGV;
228 }
229 899
230 /* XXX NEC Vr54xx bug workaround */ 900 /* XXX NEC Vr54xx bug workaround */
231 if ((xcp->cp0_cause & CAUSEF_BD) && !isBranchInstr(&ir)) 901 if (xcp->cp0_cause & CAUSEF_BD) {
232 xcp->cp0_cause &= ~CAUSEF_BD; 902 if (dec_insn.micro_mips_mode) {
903 if (!mm_isBranchInstr(xcp, dec_insn, &contpc))
904 xcp->cp0_cause &= ~CAUSEF_BD;
905 } else {
906 if (!isBranchInstr(xcp, dec_insn, &contpc))
907 xcp->cp0_cause &= ~CAUSEF_BD;
908 }
909 }
233 910
234 if (xcp->cp0_cause & CAUSEF_BD) { 911 if (xcp->cp0_cause & CAUSEF_BD) {
235 /* 912 /*
@@ -244,32 +921,33 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
244 * Linux MIPS branch emulator operates on context, updating the 921 * Linux MIPS branch emulator operates on context, updating the
245 * cp0_epc. 922 * cp0_epc.
246 */ 923 */
247 emulpc = xcp->cp0_epc + 4; /* Snapshot emulation target */ 924 ir = dec_insn.next_insn; /* process delay slot instr */
925 pc_inc = dec_insn.next_pc_inc;
926 } else {
927 ir = dec_insn.insn; /* process current instr */
928 pc_inc = dec_insn.pc_inc;
929 }
248 930
249 if (__compute_return_epc(xcp) < 0) { 931 /*
 250#ifdef CP1DBG 932 * Since microMIPS FPU instructions are a subset of MIPS32 FPU
251 printk("failed to emulate branch at %p\n", 933 * instructions, we want to convert microMIPS FPU instructions
252 (void *) (xcp->cp0_epc)); 934 * into MIPS32 instructions so that we could reuse all of the
253#endif 935 * FPU emulation code.
936 *
937 * NOTE: We cannot do this for branch instructions since they
938 * are not a subset. Example: Cannot emulate a 16-bit
939 * aligned target address with a MIPS32 instruction.
940 */
941 if (dec_insn.micro_mips_mode) {
942 /*
943 * If next instruction is a 16-bit instruction, then it
944 * it cannot be a FPU instruction. This could happen
945 * since we can be called for non-FPU instructions.
946 */
947 if ((pc_inc == 2) ||
948 (microMIPS32_to_MIPS32((union mips_instruction *)&ir)
949 == SIGILL))
254 return SIGILL; 950 return SIGILL;
255 }
256 if (!access_ok(VERIFY_READ, emulpc, sizeof(mips_instruction))) {
257 MIPS_FPU_EMU_INC_STATS(errors);
258 *fault_addr = (mips_instruction __user *)emulpc;
259 return SIGBUS;
260 }
261 if (__get_user(ir, (mips_instruction __user *) emulpc)) {
262 MIPS_FPU_EMU_INC_STATS(errors);
263 *fault_addr = (mips_instruction __user *)emulpc;
264 return SIGSEGV;
265 }
266 /* __compute_return_epc() will have updated cp0_epc */
267 contpc = xcp->cp0_epc;
268 /* In order not to confuse ptrace() et al, tweak context */
269 xcp->cp0_epc = emulpc - 4;
270 } else {
271 emulpc = xcp->cp0_epc;
272 contpc = xcp->cp0_epc + 4;
273 } 951 }
274 952
275 emul: 953 emul:
@@ -474,22 +1152,35 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
474 /* branch taken: emulate dslot 1152 /* branch taken: emulate dslot
475 * instruction 1153 * instruction
476 */ 1154 */
477 xcp->cp0_epc += 4; 1155 xcp->cp0_epc += dec_insn.pc_inc;
478 contpc = (xcp->cp0_epc + 1156
479 (MIPSInst_SIMM(ir) << 2)); 1157 contpc = MIPSInst_SIMM(ir);
480 1158 ir = dec_insn.next_insn;
481 if (!access_ok(VERIFY_READ, xcp->cp0_epc, 1159 if (dec_insn.micro_mips_mode) {
482 sizeof(mips_instruction))) { 1160 contpc = (xcp->cp0_epc + (contpc << 1));
483 MIPS_FPU_EMU_INC_STATS(errors); 1161
484 *fault_addr = (mips_instruction __user *)xcp->cp0_epc; 1162 /* If 16-bit instruction, not FPU. */
485 return SIGBUS; 1163 if ((dec_insn.next_pc_inc == 2) ||
486 } 1164 (microMIPS32_to_MIPS32((union mips_instruction *)&ir) == SIGILL)) {
487 if (__get_user(ir, 1165
488 (mips_instruction __user *) xcp->cp0_epc)) { 1166 /*
489 MIPS_FPU_EMU_INC_STATS(errors); 1167 * Since this instruction will
490 *fault_addr = (mips_instruction __user *)xcp->cp0_epc; 1168 * be put on the stack with
491 return SIGSEGV; 1169 * 32-bit words, get around
492 } 1170 * this problem by putting a
1171 * NOP16 as the second one.
1172 */
1173 if (dec_insn.next_pc_inc == 2)
1174 ir = (ir & (~0xffff)) | MM_NOP16;
1175
1176 /*
1177 * Single step the non-CP1
1178 * instruction in the dslot.
1179 */
1180 return mips_dsemul(xcp, ir, contpc);
1181 }
1182 } else
1183 contpc = (xcp->cp0_epc + (contpc << 2));
493 1184
494 switch (MIPSInst_OPCODE(ir)) { 1185 switch (MIPSInst_OPCODE(ir)) {
495 case lwc1_op: 1186 case lwc1_op:
@@ -525,8 +1216,8 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
525 * branch likely nullifies 1216 * branch likely nullifies
526 * dslot if not taken 1217 * dslot if not taken
527 */ 1218 */
528 xcp->cp0_epc += 4; 1219 xcp->cp0_epc += dec_insn.pc_inc;
529 contpc += 4; 1220 contpc += dec_insn.pc_inc;
530 /* 1221 /*
531 * else continue & execute 1222 * else continue & execute
532 * dslot as normal insn 1223 * dslot as normal insn
@@ -1313,25 +2004,75 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1313 int has_fpu, void *__user *fault_addr) 2004 int has_fpu, void *__user *fault_addr)
1314{ 2005{
1315 unsigned long oldepc, prevepc; 2006 unsigned long oldepc, prevepc;
1316 mips_instruction insn; 2007 struct mm_decoded_insn dec_insn;
2008 u16 instr[4];
2009 u16 *instr_ptr;
1317 int sig = 0; 2010 int sig = 0;
1318 2011
1319 oldepc = xcp->cp0_epc; 2012 oldepc = xcp->cp0_epc;
1320 do { 2013 do {
1321 prevepc = xcp->cp0_epc; 2014 prevepc = xcp->cp0_epc;
1322 2015
1323 if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) { 2016 if (get_isa16_mode(prevepc) && cpu_has_mmips) {
1324 MIPS_FPU_EMU_INC_STATS(errors); 2017 /*
1325 *fault_addr = (mips_instruction __user *)xcp->cp0_epc; 2018 * Get next 2 microMIPS instructions and convert them
1326 return SIGBUS; 2019 * into 32-bit instructions.
1327 } 2020 */
1328 if (__get_user(insn, (mips_instruction __user *) xcp->cp0_epc)) { 2021 if ((get_user(instr[0], (u16 __user *)msk_isa16_mode(xcp->cp0_epc))) ||
1329 MIPS_FPU_EMU_INC_STATS(errors); 2022 (get_user(instr[1], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 2))) ||
1330 *fault_addr = (mips_instruction __user *)xcp->cp0_epc; 2023 (get_user(instr[2], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 4))) ||
1331 return SIGSEGV; 2024 (get_user(instr[3], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 6)))) {
2025 MIPS_FPU_EMU_INC_STATS(errors);
2026 return SIGBUS;
2027 }
2028 instr_ptr = instr;
2029
2030 /* Get first instruction. */
2031 if (mm_insn_16bit(*instr_ptr)) {
2032 /* Duplicate the half-word. */
2033 dec_insn.insn = (*instr_ptr << 16) |
2034 (*instr_ptr);
2035 /* 16-bit instruction. */
2036 dec_insn.pc_inc = 2;
2037 instr_ptr += 1;
2038 } else {
2039 dec_insn.insn = (*instr_ptr << 16) |
2040 *(instr_ptr+1);
2041 /* 32-bit instruction. */
2042 dec_insn.pc_inc = 4;
2043 instr_ptr += 2;
2044 }
2045 /* Get second instruction. */
2046 if (mm_insn_16bit(*instr_ptr)) {
2047 /* Duplicate the half-word. */
2048 dec_insn.next_insn = (*instr_ptr << 16) |
2049 (*instr_ptr);
2050 /* 16-bit instruction. */
2051 dec_insn.next_pc_inc = 2;
2052 } else {
2053 dec_insn.next_insn = (*instr_ptr << 16) |
2054 *(instr_ptr+1);
2055 /* 32-bit instruction. */
2056 dec_insn.next_pc_inc = 4;
2057 }
2058 dec_insn.micro_mips_mode = 1;
2059 } else {
2060 if ((get_user(dec_insn.insn,
2061 (mips_instruction __user *) xcp->cp0_epc)) ||
2062 (get_user(dec_insn.next_insn,
2063 (mips_instruction __user *)(xcp->cp0_epc+4)))) {
2064 MIPS_FPU_EMU_INC_STATS(errors);
2065 return SIGBUS;
2066 }
2067 dec_insn.pc_inc = 4;
2068 dec_insn.next_pc_inc = 4;
2069 dec_insn.micro_mips_mode = 0;
1332 } 2070 }
1333 if (insn == 0) 2071
1334 xcp->cp0_epc += 4; /* skip nops */ 2072 if ((dec_insn.insn == 0) ||
2073 ((dec_insn.pc_inc == 2) &&
2074 ((dec_insn.insn & 0xffff) == MM_NOP16)))
2075 xcp->cp0_epc += dec_insn.pc_inc; /* Skip NOPs */
1335 else { 2076 else {
1336 /* 2077 /*
1337 * The 'ieee754_csr' is an alias of 2078 * The 'ieee754_csr' is an alias of
@@ -1341,7 +2082,7 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1341 */ 2082 */
1342 /* convert to ieee library modes */ 2083 /* convert to ieee library modes */
1343 ieee754_csr.rm = ieee_rm[ieee754_csr.rm]; 2084 ieee754_csr.rm = ieee_rm[ieee754_csr.rm];
1344 sig = cop1Emulate(xcp, ctx, fault_addr); 2085 sig = cop1Emulate(xcp, ctx, dec_insn, fault_addr);
1345 /* revert to mips rounding mode */ 2086 /* revert to mips rounding mode */
1346 ieee754_csr.rm = mips_rm[ieee754_csr.rm]; 2087 ieee754_csr.rm = mips_rm[ieee754_csr.rm];
1347 } 2088 }
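The handler hunk above assembles dec_insn from up to four halfwords because a microMIPS instruction stream mixes 16- and 32-bit encodings, and a 16-bit encoding is duplicated into both halves of the 32-bit word. A userspace sketch of just that packing step, with the kernel's mm_insn_16bit() test replaced by a caller-supplied flag (struct and function names here are illustrative, not the kernel's):

/* Sketch of the dec_insn assembly done in fpu_emulator_cop1Handler(). */
#include <stdint.h>
#include <stdio.h>

struct decoded {
	uint32_t insn, next_insn;
	int pc_inc, next_pc_inc;
};

/* Pack one microMIPS instruction into a 32-bit word: a 16-bit encoding is
 * duplicated into both halves, a 32-bit one keeps its halfword order. */
static const uint16_t *pack(const uint16_t *hw, int is16,
			    uint32_t *word, int *pc_inc)
{
	if (is16) {
		*word = ((uint32_t)hw[0] << 16) | hw[0];	/* duplicate */
		*pc_inc = 2;
		return hw + 1;
	}
	*word = ((uint32_t)hw[0] << 16) | hw[1];
	*pc_inc = 4;
	return hw + 2;
}

int main(void)
{
	/* Two instructions' worth of halfwords; values are arbitrary. */
	uint16_t stream[4] = { 0x0c42, 0x0000, 0x5401, 0x1234 };
	struct decoded d;
	const uint16_t *p = stream;

	p = pack(p, 1, &d.insn, &d.pc_inc);		/* 16-bit first insn  */
	pack(p, 0, &d.next_insn, &d.next_pc_inc);	/* 32-bit second insn */

	printf("insn=%08x next=%08x\n", (unsigned)d.insn, (unsigned)d.next_insn);
	return 0;
}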
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index 384a3b0091ea..7ea622ab8dad 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -55,7 +55,9 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
55 struct emuframe __user *fr; 55 struct emuframe __user *fr;
56 int err; 56 int err;
57 57
58 if (ir == 0) { /* a nop is easy */ 58 if ((get_isa16_mode(regs->cp0_epc) && ((ir >> 16) == MM_NOP16)) ||
59 (ir == 0)) {
60 /* NOP is easy */
59 regs->cp0_epc = cpc; 61 regs->cp0_epc = cpc;
60 regs->cp0_cause &= ~CAUSEF_BD; 62 regs->cp0_cause &= ~CAUSEF_BD;
61 return 0; 63 return 0;
@@ -91,8 +93,16 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
91 if (unlikely(!access_ok(VERIFY_WRITE, fr, sizeof(struct emuframe)))) 93 if (unlikely(!access_ok(VERIFY_WRITE, fr, sizeof(struct emuframe))))
92 return SIGBUS; 94 return SIGBUS;
93 95
94 err = __put_user(ir, &fr->emul); 96 if (get_isa16_mode(regs->cp0_epc)) {
95 err |= __put_user((mips_instruction)BREAK_MATH, &fr->badinst); 97 err = __put_user(ir >> 16, (u16 __user *)(&fr->emul));
98 err |= __put_user(ir & 0xffff, (u16 __user *)((long)(&fr->emul) + 2));
99 err |= __put_user(BREAK_MATH >> 16, (u16 __user *)(&fr->badinst));
100 err |= __put_user(BREAK_MATH & 0xffff, (u16 __user *)((long)(&fr->badinst) + 2));
101 } else {
102 err = __put_user(ir, &fr->emul);
103 err |= __put_user((mips_instruction)BREAK_MATH, &fr->badinst);
104 }
105
96 err |= __put_user((mips_instruction)BD_COOKIE, &fr->cookie); 106 err |= __put_user((mips_instruction)BD_COOKIE, &fr->cookie);
97 err |= __put_user(cpc, &fr->epc); 107 err |= __put_user(cpc, &fr->epc);
98 108
@@ -101,7 +111,8 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
101 return SIGBUS; 111 return SIGBUS;
102 } 112 }
103 113
104 regs->cp0_epc = (unsigned long) &fr->emul; 114 regs->cp0_epc = ((unsigned long) &fr->emul) |
115 get_isa16_mode(regs->cp0_epc);
105 116
106 flush_cache_sigtramp((unsigned long)&fr->badinst); 117 flush_cache_sigtramp((unsigned long)&fr->badinst);
107 118
@@ -114,9 +125,10 @@ int do_dsemulret(struct pt_regs *xcp)
114 unsigned long epc; 125 unsigned long epc;
115 u32 insn, cookie; 126 u32 insn, cookie;
116 int err = 0; 127 int err = 0;
128 u16 instr[2];
117 129
118 fr = (struct emuframe __user *) 130 fr = (struct emuframe __user *)
119 (xcp->cp0_epc - sizeof(mips_instruction)); 131 (msk_isa16_mode(xcp->cp0_epc) - sizeof(mips_instruction));
120 132
121 /* 133 /*
122 * If we can't even access the area, something is very wrong, but we'll 134 * If we can't even access the area, something is very wrong, but we'll
@@ -131,7 +143,13 @@ int do_dsemulret(struct pt_regs *xcp)
 131 * - Is the instruction pointed to by the EPC a BREAK_MATH? 143 * - Is the instruction pointed to by the EPC a BREAK_MATH?
132 * - Is the following memory word the BD_COOKIE? 144 * - Is the following memory word the BD_COOKIE?
133 */ 145 */
134 err = __get_user(insn, &fr->badinst); 146 if (get_isa16_mode(xcp->cp0_epc)) {
147 err = __get_user(instr[0], (u16 __user *)(&fr->badinst));
148 err |= __get_user(instr[1], (u16 __user *)((long)(&fr->badinst) + 2));
149 insn = (instr[0] << 16) | instr[1];
150 } else {
151 err = __get_user(insn, &fr->badinst);
152 }
135 err |= __get_user(cookie, &fr->cookie); 153 err |= __get_user(cookie, &fr->cookie);
136 154
137 if (unlikely(err || (insn != BREAK_MATH) || (cookie != BD_COOKIE))) { 155 if (unlikely(err || (insn != BREAK_MATH) || (cookie != BD_COOKIE))) {
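For microMIPS emulation frames the hunks above store and reload the 32-bit emulated instruction as two halfwords, high half first. A standalone sketch of that store/reload pair, with plain pointers standing in for the __put_user()/__get_user() accessors:

/* Sketch of the halfword split used by mips_dsemul()/do_dsemulret(). */
#include <assert.h>
#include <stdint.h>

static void store_mm_word(uint16_t *dst, uint32_t ir)
{
	dst[0] = ir >> 16;	/* high halfword first, as in the diff */
	dst[1] = ir & 0xffff;
}

static uint32_t load_mm_word(const uint16_t *src)
{
	return ((uint32_t)src[0] << 16) | src[1];
}

int main(void)
{
	uint16_t frame[2];
	uint32_t ir = 0x4380000d;	/* arbitrary example word */

	store_mm_word(frame, ir);
	assert(load_mm_word(frame) == ir);
	return 0;
}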
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 1dcec30ad1c4..e87aae1f2e80 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -4,7 +4,7 @@
4 4
5obj-y += cache.o dma-default.o extable.o fault.o \ 5obj-y += cache.o dma-default.o extable.o fault.o \
6 gup.o init.o mmap.o page.o page-funcs.o \ 6 gup.o init.o mmap.o page.o page-funcs.o \
7 tlbex.o tlbex-fault.o uasm.o 7 tlbex.o tlbex-fault.o uasm-mips.o
8 8
9obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o 9obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
10obj-$(CONFIG_64BIT) += pgtable-64.o 10obj-$(CONFIG_64BIT) += pgtable-64.o
@@ -22,3 +22,5 @@ obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
22obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o 22obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
23obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o 23obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
24obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o 24obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o
25
26obj-$(CONFIG_SYS_SUPPORTS_MICROMIPS) += uasm-micromips.o
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 2078915eacb9..21813beec7a5 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -33,6 +33,7 @@
33#include <asm/war.h> 33#include <asm/war.h>
34#include <asm/cacheflush.h> /* for run_uncached() */ 34#include <asm/cacheflush.h> /* for run_uncached() */
35#include <asm/traps.h> 35#include <asm/traps.h>
36#include <asm/dma-coherence.h>
36 37
37/* 38/*
38 * Special Variant of smp_call_function for use by cache functions: 39 * Special Variant of smp_call_function for use by cache functions:
@@ -136,7 +137,8 @@ static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
136 r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed; 137 r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
137} 138}
138 139
139static void (* r4k_blast_dcache)(void); 140void (* r4k_blast_dcache)(void);
141EXPORT_SYMBOL(r4k_blast_dcache);
140 142
141static void __cpuinit r4k_blast_dcache_setup(void) 143static void __cpuinit r4k_blast_dcache_setup(void)
142{ 144{
@@ -264,7 +266,8 @@ static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
264 r4k_blast_icache_page_indexed = blast_icache64_page_indexed; 266 r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
265} 267}
266 268
267static void (* r4k_blast_icache)(void); 269void (* r4k_blast_icache)(void);
270EXPORT_SYMBOL(r4k_blast_icache);
268 271
269static void __cpuinit r4k_blast_icache_setup(void) 272static void __cpuinit r4k_blast_icache_setup(void)
270{ 273{
@@ -1377,20 +1380,6 @@ static void __cpuinit coherency_setup(void)
1377 } 1380 }
1378} 1381}
1379 1382
1380#if defined(CONFIG_DMA_NONCOHERENT)
1381
1382static int __cpuinitdata coherentio;
1383
1384static int __init setcoherentio(char *str)
1385{
1386 coherentio = 1;
1387
1388 return 0;
1389}
1390
1391early_param("coherentio", setcoherentio);
1392#endif
1393
1394static void __cpuinit r4k_cache_error_setup(void) 1383static void __cpuinit r4k_cache_error_setup(void)
1395{ 1384{
1396 extern char __weak except_vec2_generic; 1385 extern char __weak except_vec2_generic;
@@ -1472,9 +1461,14 @@ void __cpuinit r4k_cache_init(void)
1472 1461
1473 build_clear_page(); 1462 build_clear_page();
1474 build_copy_page(); 1463 build_copy_page();
1475#if !defined(CONFIG_MIPS_CMP) 1464
1465 /*
 1466 * We want to run CMP kernels on cores with and without coherent
1467 * caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether
1468 * or not to flush caches.
1469 */
1476 local_r4k___flush_cache_all(NULL); 1470 local_r4k___flush_cache_all(NULL);
1477#endif 1471
1478 coherency_setup(); 1472 coherency_setup();
1479 board_cache_error_setup = r4k_cache_error_setup; 1473 board_cache_error_setup = r4k_cache_error_setup;
1480} 1474}
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 07cec4407b0c..5aeb3eb0b72f 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -48,6 +48,7 @@ void (*flush_icache_all)(void);
48 48
49EXPORT_SYMBOL_GPL(local_flush_data_cache_page); 49EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
50EXPORT_SYMBOL(flush_data_cache_page); 50EXPORT_SYMBOL(flush_data_cache_page);
51EXPORT_SYMBOL(flush_icache_all);
51 52
52#ifdef CONFIG_DMA_NONCOHERENT 53#ifdef CONFIG_DMA_NONCOHERENT
53 54
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index f9ef83829a52..caf92ecb37d6 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -22,6 +22,26 @@
22 22
23#include <dma-coherence.h> 23#include <dma-coherence.h>
24 24
25int coherentio = 0; /* User defined DMA coherency from command line. */
26EXPORT_SYMBOL_GPL(coherentio);
27int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */
28
29static int __init setcoherentio(char *str)
30{
31 coherentio = 1;
32 pr_info("Hardware DMA cache coherency (command line)\n");
33 return 0;
34}
35early_param("coherentio", setcoherentio);
36
37static int __init setnocoherentio(char *str)
38{
39 coherentio = 0;
40 pr_info("Software DMA cache coherency (command line)\n");
41 return 0;
42}
43early_param("nocoherentio", setnocoherentio);
44
25static inline struct page *dma_addr_to_page(struct device *dev, 45static inline struct page *dma_addr_to_page(struct device *dev,
26 dma_addr_t dma_addr) 46 dma_addr_t dma_addr)
27{ 47{
@@ -115,7 +135,8 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
115 135
116 if (!plat_device_is_coherent(dev)) { 136 if (!plat_device_is_coherent(dev)) {
117 dma_cache_wback_inv((unsigned long) ret, size); 137 dma_cache_wback_inv((unsigned long) ret, size);
118 ret = UNCAC_ADDR(ret); 138 if (!hw_coherentio)
139 ret = UNCAC_ADDR(ret);
119 } 140 }
120 } 141 }
121 142
@@ -142,7 +163,7 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
142 163
143 plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); 164 plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
144 165
145 if (!plat_device_is_coherent(dev)) 166 if (!plat_device_is_coherent(dev) && !hw_coherentio)
146 addr = CAC_ADDR(addr); 167 addr = CAC_ADDR(addr);
147 168
148 free_pages(addr, get_order(size)); 169 free_pages(addr, get_order(size));
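The hunks above make the uncached-alias decision depend on both the per-device coherency check and the new hw_coherentio flag: an UNCAC_ADDR()/CAC_ADDR() mapping is only used when neither provides coherent DMA. A tiny standalone sketch of that truth table (the helper name is illustrative, not a kernel API):

#include <assert.h>
#include <stdbool.h>

/* Mirrors the condition in mips_dma_alloc_coherent()/free_coherent(). */
static bool use_uncached_alias(bool dev_coherent, bool hw_coherentio)
{
	return !dev_coherent && !hw_coherentio;
}

int main(void)
{
	assert( use_uncached_alias(false, false));	/* software coherency    */
	assert(!use_uncached_alias(false, true));	/* hw keeps caches clean */
	assert(!use_uncached_alias(true,  false));	/* coherent device       */
	return 0;
}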
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index a29fba55b53e..4eb8dcfaf1ce 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -247,6 +247,11 @@ void __cpuinit build_clear_page(void)
247 struct uasm_label *l = labels; 247 struct uasm_label *l = labels;
248 struct uasm_reloc *r = relocs; 248 struct uasm_reloc *r = relocs;
249 int i; 249 int i;
250 static atomic_t run_once = ATOMIC_INIT(0);
251
252 if (atomic_xchg(&run_once, 1)) {
253 return;
254 }
250 255
251 memset(labels, 0, sizeof(labels)); 256 memset(labels, 0, sizeof(labels));
252 memset(relocs, 0, sizeof(relocs)); 257 memset(relocs, 0, sizeof(relocs));
@@ -389,6 +394,11 @@ void __cpuinit build_copy_page(void)
389 struct uasm_label *l = labels; 394 struct uasm_label *l = labels;
390 struct uasm_reloc *r = relocs; 395 struct uasm_reloc *r = relocs;
391 int i; 396 int i;
397 static atomic_t run_once = ATOMIC_INIT(0);
398
399 if (atomic_xchg(&run_once, 1)) {
400 return;
401 }
392 402
393 memset(labels, 0, sizeof(labels)); 403 memset(labels, 0, sizeof(labels));
394 memset(relocs, 0, sizeof(relocs)); 404 memset(relocs, 0, sizeof(relocs));
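The atomic_xchg() guard added to build_clear_page() and build_copy_page() above is a standard run-exactly-once idiom, so the uasm buffers are only generated by the first CPU to get there. A standalone sketch using C11 atomics in place of the kernel's atomic_t (function name here is illustrative):

#include <stdatomic.h>
#include <stdio.h>

static void generate_handlers(void)
{
	static atomic_int run_once;

	if (atomic_exchange(&run_once, 1))
		return;		/* someone else already did the work */

	puts("emitting page-clear/copy code once");
}

int main(void)
{
	generate_handlers();	/* does the work */
	generate_handlers();	/* returns immediately */
	return 0;
}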
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index a63d1ed0827f..4a13c150f31b 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -51,7 +51,7 @@ void local_flush_tlb_all(void)
51#endif 51#endif
52 52
53 local_irq_save(flags); 53 local_irq_save(flags);
54 old_ctx = read_c0_entryhi() & ASID_MASK; 54 old_ctx = ASID_MASK(read_c0_entryhi());
55 write_c0_entrylo0(0); 55 write_c0_entrylo0(0);
56 entry = r3k_have_wired_reg ? read_c0_wired() : 8; 56 entry = r3k_have_wired_reg ? read_c0_wired() : 8;
57 for (; entry < current_cpu_data.tlbsize; entry++) { 57 for (; entry < current_cpu_data.tlbsize; entry++) {
@@ -87,13 +87,13 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
87 87
88#ifdef DEBUG_TLB 88#ifdef DEBUG_TLB
89 printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", 89 printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
90 cpu_context(cpu, mm) & ASID_MASK, start, end); 90 ASID_MASK(cpu_context(cpu, mm)), start, end);
91#endif 91#endif
92 local_irq_save(flags); 92 local_irq_save(flags);
93 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 93 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
94 if (size <= current_cpu_data.tlbsize) { 94 if (size <= current_cpu_data.tlbsize) {
95 int oldpid = read_c0_entryhi() & ASID_MASK; 95 int oldpid = ASID_MASK(read_c0_entryhi());
96 int newpid = cpu_context(cpu, mm) & ASID_MASK; 96 int newpid = ASID_MASK(cpu_context(cpu, mm));
97 97
98 start &= PAGE_MASK; 98 start &= PAGE_MASK;
99 end += PAGE_SIZE - 1; 99 end += PAGE_SIZE - 1;
@@ -166,10 +166,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
166#ifdef DEBUG_TLB 166#ifdef DEBUG_TLB
167 printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page); 167 printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
168#endif 168#endif
169 newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK; 169 newpid = ASID_MASK(cpu_context(cpu, vma->vm_mm));
170 page &= PAGE_MASK; 170 page &= PAGE_MASK;
171 local_irq_save(flags); 171 local_irq_save(flags);
172 oldpid = read_c0_entryhi() & ASID_MASK; 172 oldpid = ASID_MASK(read_c0_entryhi());
173 write_c0_entryhi(page | newpid); 173 write_c0_entryhi(page | newpid);
174 BARRIER; 174 BARRIER;
175 tlb_probe(); 175 tlb_probe();
@@ -197,10 +197,10 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
197 if (current->active_mm != vma->vm_mm) 197 if (current->active_mm != vma->vm_mm)
198 return; 198 return;
199 199
200 pid = read_c0_entryhi() & ASID_MASK; 200 pid = ASID_MASK(read_c0_entryhi());
201 201
202#ifdef DEBUG_TLB 202#ifdef DEBUG_TLB
203 if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) { 203 if ((pid != ASID_MASK(cpu_context(cpu, vma->vm_mm))) || (cpu_context(cpu, vma->vm_mm) == 0)) {
204 printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n", 204 printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
205 (cpu_context(cpu, vma->vm_mm)), pid); 205 (cpu_context(cpu, vma->vm_mm)), pid);
206 } 206 }
@@ -241,7 +241,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
241 241
242 local_irq_save(flags); 242 local_irq_save(flags);
243 /* Save old context and create impossible VPN2 value */ 243 /* Save old context and create impossible VPN2 value */
244 old_ctx = read_c0_entryhi() & ASID_MASK; 244 old_ctx = ASID_MASK(read_c0_entryhi());
245 old_pagemask = read_c0_pagemask(); 245 old_pagemask = read_c0_pagemask();
246 w = read_c0_wired(); 246 w = read_c0_wired();
247 write_c0_wired(w + 1); 247 write_c0_wired(w + 1);
@@ -264,7 +264,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
264#endif 264#endif
265 265
266 local_irq_save(flags); 266 local_irq_save(flags);
267 old_ctx = read_c0_entryhi() & ASID_MASK; 267 old_ctx = ASID_MASK(read_c0_entryhi());
268 write_c0_entrylo0(entrylo0); 268 write_c0_entrylo0(entrylo0);
269 write_c0_entryhi(entryhi); 269 write_c0_entryhi(entryhi);
270 write_c0_index(wired); 270 write_c0_index(wired);
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 493131c81a29..09653b290d53 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -13,6 +13,7 @@
13#include <linux/smp.h> 13#include <linux/smp.h>
14#include <linux/mm.h> 14#include <linux/mm.h>
15#include <linux/hugetlb.h> 15#include <linux/hugetlb.h>
16#include <linux/module.h>
16 17
17#include <asm/cpu.h> 18#include <asm/cpu.h>
18#include <asm/bootinfo.h> 19#include <asm/bootinfo.h>
@@ -94,6 +95,7 @@ void local_flush_tlb_all(void)
94 FLUSH_ITLB; 95 FLUSH_ITLB;
95 EXIT_CRITICAL(flags); 96 EXIT_CRITICAL(flags);
96} 97}
98EXPORT_SYMBOL(local_flush_tlb_all);
97 99
98/* All entries common to a mm share an asid. To effectively flush 100/* All entries common to a mm share an asid. To effectively flush
99 these entries, we just bump the asid. */ 101 these entries, we just bump the asid. */
@@ -285,7 +287,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
285 287
286 ENTER_CRITICAL(flags); 288 ENTER_CRITICAL(flags);
287 289
288 pid = read_c0_entryhi() & ASID_MASK; 290 pid = ASID_MASK(read_c0_entryhi());
289 address &= (PAGE_MASK << 1); 291 address &= (PAGE_MASK << 1);
290 write_c0_entryhi(address | pid); 292 write_c0_entryhi(address | pid);
291 pgdp = pgd_offset(vma->vm_mm, address); 293 pgdp = pgd_offset(vma->vm_mm, address);
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 91c2499f806a..122f9207f49e 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -195,7 +195,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
195 if (current->active_mm != vma->vm_mm) 195 if (current->active_mm != vma->vm_mm)
196 return; 196 return;
197 197
198 pid = read_c0_entryhi() & ASID_MASK; 198 pid = ASID_MASK(read_c0_entryhi());
199 199
200 local_irq_save(flags); 200 local_irq_save(flags);
201 address &= PAGE_MASK; 201 address &= PAGE_MASK;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 820e6612d744..4d46d3787576 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -29,6 +29,7 @@
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/cache.h> 30#include <linux/cache.h>
31 31
32#include <asm/mmu_context.h>
32#include <asm/cacheflush.h> 33#include <asm/cacheflush.h>
33#include <asm/pgtable.h> 34#include <asm/pgtable.h>
34#include <asm/war.h> 35#include <asm/war.h>
@@ -305,6 +306,78 @@ static struct uasm_reloc relocs[128] __cpuinitdata;
305static int check_for_high_segbits __cpuinitdata; 306static int check_for_high_segbits __cpuinitdata;
306#endif 307#endif
307 308
309static void __cpuinit insn_fixup(unsigned int **start, unsigned int **stop,
310 unsigned int i_const)
311{
312 unsigned int **p;
313
314 for (p = start; p < stop; p++) {
315#ifndef CONFIG_CPU_MICROMIPS
316 unsigned int *ip;
317
318 ip = *p;
319 *ip = (*ip & 0xffff0000) | i_const;
320#else
321 unsigned short *ip;
322
323 ip = ((unsigned short *)((unsigned int)*p - 1));
324 if ((*ip & 0xf000) == 0x4000) {
325 *ip &= 0xfff1;
326 *ip |= (i_const << 1);
327 } else if ((*ip & 0xf000) == 0x6000) {
328 *ip &= 0xfff1;
329 *ip |= ((i_const >> 2) << 1);
330 } else {
331 ip++;
332 *ip = i_const;
333 }
334#endif
335 local_flush_icache_range((unsigned long)ip,
336 (unsigned long)ip + sizeof(*ip));
337 }
338}
339
340#define asid_insn_fixup(section, const) \
341do { \
342 extern unsigned int *__start_ ## section; \
343 extern unsigned int *__stop_ ## section; \
344 insn_fixup(&__start_ ## section, &__stop_ ## section, const); \
345} while(0)
346
347/*
348 * Caller is assumed to flush the caches before the first context switch.
349 */
350static void __cpuinit setup_asid(unsigned int inc, unsigned int mask,
351 unsigned int version_mask,
352 unsigned int first_version)
353{
354 extern asmlinkage void handle_ri_rdhwr_vivt(void);
355 unsigned long *vivt_exc;
356
357#ifdef CONFIG_CPU_MICROMIPS
358 /*
359 * Worst case optimised microMIPS addiu instructions support
360 * only a 3-bit immediate value.
361 */
 362 if (inc > 7)
363 panic("Invalid ASID increment value!");
364#endif
365 asid_insn_fixup(__asid_inc, inc);
366 asid_insn_fixup(__asid_mask, mask);
367 asid_insn_fixup(__asid_version_mask, version_mask);
368 asid_insn_fixup(__asid_first_version, first_version);
369
370 /* Patch up the 'handle_ri_rdhwr_vivt' handler. */
371 vivt_exc = (unsigned long *) &handle_ri_rdhwr_vivt;
372#ifdef CONFIG_CPU_MICROMIPS
373 vivt_exc = (unsigned long *)((unsigned long) vivt_exc - 1);
374#endif
375 vivt_exc++;
376 *vivt_exc = (*vivt_exc & ~mask) | mask;
377
378 current_cpu_data.asid_cache = first_version;
379}
380
308static int check_for_high_segbits __cpuinitdata; 381static int check_for_high_segbits __cpuinitdata;
309 382
310static unsigned int kscratch_used_mask __cpuinitdata; 383static unsigned int kscratch_used_mask __cpuinitdata;
@@ -1458,17 +1531,17 @@ u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
1458u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned; 1531u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
1459u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned; 1532u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
1460#ifdef CONFIG_MIPS_PGD_C0_CONTEXT 1533#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
1461u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned; 1534u32 tlbmiss_handler_setup_pgd_array[16] __cacheline_aligned;
1462 1535
1463static void __cpuinit build_r4000_setup_pgd(void) 1536static void __cpuinit build_r4000_setup_pgd(void)
1464{ 1537{
1465 const int a0 = 4; 1538 const int a0 = 4;
1466 const int a1 = 5; 1539 const int a1 = 5;
1467 u32 *p = tlbmiss_handler_setup_pgd; 1540 u32 *p = tlbmiss_handler_setup_pgd_array;
1468 struct uasm_label *l = labels; 1541 struct uasm_label *l = labels;
1469 struct uasm_reloc *r = relocs; 1542 struct uasm_reloc *r = relocs;
1470 1543
1471 memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd)); 1544 memset(tlbmiss_handler_setup_pgd_array, 0, sizeof(tlbmiss_handler_setup_pgd_array));
1472 memset(labels, 0, sizeof(labels)); 1545 memset(labels, 0, sizeof(labels));
1473 memset(relocs, 0, sizeof(relocs)); 1546 memset(relocs, 0, sizeof(relocs));
1474 1547
@@ -1496,15 +1569,15 @@ static void __cpuinit build_r4000_setup_pgd(void)
1496 uasm_i_jr(&p, 31); 1569 uasm_i_jr(&p, 31);
1497 UASM_i_MTC0(&p, a0, 31, pgd_reg); 1570 UASM_i_MTC0(&p, a0, 31, pgd_reg);
1498 } 1571 }
1499 if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd)) 1572 if (p - tlbmiss_handler_setup_pgd_array > ARRAY_SIZE(tlbmiss_handler_setup_pgd_array))
1500 panic("tlbmiss_handler_setup_pgd space exceeded"); 1573 panic("tlbmiss_handler_setup_pgd_array space exceeded");
1501 uasm_resolve_relocs(relocs, labels); 1574 uasm_resolve_relocs(relocs, labels);
1502 pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n", 1575 pr_debug("Wrote tlbmiss_handler_setup_pgd_array (%u instructions).\n",
1503 (unsigned int)(p - tlbmiss_handler_setup_pgd)); 1576 (unsigned int)(p - tlbmiss_handler_setup_pgd_array));
1504 1577
1505 dump_handler("tlbmiss_handler", 1578 dump_handler("tlbmiss_handler",
1506 tlbmiss_handler_setup_pgd, 1579 tlbmiss_handler_setup_pgd_array,
1507 ARRAY_SIZE(tlbmiss_handler_setup_pgd)); 1580 ARRAY_SIZE(tlbmiss_handler_setup_pgd_array));
1508} 1581}
1509#endif 1582#endif
1510 1583
@@ -2030,6 +2103,13 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
2030 2103
2031 uasm_l_nopage_tlbl(&l, p); 2104 uasm_l_nopage_tlbl(&l, p);
2032 build_restore_work_registers(&p); 2105 build_restore_work_registers(&p);
2106#ifdef CONFIG_CPU_MICROMIPS
2107 if ((unsigned long)tlb_do_page_fault_0 & 1) {
2108 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
2109 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
2110 uasm_i_jr(&p, K0);
2111 } else
2112#endif
2033 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); 2113 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
2034 uasm_i_nop(&p); 2114 uasm_i_nop(&p);
2035 2115
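The CONFIG_CPU_MICROMIPS branch added above (and repeated in the store and modify handlers below) exists because a microMIPS entry point carries the ISA-mode bit in bit 0 of its address, so the page-fault handler cannot always be reached with a plain 'j'; the full address is loaded into K0 and reached with 'jr' instead. A trivial sketch of the test (hypothetical helper, not part of the patch):

	static inline int needs_register_jump(unsigned long handler)
	{
		return (handler & 1UL) != 0;	/* bit 0 set => microMIPS ISA mode */
	}
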
@@ -2077,6 +2157,13 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
2077 2157
2078 uasm_l_nopage_tlbs(&l, p); 2158 uasm_l_nopage_tlbs(&l, p);
2079 build_restore_work_registers(&p); 2159 build_restore_work_registers(&p);
2160#ifdef CONFIG_CPU_MICROMIPS
2161 if ((unsigned long)tlb_do_page_fault_1 & 1) {
2162 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
2163 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
2164 uasm_i_jr(&p, K0);
2165 } else
2166#endif
2080 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 2167 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2081 uasm_i_nop(&p); 2168 uasm_i_nop(&p);
2082 2169
@@ -2125,6 +2212,13 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
2125 2212
2126 uasm_l_nopage_tlbm(&l, p); 2213 uasm_l_nopage_tlbm(&l, p);
2127 build_restore_work_registers(&p); 2214 build_restore_work_registers(&p);
2215#ifdef CONFIG_CPU_MICROMIPS
2216 if ((unsigned long)tlb_do_page_fault_1 & 1) {
2217 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
2218 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
2219 uasm_i_jr(&p, K0);
2220 } else
2221#endif
2128 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 2222 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2129 uasm_i_nop(&p); 2223 uasm_i_nop(&p);
2130 2224
@@ -2162,8 +2256,12 @@ void __cpuinit build_tlb_refill_handler(void)
2162 case CPU_TX3922: 2256 case CPU_TX3922:
2163 case CPU_TX3927: 2257 case CPU_TX3927:
2164#ifndef CONFIG_MIPS_PGD_C0_CONTEXT 2258#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
2165 build_r3000_tlb_refill_handler(); 2259 setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000);
2260 if (cpu_has_local_ebase)
2261 build_r3000_tlb_refill_handler();
2166 if (!run_once) { 2262 if (!run_once) {
2263 if (!cpu_has_local_ebase)
2264 build_r3000_tlb_refill_handler();
2167 build_r3000_tlb_load_handler(); 2265 build_r3000_tlb_load_handler();
2168 build_r3000_tlb_store_handler(); 2266 build_r3000_tlb_store_handler();
2169 build_r3000_tlb_modify_handler(); 2267 build_r3000_tlb_modify_handler();
@@ -2184,6 +2282,11 @@ void __cpuinit build_tlb_refill_handler(void)
2184 break; 2282 break;
2185 2283
2186 default: 2284 default:
2285#ifndef CONFIG_MIPS_MT_SMTC
2286 setup_asid(0x1, 0xff, 0xff00, ASID_FIRST_VERSION_R4000);
2287#else
2288 setup_asid(0x1, smtc_asid_mask, 0xff00, ASID_FIRST_VERSION_R4000);
2289#endif
2187 if (!run_once) { 2290 if (!run_once) {
2188 scratch_reg = allocate_kscratch(); 2291 scratch_reg = allocate_kscratch();
2189#ifdef CONFIG_MIPS_PGD_C0_CONTEXT 2292#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
@@ -2192,9 +2295,12 @@ void __cpuinit build_tlb_refill_handler(void)
2192 build_r4000_tlb_load_handler(); 2295 build_r4000_tlb_load_handler();
2193 build_r4000_tlb_store_handler(); 2296 build_r4000_tlb_store_handler();
2194 build_r4000_tlb_modify_handler(); 2297 build_r4000_tlb_modify_handler();
2298 if (!cpu_has_local_ebase)
2299 build_r4000_tlb_refill_handler();
2195 run_once++; 2300 run_once++;
2196 } 2301 }
2197 build_r4000_tlb_refill_handler(); 2302 if (cpu_has_local_ebase)
2303 build_r4000_tlb_refill_handler();
2198 } 2304 }
2199} 2305}
2200 2306
@@ -2207,7 +2313,7 @@ void __cpuinit flush_tlb_handlers(void)
2207 local_flush_icache_range((unsigned long)handle_tlbm, 2313 local_flush_icache_range((unsigned long)handle_tlbm,
2208 (unsigned long)handle_tlbm + sizeof(handle_tlbm)); 2314 (unsigned long)handle_tlbm + sizeof(handle_tlbm));
2209#ifdef CONFIG_MIPS_PGD_C0_CONTEXT 2315#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
2210 local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd, 2316 local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd_array,
2211 (unsigned long)tlbmiss_handler_setup_pgd + sizeof(handle_tlbm)); 2317 (unsigned long)tlbmiss_handler_setup_pgd_array + sizeof(handle_tlbm));
2212#endif 2318#endif
2213} 2319}
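As with the other generated handlers, the freshly written tlbmiss_handler_setup_pgd_array has to be made visible to instruction fetch before it runs, which is what the local_flush_icache_range() call above does. A minimal sketch of the pattern (the wrapper name is illustrative):

	#include <asm/cacheflush.h>

	static void sync_generated_code(void *start, unsigned long len)
	{
		/* write back the D-cache and invalidate the I-cache over the range */
		local_flush_icache_range((unsigned long)start,
					 (unsigned long)start + len);
	}
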
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
new file mode 100644
index 000000000000..162ee6d62788
--- /dev/null
+++ b/arch/mips/mm/uasm-micromips.c
@@ -0,0 +1,221 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
 6 * A small micro-assembler. It is intentionally kept simple: it only
 7 * supports a subset of instructions, and does not try to hide pipeline
8 * effects like branch delay slots.
9 *
10 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
11 * Copyright (C) 2005, 2007 Maciej W. Rozycki
12 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
13 * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
14 */
15
16#include <linux/kernel.h>
17#include <linux/types.h>
18#include <linux/init.h>
19
20#include <asm/inst.h>
21#include <asm/elf.h>
22#include <asm/bugs.h>
23#define UASM_ISA _UASM_ISA_MICROMIPS
24#include <asm/uasm.h>
25
26#define RS_MASK 0x1f
27#define RS_SH 16
28#define RT_MASK 0x1f
29#define RT_SH 21
30#define SCIMM_MASK 0x3ff
31#define SCIMM_SH 16
32
33/* This macro sets the non-variable bits of an instruction. */
34#define M(a, b, c, d, e, f) \
35 ((a) << OP_SH \
36 | (b) << RT_SH \
37 | (c) << RS_SH \
38 | (d) << RD_SH \
39 | (e) << RE_SH \
40 | (f) << FUNC_SH)
41
 42/* Define these when this file's ISA is not the one the kernel is being compiled for. */
43#ifndef CONFIG_CPU_MICROMIPS
44#define MM_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
45#define MM_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
46#define MM_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
47#define MM_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
48#endif
49
50#include "uasm.c"
51
52static struct insn insn_table_MM[] __uasminitdata = {
53 { insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD },
54 { insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
55 { insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD },
56 { insn_andi, M(mm_andi32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
57 { insn_beq, M(mm_beq32_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
58 { insn_beql, 0, 0 },
59 { insn_bgez, M(mm_pool32i_op, mm_bgez_op, 0, 0, 0, 0), RS | BIMM },
60 { insn_bgezl, 0, 0 },
61 { insn_bltz, M(mm_pool32i_op, mm_bltz_op, 0, 0, 0, 0), RS | BIMM },
62 { insn_bltzl, 0, 0 },
63 { insn_bne, M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM },
64 { insn_cache, M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM },
65 { insn_daddu, 0, 0 },
66 { insn_daddiu, 0, 0 },
67 { insn_dmfc0, 0, 0 },
68 { insn_dmtc0, 0, 0 },
69 { insn_dsll, 0, 0 },
70 { insn_dsll32, 0, 0 },
71 { insn_dsra, 0, 0 },
72 { insn_dsrl, 0, 0 },
73 { insn_dsrl32, 0, 0 },
74 { insn_drotr, 0, 0 },
75 { insn_drotr32, 0, 0 },
76 { insn_dsubu, 0, 0 },
77 { insn_eret, M(mm_pool32a_op, 0, 0, 0, mm_eret_op, mm_pool32axf_op), 0 },
78 { insn_ins, M(mm_pool32a_op, 0, 0, 0, 0, mm_ins_op), RT | RS | RD | RE },
79 { insn_ext, M(mm_pool32a_op, 0, 0, 0, 0, mm_ext_op), RT | RS | RD | RE },
80 { insn_j, M(mm_j32_op, 0, 0, 0, 0, 0), JIMM },
81 { insn_jal, M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM },
82 { insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS },
83 { insn_ld, 0, 0 },
84 { insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM },
85 { insn_lld, 0, 0 },
86 { insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM },
87 { insn_lw, M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
88 { insn_mfc0, M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD },
89 { insn_mtc0, M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD },
90 { insn_or, M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD },
91 { insn_ori, M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
92 { insn_pref, M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM },
93 { insn_rfe, 0, 0 },
94 { insn_sc, M(mm_pool32c_op, 0, 0, (mm_sc_func << 1), 0, 0), RT | RS | SIMM },
95 { insn_scd, 0, 0 },
96 { insn_sd, 0, 0 },
97 { insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD },
98 { insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD },
99 { insn_srl, M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD },
100 { insn_rotr, M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD },
101 { insn_subu, M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD },
102 { insn_sw, M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
103 { insn_tlbp, M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0 },
104 { insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 },
105 { insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 },
106 { insn_tlbwr, M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0 },
107 { insn_xor, M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD },
108 { insn_xori, M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
109 { insn_dins, 0, 0 },
110 { insn_dinsm, 0, 0 },
111 { insn_syscall, M(mm_pool32a_op, 0, 0, 0, mm_syscall_op, mm_pool32axf_op), SCIMM},
112 { insn_bbit0, 0, 0 },
113 { insn_bbit1, 0, 0 },
114 { insn_lwx, 0, 0 },
115 { insn_ldx, 0, 0 },
116 { insn_invalid, 0, 0 }
117};
118
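To make the field layout concrete: in this microMIPS table RT sits at bit 21 and RS at bit 16, the reverse of the classic layout in uasm-mips.c. A worked, illustrative encoder for the ADDIU32 entry above (the standalone helper is an assumption; only the shift positions come from this file):

	static unsigned int mm_encode_addiu(unsigned int rt, unsigned int rs, int imm)
	{
		unsigned int op = mm_addiu32_op << 26;	/* major opcode, OP_SH == 26 */

		op |= (rt & 0x1f) << 21;		/* RT_SH for microMIPS */
		op |= (rs & 0x1f) << 16;		/* RS_SH for microMIPS */
		op |= imm & 0xffff;			/* signed 16-bit immediate */
		return op;
	}
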
119#undef M
120
121static inline __uasminit u32 build_bimm(s32 arg)
122{
123 WARN(arg > 0xffff || arg < -0x10000,
124 KERN_WARNING "Micro-assembler field overflow\n");
125
126 WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
127
128 return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff);
129}
130
131static inline __uasminit u32 build_jimm(u32 arg)
132{
133
134 WARN(arg & ~((JIMM_MASK << 2) | 1),
135 KERN_WARNING "Micro-assembler field overflow\n");
136
137 return (arg >> 1) & JIMM_MASK;
138}
139
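Both helpers above count offsets in halfword units, unlike their classic-MIPS counterparts in uasm-mips.c which shift by two. For example, a backward branch of 8 bytes encodes as 0xfffc: the sign lands in bit 15 and (-8 >> 1) & 0x7fff == 0x7ffc. A small illustrative check of that arithmetic:

	#include <assert.h>

	static unsigned int mm_bimm(int off)	/* mirrors build_bimm() above */
	{
		return ((off < 0) ? (1u << 15) : 0) | ((off >> 1) & 0x7fff);
	}

	static void bimm_example(void)
	{
		assert(mm_bimm(-8) == 0xfffc);	/* -8 bytes == -4 halfwords */
		assert(mm_bimm(16) == 0x0008);	/* +16 bytes == +8 halfwords */
	}
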
140/*
141 * The order of opcode arguments is implicitly left to right,
142 * starting with RS and ending with FUNC or IMM.
143 */
144static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
145{
146 struct insn *ip = NULL;
147 unsigned int i;
148 va_list ap;
149 u32 op;
150
151 for (i = 0; insn_table_MM[i].opcode != insn_invalid; i++)
152 if (insn_table_MM[i].opcode == opc) {
153 ip = &insn_table_MM[i];
154 break;
155 }
156
157 if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
158 panic("Unsupported Micro-assembler instruction %d", opc);
159
160 op = ip->match;
161 va_start(ap, opc);
162 if (ip->fields & RS) {
163 if (opc == insn_mfc0 || opc == insn_mtc0)
164 op |= build_rt(va_arg(ap, u32));
165 else
166 op |= build_rs(va_arg(ap, u32));
167 }
168 if (ip->fields & RT) {
169 if (opc == insn_mfc0 || opc == insn_mtc0)
170 op |= build_rs(va_arg(ap, u32));
171 else
172 op |= build_rt(va_arg(ap, u32));
173 }
174 if (ip->fields & RD)
175 op |= build_rd(va_arg(ap, u32));
176 if (ip->fields & RE)
177 op |= build_re(va_arg(ap, u32));
178 if (ip->fields & SIMM)
179 op |= build_simm(va_arg(ap, s32));
180 if (ip->fields & UIMM)
181 op |= build_uimm(va_arg(ap, u32));
182 if (ip->fields & BIMM)
183 op |= build_bimm(va_arg(ap, s32));
184 if (ip->fields & JIMM)
185 op |= build_jimm(va_arg(ap, u32));
186 if (ip->fields & FUNC)
187 op |= build_func(va_arg(ap, u32));
188 if (ip->fields & SET)
189 op |= build_set(va_arg(ap, u32));
190 if (ip->fields & SCIMM)
191 op |= build_scimm(va_arg(ap, u32));
192 va_end(ap);
193
194#ifdef CONFIG_CPU_LITTLE_ENDIAN
195 **buf = ((op & 0xffff) << 16) | (op >> 16);
196#else
197 **buf = op;
198#endif
199 (*buf)++;
200}
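The CONFIG_CPU_LITTLE_ENDIAN swap just above reflects microMIPS instruction memory order: a 32-bit instruction is stored as two 16-bit units with the halfword holding the major opcode first, so on a little-endian kernel the assembled word is rotated by 16 bits before being written. A one-line illustrative helper (u32 as in <linux/types.h>):

	static inline u32 mm_store_order(u32 op)
	{
		return (op << 16) | (op >> 16);	/* swap the two halfwords */
	}
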
201
202static inline void __uasminit
203__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
204{
205 long laddr = (long)lab->addr;
206 long raddr = (long)rel->addr;
207
208 switch (rel->type) {
209 case R_MIPS_PC16:
210#ifdef CONFIG_CPU_LITTLE_ENDIAN
211 *rel->addr |= (build_bimm(laddr - (raddr + 4)) << 16);
212#else
213 *rel->addr |= build_bimm(laddr - (raddr + 4));
214#endif
215 break;
216
217 default:
218 panic("Unsupported Micro-assembler relocation %d",
219 rel->type);
220 }
221}
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
new file mode 100644
index 000000000000..5fcdd8fe3e83
--- /dev/null
+++ b/arch/mips/mm/uasm-mips.c
@@ -0,0 +1,205 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
 6 * A small micro-assembler. It is intentionally kept simple: it only
 7 * supports a subset of instructions, and does not try to hide pipeline
8 * effects like branch delay slots.
9 *
10 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
11 * Copyright (C) 2005, 2007 Maciej W. Rozycki
12 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
13 * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
14 */
15
16#include <linux/kernel.h>
17#include <linux/types.h>
18#include <linux/init.h>
19
20#include <asm/inst.h>
21#include <asm/elf.h>
22#include <asm/bugs.h>
23#define UASM_ISA _UASM_ISA_CLASSIC
24#include <asm/uasm.h>
25
26#define RS_MASK 0x1f
27#define RS_SH 21
28#define RT_MASK 0x1f
29#define RT_SH 16
30#define SCIMM_MASK 0xfffff
31#define SCIMM_SH 6
32
33/* This macro sets the non-variable bits of an instruction. */
34#define M(a, b, c, d, e, f) \
35 ((a) << OP_SH \
36 | (b) << RS_SH \
37 | (c) << RT_SH \
38 | (d) << RD_SH \
39 | (e) << RE_SH \
40 | (f) << FUNC_SH)
41
 42/* Define these when this file's ISA is not the one the kernel is being compiled for. */
43#ifdef CONFIG_CPU_MICROMIPS
44#define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
45#define CL_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
46#define CL_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
47#define CL_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
48#endif
49
50#include "uasm.c"
51
52static struct insn insn_table[] __uasminitdata = {
53 { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
54 { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
55 { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
56 { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
57 { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
58 { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
59 { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
60 { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
61 { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
62 { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
63 { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
64 { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
65 { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
66 { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
67 { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
68 { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
69 { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
70 { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
71 { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
72 { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
73 { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
74 { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
75 { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
76 { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
77 { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
78 { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
79 { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
80 { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
81 { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
82 { insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
83 { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
84 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
85 { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
86 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
87 { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
88 { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
89 { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
90 { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
91 { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
92 { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
93 { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
94 { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
95 { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
96 { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
97 { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
98 { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
99 { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
100 { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
101 { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
102 { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
103 { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
104 { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
105 { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
106 { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
107 { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
108 { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
109 { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
110 { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
111 { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
112 { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 },
113 { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
114 { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
115 { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
116 { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
117 { insn_invalid, 0, 0 }
118};
119
120#undef M
121
122static inline __uasminit u32 build_bimm(s32 arg)
123{
124 WARN(arg > 0x1ffff || arg < -0x20000,
125 KERN_WARNING "Micro-assembler field overflow\n");
126
127 WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
128
129 return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
130}
131
132static inline __uasminit u32 build_jimm(u32 arg)
133{
134 WARN(arg & ~(JIMM_MASK << 2),
135 KERN_WARNING "Micro-assembler field overflow\n");
136
137 return (arg >> 2) & JIMM_MASK;
138}
139
140/*
141 * The order of opcode arguments is implicitly left to right,
142 * starting with RS and ending with FUNC or IMM.
143 */
144static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
145{
146 struct insn *ip = NULL;
147 unsigned int i;
148 va_list ap;
149 u32 op;
150
151 for (i = 0; insn_table[i].opcode != insn_invalid; i++)
152 if (insn_table[i].opcode == opc) {
153 ip = &insn_table[i];
154 break;
155 }
156
157 if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
158 panic("Unsupported Micro-assembler instruction %d", opc);
159
160 op = ip->match;
161 va_start(ap, opc);
162 if (ip->fields & RS)
163 op |= build_rs(va_arg(ap, u32));
164 if (ip->fields & RT)
165 op |= build_rt(va_arg(ap, u32));
166 if (ip->fields & RD)
167 op |= build_rd(va_arg(ap, u32));
168 if (ip->fields & RE)
169 op |= build_re(va_arg(ap, u32));
170 if (ip->fields & SIMM)
171 op |= build_simm(va_arg(ap, s32));
172 if (ip->fields & UIMM)
173 op |= build_uimm(va_arg(ap, u32));
174 if (ip->fields & BIMM)
175 op |= build_bimm(va_arg(ap, s32));
176 if (ip->fields & JIMM)
177 op |= build_jimm(va_arg(ap, u32));
178 if (ip->fields & FUNC)
179 op |= build_func(va_arg(ap, u32));
180 if (ip->fields & SET)
181 op |= build_set(va_arg(ap, u32));
182 if (ip->fields & SCIMM)
183 op |= build_scimm(va_arg(ap, u32));
184 va_end(ap);
185
186 **buf = op;
187 (*buf)++;
188}
189
190static inline void __uasminit
191__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
192{
193 long laddr = (long)lab->addr;
194 long raddr = (long)rel->addr;
195
196 switch (rel->type) {
197 case R_MIPS_PC16:
198 *rel->addr |= build_bimm(laddr - (raddr + 4));
199 break;
200
201 default:
202 panic("Unsupported Micro-assembler relocation %d",
203 rel->type);
204 }
205}
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 942ff6c2eba2..7eb5e4355d25 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -10,17 +10,9 @@
10 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer 10 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
11 * Copyright (C) 2005, 2007 Maciej W. Rozycki 11 * Copyright (C) 2005, 2007 Maciej W. Rozycki
12 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) 12 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
13 * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
13 */ 14 */
14 15
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/init.h>
18
19#include <asm/inst.h>
20#include <asm/elf.h>
21#include <asm/bugs.h>
22#include <asm/uasm.h>
23
24enum fields { 16enum fields {
25 RS = 0x001, 17 RS = 0x001,
26 RT = 0x002, 18 RT = 0x002,
@@ -37,10 +29,6 @@ enum fields {
37 29
38#define OP_MASK 0x3f 30#define OP_MASK 0x3f
39#define OP_SH 26 31#define OP_SH 26
40#define RS_MASK 0x1f
41#define RS_SH 21
42#define RT_MASK 0x1f
43#define RT_SH 16
44#define RD_MASK 0x1f 32#define RD_MASK 0x1f
45#define RD_SH 11 33#define RD_SH 11
46#define RE_MASK 0x1f 34#define RE_MASK 0x1f
@@ -53,8 +41,6 @@ enum fields {
53#define FUNC_SH 0 41#define FUNC_SH 0
54#define SET_MASK 0x7 42#define SET_MASK 0x7
55#define SET_SH 0 43#define SET_SH 0
56#define SCIMM_MASK 0xfffff
57#define SCIMM_SH 6
58 44
59enum opcode { 45enum opcode {
60 insn_invalid, 46 insn_invalid,
@@ -77,85 +63,6 @@ struct insn {
77 enum fields fields; 63 enum fields fields;
78}; 64};
79 65
80/* This macro sets the non-variable bits of an instruction. */
81#define M(a, b, c, d, e, f) \
82 ((a) << OP_SH \
83 | (b) << RS_SH \
84 | (c) << RT_SH \
85 | (d) << RD_SH \
86 | (e) << RE_SH \
87 | (f) << FUNC_SH)
88
89static struct insn insn_table[] __uasminitdata = {
90 { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
91 { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
92 { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
93 { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
94 { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
95 { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
96 { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
97 { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
98 { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
99 { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
100 { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
101 { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
102 { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
103 { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
104 { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
105 { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
106 { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
107 { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
108 { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
109 { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
110 { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
111 { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
112 { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
113 { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
114 { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
115 { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
116 { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
117 { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
118 { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
119 { insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
120 { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
121 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
122 { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
123 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
124 { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
125 { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
126 { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
127 { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
128 { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
129 { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
130 { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
131 { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
132 { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
133 { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
134 { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
135 { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
136 { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
137 { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
138 { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
139 { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
140 { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
141 { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
142 { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
143 { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
144 { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
145 { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
146 { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
147 { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
148 { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
149 { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 },
150 { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
151 { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
152 { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
153 { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
154 { insn_invalid, 0, 0 }
155};
156
157#undef M
158
159static inline __uasminit u32 build_rs(u32 arg) 66static inline __uasminit u32 build_rs(u32 arg)
160{ 67{
161 WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n"); 68 WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n");
@@ -199,24 +106,6 @@ static inline __uasminit u32 build_uimm(u32 arg)
199 return arg & IMM_MASK; 106 return arg & IMM_MASK;
200} 107}
201 108
202static inline __uasminit u32 build_bimm(s32 arg)
203{
204 WARN(arg > 0x1ffff || arg < -0x20000,
205 KERN_WARNING "Micro-assembler field overflow\n");
206
207 WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
208
209 return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
210}
211
212static inline __uasminit u32 build_jimm(u32 arg)
213{
214 WARN(arg & ~(JIMM_MASK << 2),
215 KERN_WARNING "Micro-assembler field overflow\n");
216
217 return (arg >> 2) & JIMM_MASK;
218}
219
220static inline __uasminit u32 build_scimm(u32 arg) 109static inline __uasminit u32 build_scimm(u32 arg)
221{ 110{
222 WARN(arg & ~SCIMM_MASK, 111 WARN(arg & ~SCIMM_MASK,
@@ -239,55 +128,7 @@ static inline __uasminit u32 build_set(u32 arg)
239 return arg & SET_MASK; 128 return arg & SET_MASK;
240} 129}
241 130
242/* 131static void __uasminit build_insn(u32 **buf, enum opcode opc, ...);
243 * The order of opcode arguments is implicitly left to right,
244 * starting with RS and ending with FUNC or IMM.
245 */
246static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
247{
248 struct insn *ip = NULL;
249 unsigned int i;
250 va_list ap;
251 u32 op;
252
253 for (i = 0; insn_table[i].opcode != insn_invalid; i++)
254 if (insn_table[i].opcode == opc) {
255 ip = &insn_table[i];
256 break;
257 }
258
259 if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
260 panic("Unsupported Micro-assembler instruction %d", opc);
261
262 op = ip->match;
263 va_start(ap, opc);
264 if (ip->fields & RS)
265 op |= build_rs(va_arg(ap, u32));
266 if (ip->fields & RT)
267 op |= build_rt(va_arg(ap, u32));
268 if (ip->fields & RD)
269 op |= build_rd(va_arg(ap, u32));
270 if (ip->fields & RE)
271 op |= build_re(va_arg(ap, u32));
272 if (ip->fields & SIMM)
273 op |= build_simm(va_arg(ap, s32));
274 if (ip->fields & UIMM)
275 op |= build_uimm(va_arg(ap, u32));
276 if (ip->fields & BIMM)
277 op |= build_bimm(va_arg(ap, s32));
278 if (ip->fields & JIMM)
279 op |= build_jimm(va_arg(ap, u32));
280 if (ip->fields & FUNC)
281 op |= build_func(va_arg(ap, u32));
282 if (ip->fields & SET)
283 op |= build_set(va_arg(ap, u32));
284 if (ip->fields & SCIMM)
285 op |= build_scimm(va_arg(ap, u32));
286 va_end(ap);
287
288 **buf = op;
289 (*buf)++;
290}
291 132
292#define I_u1u2u3(op) \ 133#define I_u1u2u3(op) \
293Ip_u1u2u3(op) \ 134Ip_u1u2u3(op) \
@@ -445,7 +286,7 @@ I_u3u1u2(_ldx)
445 286
446#ifdef CONFIG_CPU_CAVIUM_OCTEON 287#ifdef CONFIG_CPU_CAVIUM_OCTEON
447#include <asm/octeon/octeon.h> 288#include <asm/octeon/octeon.h>
448void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b, 289void __uasminit ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
449 unsigned int c) 290 unsigned int c)
450{ 291{
451 if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5) 292 if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
@@ -457,21 +298,21 @@ void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b,
457 else 298 else
458 build_insn(buf, insn_pref, c, a, b); 299 build_insn(buf, insn_pref, c, a, b);
459} 300}
460UASM_EXPORT_SYMBOL(uasm_i_pref); 301UASM_EXPORT_SYMBOL(ISAFUNC(uasm_i_pref));
461#else 302#else
462I_u2s3u1(_pref) 303I_u2s3u1(_pref)
463#endif 304#endif
464 305
465/* Handle labels. */ 306/* Handle labels. */
466void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid) 307void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid)
467{ 308{
468 (*lab)->addr = addr; 309 (*lab)->addr = addr;
469 (*lab)->lab = lid; 310 (*lab)->lab = lid;
470 (*lab)++; 311 (*lab)++;
471} 312}
472UASM_EXPORT_SYMBOL(uasm_build_label); 313UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label));
473 314
474int __uasminit uasm_in_compat_space_p(long addr) 315int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr)
475{ 316{
476 /* Is this address in 32bit compat space? */ 317 /* Is this address in 32bit compat space? */
477#ifdef CONFIG_64BIT 318#ifdef CONFIG_64BIT
@@ -480,7 +321,7 @@ int __uasminit uasm_in_compat_space_p(long addr)
480 return 1; 321 return 1;
481#endif 322#endif
482} 323}
483UASM_EXPORT_SYMBOL(uasm_in_compat_space_p); 324UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p));
484 325
485static int __uasminit uasm_rel_highest(long val) 326static int __uasminit uasm_rel_highest(long val)
486{ 327{
@@ -500,77 +341,66 @@ static int __uasminit uasm_rel_higher(long val)
500#endif 341#endif
501} 342}
502 343
503int __uasminit uasm_rel_hi(long val) 344int __uasminit ISAFUNC(uasm_rel_hi)(long val)
504{ 345{
505 return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000; 346 return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
506} 347}
507UASM_EXPORT_SYMBOL(uasm_rel_hi); 348UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi));
508 349
509int __uasminit uasm_rel_lo(long val) 350int __uasminit ISAFUNC(uasm_rel_lo)(long val)
510{ 351{
511 return ((val & 0xffff) ^ 0x8000) - 0x8000; 352 return ((val & 0xffff) ^ 0x8000) - 0x8000;
512} 353}
513UASM_EXPORT_SYMBOL(uasm_rel_lo); 354UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo));
514 355
515void __uasminit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr) 356void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
516{ 357{
517 if (!uasm_in_compat_space_p(addr)) { 358 if (!ISAFUNC(uasm_in_compat_space_p)(addr)) {
518 uasm_i_lui(buf, rs, uasm_rel_highest(addr)); 359 ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr));
519 if (uasm_rel_higher(addr)) 360 if (uasm_rel_higher(addr))
520 uasm_i_daddiu(buf, rs, rs, uasm_rel_higher(addr)); 361 ISAFUNC(uasm_i_daddiu)(buf, rs, rs, uasm_rel_higher(addr));
521 if (uasm_rel_hi(addr)) { 362 if (ISAFUNC(uasm_rel_hi(addr))) {
522 uasm_i_dsll(buf, rs, rs, 16); 363 ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
523 uasm_i_daddiu(buf, rs, rs, uasm_rel_hi(addr)); 364 ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
524 uasm_i_dsll(buf, rs, rs, 16); 365 ISAFUNC(uasm_rel_hi)(addr));
366 ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
525 } else 367 } else
526 uasm_i_dsll32(buf, rs, rs, 0); 368 ISAFUNC(uasm_i_dsll32)(buf, rs, rs, 0);
527 } else 369 } else
528 uasm_i_lui(buf, rs, uasm_rel_hi(addr)); 370 ISAFUNC(uasm_i_lui)(buf, rs, ISAFUNC(uasm_rel_hi(addr)));
529} 371}
530UASM_EXPORT_SYMBOL(UASM_i_LA_mostly); 372UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly));
531 373
532void __uasminit UASM_i_LA(u32 **buf, unsigned int rs, long addr) 374void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
533{ 375{
534 UASM_i_LA_mostly(buf, rs, addr); 376 ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr);
535 if (uasm_rel_lo(addr)) { 377 if (ISAFUNC(uasm_rel_lo(addr))) {
536 if (!uasm_in_compat_space_p(addr)) 378 if (!ISAFUNC(uasm_in_compat_space_p)(addr))
537 uasm_i_daddiu(buf, rs, rs, uasm_rel_lo(addr)); 379 ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
380 ISAFUNC(uasm_rel_lo(addr)));
538 else 381 else
539 uasm_i_addiu(buf, rs, rs, uasm_rel_lo(addr)); 382 ISAFUNC(uasm_i_addiu)(buf, rs, rs,
383 ISAFUNC(uasm_rel_lo(addr)));
540 } 384 }
541} 385}
542UASM_EXPORT_SYMBOL(UASM_i_LA); 386UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA));
543 387
544/* Handle relocations. */ 388/* Handle relocations. */
545void __uasminit 389void __uasminit
546uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid) 390ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
547{ 391{
548 (*rel)->addr = addr; 392 (*rel)->addr = addr;
549 (*rel)->type = R_MIPS_PC16; 393 (*rel)->type = R_MIPS_PC16;
550 (*rel)->lab = lid; 394 (*rel)->lab = lid;
551 (*rel)++; 395 (*rel)++;
552} 396}
553UASM_EXPORT_SYMBOL(uasm_r_mips_pc16); 397UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16));
554 398
555static inline void __uasminit 399static inline void __uasminit
556__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) 400__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
557{
558 long laddr = (long)lab->addr;
559 long raddr = (long)rel->addr;
560
561 switch (rel->type) {
562 case R_MIPS_PC16:
563 *rel->addr |= build_bimm(laddr - (raddr + 4));
564 break;
565
566 default:
567 panic("Unsupported Micro-assembler relocation %d",
568 rel->type);
569 }
570}
571 401
572void __uasminit 402void __uasminit
573uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) 403ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab)
574{ 404{
575 struct uasm_label *l; 405 struct uasm_label *l;
576 406
@@ -579,40 +409,40 @@ uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
579 if (rel->lab == l->lab) 409 if (rel->lab == l->lab)
580 __resolve_relocs(rel, l); 410 __resolve_relocs(rel, l);
581} 411}
582UASM_EXPORT_SYMBOL(uasm_resolve_relocs); 412UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs));
583 413
584void __uasminit 414void __uasminit
585uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off) 415ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
586{ 416{
587 for (; rel->lab != UASM_LABEL_INVALID; rel++) 417 for (; rel->lab != UASM_LABEL_INVALID; rel++)
588 if (rel->addr >= first && rel->addr < end) 418 if (rel->addr >= first && rel->addr < end)
589 rel->addr += off; 419 rel->addr += off;
590} 420}
591UASM_EXPORT_SYMBOL(uasm_move_relocs); 421UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs));
592 422
593void __uasminit 423void __uasminit
594uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off) 424ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off)
595{ 425{
596 for (; lab->lab != UASM_LABEL_INVALID; lab++) 426 for (; lab->lab != UASM_LABEL_INVALID; lab++)
597 if (lab->addr >= first && lab->addr < end) 427 if (lab->addr >= first && lab->addr < end)
598 lab->addr += off; 428 lab->addr += off;
599} 429}
600UASM_EXPORT_SYMBOL(uasm_move_labels); 430UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels));
601 431
602void __uasminit 432void __uasminit
603uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first, 433ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
604 u32 *end, u32 *target) 434 u32 *end, u32 *target)
605{ 435{
606 long off = (long)(target - first); 436 long off = (long)(target - first);
607 437
608 memcpy(target, first, (end - first) * sizeof(u32)); 438 memcpy(target, first, (end - first) * sizeof(u32));
609 439
610 uasm_move_relocs(rel, first, end, off); 440 ISAFUNC(uasm_move_relocs(rel, first, end, off));
611 uasm_move_labels(lab, first, end, off); 441 ISAFUNC(uasm_move_labels(lab, first, end, off));
612} 442}
613UASM_EXPORT_SYMBOL(uasm_copy_handler); 443UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler));
614 444
615int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr) 445int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
616{ 446{
617 for (; rel->lab != UASM_LABEL_INVALID; rel++) { 447 for (; rel->lab != UASM_LABEL_INVALID; rel++) {
618 if (rel->addr == addr 448 if (rel->addr == addr
@@ -623,88 +453,88 @@ int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
623 453
624 return 0; 454 return 0;
625} 455}
626UASM_EXPORT_SYMBOL(uasm_insn_has_bdelay); 456UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay));
627 457
628/* Convenience functions for labeled branches. */ 458/* Convenience functions for labeled branches. */
629void __uasminit 459void __uasminit
630uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 460ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
631{ 461{
632 uasm_r_mips_pc16(r, *p, lid); 462 uasm_r_mips_pc16(r, *p, lid);
633 uasm_i_bltz(p, reg, 0); 463 ISAFUNC(uasm_i_bltz)(p, reg, 0);
634} 464}
635UASM_EXPORT_SYMBOL(uasm_il_bltz); 465UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz));
636 466
637void __uasminit 467void __uasminit
638uasm_il_b(u32 **p, struct uasm_reloc **r, int lid) 468ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
639{ 469{
640 uasm_r_mips_pc16(r, *p, lid); 470 uasm_r_mips_pc16(r, *p, lid);
641 uasm_i_b(p, 0); 471 ISAFUNC(uasm_i_b)(p, 0);
642} 472}
643UASM_EXPORT_SYMBOL(uasm_il_b); 473UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b));
644 474
645void __uasminit 475void __uasminit
646uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 476ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
647{ 477{
648 uasm_r_mips_pc16(r, *p, lid); 478 uasm_r_mips_pc16(r, *p, lid);
649 uasm_i_beqz(p, reg, 0); 479 ISAFUNC(uasm_i_beqz)(p, reg, 0);
650} 480}
651UASM_EXPORT_SYMBOL(uasm_il_beqz); 481UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz));
652 482
653void __uasminit 483void __uasminit
654uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 484ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
655{ 485{
656 uasm_r_mips_pc16(r, *p, lid); 486 uasm_r_mips_pc16(r, *p, lid);
657 uasm_i_beqzl(p, reg, 0); 487 ISAFUNC(uasm_i_beqzl)(p, reg, 0);
658} 488}
659UASM_EXPORT_SYMBOL(uasm_il_beqzl); 489UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl));
660 490
661void __uasminit 491void __uasminit
662uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1, 492ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1,
663 unsigned int reg2, int lid) 493 unsigned int reg2, int lid)
664{ 494{
665 uasm_r_mips_pc16(r, *p, lid); 495 uasm_r_mips_pc16(r, *p, lid);
666 uasm_i_bne(p, reg1, reg2, 0); 496 ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0);
667} 497}
668UASM_EXPORT_SYMBOL(uasm_il_bne); 498UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne));
669 499
670void __uasminit 500void __uasminit
671uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 501ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
672{ 502{
673 uasm_r_mips_pc16(r, *p, lid); 503 uasm_r_mips_pc16(r, *p, lid);
674 uasm_i_bnez(p, reg, 0); 504 ISAFUNC(uasm_i_bnez)(p, reg, 0);
675} 505}
676UASM_EXPORT_SYMBOL(uasm_il_bnez); 506UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez));
677 507
678void __uasminit 508void __uasminit
679uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 509ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
680{ 510{
681 uasm_r_mips_pc16(r, *p, lid); 511 uasm_r_mips_pc16(r, *p, lid);
682 uasm_i_bgezl(p, reg, 0); 512 ISAFUNC(uasm_i_bgezl)(p, reg, 0);
683} 513}
684UASM_EXPORT_SYMBOL(uasm_il_bgezl); 514UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl));
685 515
686void __uasminit 516void __uasminit
687uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 517ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
688{ 518{
689 uasm_r_mips_pc16(r, *p, lid); 519 uasm_r_mips_pc16(r, *p, lid);
690 uasm_i_bgez(p, reg, 0); 520 ISAFUNC(uasm_i_bgez)(p, reg, 0);
691} 521}
692UASM_EXPORT_SYMBOL(uasm_il_bgez); 522UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez));
693 523
694void __uasminit 524void __uasminit
695uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg, 525ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg,
696 unsigned int bit, int lid) 526 unsigned int bit, int lid)
697{ 527{
698 uasm_r_mips_pc16(r, *p, lid); 528 uasm_r_mips_pc16(r, *p, lid);
699 uasm_i_bbit0(p, reg, bit, 0); 529 ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0);
700} 530}
701UASM_EXPORT_SYMBOL(uasm_il_bbit0); 531UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0));
702 532
703void __uasminit 533void __uasminit
704uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg, 534ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg,
705 unsigned int bit, int lid) 535 unsigned int bit, int lid)
706{ 536{
707 uasm_r_mips_pc16(r, *p, lid); 537 uasm_r_mips_pc16(r, *p, lid);
708 uasm_i_bbit1(p, reg, bit, 0); 538 ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0);
709} 539}
710UASM_EXPORT_SYMBOL(uasm_il_bbit1); 540UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit1));
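With uasm.c now textually included from uasm-mips.c and uasm-micromips.c, the ISAFUNC() wrapper (defined in asm/uasm.h, which is not part of this hunk) gives each public symbol an ISA-specific name so that the classic and microMIPS flavours can coexist in one kernel. Its real definition is not shown here; one plausible shape, purely as an assumption for illustration:

	/* NOT the real asm/uasm.h definition -- an illustrative token-pasting shape. */
	#ifdef UASM_ISA_MATCHES_KERNEL_ISA		/* hypothetical condition */
	#define ISAFUNC(x)	x			/* native ISA keeps the plain name */
	#else
	#define ISAFUNC(x)	ISA_PREFIX_ ## x	/* the other ISA gets a prefix */
	#endif
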
diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile
index 6079ef33b5f0..0388fc8b5613 100644
--- a/arch/mips/mti-malta/Makefile
+++ b/arch/mips/mti-malta/Makefile
@@ -5,9 +5,8 @@
5# Copyright (C) 2008 Wind River Systems, Inc. 5# Copyright (C) 2008 Wind River Systems, Inc.
6# written by Ralf Baechle <ralf@linux-mips.org> 6# written by Ralf Baechle <ralf@linux-mips.org>
7# 7#
8obj-y := malta-amon.o malta-cmdline.o \ 8obj-y := malta-amon.o malta-display.o malta-init.o \
9 malta-display.o malta-init.o malta-int.o \ 9 malta-int.o malta-memory.o malta-platform.o \
10 malta-memory.o malta-platform.o \
11 malta-reset.o malta-setup.o malta-time.o 10 malta-reset.o malta-setup.o malta-time.o
12 11
13obj-$(CONFIG_EARLY_PRINTK) += malta-console.o 12obj-$(CONFIG_EARLY_PRINTK) += malta-console.o
diff --git a/arch/mips/mti-malta/Platform b/arch/mips/mti-malta/Platform
index 5b548b5a4fcf..2cc72c9b38e3 100644
--- a/arch/mips/mti-malta/Platform
+++ b/arch/mips/mti-malta/Platform
@@ -3,5 +3,9 @@
3# 3#
4platform-$(CONFIG_MIPS_MALTA) += mti-malta/ 4platform-$(CONFIG_MIPS_MALTA) += mti-malta/
5cflags-$(CONFIG_MIPS_MALTA) += -I$(srctree)/arch/mips/include/asm/mach-malta 5cflags-$(CONFIG_MIPS_MALTA) += -I$(srctree)/arch/mips/include/asm/mach-malta
6load-$(CONFIG_MIPS_MALTA) += 0xffffffff80100000 6ifdef CONFIG_KVM_GUEST
7 load-$(CONFIG_MIPS_MALTA) += 0x0000000040100000
8else
9 load-$(CONFIG_MIPS_MALTA) += 0xffffffff80100000
10endif
7all-$(CONFIG_MIPS_MALTA) := $(COMPRESSION_FNAME).bin 11all-$(CONFIG_MIPS_MALTA) := $(COMPRESSION_FNAME).bin
diff --git a/arch/mips/mti-malta/malta-cmdline.c b/arch/mips/mti-malta/malta-cmdline.c
deleted file mode 100644
index 5576a306a145..000000000000
--- a/arch/mips/mti-malta/malta-cmdline.c
+++ /dev/null
@@ -1,59 +0,0 @@
1/*
2 * Carsten Langgaard, carstenl@mips.com
3 * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
4 *
5 * This program is free software; you can distribute it and/or modify it
6 * under the terms of the GNU General Public License (Version 2) as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
17 *
18 * Kernel command line creation using the prom monitor (YAMON) argc/argv.
19 */
20#include <linux/init.h>
21#include <linux/string.h>
22
23#include <asm/bootinfo.h>
24
25extern int prom_argc;
26extern int *_prom_argv;
27
28/*
29 * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer.
30 * This macro take care of sign extension.
31 */
32#define prom_argv(index) ((char *)(long)_prom_argv[(index)])
33
34char * __init prom_getcmdline(void)
35{
36 return &(arcs_cmdline[0]);
37}
38
39
40void __init prom_init_cmdline(void)
41{
42 char *cp;
43 int actr;
44
45 actr = 1; /* Always ignore argv[0] */
46
47 cp = &(arcs_cmdline[0]);
48 while(actr < prom_argc) {
49 strcpy(cp, prom_argv(actr));
50 cp += strlen(prom_argv(actr));
51 *cp++ = ' ';
52 actr++;
53 }
54 if (cp != &(arcs_cmdline[0])) {
55 /* get rid of trailing space */
56 --cp;
57 *cp = '\0';
58 }
59}
diff --git a/arch/mips/mti-malta/malta-display.c b/arch/mips/mti-malta/malta-display.c
index 9bc58a24e80a..d4f807191ecd 100644
--- a/arch/mips/mti-malta/malta-display.c
+++ b/arch/mips/mti-malta/malta-display.c
@@ -1,28 +1,20 @@
1/* 1/*
2 * Carsten Langgaard, carstenl@mips.com 2 * This file is subject to the terms and conditions of the GNU General Public
3 * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved. 3 * License. See the file "COPYING" in the main directory of this archive
4 * 4 * for more details.
5 * This program is free software; you can distribute it and/or modify it
6 * under the terms of the GNU General Public License (Version 2) as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
17 * 5 *
18 * Display routines for display messages in MIPS boards ascii display. 6 * Display routines for display messages in MIPS boards ascii display.
7 *
8 * Copyright (C) 1999,2000,2012 MIPS Technologies, Inc.
9 * All rights reserved.
10 * Authors: Carsten Langgaard <carstenl@mips.com>
11 * Steven J. Hill <sjhill@mips.com>
19 */ 12 */
20
21#include <linux/compiler.h> 13#include <linux/compiler.h>
22#include <linux/timer.h> 14#include <linux/timer.h>
23#include <asm/io.h> 15#include <linux/io.h>
16
24#include <asm/mips-boards/generic.h> 17#include <asm/mips-boards/generic.h>
25#include <asm/mips-boards/prom.h>
26 18
27extern const char display_string[]; 19extern const char display_string[];
28static unsigned int display_count; 20static unsigned int display_count;
@@ -36,11 +28,11 @@ void mips_display_message(const char *str)
36 if (unlikely(display == NULL)) 28 if (unlikely(display == NULL))
37 display = ioremap(ASCII_DISPLAY_POS_BASE, 16*sizeof(int)); 29 display = ioremap(ASCII_DISPLAY_POS_BASE, 16*sizeof(int));
38 30
39 for (i = 0; i <= 14; i=i+2) { 31 for (i = 0; i <= 14; i += 2) {
40 if (*str) 32 if (*str)
41 __raw_writel(*str++, display + i); 33 __raw_writel(*str++, display + i);
42 else 34 else
43 __raw_writel(' ', display + i); 35 __raw_writel(' ', display + i);
44 } 36 }
45} 37}
46 38
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index c2cbce9e435e..ff8caffd3266 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -1,54 +1,28 @@
1/* 1/*
2 * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc. 2 * This file is subject to the terms and conditions of the GNU General Public
3 * All rights reserved. 3 * License. See the file "COPYING" in the main directory of this archive
4 * Authors: Carsten Langgaard <carstenl@mips.com> 4 * for more details.
5 * Maciej W. Rozycki <macro@mips.com>
6 *
7 * This program is free software; you can distribute it and/or modify it
8 * under the terms of the GNU General Public License (Version 2) as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
19 * 5 *
20 * PROM library initialisation code. 6 * PROM library initialisation code.
7 *
8 * Copyright (C) 1999,2000,2004,2005,2012 MIPS Technologies, Inc.
9 * All rights reserved.
10 * Authors: Carsten Langgaard <carstenl@mips.com>
11 * Maciej W. Rozycki <macro@mips.com>
12 * Steven J. Hill <sjhill@mips.com>
21 */ 13 */
22#include <linux/init.h> 14#include <linux/init.h>
23#include <linux/string.h> 15#include <linux/string.h>
24#include <linux/kernel.h> 16#include <linux/kernel.h>
25 17
26#include <asm/bootinfo.h>
27#include <asm/gt64120.h>
28#include <asm/io.h>
29#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
30#include <asm/smp-ops.h> 19#include <asm/smp-ops.h>
31#include <asm/traps.h> 20#include <asm/traps.h>
32 21#include <asm/fw/fw.h>
33#include <asm/gcmpregs.h> 22#include <asm/gcmpregs.h>
34#include <asm/mips-boards/prom.h>
35#include <asm/mips-boards/generic.h> 23#include <asm/mips-boards/generic.h>
36#include <asm/mips-boards/bonito64.h>
37#include <asm/mips-boards/msc01_pci.h>
38
39#include <asm/mips-boards/malta.h> 24#include <asm/mips-boards/malta.h>
40 25
41int prom_argc;
42int *_prom_argv, *_prom_envp;
43
44/*
45 * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer.
46 * This macro take care of sign extension, if running in 64-bit mode.
47 */
48#define prom_envp(index) ((char *)(long)_prom_envp[(index)])
49
50int init_debug;
51
52static int mips_revision_corid; 26static int mips_revision_corid;
53int mips_revision_sconid; 27int mips_revision_sconid;
54 28
@@ -62,74 +36,6 @@ unsigned long _pcictrl_gt64120;
62/* MIPS System controller register base */ 36/* MIPS System controller register base */
63unsigned long _pcictrl_msc; 37unsigned long _pcictrl_msc;
64 38
65char *prom_getenv(char *envname)
66{
67 /*
68 * Return a pointer to the given environment variable.
69 * In 64-bit mode: we're using 64-bit pointers, but all pointers
70 * in the PROM structures are only 32-bit, so we need some
71 * workarounds, if we are running in 64-bit mode.
72 */
73 int i, index=0;
74
75 i = strlen(envname);
76
77 while (prom_envp(index)) {
78 if(strncmp(envname, prom_envp(index), i) == 0) {
79 return(prom_envp(index+1));
80 }
81 index += 2;
82 }
83
84 return NULL;
85}
86
87static inline unsigned char str2hexnum(unsigned char c)
88{
89 if (c >= '0' && c <= '9')
90 return c - '0';
91 if (c >= 'a' && c <= 'f')
92 return c - 'a' + 10;
93 return 0; /* foo */
94}
95
96static inline void str2eaddr(unsigned char *ea, unsigned char *str)
97{
98 int i;
99
100 for (i = 0; i < 6; i++) {
101 unsigned char num;
102
103 if((*str == '.') || (*str == ':'))
104 str++;
105 num = str2hexnum(*str++) << 4;
106 num |= (str2hexnum(*str++));
107 ea[i] = num;
108 }
109}
110
111int get_ethernet_addr(char *ethernet_addr)
112{
113 char *ethaddr_str;
114
115 ethaddr_str = prom_getenv("ethaddr");
116 if (!ethaddr_str) {
117 printk("ethaddr not set in boot prom\n");
118 return -1;
119 }
120 str2eaddr(ethernet_addr, ethaddr_str);
121
122 if (init_debug > 1) {
123 int i;
124 printk("get_ethernet_addr: ");
125 for (i=0; i<5; i++)
126 printk("%02x:", (unsigned char)*(ethernet_addr+i));
127 printk("%02x\n", *(ethernet_addr+i));
128 }
129
130 return 0;
131}
132
133#ifdef CONFIG_SERIAL_8250_CONSOLE 39#ifdef CONFIG_SERIAL_8250_CONSOLE
134static void __init console_config(void) 40static void __init console_config(void)
135{ 41{
@@ -138,17 +44,23 @@ static void __init console_config(void)
138 char parity = '\0', bits = '\0', flow = '\0'; 44 char parity = '\0', bits = '\0', flow = '\0';
139 char *s; 45 char *s;
140 46
141 if ((strstr(prom_getcmdline(), "console=")) == NULL) { 47 if ((strstr(fw_getcmdline(), "console=")) == NULL) {
142 s = prom_getenv("modetty0"); 48 s = fw_getenv("modetty0");
143 if (s) { 49 if (s) {
144 while (*s >= '0' && *s <= '9') 50 while (*s >= '0' && *s <= '9')
145 baud = baud*10 + *s++ - '0'; 51 baud = baud*10 + *s++ - '0';
146 if (*s == ',') s++; 52 if (*s == ',')
147 if (*s) parity = *s++; 53 s++;
148 if (*s == ',') s++; 54 if (*s)
149 if (*s) bits = *s++; 55 parity = *s++;
150 if (*s == ',') s++; 56 if (*s == ',')
151 if (*s == 'h') flow = 'r'; 57 s++;
58 if (*s)
59 bits = *s++;
60 if (*s == ',')
61 s++;
62 if (*s == 'h')
63 flow = 'r';
152 } 64 }
153 if (baud == 0) 65 if (baud == 0)
154 baud = 38400; 66 baud = 38400;
@@ -158,8 +70,9 @@ static void __init console_config(void)
158 bits = '8'; 70 bits = '8';
159 if (flow == '\0') 71 if (flow == '\0')
160 flow = 'r'; 72 flow = 'r';
161 sprintf(console_string, " console=ttyS0,%d%c%c%c", baud, parity, bits, flow); 73 sprintf(console_string, " console=ttyS0,%d%c%c%c", baud,
162 strcat(prom_getcmdline(), console_string); 74 parity, bits, flow);
75 strcat(fw_getcmdline(), console_string);
163 pr_info("Config serial console:%s\n", console_string); 76 pr_info("Config serial console:%s\n", console_string);
164 } 77 }
165} 78}
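The reformatted parser above consumes the YAMON "modetty0" value, which typically looks like "38400,n,8,hw" (baud, parity, data bits, flow control). A standalone sketch of the same parse, kept close to the loop in console_config() (the function name is illustrative):

	static void parse_modetty(const char *s, int *baud, char *parity,
				  char *bits, char *flow)
	{
		*baud = 0;
		while (*s >= '0' && *s <= '9')
			*baud = *baud * 10 + *s++ - '0';
		if (*s == ',')
			s++;
		if (*s)
			*parity = *s++;
		if (*s == ',')
			s++;
		if (*s)
			*bits = *s++;
		if (*s == ',')
			s++;
		if (*s == 'h')
			*flow = 'r';	/* 'h' for hardware -> request RTS/CTS */
	}
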
@@ -193,10 +106,6 @@ extern struct plat_smp_ops msmtc_smp_ops;
193 106
194void __init prom_init(void) 107void __init prom_init(void)
195{ 108{
196 prom_argc = fw_arg0;
197 _prom_argv = (int *) fw_arg1;
198 _prom_envp = (int *) fw_arg2;
199
200 mips_display_message("LINUX"); 109 mips_display_message("LINUX");
201 110
202 /* 111 /*
@@ -306,7 +215,7 @@ void __init prom_init(void)
306 case MIPS_REVISION_SCON_SOCIT: 215 case MIPS_REVISION_SCON_SOCIT:
307 case MIPS_REVISION_SCON_ROCIT: 216 case MIPS_REVISION_SCON_ROCIT:
308 _pcictrl_msc = (unsigned long)ioremap(MIPS_MSC01_PCI_REG_BASE, 0x2000); 217 _pcictrl_msc = (unsigned long)ioremap(MIPS_MSC01_PCI_REG_BASE, 0x2000);
309 mips_pci_controller: 218mips_pci_controller:
310 mb(); 219 mb();
311 MSC_READ(MSC01_PCI_CFG, data); 220 MSC_READ(MSC01_PCI_CFG, data);
312 MSC_WRITE(MSC01_PCI_CFG, data & ~MSC01_PCI_CFG_EN_BIT); 221 MSC_WRITE(MSC01_PCI_CFG, data & ~MSC01_PCI_CFG_EN_BIT);
@@ -348,13 +257,13 @@ void __init prom_init(void)
348 default: 257 default:
349 /* Unknown system controller */ 258 /* Unknown system controller */
350 mips_display_message("SC Error"); 259 mips_display_message("SC Error");
351 while (1); /* We die here... */ 260 while (1); /* We die here... */
352 } 261 }
353 board_nmi_handler_setup = mips_nmi_setup; 262 board_nmi_handler_setup = mips_nmi_setup;
354 board_ejtag_handler_setup = mips_ejtag_setup; 263 board_ejtag_handler_setup = mips_ejtag_setup;
355 264
356 prom_init_cmdline(); 265 fw_init_cmdline();
357 prom_meminit(); 266 fw_meminit();
358#ifdef CONFIG_SERIAL_8250_CONSOLE 267#ifdef CONFIG_SERIAL_8250_CONSOLE
359 console_config(); 268 console_config();
360#endif 269#endif
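
For reference, the console_config() conversion above keeps the original YAMON behaviour: the "modetty0" environment value (for example "38400,n,8,hw") is parsed into baud/parity/bits/flow and appended to the command line as a console= option. A minimal user-space sketch of that parsing, with the firmware lookup replaced by a hard-coded string, might look like this:

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* Stand-in for fw_getenv("modetty0"); the real value comes from YAMON. */
    const char *s = "38400,n,8,hw";
    char console_string[40];
    int baud = 0;
    char parity = '\0', bits = '\0', flow = '\0';

    while (*s >= '0' && *s <= '9')
        baud = baud * 10 + *s++ - '0';
    if (*s == ',')
        s++;
    if (*s)
        parity = *s++;
    if (*s == ',')
        s++;
    if (*s)
        bits = *s++;
    if (*s == ',')
        s++;
    if (*s == 'h')      /* "hw" handshake maps to RTS/CTS ('r') */
        flow = 'r';

    /* Fall back to the same defaults the code above uses. */
    if (baud == 0)
        baud = 38400;
    if (parity != 'n' && parity != 'o' && parity != 'e')
        parity = 'n';
    if (bits != '7' && bits != '8')
        bits = '8';
    if (flow == '\0')
        flow = 'r';

    snprintf(console_string, sizeof(console_string),
             " console=ttyS0,%d%c%c%c", baud, parity, bits, flow);
    printf("%s\n", console_string);    /* " console=ttyS0,38400n8r" */
    return 0;
}
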
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index e364af70e6cf..0a1339ac3ec8 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -47,7 +47,6 @@
47#include <asm/setup.h> 47#include <asm/setup.h>
48 48
49int gcmp_present = -1; 49int gcmp_present = -1;
50int gic_present;
51static unsigned long _msc01_biu_base; 50static unsigned long _msc01_biu_base;
52static unsigned long _gcmp_base; 51static unsigned long _gcmp_base;
53static unsigned int ipi_map[NR_CPUS]; 52static unsigned int ipi_map[NR_CPUS];
@@ -134,6 +133,9 @@ static void malta_ipi_irqdispatch(void)
134{ 133{
135 int irq; 134 int irq;
136 135
136 if (gic_compare_int())
137 do_IRQ(MIPS_GIC_IRQ_BASE);
138
137 irq = gic_get_int(); 139 irq = gic_get_int();
138 if (irq < 0) 140 if (irq < 0)
139 return; /* interrupt has already been cleared */ 141 return; /* interrupt has already been cleared */
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index f3d43aa023a9..1f73d63e92a7 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -1,73 +1,45 @@
1/* 1/*
2 * Carsten Langgaard, carstenl@mips.com 2 * This file is subject to the terms and conditions of the GNU General Public
3 * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved. 3 * License. See the file "COPYING" in the main directory of this archive
4 * 4 * for more details.
5 * This program is free software; you can distribute it and/or modify it
6 * under the terms of the GNU General Public License (Version 2) as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
17 * 5 *
18 * PROM library functions for acquiring/using memory descriptors given to 6 * PROM library functions for acquiring/using memory descriptors given to
19 * us from the YAMON. 7 * us from the YAMON.
8 *
9 * Copyright (C) 1999,2000,2012 MIPS Technologies, Inc.
10 * All rights reserved.
11 * Authors: Carsten Langgaard <carstenl@mips.com>
12 * Steven J. Hill <sjhill@mips.com>
20 */ 13 */
21#include <linux/init.h> 14#include <linux/init.h>
22#include <linux/mm.h>
23#include <linux/bootmem.h> 15#include <linux/bootmem.h>
24#include <linux/pfn.h>
25#include <linux/string.h> 16#include <linux/string.h>
26 17
27#include <asm/bootinfo.h> 18#include <asm/bootinfo.h>
28#include <asm/page.h>
29#include <asm/sections.h> 19#include <asm/sections.h>
20#include <asm/fw/fw.h>
30 21
31#include <asm/mips-boards/prom.h> 22static fw_memblock_t mdesc[FW_MAX_MEMBLOCKS];
32
33/*#define DEBUG*/
34
35enum yamon_memtypes {
36 yamon_dontuse,
37 yamon_prom,
38 yamon_free,
39};
40static struct prom_pmemblock mdesc[PROM_MAX_PMEMBLOCKS];
41
42#ifdef DEBUG
43static char *mtypes[3] = {
44 "Dont use memory",
45 "YAMON PROM memory",
46 "Free memory",
47};
48#endif
49 23
50/* determined physical memory size, not overridden by command line args */ 24/* determined physical memory size, not overridden by command line args */
51unsigned long physical_memsize = 0L; 25unsigned long physical_memsize = 0L;
52 26
53static struct prom_pmemblock * __init prom_getmdesc(void) 27fw_memblock_t * __init fw_getmdesc(void)
54{ 28{
55 char *memsize_str; 29 char *memsize_str, *ptr;
56 unsigned int memsize; 30 unsigned int memsize;
57 char *ptr;
58 static char cmdline[COMMAND_LINE_SIZE] __initdata; 31 static char cmdline[COMMAND_LINE_SIZE] __initdata;
32 long val;
33 int tmp;
59 34
60 /* otherwise look in the environment */ 35 /* otherwise look in the environment */
61 memsize_str = prom_getenv("memsize"); 36 memsize_str = fw_getenv("memsize");
62 if (!memsize_str) { 37 if (!memsize_str) {
63 printk(KERN_WARNING 38 pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
64 "memsize not set in boot prom, set to default (32Mb)\n");
65 physical_memsize = 0x02000000; 39 physical_memsize = 0x02000000;
66 } else { 40 } else {
67#ifdef DEBUG 41 tmp = kstrtol(memsize_str, 0, &val);
68 pr_debug("prom_memsize = %s\n", memsize_str); 42 physical_memsize = (unsigned long)val;
69#endif
70 physical_memsize = simple_strtol(memsize_str, NULL, 0);
71 } 43 }
72 44
73#ifdef CONFIG_CPU_BIG_ENDIAN 45#ifdef CONFIG_CPU_BIG_ENDIAN
@@ -90,11 +62,11 @@ static struct prom_pmemblock * __init prom_getmdesc(void)
90 62
91 memset(mdesc, 0, sizeof(mdesc)); 63 memset(mdesc, 0, sizeof(mdesc));
92 64
93 mdesc[0].type = yamon_dontuse; 65 mdesc[0].type = fw_dontuse;
94 mdesc[0].base = 0x00000000; 66 mdesc[0].base = 0x00000000;
95 mdesc[0].size = 0x00001000; 67 mdesc[0].size = 0x00001000;
96 68
97 mdesc[1].type = yamon_prom; 69 mdesc[1].type = fw_code;
98 mdesc[1].base = 0x00001000; 70 mdesc[1].base = 0x00001000;
99 mdesc[1].size = 0x000ef000; 71 mdesc[1].size = 0x000ef000;
100 72
@@ -105,55 +77,45 @@ static struct prom_pmemblock * __init prom_getmdesc(void)
105 * This means that this area can't be used as DMA memory for PCI 77 * This means that this area can't be used as DMA memory for PCI
106 * devices. 78 * devices.
107 */ 79 */
108 mdesc[2].type = yamon_dontuse; 80 mdesc[2].type = fw_dontuse;
109 mdesc[2].base = 0x000f0000; 81 mdesc[2].base = 0x000f0000;
110 mdesc[2].size = 0x00010000; 82 mdesc[2].size = 0x00010000;
111 83
112 mdesc[3].type = yamon_dontuse; 84 mdesc[3].type = fw_dontuse;
113 mdesc[3].base = 0x00100000; 85 mdesc[3].base = 0x00100000;
114 mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) - mdesc[3].base; 86 mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) -
87 mdesc[3].base;
115 88
116 mdesc[4].type = yamon_free; 89 mdesc[4].type = fw_free;
117 mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end)); 90 mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end));
118 mdesc[4].size = memsize - mdesc[4].base; 91 mdesc[4].size = memsize - mdesc[4].base;
119 92
120 return &mdesc[0]; 93 return &mdesc[0];
121} 94}
122 95
123static int __init prom_memtype_classify(unsigned int type) 96static int __init fw_memtype_classify(unsigned int type)
124{ 97{
125 switch (type) { 98 switch (type) {
126 case yamon_free: 99 case fw_free:
127 return BOOT_MEM_RAM; 100 return BOOT_MEM_RAM;
128 case yamon_prom: 101 case fw_code:
129 return BOOT_MEM_ROM_DATA; 102 return BOOT_MEM_ROM_DATA;
130 default: 103 default:
131 return BOOT_MEM_RESERVED; 104 return BOOT_MEM_RESERVED;
132 } 105 }
133} 106}
134 107
135void __init prom_meminit(void) 108void __init fw_meminit(void)
136{ 109{
137 struct prom_pmemblock *p; 110 fw_memblock_t *p;
138 111
139#ifdef DEBUG 112 p = fw_getmdesc();
140 pr_debug("YAMON MEMORY DESCRIPTOR dump:\n");
141 p = prom_getmdesc();
142 while (p->size) {
143 int i = 0;
144 pr_debug("[%d,%p]: base<%08lx> size<%08lx> type<%s>\n",
145 i, p, p->base, p->size, mtypes[p->type]);
146 p++;
147 i++;
148 }
149#endif
150 p = prom_getmdesc();
151 113
152 while (p->size) { 114 while (p->size) {
153 long type; 115 long type;
154 unsigned long base, size; 116 unsigned long base, size;
155 117
156 type = prom_memtype_classify(p->type); 118 type = fw_memtype_classify(p->type);
157 base = p->base; 119 base = p->base;
158 size = p->size; 120 size = p->size;
159 121
@@ -172,7 +134,7 @@ void __init prom_free_prom_memory(void)
172 continue; 134 continue;
173 135
174 addr = boot_mem_map.map[i].addr; 136 addr = boot_mem_map.map[i].addr;
175 free_init_pages("prom memory", 137 free_init_pages("YAMON memory",
176 addr, addr + boot_mem_map.map[i].size); 138 addr, addr + boot_mem_map.map[i].size);
177 } 139 }
178} 140}
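
The rewritten malta-memory.c keeps the same shape as before: a small table of firmware memory blocks is built and each block type is mapped onto the generic boot-memory categories. A self-contained sketch of that classification step, loosely mirroring the table built in fw_getmdesc() above and with the kernel enums replaced by local stand-ins:

#include <stdio.h>

/* Local stand-ins for the fw_memblock_t types and BOOT_MEM_* categories. */
enum blocktype { BLK_DONTUSE, BLK_CODE, BLK_FREE };
enum bootmem   { MEM_RESERVED, MEM_ROM_DATA, MEM_RAM };

struct memblock {
    enum blocktype type;
    unsigned long base;
    unsigned long size;
};

static enum bootmem classify(enum blocktype type)
{
    switch (type) {
    case BLK_FREE:
        return MEM_RAM;       /* usable RAM */
    case BLK_CODE:
        return MEM_ROM_DATA;  /* firmware code/data, reclaimable later */
    default:
        return MEM_RESERVED;  /* never touched by the kernel */
    }
}

int main(void)
{
    /* Simplified layout in the spirit of the descriptor table above. */
    struct memblock mdesc[] = {
        { BLK_DONTUSE, 0x00000000, 0x00001000 },
        { BLK_CODE,    0x00001000, 0x000ef000 },
        { BLK_DONTUSE, 0x000f0000, 0x00010000 },
        { BLK_FREE,    0x00100000, 0x01f00000 },
        { 0, 0, 0 },
    };

    for (struct memblock *p = mdesc; p->size; p++)
        printf("base %08lx size %08lx -> class %d\n",
               p->base, p->size, classify(p->type));
    return 0;
}
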
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index 200f64df2c9b..c72a06936781 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -25,13 +25,13 @@
25#include <linux/screen_info.h> 25#include <linux/screen_info.h>
26#include <linux/time.h> 26#include <linux/time.h>
27 27
28#include <asm/bootinfo.h> 28#include <asm/fw/fw.h>
29#include <asm/mips-boards/generic.h> 29#include <asm/mips-boards/generic.h>
30#include <asm/mips-boards/prom.h>
31#include <asm/mips-boards/malta.h> 30#include <asm/mips-boards/malta.h>
32#include <asm/mips-boards/maltaint.h> 31#include <asm/mips-boards/maltaint.h>
33#include <asm/dma.h> 32#include <asm/dma.h>
34#include <asm/traps.h> 33#include <asm/traps.h>
34#include <asm/gcmpregs.h>
35#ifdef CONFIG_VT 35#ifdef CONFIG_VT
36#include <linux/console.h> 36#include <linux/console.h>
37#endif 37#endif
@@ -105,6 +105,66 @@ static void __init fd_activate(void)
105} 105}
106#endif 106#endif
107 107
108static int __init plat_enable_iocoherency(void)
109{
110 int supported = 0;
111 if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) {
112 if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
113 BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
114 pr_info("Enabled Bonito CPU coherency\n");
115 supported = 1;
116 }
117 if (strstr(fw_getcmdline(), "iobcuncached")) {
118 BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN;
119 BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
120 ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
121 BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
122 pr_info("Disabled Bonito IOBC coherency\n");
123 } else {
124 BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN;
125 BONITO_PCIMEMBASECFG |=
126 (BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
127 BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
128 pr_info("Enabled Bonito IOBC coherency\n");
129 }
130 } else if (gcmp_niocu() != 0) {
131 /* Nothing special needs to be done to enable coherency */
132 pr_info("CMP IOCU detected\n");
133 if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) {
134 pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n");
135 return 0;
136 }
137 supported = 1;
138 }
139 hw_coherentio = supported;
140 return supported;
141}
142
143static void __init plat_setup_iocoherency(void)
144{
145#ifdef CONFIG_DMA_NONCOHERENT
146 /*
147 * Kernel has been configured with software coherency
148 * but we might choose to turn it off and use hardware
149 * coherency instead.
150 */
151 if (plat_enable_iocoherency()) {
152 if (coherentio == 0)
153 pr_info("Hardware DMA cache coherency disabled\n");
154 else
155 pr_info("Hardware DMA cache coherency enabled\n");
156 } else {
157 if (coherentio == 1)
158 pr_info("Hardware DMA cache coherency unsupported, but enabled from command line!\n");
159 else
160 pr_info("Software DMA cache coherency enabled\n");
161 }
162#else
163 if (!plat_enable_iocoherency())
164 panic("Hardware DMA cache coherency not supported!");
165#endif
166}
167
108#ifdef CONFIG_BLK_DEV_IDE 168#ifdef CONFIG_BLK_DEV_IDE
109static void __init pci_clock_check(void) 169static void __init pci_clock_check(void)
110{ 170{
@@ -115,16 +175,15 @@ static void __init pci_clock_check(void)
115 33, 20, 25, 30, 12, 16, 37, 10 175 33, 20, 25, 30, 12, 16, 37, 10
116 }; 176 };
117 int pciclock = pciclocks[jmpr]; 177 int pciclock = pciclocks[jmpr];
118 char *argptr = prom_getcmdline(); 178 char *argptr = fw_getcmdline();
119 179
120 if (pciclock != 33 && !strstr(argptr, "idebus=")) { 180 if (pciclock != 33 && !strstr(argptr, "idebus=")) {
121 printk(KERN_WARNING "WARNING: PCI clock is %dMHz, " 181 pr_warn("WARNING: PCI clock is %dMHz, setting idebus\n",
122 "setting idebus\n", pciclock); 182 pciclock);
123 argptr += strlen(argptr); 183 argptr += strlen(argptr);
124 sprintf(argptr, " idebus=%d", pciclock); 184 sprintf(argptr, " idebus=%d", pciclock);
125 if (pciclock < 20 || pciclock > 66) 185 if (pciclock < 20 || pciclock > 66)
126 printk(KERN_WARNING "WARNING: IDE timing " 186 pr_warn("WARNING: IDE timing calculations will be incorrect\n");
127 "calculations will be incorrect\n");
128 } 187 }
129} 188}
130#endif 189#endif
@@ -153,31 +212,31 @@ static void __init bonito_quirks_setup(void)
153{ 212{
154 char *argptr; 213 char *argptr;
155 214
156 argptr = prom_getcmdline(); 215 argptr = fw_getcmdline();
157 if (strstr(argptr, "debug")) { 216 if (strstr(argptr, "debug")) {
158 BONITO_BONGENCFG |= BONITO_BONGENCFG_DEBUGMODE; 217 BONITO_BONGENCFG |= BONITO_BONGENCFG_DEBUGMODE;
159 printk(KERN_INFO "Enabled Bonito debug mode\n"); 218 pr_info("Enabled Bonito debug mode\n");
160 } else 219 } else
161 BONITO_BONGENCFG &= ~BONITO_BONGENCFG_DEBUGMODE; 220 BONITO_BONGENCFG &= ~BONITO_BONGENCFG_DEBUGMODE;
162 221
163#ifdef CONFIG_DMA_COHERENT 222#ifdef CONFIG_DMA_COHERENT
164 if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) { 223 if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
165 BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN; 224 BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
166 printk(KERN_INFO "Enabled Bonito CPU coherency\n"); 225 pr_info("Enabled Bonito CPU coherency\n");
167 226
168 argptr = prom_getcmdline(); 227 argptr = fw_getcmdline();
169 if (strstr(argptr, "iobcuncached")) { 228 if (strstr(argptr, "iobcuncached")) {
170 BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN; 229 BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN;
171 BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG & 230 BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
172 ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED | 231 ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
173 BONITO_PCIMEMBASECFG_MEMBASE1_CACHED); 232 BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
174 printk(KERN_INFO "Disabled Bonito IOBC coherency\n"); 233 pr_info("Disabled Bonito IOBC coherency\n");
175 } else { 234 } else {
176 BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN; 235 BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN;
177 BONITO_PCIMEMBASECFG |= 236 BONITO_PCIMEMBASECFG |=
178 (BONITO_PCIMEMBASECFG_MEMBASE0_CACHED | 237 (BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
179 BONITO_PCIMEMBASECFG_MEMBASE1_CACHED); 238 BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
180 printk(KERN_INFO "Enabled Bonito IOBC coherency\n"); 239 pr_info("Enabled Bonito IOBC coherency\n");
181 } 240 }
182 } else 241 } else
183 panic("Hardware DMA cache coherency not supported"); 242 panic("Hardware DMA cache coherency not supported");
@@ -207,6 +266,8 @@ void __init plat_mem_setup(void)
207 if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) 266 if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO)
208 bonito_quirks_setup(); 267 bonito_quirks_setup();
209 268
269 plat_setup_iocoherency();
270
210#ifdef CONFIG_BLK_DEV_IDE 271#ifdef CONFIG_BLK_DEV_IDE
211 pci_clock_check(); 272 pci_clock_check();
212#endif 273#endif
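
pci_clock_check() above illustrates a pattern used throughout these board files: inspect the command line and only append an option (here " idebus=<MHz>") when the user has not already supplied one. A stripped-down sketch of that pattern, with a local buffer standing in for the command line returned by fw_getcmdline():

#include <stdio.h>
#include <string.h>

static char cmdline[256] = "root=/dev/nfs console=ttyS0,38400n8r";

static void maybe_append_idebus(int pciclock)
{
    char *argptr = cmdline;

    if (pciclock != 33 && !strstr(argptr, "idebus=")) {
        argptr += strlen(argptr);
        sprintf(argptr, " idebus=%d", pciclock);
        if (pciclock < 20 || pciclock > 66)
            fprintf(stderr, "warning: IDE timing will be incorrect\n");
    }
}

int main(void)
{
    maybe_append_idebus(25);    /* e.g. the jumper selects a 25 MHz PCI clock */
    printf("%s\n", cmdline);
    return 0;
}
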
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index a144b89cf9ba..0ad305f75802 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -39,12 +39,9 @@
39#include <asm/gic.h> 39#include <asm/gic.h>
40 40
41#include <asm/mips-boards/generic.h> 41#include <asm/mips-boards/generic.h>
42#include <asm/mips-boards/prom.h>
43
44#include <asm/mips-boards/maltaint.h> 42#include <asm/mips-boards/maltaint.h>
45 43
46unsigned long cpu_khz; 44unsigned long cpu_khz;
47int gic_frequency;
48 45
49static int mips_cpu_timer_irq; 46static int mips_cpu_timer_irq;
50static int mips_cpu_perf_irq; 47static int mips_cpu_perf_irq;
@@ -74,7 +71,24 @@ static void __init estimate_frequencies(void)
74{ 71{
75 unsigned long flags; 72 unsigned long flags;
76 unsigned int count, start; 73 unsigned int count, start;
74#ifdef CONFIG_IRQ_GIC
77 unsigned int giccount = 0, gicstart = 0; 75 unsigned int giccount = 0, gicstart = 0;
76#endif
77
78#if defined (CONFIG_KVM_GUEST) && defined (CONFIG_KVM_HOST_FREQ)
79 unsigned int prid = read_c0_prid() & 0xffff00;
80
81 /*
82 * XXXKYMA: hardwire the CPU frequency to Host Freq/4
83 */
84 count = (CONFIG_KVM_HOST_FREQ * 1000000) >> 3;
85 if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
86 (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
87 count *= 2;
88
89 mips_hpt_frequency = count;
90 return;
91#endif
78 92
79 local_irq_save(flags); 93 local_irq_save(flags);
80 94
@@ -84,26 +98,32 @@ static void __init estimate_frequencies(void)
84 98
85 /* Initialize counters. */ 99 /* Initialize counters. */
86 start = read_c0_count(); 100 start = read_c0_count();
101#ifdef CONFIG_IRQ_GIC
87 if (gic_present) 102 if (gic_present)
88 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), gicstart); 103 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), gicstart);
104#endif
89 105
90 /* Read counter exactly on falling edge of update flag. */ 106 /* Read counter exactly on falling edge of update flag. */
91 while (CMOS_READ(RTC_REG_A) & RTC_UIP); 107 while (CMOS_READ(RTC_REG_A) & RTC_UIP);
92 while (!(CMOS_READ(RTC_REG_A) & RTC_UIP)); 108 while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
93 109
94 count = read_c0_count(); 110 count = read_c0_count();
111#ifdef CONFIG_IRQ_GIC
95 if (gic_present) 112 if (gic_present)
96 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), giccount); 113 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), giccount);
114#endif
97 115
98 local_irq_restore(flags); 116 local_irq_restore(flags);
99 117
100 count -= start; 118 count -= start;
101 if (gic_present)
102 giccount -= gicstart;
103
104 mips_hpt_frequency = count; 119 mips_hpt_frequency = count;
105 if (gic_present) 120
121#ifdef CONFIG_IRQ_GIC
122 if (gic_present) {
123 giccount -= gicstart;
106 gic_frequency = giccount; 124 gic_frequency = giccount;
125 }
126#endif
107} 127}
108 128
109void read_persistent_clock(struct timespec *ts) 129void read_persistent_clock(struct timespec *ts)
@@ -159,24 +179,27 @@ void __init plat_time_init(void)
159 (prid != (PRID_COMP_MIPS | PRID_IMP_25KF))) 179 (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
160 freq *= 2; 180 freq *= 2;
161 freq = freqround(freq, 5000); 181 freq = freqround(freq, 5000);
162 pr_debug("CPU frequency %d.%02d MHz\n", freq/1000000, 182 printk("CPU frequency %d.%02d MHz\n", freq/1000000,
163 (freq%1000000)*100/1000000); 183 (freq%1000000)*100/1000000);
164 cpu_khz = freq / 1000; 184 cpu_khz = freq / 1000;
165 185
166 if (gic_present) { 186 mips_scroll_message();
167 freq = freqround(gic_frequency, 5000);
168 pr_debug("GIC frequency %d.%02d MHz\n", freq/1000000,
169 (freq%1000000)*100/1000000);
170 gic_clocksource_init(gic_frequency);
171 } else
172 init_r4k_clocksource();
173 187
174#ifdef CONFIG_I8253 188#ifdef CONFIG_I8253
175 /* Only Malta has a PIT. */ 189 /* Only Malta has a PIT. */
176 setup_pit_timer(); 190 setup_pit_timer();
177#endif 191#endif
178 192
179 mips_scroll_message(); 193#ifdef CONFIG_IRQ_GIC
194 if (gic_present) {
195 freq = freqround(gic_frequency, 5000);
196 printk("GIC frequency %d.%02d MHz\n", freq/1000000,
197 (freq%1000000)*100/1000000);
198#ifdef CONFIG_CSRC_GIC
199 gic_clocksource_init(gic_frequency);
200#endif
201 }
202#endif
180 203
181 plat_perf_setup(); 204 plat_perf_setup();
182} 205}
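
estimate_frequencies() above calibrates the CP0 count register (and, when a GIC is present, the GIC counter) by sampling it across a reference interval defined by the RTC update flag. The same idea can be sketched in user space by using a whole-second boundary of a reference clock as the edge; here clock_gettime() stands in for both the RTC and the cycle counter:

#include <stdio.h>
#include <time.h>
#include <stdint.h>

/* Stand-in "cycle counter": monotonic time in nanoseconds. */
static uint64_t counter_read(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Wait for the next whole-second edge of the reference clock. */
static void wait_for_second_edge(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_REALTIME, &ts);
    time_t sec = ts.tv_sec;
    do
        clock_gettime(CLOCK_REALTIME, &ts);
    while (ts.tv_sec == sec);
}

int main(void)
{
    wait_for_second_edge();
    uint64_t start = counter_read();
    wait_for_second_edge();
    uint64_t ticks = counter_read() - start;

    /* Ticks per reference second approximate the counter frequency. */
    printf("estimated counter frequency: %llu Hz\n",
           (unsigned long long)ticks);
    return 0;
}
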
diff --git a/arch/mips/mti-sead3/Makefile b/arch/mips/mti-sead3/Makefile
index 10ec701ce6c7..be114209217c 100644
--- a/arch/mips/mti-sead3/Makefile
+++ b/arch/mips/mti-sead3/Makefile
@@ -8,10 +8,10 @@
8# Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 8# Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9# Steven J. Hill <sjhill@mips.com> 9# Steven J. Hill <sjhill@mips.com>
10# 10#
11obj-y := sead3-lcd.o sead3-cmdline.o \ 11obj-y := sead3-lcd.o sead3-display.o sead3-init.o \
12 sead3-display.o sead3-init.o sead3-int.o \ 12 sead3-int.o sead3-mtd.o sead3-net.o \
13 sead3-mtd.o sead3-net.o sead3-platform.o \ 13 sead3-platform.o sead3-reset.o \
14 sead3-reset.o sead3-setup.o sead3-time.o 14 sead3-setup.o sead3-time.o
15 15
16obj-y += sead3-i2c-dev.o sead3-i2c.o \ 16obj-y += sead3-i2c-dev.o sead3-i2c.o \
17 sead3-pic32-i2c-drv.o sead3-pic32-bus.o \ 17 sead3-pic32-i2c-drv.o sead3-pic32-bus.o \
diff --git a/arch/mips/mti-sead3/leds-sead3.c b/arch/mips/mti-sead3/leds-sead3.c
index 322148c353ed..0a168c948b01 100644
--- a/arch/mips/mti-sead3/leds-sead3.c
+++ b/arch/mips/mti-sead3/leds-sead3.c
@@ -34,33 +34,15 @@ static void sead3_fled_set(struct led_classdev *led_cdev,
34static struct led_classdev sead3_pled = { 34static struct led_classdev sead3_pled = {
35 .name = "sead3::pled", 35 .name = "sead3::pled",
36 .brightness_set = sead3_pled_set, 36 .brightness_set = sead3_pled_set,
37 .flags = LED_CORE_SUSPENDRESUME,
37}; 38};
38 39
39static struct led_classdev sead3_fled = { 40static struct led_classdev sead3_fled = {
40 .name = "sead3::fled", 41 .name = "sead3::fled",
41 .brightness_set = sead3_fled_set, 42 .brightness_set = sead3_fled_set,
43 .flags = LED_CORE_SUSPENDRESUME,
42}; 44};
43 45
44#ifdef CONFIG_PM
45static int sead3_led_suspend(struct platform_device *dev,
46 pm_message_t state)
47{
48 led_classdev_suspend(&sead3_pled);
49 led_classdev_suspend(&sead3_fled);
50 return 0;
51}
52
53static int sead3_led_resume(struct platform_device *dev)
54{
55 led_classdev_resume(&sead3_pled);
56 led_classdev_resume(&sead3_fled);
57 return 0;
58}
59#else
60#define sead3_led_suspend NULL
61#define sead3_led_resume NULL
62#endif
63
64static int sead3_led_probe(struct platform_device *pdev) 46static int sead3_led_probe(struct platform_device *pdev)
65{ 47{
66 int ret; 48 int ret;
@@ -86,8 +68,6 @@ static int sead3_led_remove(struct platform_device *pdev)
86static struct platform_driver sead3_led_driver = { 68static struct platform_driver sead3_led_driver = {
87 .probe = sead3_led_probe, 69 .probe = sead3_led_probe,
88 .remove = sead3_led_remove, 70 .remove = sead3_led_remove,
89 .suspend = sead3_led_suspend,
90 .resume = sead3_led_resume,
91 .driver = { 71 .driver = {
92 .name = DRVNAME, 72 .name = DRVNAME,
93 .owner = THIS_MODULE, 73 .owner = THIS_MODULE,
diff --git a/arch/mips/mti-sead3/sead3-cmdline.c b/arch/mips/mti-sead3/sead3-cmdline.c
deleted file mode 100644
index a2e6cec67f57..000000000000
--- a/arch/mips/mti-sead3/sead3-cmdline.c
+++ /dev/null
@@ -1,46 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7 */
8#include <linux/init.h>
9#include <linux/string.h>
10
11#include <asm/bootinfo.h>
12
13extern int prom_argc;
14extern int *_prom_argv;
15
16/*
17 * YAMON (32-bit PROM) passes arguments and the environment as 32-bit pointers.
18 * This macro takes care of sign extension.
19 */
20#define prom_argv(index) ((char *)(long)_prom_argv[(index)])
21
22char * __init prom_getcmdline(void)
23{
24 return &(arcs_cmdline[0]);
25}
26
27void __init prom_init_cmdline(void)
28{
29 char *cp;
30 int actr;
31
32 actr = 1; /* Always ignore argv[0] */
33
34 cp = &(arcs_cmdline[0]);
35 while (actr < prom_argc) {
36 strcpy(cp, prom_argv(actr));
37 cp += strlen(prom_argv(actr));
38 *cp++ = ' ';
39 actr++;
40 }
41 if (cp != &(arcs_cmdline[0])) {
42 /* get rid of trailing space */
43 --cp;
44 *cp = '\0';
45 }
46}
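
The deleted sead3-cmdline.c duplicated logic that now lives in the shared firmware library: walk the YAMON argument vector, skipping argv[0], and join the entries into one space-separated command line. A user-space equivalent of that join, operating on an ordinary argv and with no bounds checking (mirroring the original), is:

#include <stdio.h>
#include <string.h>

#define CMDLINE_SIZE 4096

int main(int argc, char *argv[])
{
    static char cmdline[CMDLINE_SIZE];
    char *cp = cmdline;

    /* Always ignore argv[0], exactly like the PROM code did. */
    for (int i = 1; i < argc; i++) {
        strcpy(cp, argv[i]);
        cp += strlen(argv[i]);
        *cp++ = ' ';
    }
    if (cp != cmdline)
        *--cp = '\0';    /* drop the trailing space */

    printf("\"%s\"\n", cmdline);
    return 0;
}
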
diff --git a/arch/mips/mti-sead3/sead3-console.c b/arch/mips/mti-sead3/sead3-console.c
index 2ddef19a9adc..031f47d69770 100644
--- a/arch/mips/mti-sead3/sead3-console.c
+++ b/arch/mips/mti-sead3/sead3-console.c
@@ -26,7 +26,7 @@ static inline void serial_out(int offset, int value, unsigned int base_addr)
26 __raw_writel(value, PORT(base_addr, offset)); 26 __raw_writel(value, PORT(base_addr, offset));
27} 27}
28 28
29void __init prom_init_early_console(char port) 29void __init fw_init_early_console(char port)
30{ 30{
31 console_port = port; 31 console_port = port;
32} 32}
diff --git a/arch/mips/mti-sead3/sead3-display.c b/arch/mips/mti-sead3/sead3-display.c
index e389326cfa42..94875991907b 100644
--- a/arch/mips/mti-sead3/sead3-display.c
+++ b/arch/mips/mti-sead3/sead3-display.c
@@ -8,7 +8,6 @@
8#include <linux/timer.h> 8#include <linux/timer.h>
9#include <linux/io.h> 9#include <linux/io.h>
10#include <asm/mips-boards/generic.h> 10#include <asm/mips-boards/generic.h>
11#include <asm/mips-boards/prom.h>
12 11
13static unsigned int display_count; 12static unsigned int display_count;
14static unsigned int max_display_count; 13static unsigned int max_display_count;
diff --git a/arch/mips/mti-sead3/sead3-init.c b/arch/mips/mti-sead3/sead3-init.c
index f95abaa1aa5d..bfbd17b120a2 100644
--- a/arch/mips/mti-sead3/sead3-init.c
+++ b/arch/mips/mti-sead3/sead3-init.c
@@ -12,38 +12,51 @@
12#include <asm/cacheflush.h> 12#include <asm/cacheflush.h>
13#include <asm/traps.h> 13#include <asm/traps.h>
14#include <asm/mips-boards/generic.h> 14#include <asm/mips-boards/generic.h>
15#include <asm/mips-boards/prom.h> 15#include <asm/fw/fw.h>
16
17extern void prom_init_early_console(char port);
18 16
19extern char except_vec_nmi; 17extern char except_vec_nmi;
20extern char except_vec_ejtag_debug; 18extern char except_vec_ejtag_debug;
21 19
22int prom_argc; 20#ifdef CONFIG_SERIAL_8250_CONSOLE
23int *_prom_argv, *_prom_envp; 21static void __init console_config(void)
24
25#define prom_envp(index) ((char *)(long)_prom_envp[(index)])
26
27char *prom_getenv(char *envname)
28{ 22{
29 /* 23 char console_string[40];
30 * Return a pointer to the given environment variable. 24 int baud = 0;
31 * In 64-bit mode: we're using 64-bit pointers, but all pointers 25 char parity = '\0', bits = '\0', flow = '\0';
32 * in the PROM structures are only 32-bit, so we need some 26 char *s;
33 * workarounds, if we are running in 64-bit mode. 27
34 */ 28 if ((strstr(fw_getcmdline(), "console=")) == NULL) {
35 int i, index = 0; 29 s = fw_getenv("modetty0");
36 30 if (s) {
37 i = strlen(envname); 31 while (*s >= '0' && *s <= '9')
38 32 baud = baud*10 + *s++ - '0';
39 while (prom_envp(index)) { 33 if (*s == ',')
40 if (strncmp(envname, prom_envp(index), i) == 0) 34 s++;
41 return prom_envp(index+1); 35 if (*s)
42 index += 2; 36 parity = *s++;
37 if (*s == ',')
38 s++;
39 if (*s)
40 bits = *s++;
41 if (*s == ',')
42 s++;
43 if (*s == 'h')
44 flow = 'r';
45 }
46 if (baud == 0)
47 baud = 38400;
48 if (parity != 'n' && parity != 'o' && parity != 'e')
49 parity = 'n';
50 if (bits != '7' && bits != '8')
51 bits = '8';
52 if (flow == '\0')
53 flow = 'r';
54 sprintf(console_string, " console=ttyS0,%d%c%c%c", baud,
55 parity, bits, flow);
56 strcat(fw_getcmdline(), console_string);
43 } 57 }
44
45 return NULL;
46} 58}
59#endif
47 60
48static void __init mips_nmi_setup(void) 61static void __init mips_nmi_setup(void)
49{ 62{
@@ -52,7 +65,41 @@ static void __init mips_nmi_setup(void)
52 base = cpu_has_veic ? 65 base = cpu_has_veic ?
53 (void *)(CAC_BASE + 0xa80) : 66 (void *)(CAC_BASE + 0xa80) :
54 (void *)(CAC_BASE + 0x380); 67 (void *)(CAC_BASE + 0x380);
68#ifdef CONFIG_CPU_MICROMIPS
69 /*
70 * Decrement the exception vector address by one for microMIPS.
71 */
72 memcpy(base, (&except_vec_nmi - 1), 0x80);
73
74 /*
75 * This is a hack. We do not know if the boot loader was built with
76 * microMIPS instructions or not. If it was not, the NMI exception
77 * code at 0x80000a80 will be taken in MIPS32 mode. The hand coded
78 * assembly below forces us into microMIPS mode if we are a pure
79 * microMIPS kernel. The assembly instructions are:
80 *
81 * 3C1A8000 lui k0,0x8000
82 * 375A0381 ori k0,k0,0x381
83 * 03400008 jr k0
84 * 00000000 nop
85 *
86 * The mode switch occurs by jumping to the unaligned exception
87 * vector address at 0x80000381 which would have been 0x80000380
88 * in MIPS32 mode. The jump to the unaligned address transitions
89 * us into microMIPS mode.
90 */
91 if (!cpu_has_veic) {
92 void *base2 = (void *)(CAC_BASE + 0xa80);
93 *((unsigned int *)base2) = 0x3c1a8000;
94 *((unsigned int *)base2 + 1) = 0x375a0381;
95 *((unsigned int *)base2 + 2) = 0x03400008;
96 *((unsigned int *)base2 + 3) = 0x00000000;
97 flush_icache_range((unsigned long)base2,
98 (unsigned long)base2 + 0x10);
99 }
100#else
55 memcpy(base, &except_vec_nmi, 0x80); 101 memcpy(base, &except_vec_nmi, 0x80);
102#endif
56 flush_icache_range((unsigned long)base, (unsigned long)base + 0x80); 103 flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
57} 104}
58 105
@@ -63,29 +110,40 @@ static void __init mips_ejtag_setup(void)
63 base = cpu_has_veic ? 110 base = cpu_has_veic ?
64 (void *)(CAC_BASE + 0xa00) : 111 (void *)(CAC_BASE + 0xa00) :
65 (void *)(CAC_BASE + 0x300); 112 (void *)(CAC_BASE + 0x300);
113#ifdef CONFIG_CPU_MICROMIPS
114 /* Deja vu... */
115 memcpy(base, (&except_vec_ejtag_debug - 1), 0x80);
116 if (!cpu_has_veic) {
117 void *base2 = (void *)(CAC_BASE + 0xa00);
118 *((unsigned int *)base2) = 0x3c1a8000;
119 *((unsigned int *)base2 + 1) = 0x375a0301;
120 *((unsigned int *)base2 + 2) = 0x03400008;
121 *((unsigned int *)base2 + 3) = 0x00000000;
122 flush_icache_range((unsigned long)base2,
123 (unsigned long)base2 + 0x10);
124 }
125#else
66 memcpy(base, &except_vec_ejtag_debug, 0x80); 126 memcpy(base, &except_vec_ejtag_debug, 0x80);
127#endif
67 flush_icache_range((unsigned long)base, (unsigned long)base + 0x80); 128 flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
68} 129}
69 130
70void __init prom_init(void) 131void __init prom_init(void)
71{ 132{
72 prom_argc = fw_arg0;
73 _prom_argv = (int *) fw_arg1;
74 _prom_envp = (int *) fw_arg2;
75
76 board_nmi_handler_setup = mips_nmi_setup; 133 board_nmi_handler_setup = mips_nmi_setup;
77 board_ejtag_handler_setup = mips_ejtag_setup; 134 board_ejtag_handler_setup = mips_ejtag_setup;
78 135
79 prom_init_cmdline(); 136 fw_init_cmdline();
80#ifdef CONFIG_EARLY_PRINTK 137#ifdef CONFIG_EARLY_PRINTK
81 if ((strstr(prom_getcmdline(), "console=ttyS0")) != NULL) 138 if ((strstr(fw_getcmdline(), "console=ttyS0")) != NULL)
82 prom_init_early_console(0); 139 fw_init_early_console(0);
83 else if ((strstr(prom_getcmdline(), "console=ttyS1")) != NULL) 140 else if ((strstr(fw_getcmdline(), "console=ttyS1")) != NULL)
84 prom_init_early_console(1); 141 fw_init_early_console(1);
85#endif 142#endif
86#ifdef CONFIG_SERIAL_8250_CONSOLE 143#ifdef CONFIG_SERIAL_8250_CONSOLE
87 if ((strstr(prom_getcmdline(), "console=")) == NULL) 144 if ((strstr(fw_getcmdline(), "console=")) == NULL)
88 strcat(prom_getcmdline(), " console=ttyS0,38400n8r"); 145 strcat(fw_getcmdline(), " console=ttyS0,38400n8r");
146 console_config();
89#endif 147#endif
90} 148}
91 149
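
The microMIPS NMI/EJTAG hack above plants four MIPS32 instructions at the architectural vector so that a boot loader running classic MIPS32 code still lands in microMIPS code: lui/ori build the odd target address in k0, and the jr switches ISA mode because bit 0 of the jump target selects microMIPS. A small sketch that derives those instruction words for an arbitrary 32-bit target, assuming k0 (register $26) as the scratch register as in the hunk above:

#include <stdio.h>
#include <stdint.h>

/* Encode "lui k0, hi16; ori k0, k0, lo16; jr k0; nop" for a given target. */
static void encode_trampoline(uint32_t target, uint32_t words[4])
{
    words[0] = 0x3c1a0000 | (target >> 16);     /* lui k0, upper 16 bits */
    words[1] = 0x375a0000 | (target & 0xffff);  /* ori k0, k0, lower 16 bits */
    words[2] = 0x03400008;                      /* jr  k0 */
    words[3] = 0x00000000;                      /* nop (delay slot) */
}

int main(void)
{
    uint32_t w[4];

    /* 0x80000380 with bit 0 set: the odd address selects microMIPS mode. */
    encode_trampoline(0x80000381, w);
    for (int i = 0; i < 4; i++)
        printf("%08x\n", (unsigned)w[i]); /* 3c1a8000 375a0381 03400008 00000000 */
    return 0;
}
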
diff --git a/arch/mips/mti-sead3/sead3-int.c b/arch/mips/mti-sead3/sead3-int.c
index e26e08274fc5..6a560ac03def 100644
--- a/arch/mips/mti-sead3/sead3-int.c
+++ b/arch/mips/mti-sead3/sead3-int.c
@@ -20,7 +20,6 @@
20#define SEAD_CONFIG_BASE 0x1b100110 20#define SEAD_CONFIG_BASE 0x1b100110
21#define SEAD_CONFIG_SIZE 4 21#define SEAD_CONFIG_SIZE 4
22 22
23int gic_present;
24static unsigned long sead3_config_reg; 23static unsigned long sead3_config_reg;
25 24
26/* 25/*
diff --git a/arch/mips/mti-sead3/sead3-setup.c b/arch/mips/mti-sead3/sead3-setup.c
index f012fd164cee..b5059dc899f4 100644
--- a/arch/mips/mti-sead3/sead3-setup.c
+++ b/arch/mips/mti-sead3/sead3-setup.c
@@ -11,10 +11,6 @@
11#include <linux/bootmem.h> 11#include <linux/bootmem.h>
12 12
13#include <asm/mips-boards/generic.h> 13#include <asm/mips-boards/generic.h>
14#include <asm/prom.h>
15
16int coherentio; /* 0 => no DMA cache coherency (may be set by user) */
17int hw_coherentio; /* 0 => no HW DMA cache coherency (reflects real HW) */
18 14
19const char *get_system_type(void) 15const char *get_system_type(void)
20{ 16{
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index 239e4e32757f..96b42eb9b5e2 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -11,7 +11,6 @@
11#include <asm/time.h> 11#include <asm/time.h>
12#include <asm/irq.h> 12#include <asm/irq.h>
13#include <asm/mips-boards/generic.h> 13#include <asm/mips-boards/generic.h>
14#include <asm/mips-boards/prom.h>
15 14
16unsigned long cpu_khz; 15unsigned long cpu_khz;
17 16
diff --git a/arch/mips/netlogic/Kconfig b/arch/mips/netlogic/Kconfig
index 3c05bf9e280a..e0873a31ebaa 100644
--- a/arch/mips/netlogic/Kconfig
+++ b/arch/mips/netlogic/Kconfig
@@ -2,13 +2,22 @@ if NLM_XLP_BOARD || NLM_XLR_BOARD
2 2
3if NLM_XLP_BOARD 3if NLM_XLP_BOARD
4config DT_XLP_EVP 4config DT_XLP_EVP
5 bool "Built-in device tree for XLP EVP/SVP boards" 5 bool "Built-in device tree for XLP EVP boards"
6 default y 6 default y
7 help 7 help
8 Add an FDT blob for XLP EVP and SVP boards into the kernel. 8 Add an FDT blob for XLP EVP boards into the kernel.
9 This DTB will be used if the firmware does not pass in a DTB 9 This DTB will be used if the firmware does not pass in a DTB
10 pointer to the kernel. The corresponding DTS file is at 10 pointer to the kernel. The corresponding DTS file is at
11 arch/mips/netlogic/dts/xlp_evp.dts 11 arch/mips/netlogic/dts/xlp_evp.dts
12
13config DT_XLP_SVP
14 bool "Built-in device tree for XLP SVP boards"
15 default y
16 help
17 Add an FDT blob for XLP SVP boards into the kernel.
18 This DTB will be used if the firmware does not pass in a DTB
19 pointer to the kernel. The corresponding DTS file is at
20 arch/mips/netlogic/dts/xlp_svp.dts
12 21
13config NLM_MULTINODE 22config NLM_MULTINODE
14 bool "Support for multi-chip boards" 23 bool "Support for multi-chip boards"
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index 2bb95dcfe20a..ffba52489bef 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -148,8 +148,7 @@ void nlm_cpus_done(void)
148int nlm_cpu_ready[NR_CPUS]; 148int nlm_cpu_ready[NR_CPUS];
149unsigned long nlm_next_gp; 149unsigned long nlm_next_gp;
150unsigned long nlm_next_sp; 150unsigned long nlm_next_sp;
151 151static cpumask_t phys_cpu_present_mask;
152cpumask_t phys_cpu_present_map;
153 152
154void nlm_boot_secondary(int logical_cpu, struct task_struct *idle) 153void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
155{ 154{
@@ -169,11 +168,12 @@ void __init nlm_smp_setup(void)
169{ 168{
170 unsigned int boot_cpu; 169 unsigned int boot_cpu;
171 int num_cpus, i, ncore; 170 int num_cpus, i, ncore;
171 char buf[64];
172 172
173 boot_cpu = hard_smp_processor_id(); 173 boot_cpu = hard_smp_processor_id();
174 cpumask_clear(&phys_cpu_present_map); 174 cpumask_clear(&phys_cpu_present_mask);
175 175
176 cpumask_set_cpu(boot_cpu, &phys_cpu_present_map); 176 cpumask_set_cpu(boot_cpu, &phys_cpu_present_mask);
177 __cpu_number_map[boot_cpu] = 0; 177 __cpu_number_map[boot_cpu] = 0;
178 __cpu_logical_map[0] = boot_cpu; 178 __cpu_logical_map[0] = boot_cpu;
179 set_cpu_possible(0, true); 179 set_cpu_possible(0, true);
@@ -185,7 +185,7 @@ void __init nlm_smp_setup(void)
185 * it is only set for ASPs (see smpboot.S) 185 * it is only set for ASPs (see smpboot.S)
186 */ 186 */
187 if (nlm_cpu_ready[i]) { 187 if (nlm_cpu_ready[i]) {
188 cpumask_set_cpu(i, &phys_cpu_present_map); 188 cpumask_set_cpu(i, &phys_cpu_present_mask);
189 __cpu_number_map[i] = num_cpus; 189 __cpu_number_map[i] = num_cpus;
190 __cpu_logical_map[num_cpus] = i; 190 __cpu_logical_map[num_cpus] = i;
191 set_cpu_possible(num_cpus, true); 191 set_cpu_possible(num_cpus, true);
@@ -193,16 +193,19 @@ void __init nlm_smp_setup(void)
193 } 193 }
194 } 194 }
195 195
196 cpumask_scnprintf(buf, ARRAY_SIZE(buf), &phys_cpu_present_mask);
197 pr_info("Physical CPU mask: %s\n", buf);
198 cpumask_scnprintf(buf, ARRAY_SIZE(buf), cpu_possible_mask);
199 pr_info("Possible CPU mask: %s\n", buf);
200
196 /* check with the cores we have woken up */ 201 /* check with the cores we have woken up */
197 for (ncore = 0, i = 0; i < NLM_NR_NODES; i++) 202 for (ncore = 0, i = 0; i < NLM_NR_NODES; i++)
198 ncore += hweight32(nlm_get_node(i)->coremask); 203 ncore += hweight32(nlm_get_node(i)->coremask);
199 204
200 pr_info("Phys CPU present map: %lx, possible map %lx\n",
201 (unsigned long)cpumask_bits(&phys_cpu_present_map)[0],
202 (unsigned long)cpumask_bits(cpu_possible_mask)[0]);
203
204 pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore, 205 pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore,
205 nlm_threads_per_core, num_cpus); 206 nlm_threads_per_core, num_cpus);
207
208 /* switch NMI handler to boot CPUs */
206 nlm_set_nmi_handler(nlm_boot_secondary_cpus); 209 nlm_set_nmi_handler(nlm_boot_secondary_cpus);
207} 210}
208 211
diff --git a/arch/mips/netlogic/dts/Makefile b/arch/mips/netlogic/dts/Makefile
index d117d46413aa..aecb6fa9a9c3 100644
--- a/arch/mips/netlogic/dts/Makefile
+++ b/arch/mips/netlogic/dts/Makefile
@@ -1 +1,2 @@
1obj-$(CONFIG_DT_XLP_EVP) := xlp_evp.dtb.o 1obj-$(CONFIG_DT_XLP_EVP) := xlp_evp.dtb.o
2obj-$(CONFIG_DT_XLP_SVP) += xlp_svp.dtb.o
diff --git a/arch/mips/netlogic/dts/xlp_evp.dts b/arch/mips/netlogic/dts/xlp_evp.dts
index 7628b5464fc7..e14f42308064 100644
--- a/arch/mips/netlogic/dts/xlp_evp.dts
+++ b/arch/mips/netlogic/dts/xlp_evp.dts
@@ -20,7 +20,7 @@
20 #address-cells = <2>; 20 #address-cells = <2>;
21 #size-cells = <1>; 21 #size-cells = <1>;
22 compatible = "simple-bus"; 22 compatible = "simple-bus";
23 ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG 23 ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG
24 1 0 0 0x16000000 0x01000000>; // GBU chipselects 24 1 0 0 0x16000000 0x01000000>; // GBU chipselects
25 25
26 serial0: serial@30000 { 26 serial0: serial@30000 {
diff --git a/arch/mips/netlogic/dts/xlp_svp.dts b/arch/mips/netlogic/dts/xlp_svp.dts
new file mode 100644
index 000000000000..8af4bdbe5d99
--- /dev/null
+++ b/arch/mips/netlogic/dts/xlp_svp.dts
@@ -0,0 +1,124 @@
1/*
2 * XLP3XX Device Tree Source for SVP boards
3 */
4
5/dts-v1/;
6/ {
7 model = "netlogic,XLP-SVP";
8 compatible = "netlogic,xlp";
9 #address-cells = <2>;
10 #size-cells = <2>;
11
12 memory {
13 device_type = "memory";
14 reg = <0 0x00100000 0 0x0FF00000 // 255M at 1M
15 0 0x20000000 0 0xa0000000 // 2560M at 512M
16 0 0xe0000000 0 0x40000000>;
17 };
18
19 soc {
20 #address-cells = <2>;
21 #size-cells = <1>;
22 compatible = "simple-bus";
23 ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG
24 1 0 0 0x16000000 0x01000000>; // GBU chipselects
25
26 serial0: serial@30000 {
27 device_type = "serial";
28 compatible = "ns16550";
29 reg = <0 0x30100 0xa00>;
30 reg-shift = <2>;
31 reg-io-width = <4>;
32 clock-frequency = <133333333>;
33 interrupt-parent = <&pic>;
34 interrupts = <17>;
35 };
36 serial1: serial@31000 {
37 device_type = "serial";
38 compatible = "ns16550";
39 reg = <0 0x31100 0xa00>;
40 reg-shift = <2>;
41 reg-io-width = <4>;
42 clock-frequency = <133333333>;
43 interrupt-parent = <&pic>;
44 interrupts = <18>;
45 };
46 i2c0: ocores@32000 {
47 compatible = "opencores,i2c-ocores";
48 #address-cells = <1>;
49 #size-cells = <0>;
50 reg = <0 0x32100 0xa00>;
51 reg-shift = <2>;
52 reg-io-width = <4>;
53 clock-frequency = <32000000>;
54 interrupt-parent = <&pic>;
55 interrupts = <30>;
56 };
57 i2c1: ocores@33000 {
58 compatible = "opencores,i2c-ocores";
59 #address-cells = <1>;
60 #size-cells = <0>;
61 reg = <0 0x33100 0xa00>;
62 reg-shift = <2>;
63 reg-io-width = <4>;
64 clock-frequency = <32000000>;
65 interrupt-parent = <&pic>;
66 interrupts = <31>;
67
68 rtc@68 {
69 compatible = "dallas,ds1374";
70 reg = <0x68>;
71 };
72
73 dtt@4c {
74 compatible = "national,lm90";
75 reg = <0x4c>;
76 };
77 };
78 pic: pic@4000 {
79 interrupt-controller;
80 #address-cells = <0>;
81 #interrupt-cells = <1>;
82 reg = <0 0x4000 0x200>;
83 };
84
85 nor_flash@1,0 {
86 compatible = "cfi-flash";
87 #address-cells = <1>;
88 #size-cells = <1>;
89 bank-width = <2>;
90 reg = <1 0 0x1000000>;
91
92 partition@0 {
93 label = "x-loader";
94 reg = <0x0 0x100000>; /* 1M */
95 read-only;
96 };
97
98 partition@100000 {
99 label = "u-boot";
100 reg = <0x100000 0x100000>; /* 1M */
101 };
102
103 partition@200000 {
104 label = "kernel";
105 reg = <0x200000 0x500000>; /* 5M */
106 };
107
108 partition@700000 {
109 label = "rootfs";
110 reg = <0x700000 0x800000>; /* 8M */
111 };
112
113 partition@f00000 {
114 label = "env";
115 reg = <0xf00000 0x100000>; /* 1M */
116 read-only;
117 };
118 };
119 };
120
121 chosen {
122 bootargs = "console=ttyS0,115200 rdinit=/sbin/init";
123 };
124};
diff --git a/arch/mips/netlogic/xlp/nlm_hal.c b/arch/mips/netlogic/xlp/nlm_hal.c
index c68fd4026104..87560e4db35f 100644
--- a/arch/mips/netlogic/xlp/nlm_hal.c
+++ b/arch/mips/netlogic/xlp/nlm_hal.c
@@ -61,43 +61,61 @@ void nlm_node_init(int node)
61 61
62int nlm_irq_to_irt(int irq) 62int nlm_irq_to_irt(int irq)
63{ 63{
64 if (!PIC_IRQ_IS_IRT(irq)) 64 uint64_t pcibase;
65 return -1; 65 int devoff, irt;
66 66
67 switch (irq) { 67 switch (irq) {
68 case PIC_UART_0_IRQ: 68 case PIC_UART_0_IRQ:
69 return PIC_IRT_UART_0_INDEX; 69 devoff = XLP_IO_UART0_OFFSET(0);
70 break;
70 case PIC_UART_1_IRQ: 71 case PIC_UART_1_IRQ:
71 return PIC_IRT_UART_1_INDEX; 72 devoff = XLP_IO_UART1_OFFSET(0);
72 case PIC_PCIE_LINK_0_IRQ: 73 break;
73 return PIC_IRT_PCIE_LINK_0_INDEX;
74 case PIC_PCIE_LINK_1_IRQ:
75 return PIC_IRT_PCIE_LINK_1_INDEX;
76 case PIC_PCIE_LINK_2_IRQ:
77 return PIC_IRT_PCIE_LINK_2_INDEX;
78 case PIC_PCIE_LINK_3_IRQ:
79 return PIC_IRT_PCIE_LINK_3_INDEX;
80 case PIC_EHCI_0_IRQ: 74 case PIC_EHCI_0_IRQ:
81 return PIC_IRT_EHCI_0_INDEX; 75 devoff = XLP_IO_USB_EHCI0_OFFSET(0);
76 break;
82 case PIC_EHCI_1_IRQ: 77 case PIC_EHCI_1_IRQ:
83 return PIC_IRT_EHCI_1_INDEX; 78 devoff = XLP_IO_USB_EHCI1_OFFSET(0);
79 break;
84 case PIC_OHCI_0_IRQ: 80 case PIC_OHCI_0_IRQ:
85 return PIC_IRT_OHCI_0_INDEX; 81 devoff = XLP_IO_USB_OHCI0_OFFSET(0);
82 break;
86 case PIC_OHCI_1_IRQ: 83 case PIC_OHCI_1_IRQ:
87 return PIC_IRT_OHCI_1_INDEX; 84 devoff = XLP_IO_USB_OHCI1_OFFSET(0);
85 break;
88 case PIC_OHCI_2_IRQ: 86 case PIC_OHCI_2_IRQ:
89 return PIC_IRT_OHCI_2_INDEX; 87 devoff = XLP_IO_USB_OHCI2_OFFSET(0);
88 break;
90 case PIC_OHCI_3_IRQ: 89 case PIC_OHCI_3_IRQ:
91 return PIC_IRT_OHCI_3_INDEX; 90 devoff = XLP_IO_USB_OHCI3_OFFSET(0);
91 break;
92 case PIC_MMC_IRQ: 92 case PIC_MMC_IRQ:
93 return PIC_IRT_MMC_INDEX; 93 devoff = XLP_IO_SD_OFFSET(0);
94 break;
94 case PIC_I2C_0_IRQ: 95 case PIC_I2C_0_IRQ:
95 return PIC_IRT_I2C_0_INDEX; 96 devoff = XLP_IO_I2C0_OFFSET(0);
97 break;
96 case PIC_I2C_1_IRQ: 98 case PIC_I2C_1_IRQ:
97 return PIC_IRT_I2C_1_INDEX; 99 devoff = XLP_IO_I2C1_OFFSET(0);
100 break;
98 default: 101 default:
99 return -1; 102 devoff = 0;
103 break;
100 } 104 }
105
106 if (devoff != 0) {
107 pcibase = nlm_pcicfg_base(devoff);
108 irt = nlm_read_reg(pcibase, XLP_PCI_IRTINFO_REG) & 0xffff;
109 /* HW bug, I2C 1 irt entry is off by one */
110 if (irq == PIC_I2C_1_IRQ)
111 irt = irt + 1;
112 } else if (irq >= PIC_PCIE_LINK_0_IRQ && irq <= PIC_PCIE_LINK_3_IRQ) {
113 /* HW bug, PCI IRT entries are bad on early silicon, fix */
114 irt = PIC_IRT_PCIE_LINK_INDEX(irq - PIC_PCIE_LINK_0_IRQ);
115 } else {
116 irt = -1;
117 }
118 return irt;
101} 119}
102 120
103unsigned int nlm_get_core_frequency(int node, int core) 121unsigned int nlm_get_core_frequency(int node, int core)
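
The nlm_irq_to_irt() rework replaces a hard-coded IRQ-to-IRT table with a lookup through the device's PCI configuration space: the IRQ is first mapped to a device offset, the IRT index is then read from an IRT-info register in that device's config header, and two hardware quirks (the I2C1 off-by-one and the bad PCIe link entries on early silicon) are patched up afterwards. The table-plus-fixup structure can be sketched like this, with made-up IRQ numbers and offsets and the config-space read replaced by a stub:

#include <stdio.h>

/* Hypothetical IRQ numbers and device offsets, standing in for the XLP macros. */
enum { IRQ_UART0 = 17, IRQ_UART1 = 18, IRQ_I2C0 = 30, IRQ_I2C1 = 31 };

struct irq_dev_map {
    int irq;
    unsigned int devoff;
};

static const struct irq_dev_map map[] = {
    { IRQ_UART0, 0x1e000 },
    { IRQ_UART1, 0x1e100 },
    { IRQ_I2C0,  0x1e500 },
    { IRQ_I2C1,  0x1e600 },
};

/* Stub for reading the IRT-info register from the device's config space. */
static int read_irtinfo(unsigned int devoff)
{
    return (int)(devoff >> 8) & 0xffff;    /* placeholder value */
}

static int irq_to_irt(int irq)
{
    for (unsigned int i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
        if (map[i].irq != irq)
            continue;
        int irt = read_irtinfo(map[i].devoff);
        if (irq == IRQ_I2C1)    /* hardware quirk: entry is off by one */
            irt++;
        return irt;
    }
    return -1;    /* no IRT entry for this interrupt */
}

int main(void)
{
    printf("irt(uart0) = %d\n", irq_to_irt(IRQ_UART0));
    printf("irt(i2c1)  = %d\n", irq_to_irt(IRQ_I2C1));
    return 0;
}
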
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c
index 4894d62043ac..af319143b591 100644
--- a/arch/mips/netlogic/xlp/setup.c
+++ b/arch/mips/netlogic/xlp/setup.c
@@ -56,7 +56,7 @@ uint64_t nlm_io_base;
56struct nlm_soc_info nlm_nodes[NLM_NR_NODES]; 56struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
57cpumask_t nlm_cpumask = CPU_MASK_CPU0; 57cpumask_t nlm_cpumask = CPU_MASK_CPU0;
58unsigned int nlm_threads_per_core; 58unsigned int nlm_threads_per_core;
59extern u32 __dtb_start[]; 59extern u32 __dtb_xlp_evp_begin[], __dtb_xlp_svp_begin[], __dtb_start[];
60 60
61static void nlm_linux_exit(void) 61static void nlm_linux_exit(void)
62{ 62{
@@ -82,8 +82,24 @@ void __init plat_mem_setup(void)
82 * 64-bit, so convert pointer. 82 * 64-bit, so convert pointer.
83 */ 83 */
84 fdtp = (void *)(long)fw_arg0; 84 fdtp = (void *)(long)fw_arg0;
85 if (!fdtp) 85 if (!fdtp) {
86 fdtp = __dtb_start; 86 switch (current_cpu_data.processor_id & 0xff00) {
87#ifdef CONFIG_DT_XLP_SVP
88 case PRID_IMP_NETLOGIC_XLP3XX:
89 fdtp = __dtb_xlp_svp_begin;
90 break;
91#endif
92#ifdef CONFIG_DT_XLP_EVP
93 case PRID_IMP_NETLOGIC_XLP8XX:
94 fdtp = __dtb_xlp_evp_begin;
95 break;
96#endif
97 default:
98 /* Pick a built-in if any, and hope for the best */
99 fdtp = __dtb_start;
100 break;
101 }
102 }
87 fdtp = phys_to_virt(__pa(fdtp)); 103 fdtp = phys_to_virt(__pa(fdtp));
88 early_init_devtree(fdtp); 104 early_init_devtree(fdtp);
89} 105}
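
plat_mem_setup() now falls back to a built-in device tree chosen by the processor ID when the boot loader does not pass one in. The selection is a plain switch with a catch-all default; a sketch with placeholder PRID values and dummy blob symbols (all names hypothetical) is:

#include <stdio.h>
#include <stdint.h>

/* Placeholder PRID implementation values and built-in blobs, for illustration. */
#define PRID_IMP_XLP3XX 0x1100
#define PRID_IMP_XLP8XX 0x1000

static const uint8_t dtb_svp[] = { 0xd0, 0x0d, 0xfe, 0xed };  /* dummy blob */
static const uint8_t dtb_evp[] = { 0xd0, 0x0d, 0xfe, 0xed };  /* dummy blob */

static const void *select_builtin_dtb(uint32_t processor_id)
{
    switch (processor_id & 0xff00) {
    case PRID_IMP_XLP3XX:
        return dtb_svp;    /* SVP board */
    case PRID_IMP_XLP8XX:
        return dtb_evp;    /* EVP evaluation board */
    default:
        return dtb_evp;    /* pick a built-in and hope for the best */
    }
}

int main(void)
{
    const void *fdt = select_builtin_dtb(0x1103);    /* pretend XLP3XX */
    printf("selected %s blob\n", fdt == dtb_svp ? "SVP" : "EVP");
    return 0;
}
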
diff --git a/arch/mips/netlogic/xlp/usb-init.c b/arch/mips/netlogic/xlp/usb-init.c
index 1d0b66c62fd1..9c401dd78337 100644
--- a/arch/mips/netlogic/xlp/usb-init.c
+++ b/arch/mips/netlogic/xlp/usb-init.c
@@ -42,7 +42,30 @@
42#include <asm/netlogic/haldefs.h> 42#include <asm/netlogic/haldefs.h>
43#include <asm/netlogic/xlp-hal/iomap.h> 43#include <asm/netlogic/xlp-hal/iomap.h>
44#include <asm/netlogic/xlp-hal/xlp.h> 44#include <asm/netlogic/xlp-hal/xlp.h>
45#include <asm/netlogic/xlp-hal/usb.h> 45
46/*
47 * USB glue logic registers, used only during initialization
48 */
49#define USB_CTL_0 0x01
50#define USB_PHY_0 0x0A
51#define USB_PHY_RESET 0x01
52#define USB_PHY_PORT_RESET_0 0x10
53#define USB_PHY_PORT_RESET_1 0x20
54#define USB_CONTROLLER_RESET 0x01
55#define USB_INT_STATUS 0x0E
56#define USB_INT_EN 0x0F
57#define USB_PHY_INTERRUPT_EN 0x01
58#define USB_OHCI_INTERRUPT_EN 0x02
59#define USB_OHCI_INTERRUPT1_EN 0x04
60#define USB_OHCI_INTERRUPT2_EN 0x08
61#define USB_CTRL_INTERRUPT_EN 0x10
62
63#define nlm_read_usb_reg(b, r) nlm_read_reg(b, r)
64#define nlm_write_usb_reg(b, r, v) nlm_write_reg(b, r, v)
65#define nlm_get_usb_pcibase(node, inst) \
66 nlm_pcicfg_base(XLP_IO_USB_OFFSET(node, inst))
67#define nlm_get_usb_regbase(node, inst) \
68 (nlm_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
46 69
47static void nlm_usb_intr_en(int node, int port) 70static void nlm_usb_intr_en(int node, int port)
48{ 71{
@@ -99,23 +122,23 @@ static void nlm_usb_fixup_final(struct pci_dev *dev)
99 dev->dev.coherent_dma_mask = DMA_BIT_MASK(64); 122 dev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
100 switch (dev->devfn) { 123 switch (dev->devfn) {
101 case 0x10: 124 case 0x10:
102 dev->irq = PIC_EHCI_0_IRQ; 125 dev->irq = PIC_EHCI_0_IRQ;
103 break; 126 break;
104 case 0x11: 127 case 0x11:
105 dev->irq = PIC_OHCI_0_IRQ; 128 dev->irq = PIC_OHCI_0_IRQ;
106 break; 129 break;
107 case 0x12: 130 case 0x12:
108 dev->irq = PIC_OHCI_1_IRQ; 131 dev->irq = PIC_OHCI_1_IRQ;
109 break; 132 break;
110 case 0x13: 133 case 0x13:
111 dev->irq = PIC_EHCI_1_IRQ; 134 dev->irq = PIC_EHCI_1_IRQ;
112 break; 135 break;
113 case 0x14: 136 case 0x14:
114 dev->irq = PIC_OHCI_2_IRQ; 137 dev->irq = PIC_OHCI_2_IRQ;
115 break; 138 break;
116 case 0x15: 139 case 0x15:
117 dev->irq = PIC_OHCI_3_IRQ; 140 dev->irq = PIC_OHCI_3_IRQ;
118 break; 141 break;
119 } 142 }
120} 143}
121DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_EHCI, 144DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_EHCI,
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 1fd361462c03..e4b1140cdae0 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -41,7 +41,7 @@ static int (*save_perf_irq)(void);
41 * first hardware thread in the core for setup and init. 41 * first hardware thread in the core for setup and init.
42 * Skip CPUs with non-zero hardware thread id (4 hwt per core) 42 * Skip CPUs with non-zero hardware thread id (4 hwt per core)
43 */ 43 */
44#ifdef CONFIG_CPU_XLR 44#if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP)
45#define oprofile_skip_cpu(c) ((cpu_logical_map(c) & 0x3) != 0) 45#define oprofile_skip_cpu(c) ((cpu_logical_map(c) & 0x3) != 0)
46#else 46#else
47#define oprofile_skip_cpu(c) 0 47#define oprofile_skip_cpu(c) 0
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c
index 412ec025cf55..18517dd0f709 100644
--- a/arch/mips/pci/pci-ar71xx.c
+++ b/arch/mips/pci/pci-ar71xx.c
@@ -366,9 +366,9 @@ static int ar71xx_pci_probe(struct platform_device *pdev)
366 if (!res) 366 if (!res)
367 return -EINVAL; 367 return -EINVAL;
368 368
369 apc->cfg_base = devm_request_and_ioremap(&pdev->dev, res); 369 apc->cfg_base = devm_ioremap_resource(&pdev->dev, res);
370 if (!apc->cfg_base) 370 if (IS_ERR(apc->cfg_base))
371 return -ENOMEM; 371 return PTR_ERR(apc->cfg_base);
372 372
373 apc->irq = platform_get_irq(pdev, 0); 373 apc->irq = platform_get_irq(pdev, 0);
374 if (apc->irq < 0) 374 if (apc->irq < 0)
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index 8a0700d448fe..65ec032fa0b4 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -365,25 +365,25 @@ static int ar724x_pci_probe(struct platform_device *pdev)
365 if (!res) 365 if (!res)
366 return -EINVAL; 366 return -EINVAL;
367 367
368 apc->ctrl_base = devm_request_and_ioremap(&pdev->dev, res); 368 apc->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
369 if (apc->ctrl_base == NULL) 369 if (IS_ERR(apc->ctrl_base))
370 return -EBUSY; 370 return PTR_ERR(apc->ctrl_base);
371 371
372 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base"); 372 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base");
373 if (!res) 373 if (!res)
374 return -EINVAL; 374 return -EINVAL;
375 375
376 apc->devcfg_base = devm_request_and_ioremap(&pdev->dev, res); 376 apc->devcfg_base = devm_ioremap_resource(&pdev->dev, res);
377 if (!apc->devcfg_base) 377 if (IS_ERR(apc->devcfg_base))
378 return -EBUSY; 378 return PTR_ERR(apc->devcfg_base);
379 379
380 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crp_base"); 380 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crp_base");
381 if (!res) 381 if (!res)
382 return -EINVAL; 382 return -EINVAL;
383 383
384 apc->crp_base = devm_request_and_ioremap(&pdev->dev, res); 384 apc->crp_base = devm_ioremap_resource(&pdev->dev, res);
385 if (apc->crp_base == NULL) 385 if (IS_ERR(apc->crp_base))
386 return -EBUSY; 386 return PTR_ERR(apc->crp_base);
387 387
388 apc->irq = platform_get_irq(pdev, 0); 388 apc->irq = platform_get_irq(pdev, 0);
389 if (apc->irq < 0) 389 if (apc->irq < 0)
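
The ar71xx/ar724x probe changes switch from devm_request_and_ioremap(), which returned NULL on failure, to devm_ioremap_resource(), which returns an encoded error pointer, so the checks become IS_ERR()/PTR_ERR() instead of NULL tests. The error-pointer convention itself is easy to reproduce outside the kernel; a minimal sketch of the helpers and the calling pattern (not the kernel's actual definitions, just the same idea):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* Minimal reimplementation of the error-pointer helpers. */
#define MAX_ERRNO    4095
#define ERR_PTR(err) ((void *)(intptr_t)(err))
#define PTR_ERR(ptr) ((long)(intptr_t)(ptr))
#define IS_ERR(ptr)  ((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

/* Pretend mapping function: fails with -ENOMEM when given a NULL resource. */
static void *ioremap_resource(void *res)
{
    static char mmio_window[0x1000];

    if (!res)
        return ERR_PTR(-ENOMEM);
    return mmio_window;
}

int main(void)
{
    void *base = ioremap_resource(NULL);

    if (IS_ERR(base)) {
        printf("mapping failed: %ld\n", PTR_ERR(base));
        return 1;
    }
    printf("mapped at %p\n", base);
    return 0;
}
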
diff --git a/arch/mips/pci/pci-bcm63xx.c b/arch/mips/pci/pci-bcm63xx.c
index 88e781c6b5ba..2eb954239bc5 100644
--- a/arch/mips/pci/pci-bcm63xx.c
+++ b/arch/mips/pci/pci-bcm63xx.c
@@ -121,11 +121,17 @@ void __iomem *pci_iospace_start;
121static void __init bcm63xx_reset_pcie(void) 121static void __init bcm63xx_reset_pcie(void)
122{ 122{
123 u32 val; 123 u32 val;
124 u32 reg;
124 125
125 /* enable SERDES */ 126 /* enable SERDES */
126 val = bcm_misc_readl(MISC_SERDES_CTRL_REG); 127 if (BCMCPU_IS_6328())
128 reg = MISC_SERDES_CTRL_6328_REG;
129 else
130 reg = MISC_SERDES_CTRL_6362_REG;
131
132 val = bcm_misc_readl(reg);
127 val |= SERDES_PCIE_EN | SERDES_PCIE_EXD_EN; 133 val |= SERDES_PCIE_EN | SERDES_PCIE_EXD_EN;
128 bcm_misc_writel(val, MISC_SERDES_CTRL_REG); 134 bcm_misc_writel(val, reg);
129 135
130 /* reset the PCIe core */ 136 /* reset the PCIe core */
131 bcm63xx_core_set_reset(BCM63XX_RESET_PCIE, 1); 137 bcm63xx_core_set_reset(BCM63XX_RESET_PCIE, 1);
@@ -330,6 +336,7 @@ static int __init bcm63xx_pci_init(void)
330 336
331 switch (bcm63xx_get_cpu_id()) { 337 switch (bcm63xx_get_cpu_id()) {
332 case BCM6328_CPU_ID: 338 case BCM6328_CPU_ID:
339 case BCM6362_CPU_ID:
333 return bcm63xx_register_pcie(); 340 return bcm63xx_register_pcie();
334 case BCM6348_CPU_ID: 341 case BCM6348_CPU_ID:
335 case BCM6358_CPU_ID: 342 case BCM6358_CPU_ID:
diff --git a/arch/mips/powertv/init.c b/arch/mips/powertv/init.c
index 5bd9d8f468cc..a01baff52cae 100644
--- a/arch/mips/powertv/init.c
+++ b/arch/mips/powertv/init.c
@@ -29,10 +29,11 @@
29#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
30#include <asm/traps.h> 30#include <asm/traps.h>
31 31
32#include <asm/mips-boards/prom.h>
33#include <asm/mips-boards/generic.h> 32#include <asm/mips-boards/generic.h>
34#include <asm/mach-powertv/asic.h> 33#include <asm/mach-powertv/asic.h>
35 34
35#include "init.h"
36
36static int *_prom_envp; 37static int *_prom_envp;
37unsigned long _prom_memsize; 38unsigned long _prom_memsize;
38 39
diff --git a/arch/mips/powertv/init.h b/arch/mips/powertv/init.h
index b194c34ca966..c1a8bd0dbe4b 100644
--- a/arch/mips/powertv/init.h
+++ b/arch/mips/powertv/init.h
@@ -23,4 +23,6 @@
23#ifndef _POWERTV_INIT_H 23#ifndef _POWERTV_INIT_H
24#define _POWERTV_INIT_H 24#define _POWERTV_INIT_H
25extern unsigned long _prom_memsize; 25extern unsigned long _prom_memsize;
26extern void prom_meminit(void);
27extern char *prom_getenv(char *name);
26#endif 28#endif
diff --git a/arch/mips/powertv/memory.c b/arch/mips/powertv/memory.c
index 6e5f1bdc59b5..bc2f3ca22b41 100644
--- a/arch/mips/powertv/memory.c
+++ b/arch/mips/powertv/memory.c
@@ -29,7 +29,6 @@
29#include <asm/page.h> 29#include <asm/page.h>
30#include <asm/sections.h> 30#include <asm/sections.h>
31 31
32#include <asm/mips-boards/prom.h>
33#include <asm/mach-powertv/asic.h> 32#include <asm/mach-powertv/asic.h>
34#include <asm/mach-powertv/ioremap.h> 33#include <asm/mach-powertv/ioremap.h>
35 34
diff --git a/arch/mips/powertv/powertv_setup.c b/arch/mips/powertv/powertv_setup.c
index 820b8480f222..24689bff1039 100644
--- a/arch/mips/powertv/powertv_setup.c
+++ b/arch/mips/powertv/powertv_setup.c
@@ -31,7 +31,6 @@
31#include <asm/bootinfo.h> 31#include <asm/bootinfo.h>
32#include <asm/irq.h> 32#include <asm/irq.h>
33#include <asm/mips-boards/generic.h> 33#include <asm/mips-boards/generic.h>
34#include <asm/mips-boards/prom.h>
35#include <asm/dma.h> 34#include <asm/dma.h>
36#include <asm/asm.h> 35#include <asm/asm.h>
37#include <asm/traps.h> 36#include <asm/traps.h>
diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
index a0b0197cab0a..026e823d871d 100644
--- a/arch/mips/ralink/Kconfig
+++ b/arch/mips/ralink/Kconfig
@@ -6,12 +6,23 @@ choice
6 help 6 help
7 Select Ralink MIPS SoC type. 7 Select Ralink MIPS SoC type.
8 8
9 config SOC_RT288X
10 bool "RT288x"
11
9 config SOC_RT305X 12 config SOC_RT305X
10 bool "RT305x" 13 bool "RT305x"
11 select USB_ARCH_HAS_HCD 14 select USB_ARCH_HAS_HCD
12 select USB_ARCH_HAS_OHCI 15 select USB_ARCH_HAS_OHCI
13 select USB_ARCH_HAS_EHCI 16 select USB_ARCH_HAS_EHCI
14 17
18 config SOC_RT3883
19 bool "RT3883"
20 select USB_ARCH_HAS_OHCI
21 select USB_ARCH_HAS_EHCI
22
23 config SOC_MT7620
24 bool "MT7620"
25
15endchoice 26endchoice
16 27
17choice 28choice
@@ -23,10 +34,22 @@ choice
23 config DTB_RT_NONE 34 config DTB_RT_NONE
24 bool "None" 35 bool "None"
25 36
37 config DTB_RT2880_EVAL
38 bool "RT2880 eval kit"
39 depends on SOC_RT288X
40
26 config DTB_RT305X_EVAL 41 config DTB_RT305X_EVAL
27 bool "RT305x eval kit" 42 bool "RT305x eval kit"
28 depends on SOC_RT305X 43 depends on SOC_RT305X
29 44
45 config DTB_RT3883_EVAL
46 bool "RT3883 eval kit"
47 depends on SOC_RT3883
48
49 config DTB_MT7620A_EVAL
50 bool "MT7620A eval kit"
51 depends on SOC_MT7620
52
30endchoice 53endchoice
31 54
32endif 55endif
diff --git a/arch/mips/ralink/Makefile b/arch/mips/ralink/Makefile
index 939757f0e71f..38cf1a880aaa 100644
--- a/arch/mips/ralink/Makefile
+++ b/arch/mips/ralink/Makefile
@@ -8,7 +8,10 @@
8 8
9obj-y := prom.o of.o reset.o clk.o irq.o 9obj-y := prom.o of.o reset.o clk.o irq.o
10 10
11obj-$(CONFIG_SOC_RT288X) += rt288x.o
11obj-$(CONFIG_SOC_RT305X) += rt305x.o 12obj-$(CONFIG_SOC_RT305X) += rt305x.o
13obj-$(CONFIG_SOC_RT3883) += rt3883.o
14obj-$(CONFIG_SOC_MT7620) += mt7620.o
12 15
13obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 16obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
14 17
diff --git a/arch/mips/ralink/Platform b/arch/mips/ralink/Platform
index 6babd65765e6..cda4b6645c50 100644
--- a/arch/mips/ralink/Platform
+++ b/arch/mips/ralink/Platform
@@ -5,6 +5,24 @@ core-$(CONFIG_RALINK) += arch/mips/ralink/
5cflags-$(CONFIG_RALINK) += -I$(srctree)/arch/mips/include/asm/mach-ralink 5cflags-$(CONFIG_RALINK) += -I$(srctree)/arch/mips/include/asm/mach-ralink
6 6
7# 7#
8# Ralink RT288x
9#
10load-$(CONFIG_SOC_RT288X) += 0xffffffff88000000
11cflags-$(CONFIG_SOC_RT288X) += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt288x
12
13#
8# Ralink RT305x 14# Ralink RT305x
9# 15#
10load-$(CONFIG_SOC_RT305X) += 0xffffffff80000000 16load-$(CONFIG_SOC_RT305X) += 0xffffffff80000000
17cflags-$(CONFIG_SOC_RT305X) += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt305x
18
19#
20# Ralink RT3883
21#
22load-$(CONFIG_SOC_RT3883) += 0xffffffff80000000
23cflags-$(CONFIG_SOC_RT3883) += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt3883
24
25#
26# Ralink MT7620
27#
28load-$(CONFIG_SOC_MT7620) += 0xffffffff80000000
diff --git a/arch/mips/ralink/common.h b/arch/mips/ralink/common.h
index 300990313e1b..83144c3fc5ac 100644
--- a/arch/mips/ralink/common.h
+++ b/arch/mips/ralink/common.h
@@ -22,13 +22,22 @@ struct ralink_pinmux {
 	struct ralink_pinmux_grp *mode;
 	struct ralink_pinmux_grp *uart;
 	int uart_shift;
+	u32 uart_mask;
 	void (*wdt_reset)(void);
+	struct ralink_pinmux_grp *pci;
+	int pci_shift;
+	u32 pci_mask;
 };
-extern struct ralink_pinmux gpio_pinmux;
+extern struct ralink_pinmux rt_gpio_pinmux;
 
 struct ralink_soc_info {
 	unsigned char sys_type[RAMIPS_SYS_TYPE_LEN];
 	unsigned char *compatible;
+
+	unsigned long mem_base;
+	unsigned long mem_size;
+	unsigned long mem_size_min;
+	unsigned long mem_size_max;
 };
 extern struct ralink_soc_info soc_info;
 
diff --git a/arch/mips/ralink/dts/Makefile b/arch/mips/ralink/dts/Makefile
index 1a69fb300955..18194fa93e80 100644
--- a/arch/mips/ralink/dts/Makefile
+++ b/arch/mips/ralink/dts/Makefile
@@ -1 +1,4 @@
+obj-$(CONFIG_DTB_RT2880_EVAL) := rt2880_eval.dtb.o
 obj-$(CONFIG_DTB_RT305X_EVAL) := rt3052_eval.dtb.o
+obj-$(CONFIG_DTB_RT3883_EVAL) := rt3883_eval.dtb.o
+obj-$(CONFIG_DTB_MT7620A_EVAL) := mt7620a_eval.dtb.o
diff --git a/arch/mips/ralink/dts/mt7620a.dtsi b/arch/mips/ralink/dts/mt7620a.dtsi
new file mode 100644
index 000000000000..08bf24fefe9f
--- /dev/null
+++ b/arch/mips/ralink/dts/mt7620a.dtsi
@@ -0,0 +1,58 @@
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "ralink,mtk7620a-soc";
+
+	cpus {
+		cpu@0 {
+			compatible = "mips,mips24KEc";
+		};
+	};
+
+	cpuintc: cpuintc@0 {
+		#address-cells = <0>;
+		#interrupt-cells = <1>;
+		interrupt-controller;
+		compatible = "mti,cpu-interrupt-controller";
+	};
+
+	palmbus@10000000 {
+		compatible = "palmbus";
+		reg = <0x10000000 0x200000>;
+		ranges = <0x0 0x10000000 0x1FFFFF>;
+
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		sysc@0 {
+			compatible = "ralink,mt7620a-sysc";
+			reg = <0x0 0x100>;
+		};
+
+		intc: intc@200 {
+			compatible = "ralink,mt7620a-intc", "ralink,rt2880-intc";
+			reg = <0x200 0x100>;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			interrupt-parent = <&cpuintc>;
+			interrupts = <2>;
+		};
+
+		memc@300 {
+			compatible = "ralink,mt7620a-memc", "ralink,rt3050-memc";
+			reg = <0x300 0x100>;
+		};
+
+		uartlite@c00 {
+			compatible = "ralink,mt7620a-uart", "ralink,rt2880-uart", "ns16550a";
+			reg = <0xc00 0x100>;
+
+			interrupt-parent = <&intc>;
+			interrupts = <12>;
+
+			reg-shift = <2>;
+		};
+	};
+};
diff --git a/arch/mips/ralink/dts/mt7620a_eval.dts b/arch/mips/ralink/dts/mt7620a_eval.dts
new file mode 100644
index 000000000000..35eb874ab7f1
--- /dev/null
+++ b/arch/mips/ralink/dts/mt7620a_eval.dts
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/include/ "mt7620a.dtsi"
+
+/ {
+	compatible = "ralink,mt7620a-eval-board", "ralink,mt7620a-soc";
+	model = "Ralink MT7620A evaluation board";
+
+	memory@0 {
+		reg = <0x0 0x2000000>;
+	};
+
+	chosen {
+		bootargs = "console=ttyS0,57600";
+	};
+};
diff --git a/arch/mips/ralink/dts/rt2880.dtsi b/arch/mips/ralink/dts/rt2880.dtsi
new file mode 100644
index 000000000000..182afde2f2e1
--- /dev/null
+++ b/arch/mips/ralink/dts/rt2880.dtsi
@@ -0,0 +1,58 @@
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "ralink,rt2880-soc";
+
+	cpus {
+		cpu@0 {
+			compatible = "mips,mips4KEc";
+		};
+	};
+
+	cpuintc: cpuintc@0 {
+		#address-cells = <0>;
+		#interrupt-cells = <1>;
+		interrupt-controller;
+		compatible = "mti,cpu-interrupt-controller";
+	};
+
+	palmbus@300000 {
+		compatible = "palmbus";
+		reg = <0x300000 0x200000>;
+		ranges = <0x0 0x300000 0x1FFFFF>;
+
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		sysc@0 {
+			compatible = "ralink,rt2880-sysc";
+			reg = <0x0 0x100>;
+		};
+
+		intc: intc@200 {
+			compatible = "ralink,rt2880-intc";
+			reg = <0x200 0x100>;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			interrupt-parent = <&cpuintc>;
+			interrupts = <2>;
+		};
+
+		memc@300 {
+			compatible = "ralink,rt2880-memc";
+			reg = <0x300 0x100>;
+		};
+
+		uartlite@c00 {
+			compatible = "ralink,rt2880-uart", "ns16550a";
+			reg = <0xc00 0x100>;
+
+			interrupt-parent = <&intc>;
+			interrupts = <8>;
+
+			reg-shift = <2>;
+		};
+	};
+};
diff --git a/arch/mips/ralink/dts/rt2880_eval.dts b/arch/mips/ralink/dts/rt2880_eval.dts
new file mode 100644
index 000000000000..322d7002595b
--- /dev/null
+++ b/arch/mips/ralink/dts/rt2880_eval.dts
@@ -0,0 +1,46 @@
+/dts-v1/;
+
+/include/ "rt2880.dtsi"
+
+/ {
+	compatible = "ralink,rt2880-eval-board", "ralink,rt2880-soc";
+	model = "Ralink RT2880 evaluation board";
+
+	memory@0 {
+		reg = <0x8000000 0x2000000>;
+	};
+
+	chosen {
+		bootargs = "console=ttyS0,57600";
+	};
+
+	cfi@1f000000 {
+		compatible = "cfi-flash";
+		reg = <0x1f000000 0x400000>;
+
+		bank-width = <2>;
+		device-width = <2>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		partition@0 {
+			label = "uboot";
+			reg = <0x0 0x30000>;
+			read-only;
+		};
+		partition@30000 {
+			label = "uboot-env";
+			reg = <0x30000 0x10000>;
+			read-only;
+		};
+		partition@40000 {
+			label = "calibration";
+			reg = <0x40000 0x10000>;
+			read-only;
+		};
+		partition@50000 {
+			label = "linux";
+			reg = <0x50000 0x3b0000>;
+		};
+	};
+};
diff --git a/arch/mips/ralink/dts/rt3050.dtsi b/arch/mips/ralink/dts/rt3050.dtsi
index 069d0660e1dd..ef7da1e227e6 100644
--- a/arch/mips/ralink/dts/rt3050.dtsi
+++ b/arch/mips/ralink/dts/rt3050.dtsi
@@ -1,7 +1,7 @@
 / {
 	#address-cells = <1>;
 	#size-cells = <1>;
-	compatible = "ralink,rt3050-soc", "ralink,rt3052-soc";
+	compatible = "ralink,rt3050-soc", "ralink,rt3052-soc", "ralink,rt3350-soc";
 
 	cpus {
 		cpu@0 {
@@ -9,10 +9,6 @@
 		};
 	};
 
-	chosen {
-		bootargs = "console=ttyS0,57600 init=/init";
-	};
-
 	cpuintc: cpuintc@0 {
 		#address-cells = <0>;
 		#interrupt-cells = <1>;
@@ -23,7 +19,7 @@
 	palmbus@10000000 {
 		compatible = "palmbus";
 		reg = <0x10000000 0x200000>;
-		ranges = <0x0 0x10000000 0x1FFFFF>;
+		ranges = <0x0 0x10000000 0x1FFFFF>;
 
 		#address-cells = <1>;
 		#size-cells = <1>;
@@ -33,11 +29,6 @@
 			reg = <0x0 0x100>;
 		};
 
-		timer@100 {
-			compatible = "ralink,rt3052-wdt", "ralink,rt2880-wdt";
-			reg = <0x100 0x100>;
-		};
-
 		intc: intc@200 {
 			compatible = "ralink,rt3052-intc", "ralink,rt2880-intc";
 			reg = <0x200 0x100>;
@@ -54,45 +45,6 @@
 			reg = <0x300 0x100>;
 		};
 
-		gpio0: gpio@600 {
-			compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
-			reg = <0x600 0x34>;
-
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			ralink,ngpio = <24>;
-			ralink,regs = [ 00 04 08 0c
-					20 24 28 2c
-					30 34 ];
-		};
-
-		gpio1: gpio@638 {
-			compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
-			reg = <0x638 0x24>;
-
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			ralink,ngpio = <16>;
-			ralink,regs = [ 00 04 08 0c
-					10 14 18 1c
-					20 24 ];
-		};
-
-		gpio2: gpio@660 {
-			compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
-			reg = <0x660 0x24>;
-
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			ralink,ngpio = <12>;
-			ralink,regs = [ 00 04 08 0c
-					10 14 18 1c
-					20 24 ];
-		};
-
 		uartlite@c00 {
 			compatible = "ralink,rt3052-uart", "ralink,rt2880-uart", "ns16550a";
 			reg = <0xc00 0x100>;
diff --git a/arch/mips/ralink/dts/rt3052_eval.dts b/arch/mips/ralink/dts/rt3052_eval.dts
index 148a590bc419..c18c9a84f4c4 100644
--- a/arch/mips/ralink/dts/rt3052_eval.dts
+++ b/arch/mips/ralink/dts/rt3052_eval.dts
@@ -1,10 +1,8 @@
 /dts-v1/;
 
-/include/ "rt3050.dtsi"
+#include "rt3050.dtsi"
 
 / {
-	#address-cells = <1>;
-	#size-cells = <1>;
 	compatible = "ralink,rt3052-eval-board", "ralink,rt3052-soc";
 	model = "Ralink RT3052 evaluation board";
 
@@ -12,12 +10,8 @@
 		reg = <0x0 0x2000000>;
 	};
 
-	palmbus@10000000 {
-		sysc@0 {
-			ralink,pinmmux = "uartlite", "spi";
-			ralink,uartmux = "gpio";
-			ralink,wdtmux = <0>;
-		};
+	chosen {
+		bootargs = "console=ttyS0,57600";
 	};
 
 	cfi@1f000000 {
diff --git a/arch/mips/ralink/dts/rt3883.dtsi b/arch/mips/ralink/dts/rt3883.dtsi
new file mode 100644
index 000000000000..3b131dd0d5ac
--- /dev/null
+++ b/arch/mips/ralink/dts/rt3883.dtsi
@@ -0,0 +1,58 @@
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "ralink,rt3883-soc";
+
+	cpus {
+		cpu@0 {
+			compatible = "mips,mips74Kc";
+		};
+	};
+
+	cpuintc: cpuintc@0 {
+		#address-cells = <0>;
+		#interrupt-cells = <1>;
+		interrupt-controller;
+		compatible = "mti,cpu-interrupt-controller";
+	};
+
+	palmbus@10000000 {
+		compatible = "palmbus";
+		reg = <0x10000000 0x200000>;
+		ranges = <0x0 0x10000000 0x1FFFFF>;
+
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		sysc@0 {
+			compatible = "ralink,rt3883-sysc", "ralink,rt3050-sysc";
+			reg = <0x0 0x100>;
+		};
+
+		intc: intc@200 {
+			compatible = "ralink,rt3883-intc", "ralink,rt2880-intc";
+			reg = <0x200 0x100>;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			interrupt-parent = <&cpuintc>;
+			interrupts = <2>;
+		};
+
+		memc@300 {
+			compatible = "ralink,rt3883-memc", "ralink,rt3050-memc";
+			reg = <0x300 0x100>;
+		};
+
+		uartlite@c00 {
+			compatible = "ralink,rt3883-uart", "ralink,rt2880-uart", "ns16550a";
+			reg = <0xc00 0x100>;
+
+			interrupt-parent = <&intc>;
+			interrupts = <12>;
+
+			reg-shift = <2>;
+		};
+	};
+};
diff --git a/arch/mips/ralink/dts/rt3883_eval.dts b/arch/mips/ralink/dts/rt3883_eval.dts
new file mode 100644
index 000000000000..2fa6b330bf4f
--- /dev/null
+++ b/arch/mips/ralink/dts/rt3883_eval.dts
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/include/ "rt3883.dtsi"
+
+/ {
+	compatible = "ralink,rt3883-eval-board", "ralink,rt3883-soc";
+	model = "Ralink RT3883 evaluation board";
+
+	memory@0 {
+		reg = <0x0 0x2000000>;
+	};
+
+	chosen {
+		bootargs = "console=ttyS0,57600";
+	};
+};
diff --git a/arch/mips/ralink/early_printk.c b/arch/mips/ralink/early_printk.c
index c4ae47eb24ab..b46d0419d09b 100644
--- a/arch/mips/ralink/early_printk.c
+++ b/arch/mips/ralink/early_printk.c
@@ -11,7 +11,11 @@
 
 #include <asm/addrspace.h>
 
+#ifdef CONFIG_SOC_RT288X
+#define EARLY_UART_BASE		0x300c00
+#else
 #define EARLY_UART_BASE		0x10000c00
+#endif
 
 #define UART_REG_RX		0x00
 #define UART_REG_TX		0x04
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index 6d054c5ec9ab..320b1f1043ff 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -31,6 +31,7 @@
 #define INTC_INT_GLOBAL		BIT(31)
 
 #define RALINK_CPU_IRQ_INTC	(MIPS_CPU_IRQ_BASE + 2)
+#define RALINK_CPU_IRQ_PCI	(MIPS_CPU_IRQ_BASE + 4)
 #define RALINK_CPU_IRQ_FE	(MIPS_CPU_IRQ_BASE + 5)
 #define RALINK_CPU_IRQ_WIFI	(MIPS_CPU_IRQ_BASE + 6)
 #define RALINK_CPU_IRQ_COUNTER	(MIPS_CPU_IRQ_BASE + 7)
@@ -104,6 +105,9 @@ asmlinkage void plat_irq_dispatch(void)
 	else if (pending & STATUSF_IP6)
 		do_IRQ(RALINK_CPU_IRQ_WIFI);
 
+	else if (pending & STATUSF_IP4)
+		do_IRQ(RALINK_CPU_IRQ_PCI);
+
 	else if (pending & STATUSF_IP2)
 		do_IRQ(RALINK_CPU_IRQ_INTC);
 
@@ -162,6 +166,7 @@ static int __init intc_of_init(struct device_node *node,
 	irq_set_chained_handler(irq, ralink_intc_irq_handler);
 	irq_set_handler_data(irq, domain);
 
+	/* tell the kernel which irq is used for performance monitoring */
 	cp0_perfcount_irq = irq_create_mapping(domain, 9);
 
 	return 0;
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
new file mode 100644
index 000000000000..0018b1a661f6
--- /dev/null
+++ b/arch/mips/ralink/mt7620.c
@@ -0,0 +1,234 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/mt7620.h>
+
+#include "common.h"
+
+/* does the board have sdram or ddram */
+static int dram_type;
+
+/* the pll dividers */
+static u32 mt7620_clk_divider[] = { 2, 3, 4, 8 };
+
+static struct ralink_pinmux_grp mode_mux[] = {
+	{
+		.name = "i2c",
+		.mask = MT7620_GPIO_MODE_I2C,
+		.gpio_first = 1,
+		.gpio_last = 2,
+	}, {
+		.name = "spi",
+		.mask = MT7620_GPIO_MODE_SPI,
+		.gpio_first = 3,
+		.gpio_last = 6,
+	}, {
+		.name = "uartlite",
+		.mask = MT7620_GPIO_MODE_UART1,
+		.gpio_first = 15,
+		.gpio_last = 16,
+	}, {
+		.name = "wdt",
+		.mask = MT7620_GPIO_MODE_WDT,
+		.gpio_first = 17,
+		.gpio_last = 17,
+	}, {
+		.name = "mdio",
+		.mask = MT7620_GPIO_MODE_MDIO,
+		.gpio_first = 22,
+		.gpio_last = 23,
+	}, {
+		.name = "rgmii1",
+		.mask = MT7620_GPIO_MODE_RGMII1,
+		.gpio_first = 24,
+		.gpio_last = 35,
+	}, {
+		.name = "spi refclk",
+		.mask = MT7620_GPIO_MODE_SPI_REF_CLK,
+		.gpio_first = 37,
+		.gpio_last = 39,
+	}, {
+		.name = "jtag",
+		.mask = MT7620_GPIO_MODE_JTAG,
+		.gpio_first = 40,
+		.gpio_last = 44,
+	}, {
+		/* shared lines with jtag */
+		.name = "ephy",
+		.mask = MT7620_GPIO_MODE_EPHY,
+		.gpio_first = 40,
+		.gpio_last = 44,
+	}, {
+		.name = "nand",
+		.mask = MT7620_GPIO_MODE_JTAG,
+		.gpio_first = 45,
+		.gpio_last = 59,
+	}, {
+		.name = "rgmii2",
+		.mask = MT7620_GPIO_MODE_RGMII2,
+		.gpio_first = 60,
+		.gpio_last = 71,
+	}, {
+		.name = "wled",
+		.mask = MT7620_GPIO_MODE_WLED,
+		.gpio_first = 72,
+		.gpio_last = 72,
+	}, {0}
+};
+
+static struct ralink_pinmux_grp uart_mux[] = {
+	{
+		.name = "uartf",
+		.mask = MT7620_GPIO_MODE_UARTF,
+		.gpio_first = 7,
+		.gpio_last = 14,
+	}, {
+		.name = "pcm uartf",
+		.mask = MT7620_GPIO_MODE_PCM_UARTF,
+		.gpio_first = 7,
+		.gpio_last = 14,
+	}, {
+		.name = "pcm i2s",
+		.mask = MT7620_GPIO_MODE_PCM_I2S,
+		.gpio_first = 7,
+		.gpio_last = 14,
+	}, {
+		.name = "i2s uartf",
+		.mask = MT7620_GPIO_MODE_I2S_UARTF,
+		.gpio_first = 7,
+		.gpio_last = 14,
+	}, {
+		.name = "pcm gpio",
+		.mask = MT7620_GPIO_MODE_PCM_GPIO,
+		.gpio_first = 11,
+		.gpio_last = 14,
+	}, {
+		.name = "gpio uartf",
+		.mask = MT7620_GPIO_MODE_GPIO_UARTF,
+		.gpio_first = 7,
+		.gpio_last = 10,
+	}, {
+		.name = "gpio i2s",
+		.mask = MT7620_GPIO_MODE_GPIO_I2S,
+		.gpio_first = 7,
+		.gpio_last = 10,
+	}, {
+		.name = "gpio",
+		.mask = MT7620_GPIO_MODE_GPIO,
+	}, {0}
+};
+
+struct ralink_pinmux rt_gpio_pinmux = {
+	.mode = mode_mux,
+	.uart = uart_mux,
+	.uart_shift = MT7620_GPIO_MODE_UART0_SHIFT,
+	.uart_mask = MT7620_GPIO_MODE_UART0_MASK,
+};
+
+void __init ralink_clk_init(void)
+{
+	unsigned long cpu_rate, sys_rate;
+	u32 c0 = rt_sysc_r32(SYSC_REG_CPLL_CONFIG0);
+	u32 c1 = rt_sysc_r32(SYSC_REG_CPLL_CONFIG1);
+	u32 swconfig = (c0 >> CPLL_SW_CONFIG_SHIFT) & CPLL_SW_CONFIG_MASK;
+	u32 cpu_clk = (c1 >> CPLL_CPU_CLK_SHIFT) & CPLL_CPU_CLK_MASK;
+
+	if (cpu_clk) {
+		cpu_rate = 480000000;
+	} else if (!swconfig) {
+		cpu_rate = 600000000;
+	} else {
+		u32 m = (c0 >> CPLL_MULT_RATIO_SHIFT) & CPLL_MULT_RATIO;
+		u32 d = (c0 >> CPLL_DIV_RATIO_SHIFT) & CPLL_DIV_RATIO;
+
+		cpu_rate = ((40 * (m + 24)) / mt7620_clk_divider[d]) * 1000000;
+	}
+
+	if (dram_type == SYSCFG0_DRAM_TYPE_SDRAM)
+		sys_rate = cpu_rate / 4;
+	else
+		sys_rate = cpu_rate / 3;
+
+	ralink_clk_add("cpu", cpu_rate);
+	ralink_clk_add("10000100.timer", 40000000);
+	ralink_clk_add("10000500.uart", 40000000);
+	ralink_clk_add("10000c00.uartlite", 40000000);
+}
+
+void __init ralink_of_remap(void)
+{
+	rt_sysc_membase = plat_of_remap_node("ralink,mt7620a-sysc");
+	rt_memc_membase = plat_of_remap_node("ralink,mt7620a-memc");
+
+	if (!rt_sysc_membase || !rt_memc_membase)
+		panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+	void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7620_SYSC_BASE);
+	unsigned char *name = NULL;
+	u32 n0;
+	u32 n1;
+	u32 rev;
+	u32 cfg0;
+
+	n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
+	n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
+
+	if (n0 == MT7620N_CHIP_NAME0 && n1 == MT7620N_CHIP_NAME1) {
+		name = "MT7620N";
+		soc_info->compatible = "ralink,mt7620n-soc";
+	} else if (n0 == MT7620A_CHIP_NAME0 && n1 == MT7620A_CHIP_NAME1) {
+		name = "MT7620A";
+		soc_info->compatible = "ralink,mt7620a-soc";
+	} else {
+		panic("mt7620: unknown SoC, n0:%08x n1:%08x\n", n0, n1);
+	}
+
+	rev = __raw_readl(sysc + SYSC_REG_CHIP_REV);
+
+	snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+		"Ralink %s ver:%u eco:%u",
+		name,
+		(rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK,
+		(rev & CHIP_REV_ECO_MASK));
+
+	cfg0 = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG0);
+	dram_type = (cfg0 >> SYSCFG0_DRAM_TYPE_SHIFT) & SYSCFG0_DRAM_TYPE_MASK;
+
+	switch (dram_type) {
+	case SYSCFG0_DRAM_TYPE_SDRAM:
+		soc_info->mem_size_min = MT7620_SDRAM_SIZE_MIN;
+		soc_info->mem_size_max = MT7620_SDRAM_SIZE_MAX;
+		break;
+
+	case SYSCFG0_DRAM_TYPE_DDR1:
+		soc_info->mem_size_min = MT7620_DDR1_SIZE_MIN;
+		soc_info->mem_size_max = MT7620_DDR1_SIZE_MAX;
+		break;
+
+	case SYSCFG0_DRAM_TYPE_DDR2:
+		soc_info->mem_size_min = MT7620_DDR2_SIZE_MIN;
+		soc_info->mem_size_max = MT7620_DDR2_SIZE_MAX;
+		break;
+	default:
+		BUG();
+	}
+	soc_info->mem_base = MT7620_DRAM_BASE;
+}
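The CPLL decode in ralink_clk_init() above comes down to three cases: a fixed 480 MHz when the CPU-clock bit is set, a fixed 600 MHz when no software configuration is programmed, and otherwise a 40 MHz reference scaled by (multiplier + 24) and divided by the table {2, 3, 4, 8}; the system clock is then cpu_rate/4 for SDRAM parts and cpu_rate/3 otherwise. A standalone, illustrative-only sketch of that arithmetic (the register field values passed in are hypothetical; on hardware they come from SYSC_REG_CPLL_CONFIG0/1):

/* Illustrative only: restates the MT7620 CPU clock derivation shown above. */
#include <stdio.h>

static const unsigned int mt7620_clk_divider[] = { 2, 3, 4, 8 };

static unsigned long mt7620_cpu_rate(unsigned int cpu_clk, unsigned int swconfig,
				     unsigned int mult, unsigned int div)
{
	if (cpu_clk)		/* CPU clock bit set: fixed 480 MHz */
		return 480000000;
	if (!swconfig)		/* no software override: fixed 600 MHz */
		return 600000000;
	/* programmed PLL: 40 MHz reference * (mult + 24) / divider */
	return ((40 * (mult + 24)) / mt7620_clk_divider[div]) * 1000000UL;
}

int main(void)
{
	/* mult = 6, div = 0 -> 40 * 30 / 2 = 600 MHz */
	printf("%lu Hz\n", mt7620_cpu_rate(0, 1, 6, 0));
	return 0;
}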
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
index 4165e70775be..fb1569580def 100644
--- a/arch/mips/ralink/of.c
+++ b/arch/mips/ralink/of.c
@@ -11,6 +11,7 @@
 #include <linux/io.h>
 #include <linux/clk.h>
 #include <linux/init.h>
+#include <linux/sizes.h>
 #include <linux/of_fdt.h>
 #include <linux/kernel.h>
 #include <linux/bootmem.h>
@@ -85,6 +86,14 @@ void __init plat_mem_setup(void)
 	 * parsed resulting in our memory appearing
 	 */
 	__dt_setup_arch(&__dtb_start);
+
+	if (soc_info.mem_size)
+		add_memory_region(soc_info.mem_base, soc_info.mem_size,
+				  BOOT_MEM_RAM);
+	else
+		detect_memory_region(soc_info.mem_base,
+				     soc_info.mem_size_min * SZ_1M,
+				     soc_info.mem_size_max * SZ_1M);
 }
 
 static int __init plat_of_setup(void)
diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c
new file mode 100644
index 000000000000..f87de1ab2198
--- /dev/null
+++ b/arch/mips/ralink/rt288x.c
@@ -0,0 +1,143 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/rt288x.h>
+
+#include "common.h"
+
+static struct ralink_pinmux_grp mode_mux[] = {
+	{
+		.name = "i2c",
+		.mask = RT2880_GPIO_MODE_I2C,
+		.gpio_first = 1,
+		.gpio_last = 2,
+	}, {
+		.name = "spi",
+		.mask = RT2880_GPIO_MODE_SPI,
+		.gpio_first = 3,
+		.gpio_last = 6,
+	}, {
+		.name = "uartlite",
+		.mask = RT2880_GPIO_MODE_UART0,
+		.gpio_first = 7,
+		.gpio_last = 14,
+	}, {
+		.name = "jtag",
+		.mask = RT2880_GPIO_MODE_JTAG,
+		.gpio_first = 17,
+		.gpio_last = 21,
+	}, {
+		.name = "mdio",
+		.mask = RT2880_GPIO_MODE_MDIO,
+		.gpio_first = 22,
+		.gpio_last = 23,
+	}, {
+		.name = "sdram",
+		.mask = RT2880_GPIO_MODE_SDRAM,
+		.gpio_first = 24,
+		.gpio_last = 39,
+	}, {
+		.name = "pci",
+		.mask = RT2880_GPIO_MODE_PCI,
+		.gpio_first = 40,
+		.gpio_last = 71,
+	}, {0}
+};
+
+static void rt288x_wdt_reset(void)
+{
+	u32 t;
+
+	/* enable WDT reset output on pin SRAM_CS_N */
+	t = rt_sysc_r32(SYSC_REG_CLKCFG);
+	t |= CLKCFG_SRAM_CS_N_WDT;
+	rt_sysc_w32(t, SYSC_REG_CLKCFG);
+}
+
+struct ralink_pinmux rt_gpio_pinmux = {
+	.mode = mode_mux,
+	.wdt_reset = rt288x_wdt_reset,
+};
+
+void __init ralink_clk_init(void)
+{
+	unsigned long cpu_rate;
+	u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
+	t = ((t >> SYSTEM_CONFIG_CPUCLK_SHIFT) & SYSTEM_CONFIG_CPUCLK_MASK);
+
+	switch (t) {
+	case SYSTEM_CONFIG_CPUCLK_250:
+		cpu_rate = 250000000;
+		break;
+	case SYSTEM_CONFIG_CPUCLK_266:
+		cpu_rate = 266666667;
+		break;
+	case SYSTEM_CONFIG_CPUCLK_280:
+		cpu_rate = 280000000;
+		break;
+	case SYSTEM_CONFIG_CPUCLK_300:
+		cpu_rate = 300000000;
+		break;
+	}
+
+	ralink_clk_add("cpu", cpu_rate);
+	ralink_clk_add("300100.timer", cpu_rate / 2);
+	ralink_clk_add("300120.watchdog", cpu_rate / 2);
+	ralink_clk_add("300500.uart", cpu_rate / 2);
+	ralink_clk_add("300c00.uartlite", cpu_rate / 2);
+	ralink_clk_add("400000.ethernet", cpu_rate / 2);
+}
+
+void __init ralink_of_remap(void)
+{
+	rt_sysc_membase = plat_of_remap_node("ralink,rt2880-sysc");
+	rt_memc_membase = plat_of_remap_node("ralink,rt2880-memc");
+
+	if (!rt_sysc_membase || !rt_memc_membase)
+		panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+	void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT2880_SYSC_BASE);
+	const char *name;
+	u32 n0;
+	u32 n1;
+	u32 id;
+
+	n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
+	n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
+	id = __raw_readl(sysc + SYSC_REG_CHIP_ID);
+
+	if (n0 == RT2880_CHIP_NAME0 && n1 == RT2880_CHIP_NAME1) {
+		soc_info->compatible = "ralink,r2880-soc";
+		name = "RT2880";
+	} else {
+		panic("rt288x: unknown SoC, n0:%08x n1:%08x", n0, n1);
+	}
+
+	snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+		"Ralink %s id:%u rev:%u",
+		name,
+		(id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK,
+		(id & CHIP_ID_REV_MASK));
+
+	soc_info->mem_base = RT2880_SDRAM_BASE;
+	soc_info->mem_size_min = RT2880_MEM_SIZE_MIN;
+	soc_info->mem_size_max = RT2880_MEM_SIZE_MAX;
+}
diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c
index 0a4bbdcf59d9..ca7ee3a33790 100644
--- a/arch/mips/ralink/rt305x.c
+++ b/arch/mips/ralink/rt305x.c
@@ -22,7 +22,7 @@
 
 enum rt305x_soc_type rt305x_soc;
 
-struct ralink_pinmux_grp mode_mux[] = {
+static struct ralink_pinmux_grp mode_mux[] = {
 	{
 		.name = "i2c",
 		.mask = RT305X_GPIO_MODE_I2C,
@@ -61,7 +61,7 @@ struct ralink_pinmux_grp mode_mux[] = {
 	}, {0}
 };
 
-struct ralink_pinmux_grp uart_mux[] = {
+static struct ralink_pinmux_grp uart_mux[] = {
 	{
 		.name = "uartf",
 		.mask = RT305X_GPIO_MODE_UARTF,
@@ -91,19 +91,19 @@ struct ralink_pinmux_grp uart_mux[] = {
 		.name = "gpio uartf",
 		.mask = RT305X_GPIO_MODE_GPIO_UARTF,
 		.gpio_first = RT305X_GPIO_7,
-		.gpio_last = RT305X_GPIO_14,
+		.gpio_last = RT305X_GPIO_10,
 	}, {
 		.name = "gpio i2s",
 		.mask = RT305X_GPIO_MODE_GPIO_I2S,
 		.gpio_first = RT305X_GPIO_7,
-		.gpio_last = RT305X_GPIO_14,
+		.gpio_last = RT305X_GPIO_10,
 	}, {
 		.name = "gpio",
 		.mask = RT305X_GPIO_MODE_GPIO,
 	}, {0}
 };
 
-void rt305x_wdt_reset(void)
+static void rt305x_wdt_reset(void)
 {
 	u32 t;
 
@@ -114,16 +114,53 @@ void rt305x_wdt_reset(void)
 	rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG);
 }
 
-struct ralink_pinmux gpio_pinmux = {
+struct ralink_pinmux rt_gpio_pinmux = {
 	.mode = mode_mux,
 	.uart = uart_mux,
 	.uart_shift = RT305X_GPIO_MODE_UART0_SHIFT,
+	.uart_mask = RT305X_GPIO_MODE_UART0_MASK,
 	.wdt_reset = rt305x_wdt_reset,
 };
 
+static unsigned long rt5350_get_mem_size(void)
+{
+	void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE);
+	unsigned long ret;
+	u32 t;
+
+	t = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG);
+	t = (t >> RT5350_SYSCFG0_DRAM_SIZE_SHIFT) &
+		RT5350_SYSCFG0_DRAM_SIZE_MASK;
+
+	switch (t) {
+	case RT5350_SYSCFG0_DRAM_SIZE_2M:
+		ret = 2;
+		break;
+	case RT5350_SYSCFG0_DRAM_SIZE_8M:
+		ret = 8;
+		break;
+	case RT5350_SYSCFG0_DRAM_SIZE_16M:
+		ret = 16;
+		break;
+	case RT5350_SYSCFG0_DRAM_SIZE_32M:
+		ret = 32;
+		break;
+	case RT5350_SYSCFG0_DRAM_SIZE_64M:
+		ret = 64;
+		break;
+	default:
+		panic("rt5350: invalid DRAM size: %u", t);
+		break;
+	}
+
+	return ret;
+}
+
 void __init ralink_clk_init(void)
 {
 	unsigned long cpu_rate, sys_rate, wdt_rate, uart_rate;
+	unsigned long wmac_rate = 40000000;
+
 	u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
 
 	if (soc_is_rt305x() || soc_is_rt3350()) {
@@ -176,11 +213,21 @@ void __init ralink_clk_init(void)
 		BUG();
 	}
 
+	if (soc_is_rt3352() || soc_is_rt5350()) {
+		u32 val = rt_sysc_r32(RT3352_SYSC_REG_SYSCFG0);
+
+		if (!(val & RT3352_CLKCFG0_XTAL_SEL))
+			wmac_rate = 20000000;
+	}
+
 	ralink_clk_add("cpu", cpu_rate);
 	ralink_clk_add("10000b00.spi", sys_rate);
 	ralink_clk_add("10000100.timer", wdt_rate);
+	ralink_clk_add("10000120.watchdog", wdt_rate);
 	ralink_clk_add("10000500.uart", uart_rate);
 	ralink_clk_add("10000c00.uartlite", uart_rate);
+	ralink_clk_add("10100000.ethernet", sys_rate);
+	ralink_clk_add("10180000.wmac", wmac_rate);
 }
 
 void __init ralink_of_remap(void)
@@ -239,4 +286,15 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
 		name,
 		(id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK,
 		(id & CHIP_ID_REV_MASK));
+
+	soc_info->mem_base = RT305X_SDRAM_BASE;
+	if (soc_is_rt5350()) {
+		soc_info->mem_size = rt5350_get_mem_size();
+	} else if (soc_is_rt305x() || soc_is_rt3350()) {
+		soc_info->mem_size_min = RT305X_MEM_SIZE_MIN;
+		soc_info->mem_size_max = RT305X_MEM_SIZE_MAX;
+	} else if (soc_is_rt3352()) {
+		soc_info->mem_size_min = RT3352_MEM_SIZE_MIN;
+		soc_info->mem_size_max = RT3352_MEM_SIZE_MAX;
+	}
 }
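The rt5350_get_mem_size() helper added above returns the detected DRAM size in MiB, which plat_mem_setup() later multiplies by SZ_1M when registering memory. An illustrative-only sketch of the same decode as a table lookup; the 0..4 ordering for 2/8/16/32/64 MiB is an assumption for the example, since the kernel uses the RT5350_SYSCFG0_DRAM_SIZE_* constants from the SoC header and panics on any other value:

/* Illustrative only: table-driven version of the RT5350 DRAM size decode. */
#include <stdio.h>

static unsigned long rt5350_dram_mb(unsigned int dram_size_field)
{
	static const unsigned long mb[] = { 2, 8, 16, 32, 64 };

	if (dram_size_field >= sizeof(mb) / sizeof(mb[0]))
		return 0;	/* the kernel code panics here instead */
	return mb[dram_size_field];
}

int main(void)
{
	/* plat_mem_setup() registers this value multiplied by SZ_1M */
	printf("%lu MiB\n", rt5350_dram_mb(3));
	return 0;
}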
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
new file mode 100644
index 000000000000..b474ac284b83
--- /dev/null
+++ b/arch/mips/ralink/rt3883.c
@@ -0,0 +1,246 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/rt3883.h>
+
+#include "common.h"
+
+static struct ralink_pinmux_grp mode_mux[] = {
+	{
+		.name = "i2c",
+		.mask = RT3883_GPIO_MODE_I2C,
+		.gpio_first = RT3883_GPIO_I2C_SD,
+		.gpio_last = RT3883_GPIO_I2C_SCLK,
+	}, {
+		.name = "spi",
+		.mask = RT3883_GPIO_MODE_SPI,
+		.gpio_first = RT3883_GPIO_SPI_CS0,
+		.gpio_last = RT3883_GPIO_SPI_MISO,
+	}, {
+		.name = "uartlite",
+		.mask = RT3883_GPIO_MODE_UART1,
+		.gpio_first = RT3883_GPIO_UART1_TXD,
+		.gpio_last = RT3883_GPIO_UART1_RXD,
+	}, {
+		.name = "jtag",
+		.mask = RT3883_GPIO_MODE_JTAG,
+		.gpio_first = RT3883_GPIO_JTAG_TDO,
+		.gpio_last = RT3883_GPIO_JTAG_TCLK,
+	}, {
+		.name = "mdio",
+		.mask = RT3883_GPIO_MODE_MDIO,
+		.gpio_first = RT3883_GPIO_MDIO_MDC,
+		.gpio_last = RT3883_GPIO_MDIO_MDIO,
+	}, {
+		.name = "ge1",
+		.mask = RT3883_GPIO_MODE_GE1,
+		.gpio_first = RT3883_GPIO_GE1_TXD0,
+		.gpio_last = RT3883_GPIO_GE1_RXCLK,
+	}, {
+		.name = "ge2",
+		.mask = RT3883_GPIO_MODE_GE2,
+		.gpio_first = RT3883_GPIO_GE2_TXD0,
+		.gpio_last = RT3883_GPIO_GE2_RXCLK,
+	}, {
+		.name = "pci",
+		.mask = RT3883_GPIO_MODE_PCI,
+		.gpio_first = RT3883_GPIO_PCI_AD0,
+		.gpio_last = RT3883_GPIO_PCI_AD31,
+	}, {
+		.name = "lna a",
+		.mask = RT3883_GPIO_MODE_LNA_A,
+		.gpio_first = RT3883_GPIO_LNA_PE_A0,
+		.gpio_last = RT3883_GPIO_LNA_PE_A2,
+	}, {
+		.name = "lna g",
+		.mask = RT3883_GPIO_MODE_LNA_G,
+		.gpio_first = RT3883_GPIO_LNA_PE_G0,
+		.gpio_last = RT3883_GPIO_LNA_PE_G2,
+	}, {0}
+};
+
+static struct ralink_pinmux_grp uart_mux[] = {
+	{
+		.name = "uartf",
+		.mask = RT3883_GPIO_MODE_UARTF,
+		.gpio_first = RT3883_GPIO_7,
+		.gpio_last = RT3883_GPIO_14,
+	}, {
+		.name = "pcm uartf",
+		.mask = RT3883_GPIO_MODE_PCM_UARTF,
+		.gpio_first = RT3883_GPIO_7,
+		.gpio_last = RT3883_GPIO_14,
+	}, {
+		.name = "pcm i2s",
+		.mask = RT3883_GPIO_MODE_PCM_I2S,
+		.gpio_first = RT3883_GPIO_7,
+		.gpio_last = RT3883_GPIO_14,
+	}, {
+		.name = "i2s uartf",
+		.mask = RT3883_GPIO_MODE_I2S_UARTF,
+		.gpio_first = RT3883_GPIO_7,
+		.gpio_last = RT3883_GPIO_14,
+	}, {
+		.name = "pcm gpio",
+		.mask = RT3883_GPIO_MODE_PCM_GPIO,
+		.gpio_first = RT3883_GPIO_11,
+		.gpio_last = RT3883_GPIO_14,
+	}, {
+		.name = "gpio uartf",
+		.mask = RT3883_GPIO_MODE_GPIO_UARTF,
+		.gpio_first = RT3883_GPIO_7,
+		.gpio_last = RT3883_GPIO_10,
+	}, {
+		.name = "gpio i2s",
+		.mask = RT3883_GPIO_MODE_GPIO_I2S,
+		.gpio_first = RT3883_GPIO_7,
+		.gpio_last = RT3883_GPIO_10,
+	}, {
+		.name = "gpio",
+		.mask = RT3883_GPIO_MODE_GPIO,
+	}, {0}
+};
+
+static struct ralink_pinmux_grp pci_mux[] = {
+	{
+		.name = "pci-dev",
+		.mask = 0,
+		.gpio_first = RT3883_GPIO_PCI_AD0,
+		.gpio_last = RT3883_GPIO_PCI_AD31,
+	}, {
+		.name = "pci-host2",
+		.mask = 1,
+		.gpio_first = RT3883_GPIO_PCI_AD0,
+		.gpio_last = RT3883_GPIO_PCI_AD31,
+	}, {
+		.name = "pci-host1",
+		.mask = 2,
+		.gpio_first = RT3883_GPIO_PCI_AD0,
+		.gpio_last = RT3883_GPIO_PCI_AD31,
+	}, {
+		.name = "pci-fnc",
+		.mask = 3,
+		.gpio_first = RT3883_GPIO_PCI_AD0,
+		.gpio_last = RT3883_GPIO_PCI_AD31,
+	}, {
+		.name = "pci-gpio",
+		.mask = 7,
+		.gpio_first = RT3883_GPIO_PCI_AD0,
+		.gpio_last = RT3883_GPIO_PCI_AD31,
+	}, {0}
+};
+
+static void rt3883_wdt_reset(void)
+{
+	u32 t;
+
+	/* enable WDT reset output on GPIO 2 */
+	t = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG1);
+	t |= RT3883_SYSCFG1_GPIO2_AS_WDT_OUT;
+	rt_sysc_w32(t, RT3883_SYSC_REG_SYSCFG1);
+}
+
+struct ralink_pinmux rt_gpio_pinmux = {
+	.mode = mode_mux,
+	.uart = uart_mux,
+	.uart_shift = RT3883_GPIO_MODE_UART0_SHIFT,
+	.uart_mask = RT3883_GPIO_MODE_UART0_MASK,
+	.wdt_reset = rt3883_wdt_reset,
+	.pci = pci_mux,
+	.pci_shift = RT3883_GPIO_MODE_PCI_SHIFT,
+	.pci_mask = RT3883_GPIO_MODE_PCI_MASK,
+};
+
+void __init ralink_clk_init(void)
+{
+	unsigned long cpu_rate, sys_rate;
+	u32 syscfg0;
+	u32 clksel;
+	u32 ddr2;
+
+	syscfg0 = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG0);
+	clksel = ((syscfg0 >> RT3883_SYSCFG0_CPUCLK_SHIFT) &
+		RT3883_SYSCFG0_CPUCLK_MASK);
+	ddr2 = syscfg0 & RT3883_SYSCFG0_DRAM_TYPE_DDR2;
+
+	switch (clksel) {
+	case RT3883_SYSCFG0_CPUCLK_250:
+		cpu_rate = 250000000;
+		sys_rate = (ddr2) ? 125000000 : 83000000;
+		break;
+	case RT3883_SYSCFG0_CPUCLK_384:
+		cpu_rate = 384000000;
+		sys_rate = (ddr2) ? 128000000 : 96000000;
+		break;
+	case RT3883_SYSCFG0_CPUCLK_480:
+		cpu_rate = 480000000;
+		sys_rate = (ddr2) ? 160000000 : 120000000;
+		break;
+	case RT3883_SYSCFG0_CPUCLK_500:
+		cpu_rate = 500000000;
+		sys_rate = (ddr2) ? 166000000 : 125000000;
+		break;
+	}
+
+	ralink_clk_add("cpu", cpu_rate);
+	ralink_clk_add("10000100.timer", sys_rate);
+	ralink_clk_add("10000120.watchdog", sys_rate);
+	ralink_clk_add("10000500.uart", 40000000);
+	ralink_clk_add("10000b00.spi", sys_rate);
+	ralink_clk_add("10000c00.uartlite", 40000000);
+	ralink_clk_add("10100000.ethernet", sys_rate);
+}
+
+void __init ralink_of_remap(void)
+{
+	rt_sysc_membase = plat_of_remap_node("ralink,rt3883-sysc");
+	rt_memc_membase = plat_of_remap_node("ralink,rt3883-memc");
+
+	if (!rt_sysc_membase || !rt_memc_membase)
+		panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+	void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT3883_SYSC_BASE);
+	const char *name;
+	u32 n0;
+	u32 n1;
+	u32 id;
+
+	n0 = __raw_readl(sysc + RT3883_SYSC_REG_CHIPID0_3);
+	n1 = __raw_readl(sysc + RT3883_SYSC_REG_CHIPID4_7);
+	id = __raw_readl(sysc + RT3883_SYSC_REG_REVID);
+
+	if (n0 == RT3883_CHIP_NAME0 && n1 == RT3883_CHIP_NAME1) {
+		soc_info->compatible = "ralink,rt3883-soc";
+		name = "RT3883";
+	} else {
+		panic("rt3883: unknown SoC, n0:%08x n1:%08x", n0, n1);
+	}
+
+	snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+		"Ralink %s ver:%u eco:%u",
+		name,
+		(id >> RT3883_REVID_VER_ID_SHIFT) & RT3883_REVID_VER_ID_MASK,
+		(id & RT3883_REVID_ECO_ID_MASK));
+
+	soc_info->mem_base = RT3883_SDRAM_BASE;
+	soc_info->mem_size_min = RT3883_MEM_SIZE_MIN;
+	soc_info->mem_size_max = RT3883_MEM_SIZE_MAX;
+}
diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c
index 1d1919a44e88..7a53b1e28a93 100644
--- a/arch/mips/sgi-ip27/ip27-klnuma.c
+++ b/arch/mips/sgi-ip27/ip27-klnuma.c
@@ -114,7 +114,7 @@ void __init replicate_kernel_text()
  * data structures on the first couple of pages of the first slot of each
  * node. If this is the case, getfirstfree(node) > getslotstart(node, 0).
  */
-pfn_t node_getfirstfree(cnodeid_t cnode)
+unsigned long node_getfirstfree(cnodeid_t cnode)
 {
 	unsigned long loadbase = REP_BASE;
 	nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 5f2bddb1860e..1230f56429d7 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -255,14 +255,14 @@ static void __init dump_topology(void)
 	}
 }
 
-static pfn_t __init slot_getbasepfn(cnodeid_t cnode, int slot)
+static unsigned long __init slot_getbasepfn(cnodeid_t cnode, int slot)
 {
 	nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
 
-	return ((pfn_t)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
+	return ((unsigned long)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
 }
 
-static pfn_t __init slot_psize_compute(cnodeid_t node, int slot)
+static unsigned long __init slot_psize_compute(cnodeid_t node, int slot)
 {
 	nasid_t nasid;
 	lboard_t *brd;
@@ -353,7 +353,7 @@ static void __init mlreset(void)
 
 static void __init szmem(void)
 {
-	pfn_t slot_psize, slot0sz = 0, nodebytes; /* Hack to detect problem configs */
+	unsigned long slot_psize, slot0sz = 0, nodebytes; /* Hack to detect problem configs */
 	int slot;
 	cnodeid_t node;
 
@@ -390,10 +390,10 @@ static void __init szmem(void)
 
 static void __init node_mem_init(cnodeid_t node)
 {
-	pfn_t slot_firstpfn = slot_getbasepfn(node, 0);
-	pfn_t slot_freepfn = node_getfirstfree(node);
+	unsigned long slot_firstpfn = slot_getbasepfn(node, 0);
+	unsigned long slot_freepfn = node_getfirstfree(node);
 	unsigned long bootmap_size;
-	pfn_t start_pfn, end_pfn;
+	unsigned long start_pfn, end_pfn;
 
 	get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
 
@@ -467,7 +467,7 @@ void __init paging_init(void)
 	pagetable_init();
 
 	for_each_online_node(node) {
-		pfn_t start_pfn, end_pfn;
+		unsigned long start_pfn, end_pfn;
 
 		get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
 
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index fff58ac176f3..2e21b761cb9c 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -69,7 +69,7 @@ static void rt_set_mode(enum clock_event_mode mode,
 	/* Nothing to do ... */
 }
 
-int rt_timer_irq;
+unsigned int rt_timer_irq;
 
 static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent);
 static DEFINE_PER_CPU(char [11], hub_rt_name);