aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
Diffstat (limited to 'arch')
-rw-r--r--arch/mips/Kbuild2
-rw-r--r--arch/mips/Kbuild.platforms1
-rw-r--r--arch/mips/Kconfig142
-rw-r--r--arch/mips/Kconfig.debug9
-rw-r--r--arch/mips/Makefile1
-rw-r--r--arch/mips/alchemy/board-xxs1500.c2
-rw-r--r--arch/mips/alchemy/common/setup.c6
-rw-r--r--arch/mips/alchemy/common/usb.c26
-rw-r--r--arch/mips/alchemy/devboards/pm.c4
-rw-r--r--arch/mips/bcm47xx/prom.c19
-rw-r--r--arch/mips/cavium-octeon/Kconfig23
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper.c166
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c2
-rw-r--r--arch/mips/cavium-octeon/setup.c17
-rw-r--r--arch/mips/cavium-octeon/smp.c25
-rw-r--r--arch/mips/configs/ath79_defconfig3
-rw-r--r--arch/mips/configs/db1xxx_defconfig1
-rw-r--r--arch/mips/configs/maltasmtc_defconfig196
-rw-r--r--arch/mips/configs/maltasmvp_defconfig3
-rw-r--r--arch/mips/configs/maltasmvp_eva_defconfig3
-rw-r--r--arch/mips/configs/mips_paravirt_defconfig103
-rw-r--r--arch/mips/configs/rt305x_defconfig2
-rw-r--r--arch/mips/dec/setup.c5
-rw-r--r--arch/mips/include/asm/asmmacro.h62
-rw-r--r--arch/mips/include/asm/branch.h30
-rw-r--r--arch/mips/include/asm/cacheflush.h6
-rw-r--r--arch/mips/include/asm/cmp.h1
-rw-r--r--arch/mips/include/asm/cpu-features.h20
-rw-r--r--arch/mips/include/asm/cpu-info.h13
-rw-r--r--arch/mips/include/asm/cpu-type.h4
-rw-r--r--arch/mips/include/asm/cpu.h3
-rw-r--r--arch/mips/include/asm/dec/kn05.h15
-rw-r--r--arch/mips/include/asm/fixmap.h4
-rw-r--r--arch/mips/include/asm/fpu.h7
-rw-r--r--arch/mips/include/asm/fpu_emulator.h21
-rw-r--r--arch/mips/include/asm/gic.h1
-rw-r--r--arch/mips/include/asm/gio_device.h4
-rw-r--r--arch/mips/include/asm/idle.h14
-rw-r--r--arch/mips/include/asm/irq.h96
-rw-r--r--arch/mips/include/asm/irqflags.h32
-rw-r--r--arch/mips/include/asm/kvm_para.h109
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/irq.h2
-rw-r--r--arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h4
-rw-r--r--arch/mips/include/asm/mach-malta/kernel-entry-init.h30
-rw-r--r--arch/mips/include/asm/mach-malta/malta-pm.h37
-rw-r--r--arch/mips/include/asm/mach-netlogic/topology.h2
-rw-r--r--arch/mips/include/asm/mach-paravirt/cpu-feature-overrides.h36
-rw-r--r--arch/mips/include/asm/mach-paravirt/irq.h19
-rw-r--r--arch/mips/include/asm/mach-paravirt/kernel-entry-init.h50
-rw-r--r--arch/mips/include/asm/mach-paravirt/war.h25
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/msp_usb.h4
-rw-r--r--arch/mips/include/asm/mach-ralink/war.h1
-rw-r--r--arch/mips/include/asm/mach-sead3/kernel-entry-init.h31
-rw-r--r--arch/mips/include/asm/mips-boards/piix4.h12
-rw-r--r--arch/mips/include/asm/mips-cpc.h34
-rw-r--r--arch/mips/include/asm/mips_mt.h5
-rw-r--r--arch/mips/include/asm/mipsmtregs.h2
-rw-r--r--arch/mips/include/asm/mipsregs.h151
-rw-r--r--arch/mips/include/asm/mmu_context.h122
-rw-r--r--arch/mips/include/asm/module.h8
-rw-r--r--arch/mips/include/asm/msa.h13
-rw-r--r--arch/mips/include/asm/netlogic/mips-extns.h5
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/iomap.h18
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/pcibus.h14
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/pic.h4
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/sys.h35
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/xlp.h19
-rw-r--r--arch/mips/include/asm/nile4.h2
-rw-r--r--arch/mips/include/asm/octeon/octeon.h1
-rw-r--r--arch/mips/include/asm/pgtable.h2
-rw-r--r--arch/mips/include/asm/pm-cps.h51
-rw-r--r--arch/mips/include/asm/pm.h159
-rw-r--r--arch/mips/include/asm/ptrace.h3
-rw-r--r--arch/mips/include/asm/r4kcache.h7
-rw-r--r--arch/mips/include/asm/sgi/ip22.h2
-rw-r--r--arch/mips/include/asm/smp-cps.h19
-rw-r--r--arch/mips/include/asm/smp-ops.h1
-rw-r--r--arch/mips/include/asm/smp.h3
-rw-r--r--arch/mips/include/asm/smtc.h78
-rw-r--r--arch/mips/include/asm/smtc_ipi.h129
-rw-r--r--arch/mips/include/asm/smtc_proc.h23
-rw-r--r--arch/mips/include/asm/stackframe.h196
-rw-r--r--arch/mips/include/asm/thread_info.h11
-rw-r--r--arch/mips/include/asm/time.h5
-rw-r--r--arch/mips/include/asm/timex.h65
-rw-r--r--arch/mips/include/asm/uasm.h23
-rw-r--r--arch/mips/include/uapi/asm/Kbuild1
-rw-r--r--arch/mips/include/uapi/asm/bitfield.h29
-rw-r--r--arch/mips/include/uapi/asm/inst.h64
-rw-r--r--arch/mips/include/uapi/asm/kvm_para.h6
-rw-r--r--arch/mips/include/uapi/asm/types.h5
-rw-r--r--arch/mips/kernel/Makefile7
-rw-r--r--arch/mips/kernel/asm-offsets.c32
-rw-r--r--arch/mips/kernel/branch.c202
-rw-r--r--arch/mips/kernel/cevt-gic.c5
-rw-r--r--arch/mips/kernel/cevt-r4k.c24
-rw-r--r--arch/mips/kernel/cevt-smtc.c324
-rw-r--r--arch/mips/kernel/cps-vec.S328
-rw-r--r--arch/mips/kernel/cpu-probe.c21
-rw-r--r--arch/mips/kernel/entry.S38
-rw-r--r--arch/mips/kernel/genex.S54
-rw-r--r--arch/mips/kernel/head.S56
-rw-r--r--arch/mips/kernel/i8259.c4
-rw-r--r--arch/mips/kernel/idle.c25
-rw-r--r--arch/mips/kernel/irq-gic.c15
-rw-r--r--arch/mips/kernel/irq-msc01.c5
-rw-r--r--arch/mips/kernel/irq.c17
-rw-r--r--arch/mips/kernel/mips-cpc.c28
-rw-r--r--arch/mips/kernel/mips-mt-fpaff.c2
-rw-r--r--arch/mips/kernel/mips-mt.c18
-rw-r--r--arch/mips/kernel/octeon_switch.S84
-rw-r--r--arch/mips/kernel/pm-cps.c716
-rw-r--r--arch/mips/kernel/pm.c99
-rw-r--r--arch/mips/kernel/process.c7
-rw-r--r--arch/mips/kernel/r4k_switch.S36
-rw-r--r--arch/mips/kernel/rtlx-mt.c1
-rw-r--r--arch/mips/kernel/smp-bmips.c9
-rw-r--r--arch/mips/kernel/smp-cmp.c15
-rw-r--r--arch/mips/kernel/smp-cps.c431
-rw-r--r--arch/mips/kernel/smp-gic.c11
-rw-r--r--arch/mips/kernel/smp-mt.c5
-rw-r--r--arch/mips/kernel/smp-up.c6
-rw-r--r--arch/mips/kernel/smp.c61
-rw-r--r--arch/mips/kernel/smtc-asm.S133
-rw-r--r--arch/mips/kernel/smtc-proc.c102
-rw-r--r--arch/mips/kernel/smtc.c1528
-rw-r--r--arch/mips/kernel/sync-r4k.c18
-rw-r--r--arch/mips/kernel/time.c1
-rw-r--r--arch/mips/kernel/traps.c120
-rw-r--r--arch/mips/kernel/vpe-mt.c16
-rw-r--r--arch/mips/lantiq/irq.c4
-rw-r--r--arch/mips/lib/delay.c8
-rw-r--r--arch/mips/lib/mips-atomic.c46
-rw-r--r--arch/mips/loongson/Kconfig5
-rw-r--r--arch/mips/loongson/loongson-3/smp.c8
-rw-r--r--arch/mips/loongson1/Kconfig1
-rw-r--r--arch/mips/math-emu/Makefile16
-rw-r--r--arch/mips/math-emu/cp1emu.c934
-rw-r--r--arch/mips/math-emu/dp_add.c71
-rw-r--r--arch/mips/math-emu/dp_cmp.c24
-rw-r--r--arch/mips/math-emu/dp_div.c94
-rw-r--r--arch/mips/math-emu/dp_fint.c33
-rw-r--r--arch/mips/math-emu/dp_flong.c28
-rw-r--r--arch/mips/math-emu/dp_frexp.c52
-rw-r--r--arch/mips/math-emu/dp_fsp.c32
-rw-r--r--arch/mips/math-emu/dp_logb.c53
-rw-r--r--arch/mips/math-emu/dp_modf.c79
-rw-r--r--arch/mips/math-emu/dp_mul.c143
-rw-r--r--arch/mips/math-emu/dp_scalb.c57
-rw-r--r--arch/mips/math-emu/dp_simple.c39
-rw-r--r--arch/mips/math-emu/dp_sqrt.c46
-rw-r--r--arch/mips/math-emu/dp_sub.c55
-rw-r--r--arch/mips/math-emu/dp_tint.c69
-rw-r--r--arch/mips/math-emu/dp_tlong.c68
-rw-r--r--arch/mips/math-emu/dsemul.c35
-rw-r--r--arch/mips/math-emu/ieee754.c146
-rw-r--r--arch/mips/math-emu/ieee754.h322
-rw-r--r--arch/mips/math-emu/ieee754d.c39
-rw-r--r--arch/mips/math-emu/ieee754dp.c122
-rw-r--r--arch/mips/math-emu/ieee754dp.h70
-rw-r--r--arch/mips/math-emu/ieee754int.h201
-rw-r--r--arch/mips/math-emu/ieee754m.c55
-rw-r--r--arch/mips/math-emu/ieee754sp.c126
-rw-r--r--arch/mips/math-emu/ieee754sp.h79
-rw-r--r--arch/mips/math-emu/ieee754xcpt.c47
-rw-r--r--arch/mips/math-emu/kernel_linkage.c45
-rw-r--r--arch/mips/math-emu/me-debugfs.c67
-rw-r--r--arch/mips/math-emu/sp_add.c72
-rw-r--r--arch/mips/math-emu/sp_cmp.c24
-rw-r--r--arch/mips/math-emu/sp_div.c93
-rw-r--r--arch/mips/math-emu/sp_fdp.c56
-rw-r--r--arch/mips/math-emu/sp_fint.c30
-rw-r--r--arch/mips/math-emu/sp_flong.c30
-rw-r--r--arch/mips/math-emu/sp_frexp.c52
-rw-r--r--arch/mips/math-emu/sp_logb.c53
-rw-r--r--arch/mips/math-emu/sp_modf.c79
-rw-r--r--arch/mips/math-emu/sp_mul.c139
-rw-r--r--arch/mips/math-emu/sp_scalb.c57
-rw-r--r--arch/mips/math-emu/sp_simple.c39
-rw-r--r--arch/mips/math-emu/sp_sqrt.c35
-rw-r--r--arch/mips/math-emu/sp_sub.c57
-rw-r--r--arch/mips/math-emu/sp_tint.c67
-rw-r--r--arch/mips/math-emu/sp_tlong.c69
-rw-r--r--arch/mips/mm/c-r4k.c77
-rw-r--r--arch/mips/mm/init.c82
-rw-r--r--arch/mips/mm/tlb-r4k.c88
-rw-r--r--arch/mips/mm/tlbex.c2
-rw-r--r--arch/mips/mm/uasm-micromips.c14
-rw-r--r--arch/mips/mm/uasm-mips.c15
-rw-r--r--arch/mips/mm/uasm.c51
-rw-r--r--arch/mips/mti-malta/Makefile3
-rw-r--r--arch/mips/mti-malta/malta-init.c6
-rw-r--r--arch/mips/mti-malta/malta-int.c19
-rw-r--r--arch/mips/mti-malta/malta-memory.c4
-rw-r--r--arch/mips/mti-malta/malta-pm.c96
-rw-r--r--arch/mips/mti-malta/malta-reset.c14
-rw-r--r--arch/mips/mti-malta/malta-setup.c4
-rw-r--r--arch/mips/mti-malta/malta-smtc.c162
-rw-r--r--arch/mips/mti-sead3/sead3-pic32-i2c-drv.c36
-rw-r--r--arch/mips/net/Makefile3
-rw-r--r--arch/mips/net/bpf_jit.c1399
-rw-r--r--arch/mips/net/bpf_jit.h44
-rw-r--r--arch/mips/netlogic/common/irq.c2
-rw-r--r--arch/mips/netlogic/common/reset.S39
-rw-r--r--arch/mips/netlogic/common/smp.c12
-rw-r--r--arch/mips/netlogic/common/smpboot.S12
-rw-r--r--arch/mips/netlogic/common/time.c5
-rw-r--r--arch/mips/netlogic/dts/xlp_gvp.dts5
-rw-r--r--arch/mips/netlogic/xlp/Makefile2
-rw-r--r--arch/mips/netlogic/xlp/ahci-init-xlp2.c377
-rw-r--r--arch/mips/netlogic/xlp/ahci-init.c209
-rw-r--r--arch/mips/netlogic/xlp/dt.c3
-rw-r--r--arch/mips/netlogic/xlp/nlm_hal.c283
-rw-r--r--arch/mips/netlogic/xlp/setup.c3
-rw-r--r--arch/mips/netlogic/xlp/wakeup.c16
-rw-r--r--arch/mips/paravirt/Kconfig6
-rw-r--r--arch/mips/paravirt/Makefile14
-rw-r--r--arch/mips/paravirt/Platform8
-rw-r--r--arch/mips/paravirt/paravirt-irq.c368
-rw-r--r--arch/mips/paravirt/paravirt-smp.c143
-rw-r--r--arch/mips/paravirt/serial.c40
-rw-r--r--arch/mips/paravirt/setup.c67
-rw-r--r--arch/mips/pci/Makefile2
-rw-r--r--arch/mips/pci/fixup-malta.c6
-rw-r--r--arch/mips/pci/msi-octeon.c6
-rw-r--r--arch/mips/pci/msi-xlp.c184
-rw-r--r--arch/mips/pci/ops-pmcmsp.c2
-rw-r--r--arch/mips/pci/ops-tx3927.c2
-rw-r--r--arch/mips/pci/ops-tx4927.c9
-rw-r--r--arch/mips/pci/pci-virtio-guest.c131
-rw-r--r--arch/mips/pmcs-msp71xx/Makefile1
-rw-r--r--arch/mips/pmcs-msp71xx/msp_eth.c76
-rw-r--r--arch/mips/pmcs-msp71xx/msp_irq.c16
-rw-r--r--arch/mips/pmcs-msp71xx/msp_irq_cic.c7
-rw-r--r--arch/mips/pmcs-msp71xx/msp_irq_per.c3
-rw-r--r--arch/mips/pmcs-msp71xx/msp_setup.c17
-rw-r--r--arch/mips/pmcs-msp71xx/msp_smtc.c104
-rw-r--r--arch/mips/pmcs-msp71xx/msp_usb.c90
-rw-r--r--arch/mips/pnx833x/common/platform.c73
-rw-r--r--arch/mips/sgi-ip22/ip22-gio.c42
-rw-r--r--arch/mips/sgi-ip22/ip22-int.c7
-rw-r--r--arch/mips/sgi-ip27/ip27-smp.c5
-rw-r--r--arch/mips/sibyte/bcm1480/irq.c11
-rw-r--r--arch/mips/sibyte/bcm1480/smp.c8
-rw-r--r--arch/mips/sibyte/sb1250/smp.c8
-rw-r--r--arch/mips/txx9/generic/setup.c4
247 files changed, 8331 insertions, 7863 deletions
diff --git a/arch/mips/Kbuild b/arch/mips/Kbuild
index d2cfe45f332b..dd295335891a 100644
--- a/arch/mips/Kbuild
+++ b/arch/mips/Kbuild
@@ -16,7 +16,7 @@ obj- := $(platform-)
16 16
17obj-y += kernel/ 17obj-y += kernel/
18obj-y += mm/ 18obj-y += mm/
19obj-y += math-emu/ 19obj-y += net/
20 20
21ifdef CONFIG_KVM 21ifdef CONFIG_KVM
22obj-y += kvm/ 22obj-y += kvm/
diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms
index 6e239123d6fe..f5e18bf3275e 100644
--- a/arch/mips/Kbuild.platforms
+++ b/arch/mips/Kbuild.platforms
@@ -18,6 +18,7 @@ platforms += loongson1
18platforms += mti-malta 18platforms += mti-malta
19platforms += mti-sead3 19platforms += mti-sead3
20platforms += netlogic 20platforms += netlogic
21platforms += paravirt
21platforms += pmcs-msp71xx 22platforms += pmcs-msp71xx
22platforms += pnx833x 23platforms += pnx833x
23platforms += ralink 24platforms += ralink
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 5cd695f905a1..f6521e49e390 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -12,6 +12,7 @@ config MIPS
12 select HAVE_ARCH_KGDB 12 select HAVE_ARCH_KGDB
13 select HAVE_ARCH_SECCOMP_FILTER 13 select HAVE_ARCH_SECCOMP_FILTER
14 select HAVE_ARCH_TRACEHOOK 14 select HAVE_ARCH_TRACEHOOK
15 select HAVE_BPF_JIT if !CPU_MICROMIPS
15 select ARCH_HAVE_CUSTOM_GPIO_H 16 select ARCH_HAVE_CUSTOM_GPIO_H
16 select HAVE_FUNCTION_TRACER 17 select HAVE_FUNCTION_TRACER
17 select HAVE_FUNCTION_TRACE_MCOUNT_TEST 18 select HAVE_FUNCTION_TRACE_MCOUNT_TEST
@@ -50,6 +51,8 @@ config MIPS
50 select CLONE_BACKWARDS 51 select CLONE_BACKWARDS
51 select HAVE_DEBUG_STACKOVERFLOW 52 select HAVE_DEBUG_STACKOVERFLOW
52 select HAVE_CC_STACKPROTECTOR 53 select HAVE_CC_STACKPROTECTOR
54 select CPU_PM if CPU_IDLE
55 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
53 56
54menu "Machine selection" 57menu "Machine selection"
55 58
@@ -83,6 +86,7 @@ config AR7
83 select SYS_HAS_EARLY_PRINTK 86 select SYS_HAS_EARLY_PRINTK
84 select SYS_SUPPORTS_32BIT_KERNEL 87 select SYS_SUPPORTS_32BIT_KERNEL
85 select SYS_SUPPORTS_LITTLE_ENDIAN 88 select SYS_SUPPORTS_LITTLE_ENDIAN
89 select SYS_SUPPORTS_MIPS16
86 select SYS_SUPPORTS_ZBOOT_UART16550 90 select SYS_SUPPORTS_ZBOOT_UART16550
87 select ARCH_REQUIRE_GPIOLIB 91 select ARCH_REQUIRE_GPIOLIB
88 select VLYNQ 92 select VLYNQ
@@ -106,6 +110,7 @@ config ATH79
106 select SYS_HAS_EARLY_PRINTK 110 select SYS_HAS_EARLY_PRINTK
107 select SYS_SUPPORTS_32BIT_KERNEL 111 select SYS_SUPPORTS_32BIT_KERNEL
108 select SYS_SUPPORTS_BIG_ENDIAN 112 select SYS_SUPPORTS_BIG_ENDIAN
113 select SYS_SUPPORTS_MIPS16
109 help 114 help
110 Support for the Atheros AR71XX/AR724X/AR913X SoCs. 115 Support for the Atheros AR71XX/AR724X/AR913X SoCs.
111 116
@@ -122,6 +127,7 @@ config BCM47XX
122 select NO_EXCEPT_FILL 127 select NO_EXCEPT_FILL
123 select SYS_SUPPORTS_32BIT_KERNEL 128 select SYS_SUPPORTS_32BIT_KERNEL
124 select SYS_SUPPORTS_LITTLE_ENDIAN 129 select SYS_SUPPORTS_LITTLE_ENDIAN
130 select SYS_SUPPORTS_MIPS16
125 select SYS_HAS_EARLY_PRINTK 131 select SYS_HAS_EARLY_PRINTK
126 select USE_GENERIC_EARLY_PRINTK_8250 132 select USE_GENERIC_EARLY_PRINTK_8250
127 help 133 help
@@ -168,9 +174,9 @@ config MACH_DECSTATION
168 bool "DECstations" 174 bool "DECstations"
169 select BOOT_ELF32 175 select BOOT_ELF32
170 select CEVT_DS1287 176 select CEVT_DS1287
171 select CEVT_R4K 177 select CEVT_R4K if CPU_R4X00
172 select CSRC_IOASIC 178 select CSRC_IOASIC
173 select CSRC_R4K 179 select CSRC_R4K if CPU_R4X00
174 select CPU_DADDI_WORKAROUNDS if 64BIT 180 select CPU_DADDI_WORKAROUNDS if 64BIT
175 select CPU_R4000_WORKAROUNDS if 64BIT 181 select CPU_R4000_WORKAROUNDS if 64BIT
176 select CPU_R4400_WORKAROUNDS if 64BIT 182 select CPU_R4400_WORKAROUNDS if 64BIT
@@ -248,6 +254,7 @@ config LANTIQ
248 select SYS_HAS_CPU_MIPS32_R2 254 select SYS_HAS_CPU_MIPS32_R2
249 select SYS_SUPPORTS_BIG_ENDIAN 255 select SYS_SUPPORTS_BIG_ENDIAN
250 select SYS_SUPPORTS_32BIT_KERNEL 256 select SYS_SUPPORTS_32BIT_KERNEL
257 select SYS_SUPPORTS_MIPS16
251 select SYS_SUPPORTS_MULTITHREADING 258 select SYS_SUPPORTS_MULTITHREADING
252 select SYS_HAS_EARLY_PRINTK 259 select SYS_HAS_EARLY_PRINTK
253 select ARCH_REQUIRE_GPIOLIB 260 select ARCH_REQUIRE_GPIOLIB
@@ -330,6 +337,7 @@ config MIPS_MALTA
330 select SYS_SUPPORTS_LITTLE_ENDIAN 337 select SYS_SUPPORTS_LITTLE_ENDIAN
331 select SYS_SUPPORTS_MIPS_CMP 338 select SYS_SUPPORTS_MIPS_CMP
332 select SYS_SUPPORTS_MIPS_CPS 339 select SYS_SUPPORTS_MIPS_CPS
340 select SYS_SUPPORTS_MIPS16
333 select SYS_SUPPORTS_MULTITHREADING 341 select SYS_SUPPORTS_MULTITHREADING
334 select SYS_SUPPORTS_SMARTMIPS 342 select SYS_SUPPORTS_SMARTMIPS
335 select SYS_SUPPORTS_ZBOOT 343 select SYS_SUPPORTS_ZBOOT
@@ -361,6 +369,7 @@ config MIPS_SEAD3
361 select SYS_SUPPORTS_LITTLE_ENDIAN 369 select SYS_SUPPORTS_LITTLE_ENDIAN
362 select SYS_SUPPORTS_SMARTMIPS 370 select SYS_SUPPORTS_SMARTMIPS
363 select SYS_SUPPORTS_MICROMIPS 371 select SYS_SUPPORTS_MICROMIPS
372 select SYS_SUPPORTS_MIPS16
364 select USB_EHCI_BIG_ENDIAN_DESC 373 select USB_EHCI_BIG_ENDIAN_DESC
365 select USB_EHCI_BIG_ENDIAN_MMIO 374 select USB_EHCI_BIG_ENDIAN_MMIO
366 select USE_OF 375 select USE_OF
@@ -380,6 +389,7 @@ config MACH_VR41XX
380 select CEVT_R4K 389 select CEVT_R4K
381 select CSRC_R4K 390 select CSRC_R4K
382 select SYS_HAS_CPU_VR41XX 391 select SYS_HAS_CPU_VR41XX
392 select SYS_SUPPORTS_MIPS16
383 select ARCH_REQUIRE_GPIOLIB 393 select ARCH_REQUIRE_GPIOLIB
384 394
385config NXP_STB220 395config NXP_STB220
@@ -407,6 +417,7 @@ config PMC_MSP
407 select SYS_HAS_CPU_MIPS32_R2 417 select SYS_HAS_CPU_MIPS32_R2
408 select SYS_SUPPORTS_32BIT_KERNEL 418 select SYS_SUPPORTS_32BIT_KERNEL
409 select SYS_SUPPORTS_BIG_ENDIAN 419 select SYS_SUPPORTS_BIG_ENDIAN
420 select SYS_SUPPORTS_MIPS16
410 select IRQ_CPU 421 select IRQ_CPU
411 select SERIAL_8250 422 select SERIAL_8250
412 select SERIAL_8250_CONSOLE 423 select SERIAL_8250_CONSOLE
@@ -430,6 +441,7 @@ config RALINK
430 select SYS_HAS_CPU_MIPS32_R2 441 select SYS_HAS_CPU_MIPS32_R2
431 select SYS_SUPPORTS_32BIT_KERNEL 442 select SYS_SUPPORTS_32BIT_KERNEL
432 select SYS_SUPPORTS_LITTLE_ENDIAN 443 select SYS_SUPPORTS_LITTLE_ENDIAN
444 select SYS_SUPPORTS_MIPS16
433 select SYS_HAS_EARLY_PRINTK 445 select SYS_HAS_EARLY_PRINTK
434 select HAVE_MACH_CLKDEV 446 select HAVE_MACH_CLKDEV
435 select CLKDEV_LOOKUP 447 select CLKDEV_LOOKUP
@@ -674,7 +686,6 @@ config SNI_RM
674 select SYS_SUPPORTS_BIG_ENDIAN 686 select SYS_SUPPORTS_BIG_ENDIAN
675 select SYS_SUPPORTS_HIGHMEM 687 select SYS_SUPPORTS_HIGHMEM
676 select SYS_SUPPORTS_LITTLE_ENDIAN 688 select SYS_SUPPORTS_LITTLE_ENDIAN
677 select USE_GENERIC_EARLY_PRINTK_8250
678 help 689 help
679 The SNI RM200/300/400 are MIPS-based machines manufactured by 690 The SNI RM200/300/400 are MIPS-based machines manufactured by
680 Siemens Nixdorf Informationssysteme (SNI), parent company of Pyramid 691 Siemens Nixdorf Informationssysteme (SNI), parent company of Pyramid
@@ -721,6 +732,11 @@ config CAVIUM_OCTEON_SOC
721 select ZONE_DMA32 732 select ZONE_DMA32
722 select HOLES_IN_ZONE 733 select HOLES_IN_ZONE
723 select ARCH_REQUIRE_GPIOLIB 734 select ARCH_REQUIRE_GPIOLIB
735 select LIBFDT
736 select USE_OF
737 select ARCH_SPARSEMEM_ENABLE
738 select SYS_SUPPORTS_SMP
739 select NR_CPUS_DEFAULT_16
724 help 740 help
725 This option supports all of the Octeon reference boards from Cavium 741 This option supports all of the Octeon reference boards from Cavium
726 Networks. It builds a kernel that dynamically determines the Octeon 742 Networks. It builds a kernel that dynamically determines the Octeon
@@ -789,6 +805,25 @@ config NLM_XLP_BOARD
789 This board is based on Netlogic XLP Processor. 805 This board is based on Netlogic XLP Processor.
790 Say Y here if you have a XLP based board. 806 Say Y here if you have a XLP based board.
791 807
808config MIPS_PARAVIRT
809 bool "Para-Virtualized guest system"
810 select CEVT_R4K
811 select CSRC_R4K
812 select DMA_COHERENT
813 select SYS_SUPPORTS_64BIT_KERNEL
814 select SYS_SUPPORTS_32BIT_KERNEL
815 select SYS_SUPPORTS_BIG_ENDIAN
816 select SYS_SUPPORTS_SMP
817 select NR_CPUS_DEFAULT_4
818 select SYS_HAS_EARLY_PRINTK
819 select SYS_HAS_CPU_MIPS32_R2
820 select SYS_HAS_CPU_MIPS64_R2
821 select SYS_HAS_CPU_CAVIUM_OCTEON
822 select HW_HAS_PCI
823 select SWAP_IO_SPACE
824 help
825 This option supports guest running under ????
826
792endchoice 827endchoice
793 828
794source "arch/mips/alchemy/Kconfig" 829source "arch/mips/alchemy/Kconfig"
@@ -809,6 +844,7 @@ source "arch/mips/cavium-octeon/Kconfig"
809source "arch/mips/loongson/Kconfig" 844source "arch/mips/loongson/Kconfig"
810source "arch/mips/loongson1/Kconfig" 845source "arch/mips/loongson1/Kconfig"
811source "arch/mips/netlogic/Kconfig" 846source "arch/mips/netlogic/Kconfig"
847source "arch/mips/paravirt/Kconfig"
812 848
813endmenu 849endmenu
814 850
@@ -1059,6 +1095,7 @@ config SOC_PNX833X
1059 select SYS_SUPPORTS_32BIT_KERNEL 1095 select SYS_SUPPORTS_32BIT_KERNEL
1060 select SYS_SUPPORTS_LITTLE_ENDIAN 1096 select SYS_SUPPORTS_LITTLE_ENDIAN
1061 select SYS_SUPPORTS_BIG_ENDIAN 1097 select SYS_SUPPORTS_BIG_ENDIAN
1098 select SYS_SUPPORTS_MIPS16
1062 select CPU_MIPSR2_IRQ_VI 1099 select CPU_MIPSR2_IRQ_VI
1063 1100
1064config SOC_PNX8335 1101config SOC_PNX8335
@@ -1398,16 +1435,11 @@ config CPU_SB1
1398config CPU_CAVIUM_OCTEON 1435config CPU_CAVIUM_OCTEON
1399 bool "Cavium Octeon processor" 1436 bool "Cavium Octeon processor"
1400 depends on SYS_HAS_CPU_CAVIUM_OCTEON 1437 depends on SYS_HAS_CPU_CAVIUM_OCTEON
1401 select ARCH_SPARSEMEM_ENABLE
1402 select CPU_HAS_PREFETCH 1438 select CPU_HAS_PREFETCH
1403 select CPU_SUPPORTS_64BIT_KERNEL 1439 select CPU_SUPPORTS_64BIT_KERNEL
1404 select SYS_SUPPORTS_SMP
1405 select NR_CPUS_DEFAULT_16
1406 select WEAK_ORDERING 1440 select WEAK_ORDERING
1407 select CPU_SUPPORTS_HIGHMEM 1441 select CPU_SUPPORTS_HIGHMEM
1408 select CPU_SUPPORTS_HUGEPAGES 1442 select CPU_SUPPORTS_HUGEPAGES
1409 select LIBFDT
1410 select USE_OF
1411 select USB_EHCI_BIG_ENDIAN_MMIO 1443 select USB_EHCI_BIG_ENDIAN_MMIO
1412 select MIPS_L1_CACHE_SHIFT_7 1444 select MIPS_L1_CACHE_SHIFT_7
1413 help 1445 help
@@ -1659,6 +1691,12 @@ config SYS_HAS_CPU_XLR
1659config SYS_HAS_CPU_XLP 1691config SYS_HAS_CPU_XLP
1660 bool 1692 bool
1661 1693
1694config MIPS_MALTA_PM
1695 depends on MIPS_MALTA
1696 depends on PCI
1697 bool
1698 default y
1699
1662# 1700#
1663# CPU may reorder R->R, R->W, W->R, W->W 1701# CPU may reorder R->R, R->W, W->R, W->W
1664# Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC 1702# Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
@@ -1842,7 +1880,7 @@ config FORCE_MAX_ZONEORDER
1842 1880
1843config CEVT_GIC 1881config CEVT_GIC
1844 bool "Use GIC global counter for clock events" 1882 bool "Use GIC global counter for clock events"
1845 depends on IRQ_GIC && !(MIPS_SEAD3 || MIPS_MT_SMTC) 1883 depends on IRQ_GIC && !MIPS_SEAD3
1846 help 1884 help
1847 Use the GIC global counter for the clock events. The R4K clock 1885 Use the GIC global counter for the clock events. The R4K clock
1848 event driver is always present, so if the platform ends up not 1886 event driver is always present, so if the platform ends up not
@@ -1895,19 +1933,8 @@ config CPU_R4K_CACHE_TLB
1895 bool 1933 bool
1896 default y if !(CPU_R3000 || CPU_R8000 || CPU_SB1 || CPU_TX39XX || CPU_CAVIUM_OCTEON) 1934 default y if !(CPU_R3000 || CPU_R8000 || CPU_SB1 || CPU_TX39XX || CPU_CAVIUM_OCTEON)
1897 1935
1898choice
1899 prompt "MIPS MT options"
1900
1901config MIPS_MT_DISABLED
1902 bool "Disable multithreading support"
1903 help
1904 Use this option if your platform does not support the MT ASE
1905 which is hardware multithreading support. On systems without
1906 an MT-enabled processor, this will be the only option that is
1907 available in this menu.
1908
1909config MIPS_MT_SMP 1936config MIPS_MT_SMP
1910 bool "Use 1 TC on each available VPE for SMP" 1937 bool "MIPS MT SMP support (1 TC on each available VPE)"
1911 depends on SYS_SUPPORTS_MULTITHREADING 1938 depends on SYS_SUPPORTS_MULTITHREADING
1912 select CPU_MIPSR2_IRQ_VI 1939 select CPU_MIPSR2_IRQ_VI
1913 select CPU_MIPSR2_IRQ_EI 1940 select CPU_MIPSR2_IRQ_EI
@@ -1926,26 +1953,6 @@ config MIPS_MT_SMP
1926 Intel Hyperthreading feature. For further information go to 1953 Intel Hyperthreading feature. For further information go to
1927 <http://www.imgtec.com/mips/mips-multithreading.asp>. 1954 <http://www.imgtec.com/mips/mips-multithreading.asp>.
1928 1955
1929config MIPS_MT_SMTC
1930 bool "Use all TCs on all VPEs for SMP (DEPRECATED)"
1931 depends on CPU_MIPS32_R2
1932 depends on SYS_SUPPORTS_MULTITHREADING
1933 depends on !MIPS_CPS
1934 select CPU_MIPSR2_IRQ_VI
1935 select CPU_MIPSR2_IRQ_EI
1936 select MIPS_MT
1937 select SMP
1938 select SMP_UP
1939 select SYS_SUPPORTS_SMP
1940 select NR_CPUS_DEFAULT_8
1941 help
1942 This is a kernel model which is known as SMTC. This is
1943 supported on cores with the MT ASE and presents all TCs
1944 available on all VPEs to support SMP. For further
1945 information see <http://www.linux-mips.org/wiki/34K#SMTC>.
1946
1947endchoice
1948
1949config MIPS_MT 1956config MIPS_MT
1950 bool 1957 bool
1951 1958
@@ -1967,7 +1974,7 @@ config SYS_SUPPORTS_MULTITHREADING
1967config MIPS_MT_FPAFF 1974config MIPS_MT_FPAFF
1968 bool "Dynamic FPU affinity for FP-intensive threads" 1975 bool "Dynamic FPU affinity for FP-intensive threads"
1969 default y 1976 default y
1970 depends on MIPS_MT_SMP || MIPS_MT_SMTC 1977 depends on MIPS_MT_SMP
1971 1978
1972config MIPS_VPE_LOADER 1979config MIPS_VPE_LOADER
1973 bool "VPE loader support." 1980 bool "VPE loader support."
@@ -1989,29 +1996,6 @@ config MIPS_VPE_LOADER_MT
1989 default "y" 1996 default "y"
1990 depends on MIPS_VPE_LOADER && !MIPS_CMP 1997 depends on MIPS_VPE_LOADER && !MIPS_CMP
1991 1998
1992config MIPS_MT_SMTC_IM_BACKSTOP
1993 bool "Use per-TC register bits as backstop for inhibited IM bits"
1994 depends on MIPS_MT_SMTC
1995 default n
1996 help
1997 To support multiple TC microthreads acting as "CPUs" within
1998 a VPE, VPE-wide interrupt mask bits must be specially manipulated
1999 during interrupt handling. To support legacy drivers and interrupt
2000 controller management code, SMTC has a "backstop" to track and
2001 if necessary restore the interrupt mask. This has some performance
2002 impact on interrupt service overhead.
2003
2004config MIPS_MT_SMTC_IRQAFF
2005 bool "Support IRQ affinity API"
2006 depends on MIPS_MT_SMTC
2007 default n
2008 help
2009 Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.)
2010 for SMTC Linux kernel. Requires platform support, of which
2011 an example can be found in the MIPS kernel i8259 and Malta
2012 platform code. Adds some overhead to interrupt dispatch, and
2013 should be used only if you know what you are doing.
2014
2015config MIPS_VPE_LOADER_TOM 1999config MIPS_VPE_LOADER_TOM
2016 bool "Load VPE program into memory hidden from linux" 2000 bool "Load VPE program into memory hidden from linux"
2017 depends on MIPS_VPE_LOADER 2001 depends on MIPS_VPE_LOADER
@@ -2039,7 +2023,7 @@ config MIPS_VPE_APSP_API_MT
2039 2023
2040config MIPS_CMP 2024config MIPS_CMP
2041 bool "MIPS CMP framework support (DEPRECATED)" 2025 bool "MIPS CMP framework support (DEPRECATED)"
2042 depends on SYS_SUPPORTS_MIPS_CMP && !MIPS_MT_SMTC 2026 depends on SYS_SUPPORTS_MIPS_CMP
2043 select MIPS_GIC_IPI 2027 select MIPS_GIC_IPI
2044 select SYNC_R4K 2028 select SYNC_R4K
2045 select WEAK_ORDERING 2029 select WEAK_ORDERING
@@ -2057,9 +2041,11 @@ config MIPS_CPS
2057 depends on SYS_SUPPORTS_MIPS_CPS 2041 depends on SYS_SUPPORTS_MIPS_CPS
2058 select MIPS_CM 2042 select MIPS_CM
2059 select MIPS_CPC 2043 select MIPS_CPC
2044 select MIPS_CPS_PM if HOTPLUG_CPU
2060 select MIPS_GIC_IPI 2045 select MIPS_GIC_IPI
2061 select SMP 2046 select SMP
2062 select SYNC_R4K if (CEVT_R4K || CSRC_R4K) 2047 select SYNC_R4K if (CEVT_R4K || CSRC_R4K)
2048 select SYS_SUPPORTS_HOTPLUG_CPU
2063 select SYS_SUPPORTS_SMP 2049 select SYS_SUPPORTS_SMP
2064 select WEAK_ORDERING 2050 select WEAK_ORDERING
2065 help 2051 help
@@ -2069,6 +2055,9 @@ config MIPS_CPS
2069 no external assistance. It is safe to enable this when hardware 2055 no external assistance. It is safe to enable this when hardware
2070 support is unavailable. 2056 support is unavailable.
2071 2057
2058config MIPS_CPS_PM
2059 bool
2060
2072config MIPS_GIC_IPI 2061config MIPS_GIC_IPI
2073 bool 2062 bool
2074 2063
@@ -2199,6 +2188,13 @@ config SYS_SUPPORTS_SMARTMIPS
2199config SYS_SUPPORTS_MICROMIPS 2188config SYS_SUPPORTS_MICROMIPS
2200 bool 2189 bool
2201 2190
2191config SYS_SUPPORTS_MIPS16
2192 bool
2193 help
2194 This option must be set if a kernel might be executed on a MIPS16-
2195 enabled CPU even if MIPS16 is not actually being used. In other
2196 words, it makes the kernel MIPS16-tolerant.
2197
2202config CPU_SUPPORTS_MSA 2198config CPU_SUPPORTS_MSA
2203 bool 2199 bool
2204 2200
@@ -2239,7 +2235,7 @@ config NODES_SHIFT
2239 2235
2240config HW_PERF_EVENTS 2236config HW_PERF_EVENTS
2241 bool "Enable hardware performance counter support for perf events" 2237 bool "Enable hardware performance counter support for perf events"
2242 depends on PERF_EVENTS && !MIPS_MT_SMTC && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP) 2238 depends on PERF_EVENTS && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP)
2243 default y 2239 default y
2244 help 2240 help
2245 Enable hardware performance counter support for perf events. If 2241 Enable hardware performance counter support for perf events. If
@@ -2297,8 +2293,8 @@ config NR_CPUS_DEFAULT_64
2297 bool 2293 bool
2298 2294
2299config NR_CPUS 2295config NR_CPUS
2300 int "Maximum number of CPUs (2-64)" 2296 int "Maximum number of CPUs (2-256)"
2301 range 2 64 2297 range 2 256
2302 depends on SMP 2298 depends on SMP
2303 default "4" if NR_CPUS_DEFAULT_4 2299 default "4" if NR_CPUS_DEFAULT_4
2304 default "8" if NR_CPUS_DEFAULT_8 2300 default "8" if NR_CPUS_DEFAULT_8
@@ -2671,12 +2667,16 @@ endmenu
2671config MIPS_EXTERNAL_TIMER 2667config MIPS_EXTERNAL_TIMER
2672 bool 2668 bool
2673 2669
2674if CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER
2675menu "CPU Power Management" 2670menu "CPU Power Management"
2671
2672if CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER
2676source "drivers/cpufreq/Kconfig" 2673source "drivers/cpufreq/Kconfig"
2677endmenu
2678endif 2674endif
2679 2675
2676source "drivers/cpuidle/Kconfig"
2677
2678endmenu
2679
2680source "net/Kconfig" 2680source "net/Kconfig"
2681 2681
2682source "drivers/Kconfig" 2682source "drivers/Kconfig"
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 25de29211d76..3a2b775e8458 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -79,15 +79,6 @@ config CMDLINE_OVERRIDE
79 79
80 Normally, you will choose 'N' here. 80 Normally, you will choose 'N' here.
81 81
82config SMTC_IDLE_HOOK_DEBUG
83 bool "Enable additional debug checks before going into CPU idle loop"
84 depends on DEBUG_KERNEL && MIPS_MT_SMTC
85 help
86 This option enables Enable additional debug checks before going into
87 CPU idle loop. For details on these checks, see
88 arch/mips/kernel/smtc.c. This debugging option result in significant
89 overhead so should be disabled in production kernels.
90
91config SB1XXX_CORELIS 82config SB1XXX_CORELIS
92 bool "Corelis Debugger" 83 bool "Corelis Debugger"
93 depends on SIBYTE_SB1xxx_SOC 84 depends on SIBYTE_SB1xxx_SOC
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 78bab83db948..a8521de14791 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -251,6 +251,7 @@ OBJCOPYFLAGS += --remove-section=.reginfo
251head-y := arch/mips/kernel/head.o 251head-y := arch/mips/kernel/head.o
252 252
253libs-y += arch/mips/lib/ 253libs-y += arch/mips/lib/
254libs-y += arch/mips/math-emu/
254 255
255# See arch/mips/Kbuild for content of core part of the kernel 256# See arch/mips/Kbuild for content of core part of the kernel
256core-y += arch/mips/ 257core-y += arch/mips/
diff --git a/arch/mips/alchemy/board-xxs1500.c b/arch/mips/alchemy/board-xxs1500.c
index bd5513650293..3fb814be0e91 100644
--- a/arch/mips/alchemy/board-xxs1500.c
+++ b/arch/mips/alchemy/board-xxs1500.c
@@ -49,7 +49,7 @@ void __init prom_init(void)
49 prom_init_cmdline(); 49 prom_init_cmdline();
50 50
51 memsize_str = prom_getenv("memsize"); 51 memsize_str = prom_getenv("memsize");
52 if (!memsize_str || strict_strtoul(memsize_str, 0, &memsize)) 52 if (!memsize_str || kstrtoul(memsize_str, 0, &memsize))
53 memsize = 0x04000000; 53 memsize = 0x04000000;
54 54
55 add_memory_region(0, memsize, BOOT_MEM_RAM); 55 add_memory_region(0, memsize, BOOT_MEM_RAM);
diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c
index 566a1743f685..8267e3c97721 100644
--- a/arch/mips/alchemy/common/setup.c
+++ b/arch/mips/alchemy/common/setup.c
@@ -67,6 +67,12 @@ void __init plat_mem_setup(void)
67 case ALCHEMY_CPU_AU1500: 67 case ALCHEMY_CPU_AU1500:
68 case ALCHEMY_CPU_AU1100: 68 case ALCHEMY_CPU_AU1100:
69 coherentio = 0; 69 coherentio = 0;
70 break;
71 case ALCHEMY_CPU_AU1200:
72 /* Au1200 AB USB does not support coherent memory */
73 if (0 == (read_c0_prid() & PRID_REV_MASK))
74 coherentio = 0;
75 break;
70 } 76 }
71 77
72 board_setup(); /* board specific setup */ 78 board_setup(); /* board specific setup */
diff --git a/arch/mips/alchemy/common/usb.c b/arch/mips/alchemy/common/usb.c
index 2adc7edda49c..d193dbea84a1 100644
--- a/arch/mips/alchemy/common/usb.c
+++ b/arch/mips/alchemy/common/usb.c
@@ -355,47 +355,25 @@ static inline void __au1200_udc_control(void __iomem *base, int enable)
355 } 355 }
356} 356}
357 357
358static inline int au1200_coherency_bug(void)
359{
360#if defined(CONFIG_DMA_COHERENT)
361 /* Au1200 AB USB does not support coherent memory */
362 if (!(read_c0_prid() & PRID_REV_MASK)) {
363 printk(KERN_INFO "Au1200 USB: this is chip revision AB !!\n");
364 printk(KERN_INFO "Au1200 USB: update your board or re-configure"
365 " the kernel\n");
366 return -ENODEV;
367 }
368#endif
369 return 0;
370}
371
372static inline int au1200_usb_control(int block, int enable) 358static inline int au1200_usb_control(int block, int enable)
373{ 359{
374 void __iomem *base = 360 void __iomem *base =
375 (void __iomem *)KSEG1ADDR(AU1200_USB_CTL_PHYS_ADDR); 361 (void __iomem *)KSEG1ADDR(AU1200_USB_CTL_PHYS_ADDR);
376 int ret = 0;
377 362
378 switch (block) { 363 switch (block) {
379 case ALCHEMY_USB_OHCI0: 364 case ALCHEMY_USB_OHCI0:
380 ret = au1200_coherency_bug();
381 if (ret && enable)
382 goto out;
383 __au1200_ohci_control(base, enable); 365 __au1200_ohci_control(base, enable);
384 break; 366 break;
385 case ALCHEMY_USB_UDC0: 367 case ALCHEMY_USB_UDC0:
386 __au1200_udc_control(base, enable); 368 __au1200_udc_control(base, enable);
387 break; 369 break;
388 case ALCHEMY_USB_EHCI0: 370 case ALCHEMY_USB_EHCI0:
389 ret = au1200_coherency_bug();
390 if (ret && enable)
391 goto out;
392 __au1200_ehci_control(base, enable); 371 __au1200_ehci_control(base, enable);
393 break; 372 break;
394 default: 373 default:
395 ret = -ENODEV; 374 return -ENODEV;
396 } 375 }
397out: 376 return 0;
398 return ret;
399} 377}
400 378
401 379
diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
index b86bff31d1d3..61e90fe9eab1 100644
--- a/arch/mips/alchemy/devboards/pm.c
+++ b/arch/mips/alchemy/devboards/pm.c
@@ -158,7 +158,7 @@ static ssize_t db1x_pmattr_store(struct kobject *kobj,
158 int tmp; 158 int tmp;
159 159
160 if (ATTRCMP(timer_timeout)) { 160 if (ATTRCMP(timer_timeout)) {
161 tmp = strict_strtoul(instr, 0, &l); 161 tmp = kstrtoul(instr, 0, &l);
162 if (tmp) 162 if (tmp)
163 return tmp; 163 return tmp;
164 164
@@ -181,7 +181,7 @@ static ssize_t db1x_pmattr_store(struct kobject *kobj,
181 } 181 }
182 182
183 } else if (ATTRCMP(wakemsk)) { 183 } else if (ATTRCMP(wakemsk)) {
184 tmp = strict_strtoul(instr, 0, &l); 184 tmp = kstrtoul(instr, 0, &l);
185 if (tmp) 185 if (tmp)
186 return tmp; 186 return tmp;
187 187
diff --git a/arch/mips/bcm47xx/prom.c b/arch/mips/bcm47xx/prom.c
index 0af808dfd1ca..1a03a2f43496 100644
--- a/arch/mips/bcm47xx/prom.c
+++ b/arch/mips/bcm47xx/prom.c
@@ -69,15 +69,18 @@ static __init void prom_init_mem(void)
69 * BCM47XX uses 128MB for addressing the ram, if the system contains 69 * BCM47XX uses 128MB for addressing the ram, if the system contains
70 * less that that amount of ram it remaps the ram more often into the 70 * less that that amount of ram it remaps the ram more often into the
71 * available space. 71 * available space.
72 * Accessing memory after 128MB will cause an exception.
73 * max contains the biggest possible address supported by the platform.
74 * If the method wants to try something above we assume 128MB ram.
75 */ 72 */
76 off = (unsigned long)prom_init; 73
77 max = off | ((128 << 20) - 1); 74 /* Physical address, without mapping to any kernel segment */
78 for (mem = (1 << 20); mem < (128 << 20); mem += (1 << 20)) { 75 off = CPHYSADDR((unsigned long)prom_init);
79 if ((off + mem) > max) { 76
80 mem = (128 << 20); 77 /* Accessing memory after 128 MiB will cause an exception */
78 max = 128 << 20;
79
80 for (mem = 1 << 20; mem < max; mem += 1 << 20) {
81 /* Loop condition may be not enough, off may be over 1 MiB */
82 if (off + mem >= max) {
83 mem = max;
81 printk(KERN_DEBUG "assume 128MB RAM\n"); 84 printk(KERN_DEBUG "assume 128MB RAM\n");
82 break; 85 break;
83 } 86 }
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
index 227705d9d5ae..602866657938 100644
--- a/arch/mips/cavium-octeon/Kconfig
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -10,6 +10,17 @@ config CAVIUM_CN63XXP1
10 non-CN63XXP1 hardware, so it is recommended to select "n" 10 non-CN63XXP1 hardware, so it is recommended to select "n"
11 unless it is known the workarounds are needed. 11 unless it is known the workarounds are needed.
12 12
13config CAVIUM_OCTEON_CVMSEG_SIZE
14 int "Number of L1 cache lines reserved for CVMSEG memory"
15 range 0 54
16 default 1
17 help
18 CVMSEG LM is a segment that accesses portions of the dcache as a
19 local memory; the larger CVMSEG is, the smaller the cache is.
20 This selects the size of CVMSEG LM, which is in cache blocks. The
21 legally range is from zero to 54 cache blocks (i.e. CVMSEG LM is
22 between zero and 6192 bytes).
23
13endif # CPU_CAVIUM_OCTEON 24endif # CPU_CAVIUM_OCTEON
14 25
15if CAVIUM_OCTEON_SOC 26if CAVIUM_OCTEON_SOC
@@ -23,17 +34,6 @@ config CAVIUM_OCTEON_2ND_KERNEL
23 with this option to be run at the same time as one built without this 34 with this option to be run at the same time as one built without this
24 option. 35 option.
25 36
26config CAVIUM_OCTEON_CVMSEG_SIZE
27 int "Number of L1 cache lines reserved for CVMSEG memory"
28 range 0 54
29 default 1
30 help
31 CVMSEG LM is a segment that accesses portions of the dcache as a
32 local memory; the larger CVMSEG is, the smaller the cache is.
33 This selects the size of CVMSEG LM, which is in cache blocks. The
34 legally range is from zero to 54 cache blocks (i.e. CVMSEG LM is
35 between zero and 6192 bytes).
36
37config CAVIUM_OCTEON_LOCK_L2 37config CAVIUM_OCTEON_LOCK_L2
38 bool "Lock often used kernel code in the L2" 38 bool "Lock often used kernel code in the L2"
39 default "y" 39 default "y"
@@ -86,7 +86,6 @@ config SWIOTLB
86 select IOMMU_HELPER 86 select IOMMU_HELPER
87 select NEED_SG_DMA_LENGTH 87 select NEED_SG_DMA_LENGTH
88 88
89
90config OCTEON_ILM 89config OCTEON_ILM
91 tristate "Module to measure interrupt latency using Octeon CIU Timer" 90 tristate "Module to measure interrupt latency using Octeon CIU Timer"
92 help 91 help
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
index 8553ad5c72b6..7e5cf7a5e2f3 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -106,6 +106,158 @@ int cvmx_helper_ports_on_interface(int interface)
106EXPORT_SYMBOL_GPL(cvmx_helper_ports_on_interface); 106EXPORT_SYMBOL_GPL(cvmx_helper_ports_on_interface);
107 107
108/** 108/**
109 * @INTERNAL
110 * Return interface mode for CN68xx.
111 */
112static cvmx_helper_interface_mode_t __cvmx_get_mode_cn68xx(int interface)
113{
114 union cvmx_mio_qlmx_cfg qlm_cfg;
115 switch (interface) {
116 case 0:
117 qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0));
118 /* QLM is disabled when QLM SPD is 15. */
119 if (qlm_cfg.s.qlm_spd == 15)
120 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
121
122 if (qlm_cfg.s.qlm_cfg == 2)
123 return CVMX_HELPER_INTERFACE_MODE_SGMII;
124 else if (qlm_cfg.s.qlm_cfg == 3)
125 return CVMX_HELPER_INTERFACE_MODE_XAUI;
126 else
127 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
128 case 2:
129 case 3:
130 case 4:
131 qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(interface));
132 /* QLM is disabled when QLM SPD is 15. */
133 if (qlm_cfg.s.qlm_spd == 15)
134 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
135
136 if (qlm_cfg.s.qlm_cfg == 2)
137 return CVMX_HELPER_INTERFACE_MODE_SGMII;
138 else if (qlm_cfg.s.qlm_cfg == 3)
139 return CVMX_HELPER_INTERFACE_MODE_XAUI;
140 else
141 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
142 case 7:
143 qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(3));
144 /* QLM is disabled when QLM SPD is 15. */
145 if (qlm_cfg.s.qlm_spd == 15) {
146 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
147 } else if (qlm_cfg.s.qlm_cfg != 0) {
148 qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1));
149 if (qlm_cfg.s.qlm_cfg != 0)
150 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
151 }
152 return CVMX_HELPER_INTERFACE_MODE_NPI;
153 case 8:
154 return CVMX_HELPER_INTERFACE_MODE_LOOP;
155 default:
156 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
157 }
158}
159
160/**
161 * @INTERNAL
162 * Return interface mode for an Octeon II
163 */
164static cvmx_helper_interface_mode_t __cvmx_get_mode_octeon2(int interface)
165{
166 union cvmx_gmxx_inf_mode mode;
167
168 if (OCTEON_IS_MODEL(OCTEON_CN68XX))
169 return __cvmx_get_mode_cn68xx(interface);
170
171 if (interface == 2)
172 return CVMX_HELPER_INTERFACE_MODE_NPI;
173
174 if (interface == 3)
175 return CVMX_HELPER_INTERFACE_MODE_LOOP;
176
177 /* Only present in CN63XX & CN66XX Octeon model */
178 if ((OCTEON_IS_MODEL(OCTEON_CN63XX) &&
179 (interface == 4 || interface == 5)) ||
180 (OCTEON_IS_MODEL(OCTEON_CN66XX) &&
181 interface >= 4 && interface <= 7)) {
182 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
183 }
184
185 if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
186 union cvmx_mio_qlmx_cfg mio_qlm_cfg;
187
188 /* QLM2 is SGMII0 and QLM1 is SGMII1 */
189 if (interface == 0)
190 mio_qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(2));
191 else if (interface == 1)
192 mio_qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1));
193 else
194 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
195
196 if (mio_qlm_cfg.s.qlm_spd == 15)
197 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
198
199 if (mio_qlm_cfg.s.qlm_cfg == 9)
200 return CVMX_HELPER_INTERFACE_MODE_SGMII;
201 else if (mio_qlm_cfg.s.qlm_cfg == 11)
202 return CVMX_HELPER_INTERFACE_MODE_XAUI;
203 else
204 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
205 } else if (OCTEON_IS_MODEL(OCTEON_CN61XX)) {
206 union cvmx_mio_qlmx_cfg qlm_cfg;
207
208 if (interface == 0) {
209 qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(2));
210 if (qlm_cfg.s.qlm_cfg == 2)
211 return CVMX_HELPER_INTERFACE_MODE_SGMII;
212 else if (qlm_cfg.s.qlm_cfg == 3)
213 return CVMX_HELPER_INTERFACE_MODE_XAUI;
214 else
215 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
216 } else if (interface == 1) {
217 qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0));
218 if (qlm_cfg.s.qlm_cfg == 2)
219 return CVMX_HELPER_INTERFACE_MODE_SGMII;
220 else if (qlm_cfg.s.qlm_cfg == 3)
221 return CVMX_HELPER_INTERFACE_MODE_XAUI;
222 else
223 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
224 }
225 } else if (OCTEON_IS_MODEL(OCTEON_CNF71XX)) {
226 if (interface == 0) {
227 union cvmx_mio_qlmx_cfg qlm_cfg;
228 qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0));
229 if (qlm_cfg.s.qlm_cfg == 2)
230 return CVMX_HELPER_INTERFACE_MODE_SGMII;
231 }
232 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
233 }
234
235 if (interface == 1 && OCTEON_IS_MODEL(OCTEON_CN63XX))
236 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
237
238 mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
239
240 if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
241 switch (mode.cn63xx.mode) {
242 case 0:
243 return CVMX_HELPER_INTERFACE_MODE_SGMII;
244 case 1:
245 return CVMX_HELPER_INTERFACE_MODE_XAUI;
246 default:
247 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
248 }
249 } else {
250 if (!mode.s.en)
251 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
252
253 if (mode.s.type)
254 return CVMX_HELPER_INTERFACE_MODE_GMII;
255 else
256 return CVMX_HELPER_INTERFACE_MODE_RGMII;
257 }
258}
259
260/**
109 * Get the operating mode of an interface. Depending on the Octeon 261 * Get the operating mode of an interface. Depending on the Octeon
110 * chip and configuration, this function returns an enumeration 262 * chip and configuration, this function returns an enumeration
111 * of the type of packet I/O supported by an interface. 263 * of the type of packet I/O supported by an interface.
@@ -118,6 +270,20 @@ EXPORT_SYMBOL_GPL(cvmx_helper_ports_on_interface);
118cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface) 270cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface)
119{ 271{
120 union cvmx_gmxx_inf_mode mode; 272 union cvmx_gmxx_inf_mode mode;
273
274 if (interface < 0 ||
275 interface >= cvmx_helper_get_number_of_interfaces())
276 return CVMX_HELPER_INTERFACE_MODE_DISABLED;
277
278 /*
279 * Octeon II models
280 */
281 if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
282 return __cvmx_get_mode_octeon2(interface);
283
284 /*
285 * Octeon and Octeon Plus models
286 */
121 if (interface == 2) 287 if (interface == 2)
122 return CVMX_HELPER_INTERFACE_MODE_NPI; 288 return CVMX_HELPER_INTERFACE_MODE_NPI;
123 289
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 3aa5b46b2d40..1b82ac6921e0 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -1260,11 +1260,13 @@ static void __init octeon_irq_init_ciu(void)
1260 for (i = 0; i < 4; i++) 1260 for (i = 0; i < 4; i++)
1261 octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40); 1261 octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
1262 1262
1263 octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
1263 octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46); 1264 octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
1264 for (i = 0; i < 4; i++) 1265 for (i = 0; i < 4; i++)
1265 octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52); 1266 octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
1266 1267
1267 octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56); 1268 octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
1269 octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
1268 1270
1269 /* CIU_1 */ 1271 /* CIU_1 */
1270 for (i = 0; i < 16; i++) 1272 for (i = 0; i < 16; i++)
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 331b837cec57..989781fbae76 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -729,17 +729,6 @@ void __init prom_init(void)
729 octeon_write_lcd("Linux"); 729 octeon_write_lcd("Linux");
730#endif 730#endif
731 731
732#ifdef CONFIG_CAVIUM_GDB
733 /*
734 * When debugging the linux kernel, force the cores to enter
735 * the debug exception handler to break in.
736 */
737 if (octeon_get_boot_debug_flag()) {
738 cvmx_write_csr(CVMX_CIU_DINT, 1 << cvmx_get_core_num());
739 cvmx_read_csr(CVMX_CIU_DINT);
740 }
741#endif
742
743 octeon_setup_delays(); 732 octeon_setup_delays();
744 733
745 /* 734 /*
@@ -779,12 +768,6 @@ void __init prom_init(void)
779 MAX_MEMORY = 32ull << 30; 768 MAX_MEMORY = 32ull << 30;
780 if (*p == '@') 769 if (*p == '@')
781 RESERVE_LOW_MEM = memparse(p + 1, &p); 770 RESERVE_LOW_MEM = memparse(p + 1, &p);
782 } else if (strcmp(arg, "ecc_verbose") == 0) {
783#ifdef CONFIG_CAVIUM_REPORT_SINGLE_BIT_ECC
784 __cvmx_interrupt_ecc_report_single_bit_errors = 1;
785 pr_notice("Reporting of single bit ECC errors is "
786 "turned on\n");
787#endif
788#ifdef CONFIG_KEXEC 771#ifdef CONFIG_KEXEC
789 } else if (strncmp(arg, "crashkernel=", 12) == 0) { 772 } else if (strncmp(arg, "crashkernel=", 12) == 0) {
790 crashk_size = memparse(arg+12, &p); 773 crashk_size = memparse(arg+12, &p);
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 67a078ffc464..a7b3ae104d8c 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -218,15 +218,6 @@ void octeon_prepare_cpus(unsigned int max_cpus)
218 */ 218 */
219static void octeon_smp_finish(void) 219static void octeon_smp_finish(void)
220{ 220{
221#ifdef CONFIG_CAVIUM_GDB
222 unsigned long tmp;
223 /* Pulse MCD0 signal on Ctrl-C to stop all the cores. Also set the MCD0
224 to be not masked by this core so we know the signal is received by
225 someone */
226 asm volatile ("dmfc0 %0, $22\n"
227 "ori %0, %0, 0x9100\n" "dmtc0 %0, $22\n" : "=r" (tmp));
228#endif
229
230 octeon_user_io_init(); 221 octeon_user_io_init();
231 222
232 /* to generate the first CPU timer interrupt */ 223 /* to generate the first CPU timer interrupt */
@@ -234,21 +225,6 @@ static void octeon_smp_finish(void)
234 local_irq_enable(); 225 local_irq_enable();
235} 226}
236 227
237/**
238 * Hook for after all CPUs are online
239 */
240static void octeon_cpus_done(void)
241{
242#ifdef CONFIG_CAVIUM_GDB
243 unsigned long tmp;
244 /* Pulse MCD0 signal on Ctrl-C to stop all the cores. Also set the MCD0
245 to be not masked by this core so we know the signal is received by
246 someone */
247 asm volatile ("dmfc0 %0, $22\n"
248 "ori %0, %0, 0x9100\n" "dmtc0 %0, $22\n" : "=r" (tmp));
249#endif
250}
251
252#ifdef CONFIG_HOTPLUG_CPU 228#ifdef CONFIG_HOTPLUG_CPU
253 229
254/* State of each CPU. */ 230/* State of each CPU. */
@@ -405,7 +381,6 @@ struct plat_smp_ops octeon_smp_ops = {
405 .send_ipi_mask = octeon_send_ipi_mask, 381 .send_ipi_mask = octeon_send_ipi_mask,
406 .init_secondary = octeon_init_secondary, 382 .init_secondary = octeon_init_secondary,
407 .smp_finish = octeon_smp_finish, 383 .smp_finish = octeon_smp_finish,
408 .cpus_done = octeon_cpus_done,
409 .boot_secondary = octeon_boot_secondary, 384 .boot_secondary = octeon_boot_secondary,
410 .smp_setup = octeon_smp_setup, 385 .smp_setup = octeon_smp_setup,
411 .prepare_cpus = octeon_prepare_cpus, 386 .prepare_cpus = octeon_prepare_cpus,
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
index e3a3836508ec..134879c1310a 100644
--- a/arch/mips/configs/ath79_defconfig
+++ b/arch/mips/configs/ath79_defconfig
@@ -46,7 +46,6 @@ CONFIG_MTD=y
46CONFIG_MTD_REDBOOT_PARTS=y 46CONFIG_MTD_REDBOOT_PARTS=y
47CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-2 47CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-2
48CONFIG_MTD_CMDLINE_PARTS=y 48CONFIG_MTD_CMDLINE_PARTS=y
49CONFIG_MTD_CHAR=y
50CONFIG_MTD_BLOCK=y 49CONFIG_MTD_BLOCK=y
51CONFIG_MTD_CFI=y 50CONFIG_MTD_CFI=y
52CONFIG_MTD_JEDECPROBE=y 51CONFIG_MTD_JEDECPROBE=y
@@ -54,7 +53,7 @@ CONFIG_MTD_CFI_AMDSTD=y
54CONFIG_MTD_COMPLEX_MAPPINGS=y 53CONFIG_MTD_COMPLEX_MAPPINGS=y
55CONFIG_MTD_PHYSMAP=y 54CONFIG_MTD_PHYSMAP=y
56CONFIG_MTD_M25P80=y 55CONFIG_MTD_M25P80=y
57# CONFIG_M25PXX_USE_FAST_READ is not set 56CONFIG_MTD_SPI_NOR=y
58CONFIG_NETDEVICES=y 57CONFIG_NETDEVICES=y
59# CONFIG_NET_PACKET_ENGINE is not set 58# CONFIG_NET_PACKET_ENGINE is not set
60CONFIG_ATH_COMMON=m 59CONFIG_ATH_COMMON=m
diff --git a/arch/mips/configs/db1xxx_defconfig b/arch/mips/configs/db1xxx_defconfig
index c99b6eeda90b..a64b30b96a0d 100644
--- a/arch/mips/configs/db1xxx_defconfig
+++ b/arch/mips/configs/db1xxx_defconfig
@@ -113,6 +113,7 @@ CONFIG_MTD_NAND=y
113CONFIG_MTD_NAND_ECC_BCH=y 113CONFIG_MTD_NAND_ECC_BCH=y
114CONFIG_MTD_NAND_AU1550=y 114CONFIG_MTD_NAND_AU1550=y
115CONFIG_MTD_NAND_PLATFORM=y 115CONFIG_MTD_NAND_PLATFORM=y
116CONFIG_MTD_SPI_NOR=y
116CONFIG_EEPROM_AT24=y 117CONFIG_EEPROM_AT24=y
117CONFIG_EEPROM_AT25=y 118CONFIG_EEPROM_AT25=y
118CONFIG_SCSI_TGT=y 119CONFIG_SCSI_TGT=y
diff --git a/arch/mips/configs/maltasmtc_defconfig b/arch/mips/configs/maltasmtc_defconfig
deleted file mode 100644
index eb316447588c..000000000000
--- a/arch/mips/configs/maltasmtc_defconfig
+++ /dev/null
@@ -1,196 +0,0 @@
1CONFIG_MIPS_MALTA=y
2CONFIG_CPU_LITTLE_ENDIAN=y
3CONFIG_CPU_MIPS32_R2=y
4CONFIG_PAGE_SIZE_16KB=y
5CONFIG_MIPS_MT_SMTC=y
6# CONFIG_MIPS_MT_FPAFF is not set
7CONFIG_NR_CPUS=9
8CONFIG_HZ_48=y
9CONFIG_LOCALVERSION="smtc"
10CONFIG_SYSVIPC=y
11CONFIG_POSIX_MQUEUE=y
12CONFIG_AUDIT=y
13CONFIG_IKCONFIG=y
14CONFIG_IKCONFIG_PROC=y
15CONFIG_LOG_BUF_SHIFT=15
16CONFIG_SYSCTL_SYSCALL=y
17CONFIG_EMBEDDED=y
18CONFIG_SLAB=y
19CONFIG_MODULES=y
20CONFIG_MODULE_UNLOAD=y
21CONFIG_MODVERSIONS=y
22CONFIG_MODULE_SRCVERSION_ALL=y
23# CONFIG_BLK_DEV_BSG is not set
24CONFIG_PCI=y
25# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
26CONFIG_NET=y
27CONFIG_PACKET=y
28CONFIG_UNIX=y
29CONFIG_XFRM_USER=m
30CONFIG_NET_KEY=y
31CONFIG_INET=y
32CONFIG_IP_MULTICAST=y
33CONFIG_IP_ADVANCED_ROUTER=y
34CONFIG_IP_MULTIPLE_TABLES=y
35CONFIG_IP_ROUTE_MULTIPATH=y
36CONFIG_IP_ROUTE_VERBOSE=y
37CONFIG_IP_PNP=y
38CONFIG_IP_PNP_DHCP=y
39CONFIG_IP_PNP_BOOTP=y
40CONFIG_NET_IPIP=m
41CONFIG_IP_MROUTE=y
42CONFIG_IP_PIMSM_V1=y
43CONFIG_IP_PIMSM_V2=y
44CONFIG_SYN_COOKIES=y
45CONFIG_INET_AH=m
46CONFIG_INET_ESP=m
47CONFIG_INET_IPCOMP=m
48# CONFIG_INET_LRO is not set
49CONFIG_INET6_AH=m
50CONFIG_INET6_ESP=m
51CONFIG_INET6_IPCOMP=m
52CONFIG_IPV6_TUNNEL=m
53CONFIG_BRIDGE=m
54CONFIG_VLAN_8021Q=m
55CONFIG_ATALK=m
56CONFIG_DEV_APPLETALK=m
57CONFIG_IPDDP=m
58CONFIG_IPDDP_ENCAP=y
59CONFIG_NET_SCHED=y
60CONFIG_NET_SCH_CBQ=m
61CONFIG_NET_SCH_HTB=m
62CONFIG_NET_SCH_HFSC=m
63CONFIG_NET_SCH_PRIO=m
64CONFIG_NET_SCH_RED=m
65CONFIG_NET_SCH_SFQ=m
66CONFIG_NET_SCH_TEQL=m
67CONFIG_NET_SCH_TBF=m
68CONFIG_NET_SCH_GRED=m
69CONFIG_NET_SCH_DSMARK=m
70CONFIG_NET_SCH_NETEM=m
71CONFIG_NET_SCH_INGRESS=m
72CONFIG_NET_CLS_BASIC=m
73CONFIG_NET_CLS_TCINDEX=m
74CONFIG_NET_CLS_ROUTE4=m
75CONFIG_NET_CLS_FW=m
76CONFIG_NET_CLS_U32=m
77CONFIG_NET_CLS_RSVP=m
78CONFIG_NET_CLS_RSVP6=m
79CONFIG_NET_CLS_ACT=y
80CONFIG_NET_ACT_POLICE=y
81CONFIG_NET_CLS_IND=y
82# CONFIG_WIRELESS is not set
83CONFIG_DEVTMPFS=y
84CONFIG_BLK_DEV_LOOP=y
85CONFIG_BLK_DEV_CRYPTOLOOP=m
86CONFIG_IDE=y
87# CONFIG_IDE_PROC_FS is not set
88# CONFIG_IDEPCI_PCIBUS_ORDER is not set
89CONFIG_BLK_DEV_GENERIC=y
90CONFIG_BLK_DEV_PIIX=y
91CONFIG_SCSI=y
92CONFIG_BLK_DEV_SD=y
93CONFIG_CHR_DEV_SG=y
94# CONFIG_SCSI_LOWLEVEL is not set
95CONFIG_NETDEVICES=y
96# CONFIG_NET_VENDOR_3COM is not set
97# CONFIG_NET_VENDOR_ADAPTEC is not set
98# CONFIG_NET_VENDOR_ALTEON is not set
99CONFIG_PCNET32=y
100# CONFIG_NET_VENDOR_ATHEROS is not set
101# CONFIG_NET_VENDOR_BROADCOM is not set
102# CONFIG_NET_VENDOR_BROCADE is not set
103# CONFIG_NET_VENDOR_CHELSIO is not set
104# CONFIG_NET_VENDOR_CISCO is not set
105# CONFIG_NET_VENDOR_DEC is not set
106# CONFIG_NET_VENDOR_DLINK is not set
107# CONFIG_NET_VENDOR_EMULEX is not set
108# CONFIG_NET_VENDOR_EXAR is not set
109# CONFIG_NET_VENDOR_HP is not set
110# CONFIG_NET_VENDOR_INTEL is not set
111# CONFIG_NET_VENDOR_MARVELL is not set
112# CONFIG_NET_VENDOR_MELLANOX is not set
113# CONFIG_NET_VENDOR_MICREL is not set
114# CONFIG_NET_VENDOR_MYRI is not set
115# CONFIG_NET_VENDOR_NATSEMI is not set
116# CONFIG_NET_VENDOR_NVIDIA is not set
117# CONFIG_NET_VENDOR_OKI is not set
118# CONFIG_NET_PACKET_ENGINE is not set
119# CONFIG_NET_VENDOR_QLOGIC is not set
120# CONFIG_NET_VENDOR_REALTEK is not set
121# CONFIG_NET_VENDOR_RDC is not set
122# CONFIG_NET_VENDOR_SEEQ is not set
123# CONFIG_NET_VENDOR_SILAN is not set
124# CONFIG_NET_VENDOR_SIS is not set
125# CONFIG_NET_VENDOR_SMSC is not set
126# CONFIG_NET_VENDOR_STMICRO is not set
127# CONFIG_NET_VENDOR_SUN is not set
128# CONFIG_NET_VENDOR_TEHUTI is not set
129# CONFIG_NET_VENDOR_TI is not set
130# CONFIG_NET_VENDOR_TOSHIBA is not set
131# CONFIG_NET_VENDOR_VIA is not set
132# CONFIG_WLAN is not set
133# CONFIG_VT is not set
134CONFIG_LEGACY_PTY_COUNT=16
135CONFIG_SERIAL_8250=y
136CONFIG_SERIAL_8250_CONSOLE=y
137CONFIG_HW_RANDOM=y
138# CONFIG_HWMON is not set
139CONFIG_VIDEO_OUTPUT_CONTROL=m
140CONFIG_FB=y
141CONFIG_FIRMWARE_EDID=y
142CONFIG_FB_MATROX=y
143CONFIG_FB_MATROX_G=y
144CONFIG_USB=y
145CONFIG_USB_EHCI_HCD=y
146# CONFIG_USB_EHCI_TT_NEWSCHED is not set
147CONFIG_USB_UHCI_HCD=y
148CONFIG_USB_STORAGE=y
149CONFIG_NEW_LEDS=y
150CONFIG_LEDS_CLASS=y
151CONFIG_LEDS_TRIGGERS=y
152CONFIG_LEDS_TRIGGER_TIMER=y
153CONFIG_LEDS_TRIGGER_IDE_DISK=y
154CONFIG_LEDS_TRIGGER_HEARTBEAT=y
155CONFIG_LEDS_TRIGGER_BACKLIGHT=y
156CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
157CONFIG_RTC_CLASS=y
158CONFIG_RTC_DRV_CMOS=y
159CONFIG_EXT2_FS=y
160CONFIG_EXT3_FS=y
161# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
162CONFIG_XFS_FS=y
163CONFIG_XFS_QUOTA=y
164CONFIG_XFS_POSIX_ACL=y
165CONFIG_QUOTA=y
166CONFIG_QFMT_V2=y
167CONFIG_MSDOS_FS=m
168CONFIG_VFAT_FS=m
169CONFIG_PROC_KCORE=y
170CONFIG_TMPFS=y
171CONFIG_NFS_FS=y
172CONFIG_ROOT_NFS=y
173CONFIG_CIFS=m
174CONFIG_CIFS_WEAK_PW_HASH=y
175CONFIG_CIFS_XATTR=y
176CONFIG_CIFS_POSIX=y
177CONFIG_NLS_CODEPAGE_437=m
178CONFIG_NLS_ISO8859_1=m
179# CONFIG_FTRACE is not set
180CONFIG_CRYPTO_NULL=m
181CONFIG_CRYPTO_PCBC=m
182CONFIG_CRYPTO_HMAC=y
183CONFIG_CRYPTO_MICHAEL_MIC=m
184CONFIG_CRYPTO_SHA512=m
185CONFIG_CRYPTO_TGR192=m
186CONFIG_CRYPTO_WP512=m
187CONFIG_CRYPTO_ANUBIS=m
188CONFIG_CRYPTO_BLOWFISH=m
189CONFIG_CRYPTO_CAST5=m
190CONFIG_CRYPTO_CAST6=m
191CONFIG_CRYPTO_KHAZAD=m
192CONFIG_CRYPTO_SERPENT=m
193CONFIG_CRYPTO_TEA=m
194CONFIG_CRYPTO_TWOFISH=m
195# CONFIG_CRYPTO_ANSI_CPRNG is not set
196# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig
index 10ef3bed5f43..f8a32315bb38 100644
--- a/arch/mips/configs/maltasmvp_defconfig
+++ b/arch/mips/configs/maltasmvp_defconfig
@@ -4,10 +4,9 @@ CONFIG_CPU_MIPS32_R2=y
4CONFIG_PAGE_SIZE_16KB=y 4CONFIG_PAGE_SIZE_16KB=y
5CONFIG_MIPS_MT_SMP=y 5CONFIG_MIPS_MT_SMP=y
6CONFIG_SCHED_SMT=y 6CONFIG_SCHED_SMT=y
7CONFIG_MIPS_CMP=y 7CONFIG_MIPS_CPS=y
8CONFIG_NR_CPUS=8 8CONFIG_NR_CPUS=8
9CONFIG_HZ_100=y 9CONFIG_HZ_100=y
10CONFIG_LOCALVERSION="cmp"
11CONFIG_SYSVIPC=y 10CONFIG_SYSVIPC=y
12CONFIG_POSIX_MQUEUE=y 11CONFIG_POSIX_MQUEUE=y
13CONFIG_AUDIT=y 12CONFIG_AUDIT=y
diff --git a/arch/mips/configs/maltasmvp_eva_defconfig b/arch/mips/configs/maltasmvp_eva_defconfig
index 2d3002cba102..c83338a39917 100644
--- a/arch/mips/configs/maltasmvp_eva_defconfig
+++ b/arch/mips/configs/maltasmvp_eva_defconfig
@@ -5,10 +5,9 @@ CONFIG_CPU_MIPS32_3_5_FEATURES=y
5CONFIG_PAGE_SIZE_16KB=y 5CONFIG_PAGE_SIZE_16KB=y
6CONFIG_MIPS_MT_SMP=y 6CONFIG_MIPS_MT_SMP=y
7CONFIG_SCHED_SMT=y 7CONFIG_SCHED_SMT=y
8CONFIG_MIPS_CMP=y 8CONFIG_MIPS_CPS=y
9CONFIG_NR_CPUS=8 9CONFIG_NR_CPUS=8
10CONFIG_HZ_100=y 10CONFIG_HZ_100=y
11CONFIG_LOCALVERSION="cmp"
12CONFIG_SYSVIPC=y 11CONFIG_SYSVIPC=y
13CONFIG_POSIX_MQUEUE=y 12CONFIG_POSIX_MQUEUE=y
14CONFIG_AUDIT=y 13CONFIG_AUDIT=y
diff --git a/arch/mips/configs/mips_paravirt_defconfig b/arch/mips/configs/mips_paravirt_defconfig
new file mode 100644
index 000000000000..84cfcb4bf2ea
--- /dev/null
+++ b/arch/mips/configs/mips_paravirt_defconfig
@@ -0,0 +1,103 @@
1CONFIG_MIPS_PARAVIRT=y
2CONFIG_CPU_MIPS64_R2=y
3CONFIG_64BIT=y
4CONFIG_TRANSPARENT_HUGEPAGE=y
5CONFIG_SMP=y
6CONFIG_HZ_1000=y
7CONFIG_PREEMPT=y
8CONFIG_SYSVIPC=y
9CONFIG_BSD_PROCESS_ACCT=y
10CONFIG_BSD_PROCESS_ACCT_V3=y
11CONFIG_IKCONFIG=y
12CONFIG_IKCONFIG_PROC=y
13CONFIG_LOG_BUF_SHIFT=14
14CONFIG_RELAY=y
15CONFIG_BLK_DEV_INITRD=y
16CONFIG_EXPERT=y
17CONFIG_SLAB=y
18CONFIG_MODULES=y
19CONFIG_MODULE_UNLOAD=y
20# CONFIG_BLK_DEV_BSG is not set
21CONFIG_PCI=y
22CONFIG_MIPS32_COMPAT=y
23CONFIG_MIPS32_O32=y
24CONFIG_MIPS32_N32=y
25CONFIG_NET=y
26CONFIG_PACKET=y
27CONFIG_UNIX=y
28CONFIG_INET=y
29CONFIG_IP_MULTICAST=y
30CONFIG_IP_ADVANCED_ROUTER=y
31CONFIG_IP_MULTIPLE_TABLES=y
32CONFIG_IP_ROUTE_MULTIPATH=y
33CONFIG_IP_ROUTE_VERBOSE=y
34CONFIG_IP_PNP=y
35CONFIG_IP_PNP_DHCP=y
36CONFIG_IP_PNP_BOOTP=y
37CONFIG_IP_PNP_RARP=y
38CONFIG_IP_MROUTE=y
39CONFIG_IP_PIMSM_V1=y
40CONFIG_IP_PIMSM_V2=y
41CONFIG_SYN_COOKIES=y
42# CONFIG_INET_LRO is not set
43CONFIG_IPV6=y
44# CONFIG_WIRELESS is not set
45CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
46# CONFIG_FW_LOADER is not set
47CONFIG_BLK_DEV_LOOP=y
48CONFIG_VIRTIO_BLK=y
49CONFIG_SCSI=y
50CONFIG_BLK_DEV_SD=y
51CONFIG_NETDEVICES=y
52CONFIG_VIRTIO_NET=y
53# CONFIG_NET_VENDOR_BROADCOM is not set
54# CONFIG_NET_VENDOR_INTEL is not set
55# CONFIG_NET_VENDOR_MARVELL is not set
56# CONFIG_NET_VENDOR_MICREL is not set
57# CONFIG_NET_VENDOR_NATSEMI is not set
58# CONFIG_NET_VENDOR_SMSC is not set
59# CONFIG_NET_VENDOR_STMICRO is not set
60# CONFIG_NET_VENDOR_WIZNET is not set
61CONFIG_PHYLIB=y
62CONFIG_MARVELL_PHY=y
63CONFIG_BROADCOM_PHY=y
64CONFIG_BCM87XX_PHY=y
65# CONFIG_WLAN is not set
66# CONFIG_INPUT is not set
67# CONFIG_SERIO is not set
68# CONFIG_VT is not set
69CONFIG_VIRTIO_CONSOLE=y
70# CONFIG_HW_RANDOM is not set
71# CONFIG_HWMON is not set
72# CONFIG_USB_SUPPORT is not set
73CONFIG_VIRTIO_PCI=y
74CONFIG_VIRTIO_BALLOON=y
75CONFIG_VIRTIO_MMIO=y
76# CONFIG_IOMMU_SUPPORT is not set
77CONFIG_EXT4_FS=y
78CONFIG_EXT4_FS_POSIX_ACL=y
79CONFIG_EXT4_FS_SECURITY=y
80CONFIG_MSDOS_FS=y
81CONFIG_VFAT_FS=y
82CONFIG_PROC_KCORE=y
83CONFIG_TMPFS=y
84CONFIG_HUGETLBFS=y
85# CONFIG_MISC_FILESYSTEMS is not set
86CONFIG_NFS_FS=y
87CONFIG_NFS_V4=y
88CONFIG_NFS_V4_1=y
89CONFIG_ROOT_NFS=y
90CONFIG_NLS_CODEPAGE_437=y
91CONFIG_NLS_ASCII=y
92CONFIG_NLS_ISO8859_1=y
93CONFIG_NLS_UTF8=y
94CONFIG_DEBUG_INFO=y
95CONFIG_DEBUG_FS=y
96CONFIG_MAGIC_SYSRQ=y
97# CONFIG_SCHED_DEBUG is not set
98# CONFIG_FTRACE is not set
99CONFIG_CRYPTO_CBC=y
100CONFIG_CRYPTO_HMAC=y
101CONFIG_CRYPTO_MD5=y
102CONFIG_CRYPTO_DES=y
103# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/mips/configs/rt305x_defconfig b/arch/mips/configs/rt305x_defconfig
index d1741bcf8949..d14ae2fa7d13 100644
--- a/arch/mips/configs/rt305x_defconfig
+++ b/arch/mips/configs/rt305x_defconfig
@@ -81,7 +81,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
81# CONFIG_FIRMWARE_IN_KERNEL is not set 81# CONFIG_FIRMWARE_IN_KERNEL is not set
82CONFIG_MTD=y 82CONFIG_MTD=y
83CONFIG_MTD_CMDLINE_PARTS=y 83CONFIG_MTD_CMDLINE_PARTS=y
84CONFIG_MTD_CHAR=y
85CONFIG_MTD_BLOCK=y 84CONFIG_MTD_BLOCK=y
86CONFIG_MTD_CFI=y 85CONFIG_MTD_CFI=y
87CONFIG_MTD_CFI_AMDSTD=y 86CONFIG_MTD_CFI_AMDSTD=y
@@ -89,6 +88,7 @@ CONFIG_MTD_COMPLEX_MAPPINGS=y
89CONFIG_MTD_PHYSMAP=y 88CONFIG_MTD_PHYSMAP=y
90CONFIG_MTD_PHYSMAP_OF=y 89CONFIG_MTD_PHYSMAP_OF=y
91CONFIG_MTD_M25P80=y 90CONFIG_MTD_M25P80=y
91CONFIG_MTD_SPI_NOR=y
92CONFIG_EEPROM_93CX6=m 92CONFIG_EEPROM_93CX6=m
93CONFIG_SCSI=y 93CONFIG_SCSI=y
94CONFIG_BLK_DEV_SD=y 94CONFIG_BLK_DEV_SD=y
diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c
index 56e6e2c23683..41bbffd9cc0e 100644
--- a/arch/mips/dec/setup.c
+++ b/arch/mips/dec/setup.c
@@ -23,6 +23,7 @@
23#include <asm/bootinfo.h> 23#include <asm/bootinfo.h>
24#include <asm/cpu.h> 24#include <asm/cpu.h>
25#include <asm/cpu-features.h> 25#include <asm/cpu-features.h>
26#include <asm/cpu-type.h>
26#include <asm/irq.h> 27#include <asm/irq.h>
27#include <asm/irq_cpu.h> 28#include <asm/irq_cpu.h>
28#include <asm/mipsregs.h> 29#include <asm/mipsregs.h>
@@ -748,6 +749,10 @@ void __init arch_init_irq(void)
748 cpu_fpu_mask = 0; 749 cpu_fpu_mask = 0;
749 dec_interrupt[DEC_IRQ_FPU] = -1; 750 dec_interrupt[DEC_IRQ_FPU] = -1;
750 } 751 }
752 /* Free the halt interrupt unused on R4k systems. */
753 if (current_cpu_type() == CPU_R4000SC ||
754 current_cpu_type() == CPU_R4400SC)
755 dec_interrupt[DEC_IRQ_HALT] = -1;
751 756
752 /* Register board interrupts: FPU and cascade. */ 757 /* Register board interrupts: FPU and cascade. */
753 if (dec_interrupt[DEC_IRQ_FPU] >= 0) 758 if (dec_interrupt[DEC_IRQ_FPU] >= 0)
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index b464b8b1147a..935543f14538 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -17,26 +17,8 @@
17#ifdef CONFIG_64BIT 17#ifdef CONFIG_64BIT
18#include <asm/asmmacro-64.h> 18#include <asm/asmmacro-64.h>
19#endif 19#endif
20#ifdef CONFIG_MIPS_MT_SMTC
21#include <asm/mipsmtregs.h>
22#endif
23
24#ifdef CONFIG_MIPS_MT_SMTC
25 .macro local_irq_enable reg=t0
26 mfc0 \reg, CP0_TCSTATUS
27 ori \reg, \reg, TCSTATUS_IXMT
28 xori \reg, \reg, TCSTATUS_IXMT
29 mtc0 \reg, CP0_TCSTATUS
30 _ehb
31 .endm
32 20
33 .macro local_irq_disable reg=t0 21#ifdef CONFIG_CPU_MIPSR2
34 mfc0 \reg, CP0_TCSTATUS
35 ori \reg, \reg, TCSTATUS_IXMT
36 mtc0 \reg, CP0_TCSTATUS
37 _ehb
38 .endm
39#elif defined(CONFIG_CPU_MIPSR2)
40 .macro local_irq_enable reg=t0 22 .macro local_irq_enable reg=t0
41 ei 23 ei
42 irq_enable_hazard 24 irq_enable_hazard
@@ -71,7 +53,7 @@
71 sw \reg, TI_PRE_COUNT($28) 53 sw \reg, TI_PRE_COUNT($28)
72#endif 54#endif
73 .endm 55 .endm
74#endif /* CONFIG_MIPS_MT_SMTC */ 56#endif /* CONFIG_CPU_MIPSR2 */
75 57
76 .macro fpu_save_16even thread tmp=t0 58 .macro fpu_save_16even thread tmp=t0
77 cfc1 \tmp, fcr31 59 cfc1 \tmp, fcr31
@@ -267,13 +249,35 @@
267 .set pop 249 .set pop
268 .endm 250 .endm
269#else 251#else
252
253#ifdef CONFIG_CPU_MICROMIPS
254#define CFC_MSA_INSN 0x587e0056
255#define CTC_MSA_INSN 0x583e0816
256#define LDD_MSA_INSN 0x58000837
257#define STD_MSA_INSN 0x5800083f
258#define COPY_UW_MSA_INSN 0x58f00056
259#define COPY_UD_MSA_INSN 0x58f80056
260#define INSERT_W_MSA_INSN 0x59300816
261#define INSERT_D_MSA_INSN 0x59380816
262#else
263#define CFC_MSA_INSN 0x787e0059
264#define CTC_MSA_INSN 0x783e0819
265#define LDD_MSA_INSN 0x78000823
266#define STD_MSA_INSN 0x78000827
267#define COPY_UW_MSA_INSN 0x78f00059
268#define COPY_UD_MSA_INSN 0x78f80059
269#define INSERT_W_MSA_INSN 0x79300819
270#define INSERT_D_MSA_INSN 0x79380819
271#endif
272
270 /* 273 /*
271 * Temporary until all toolchains in use include MSA support. 274 * Temporary until all toolchains in use include MSA support.
272 */ 275 */
273 .macro cfcmsa rd, cs 276 .macro cfcmsa rd, cs
274 .set push 277 .set push
275 .set noat 278 .set noat
276 .word 0x787e0059 | (\cs << 11) 279 .insn
280 .word CFC_MSA_INSN | (\cs << 11)
277 move \rd, $1 281 move \rd, $1
278 .set pop 282 .set pop
279 .endm 283 .endm
@@ -282,7 +286,7 @@
282 .set push 286 .set push
283 .set noat 287 .set noat
284 move $1, \rs 288 move $1, \rs
285 .word 0x783e0819 | (\cd << 6) 289 .word CTC_MSA_INSN | (\cd << 6)
286 .set pop 290 .set pop
287 .endm 291 .endm
288 292
@@ -290,7 +294,7 @@
290 .set push 294 .set push
291 .set noat 295 .set noat
292 add $1, \base, \off 296 add $1, \base, \off
293 .word 0x78000823 | (\wd << 6) 297 .word LDD_MSA_INSN | (\wd << 6)
294 .set pop 298 .set pop
295 .endm 299 .endm
296 300
@@ -298,14 +302,15 @@
298 .set push 302 .set push
299 .set noat 303 .set noat
300 add $1, \base, \off 304 add $1, \base, \off
301 .word 0x78000827 | (\wd << 6) 305 .word STD_MSA_INSN | (\wd << 6)
302 .set pop 306 .set pop
303 .endm 307 .endm
304 308
305 .macro copy_u_w rd, ws, n 309 .macro copy_u_w rd, ws, n
306 .set push 310 .set push
307 .set noat 311 .set noat
308 .word 0x78f00059 | (\n << 16) | (\ws << 11) 312 .insn
313 .word COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11)
309 /* move triggers an assembler bug... */ 314 /* move triggers an assembler bug... */
310 or \rd, $1, zero 315 or \rd, $1, zero
311 .set pop 316 .set pop
@@ -314,7 +319,8 @@
314 .macro copy_u_d rd, ws, n 319 .macro copy_u_d rd, ws, n
315 .set push 320 .set push
316 .set noat 321 .set noat
317 .word 0x78f80059 | (\n << 16) | (\ws << 11) 322 .insn
323 .word COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11)
318 /* move triggers an assembler bug... */ 324 /* move triggers an assembler bug... */
319 or \rd, $1, zero 325 or \rd, $1, zero
320 .set pop 326 .set pop
@@ -325,7 +331,7 @@
325 .set noat 331 .set noat
326 /* move triggers an assembler bug... */ 332 /* move triggers an assembler bug... */
327 or $1, \rs, zero 333 or $1, \rs, zero
328 .word 0x79300819 | (\n << 16) | (\wd << 6) 334 .word INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6)
329 .set pop 335 .set pop
330 .endm 336 .endm
331 337
@@ -334,7 +340,7 @@
334 .set noat 340 .set noat
335 /* move triggers an assembler bug... */ 341 /* move triggers an assembler bug... */
336 or $1, \rs, zero 342 or $1, \rs, zero
337 .word 0x79380819 | (\n << 16) | (\wd << 6) 343 .word INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6)
338 .set pop 344 .set pop
339 .endm 345 .endm
340#endif 346#endif
diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h
index e28a3e0eb3cb..de781cf54bc7 100644
--- a/arch/mips/include/asm/branch.h
+++ b/arch/mips/include/asm/branch.h
@@ -8,6 +8,8 @@
8#ifndef _ASM_BRANCH_H 8#ifndef _ASM_BRANCH_H
9#define _ASM_BRANCH_H 9#define _ASM_BRANCH_H
10 10
11#include <asm/cpu-features.h>
12#include <asm/mipsregs.h>
11#include <asm/ptrace.h> 13#include <asm/ptrace.h>
12#include <asm/inst.h> 14#include <asm/inst.h>
13 15
@@ -18,12 +20,40 @@ extern int __compute_return_epc_for_insn(struct pt_regs *regs,
18extern int __microMIPS_compute_return_epc(struct pt_regs *regs); 20extern int __microMIPS_compute_return_epc(struct pt_regs *regs);
19extern int __MIPS16e_compute_return_epc(struct pt_regs *regs); 21extern int __MIPS16e_compute_return_epc(struct pt_regs *regs);
20 22
23/*
24 * microMIPS bitfields
25 */
26#define MM_POOL32A_MINOR_MASK 0x3f
27#define MM_POOL32A_MINOR_SHIFT 0x6
28#define MM_MIPS32_COND_FC 0x30
29
30extern int __mm_isBranchInstr(struct pt_regs *regs,
31 struct mm_decoded_insn dec_insn, unsigned long *contpc);
32
33static inline int mm_isBranchInstr(struct pt_regs *regs,
34 struct mm_decoded_insn dec_insn, unsigned long *contpc)
35{
36 if (!cpu_has_mmips)
37 return 0;
38
39 return __mm_isBranchInstr(regs, dec_insn, contpc);
40}
21 41
22static inline int delay_slot(struct pt_regs *regs) 42static inline int delay_slot(struct pt_regs *regs)
23{ 43{
24 return regs->cp0_cause & CAUSEF_BD; 44 return regs->cp0_cause & CAUSEF_BD;
25} 45}
26 46
47static inline void clear_delay_slot(struct pt_regs *regs)
48{
49 regs->cp0_cause &= ~CAUSEF_BD;
50}
51
52static inline void set_delay_slot(struct pt_regs *regs)
53{
54 regs->cp0_cause |= CAUSEF_BD;
55}
56
27static inline unsigned long exception_epc(struct pt_regs *regs) 57static inline unsigned long exception_epc(struct pt_regs *regs)
28{ 58{
29 if (likely(!delay_slot(regs))) 59 if (likely(!delay_slot(regs)))
diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
index 69468ded2828..e08381a37f8b 100644
--- a/arch/mips/include/asm/cacheflush.h
+++ b/arch/mips/include/asm/cacheflush.h
@@ -113,6 +113,12 @@ unsigned long run_uncached(void *func);
113 113
114extern void *kmap_coherent(struct page *page, unsigned long addr); 114extern void *kmap_coherent(struct page *page, unsigned long addr);
115extern void kunmap_coherent(void); 115extern void kunmap_coherent(void);
116extern void *kmap_noncoherent(struct page *page, unsigned long addr);
117
118static inline void kunmap_noncoherent(void)
119{
120 kunmap_coherent();
121}
116 122
117#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE 123#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
118static inline void flush_kernel_dcache_page(struct page *page) 124static inline void flush_kernel_dcache_page(struct page *page)
diff --git a/arch/mips/include/asm/cmp.h b/arch/mips/include/asm/cmp.h
index 89a73fb93ae6..033d97303c85 100644
--- a/arch/mips/include/asm/cmp.h
+++ b/arch/mips/include/asm/cmp.h
@@ -10,7 +10,6 @@ extern void cmp_smp_setup(void);
10extern void cmp_smp_finish(void); 10extern void cmp_smp_finish(void);
11extern void cmp_boot_secondary(int cpu, struct task_struct *t); 11extern void cmp_boot_secondary(int cpu, struct task_struct *t);
12extern void cmp_init_secondary(void); 12extern void cmp_init_secondary(void);
13extern void cmp_cpus_done(void);
14extern void cmp_prepare_cpus(unsigned int max_cpus); 13extern void cmp_prepare_cpus(unsigned int max_cpus);
15 14
16/* This is platform specific */ 15/* This is platform specific */
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index f56cc975b92f..c7d8c997d93e 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -110,9 +110,15 @@
110#ifndef cpu_has_smartmips 110#ifndef cpu_has_smartmips
111#define cpu_has_smartmips (cpu_data[0].ases & MIPS_ASE_SMARTMIPS) 111#define cpu_has_smartmips (cpu_data[0].ases & MIPS_ASE_SMARTMIPS)
112#endif 112#endif
113
113#ifndef cpu_has_rixi 114#ifndef cpu_has_rixi
114#define cpu_has_rixi (cpu_data[0].options & MIPS_CPU_RIXI) 115# ifdef CONFIG_64BIT
116# define cpu_has_rixi (cpu_data[0].options & MIPS_CPU_RIXI)
117# else /* CONFIG_32BIT */
118# define cpu_has_rixi ((cpu_data[0].options & MIPS_CPU_RIXI) && !cpu_has_64bits)
119# endif
115#endif 120#endif
121
116#ifndef cpu_has_mmips 122#ifndef cpu_has_mmips
117# ifdef CONFIG_SYS_SUPPORTS_MICROMIPS 123# ifdef CONFIG_SYS_SUPPORTS_MICROMIPS
118# define cpu_has_mmips (cpu_data[0].options & MIPS_CPU_MICROMIPS) 124# define cpu_has_mmips (cpu_data[0].options & MIPS_CPU_MICROMIPS)
@@ -120,6 +126,7 @@
120# define cpu_has_mmips 0 126# define cpu_has_mmips 0
121# endif 127# endif
122#endif 128#endif
129
123#ifndef cpu_has_vtag_icache 130#ifndef cpu_has_vtag_icache
124#define cpu_has_vtag_icache (cpu_data[0].icache.flags & MIPS_CACHE_VTAG) 131#define cpu_has_vtag_icache (cpu_data[0].icache.flags & MIPS_CACHE_VTAG)
125#endif 132#endif
@@ -183,6 +190,17 @@
183/* 190/*
184 * Shortcuts ... 191 * Shortcuts ...
185 */ 192 */
193#define cpu_has_mips_2_3_4_5 (cpu_has_mips_2 | cpu_has_mips_3_4_5)
194#define cpu_has_mips_3_4_5 (cpu_has_mips_3 | cpu_has_mips_4_5)
195#define cpu_has_mips_4_5 (cpu_has_mips_4 | cpu_has_mips_5)
196
197#define cpu_has_mips_2_3_4_5_r (cpu_has_mips_2 | cpu_has_mips_3_4_5_r)
198#define cpu_has_mips_3_4_5_r (cpu_has_mips_3 | cpu_has_mips_4_5_r)
199#define cpu_has_mips_4_5_r (cpu_has_mips_4 | cpu_has_mips_5_r)
200#define cpu_has_mips_5_r (cpu_has_mips_5 | cpu_has_mips_r)
201
202#define cpu_has_mips_4_5_r2 (cpu_has_mips_4_5 | cpu_has_mips_r2)
203
186#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2) 204#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2)
187#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2) 205#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2)
188#define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1) 206#define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1)
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index ff2707ab3295..47d5967ce7ef 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -65,18 +65,13 @@ struct cpuinfo_mips {
65#ifdef CONFIG_64BIT 65#ifdef CONFIG_64BIT
66 int vmbits; /* Virtual memory size in bits */ 66 int vmbits; /* Virtual memory size in bits */
67#endif 67#endif
68#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) 68#ifdef CONFIG_MIPS_MT_SMP
69 /* 69 /*
70 * In the MIPS MT "SMTC" model, each TC is considered 70 * There is not necessarily a 1:1 mapping of VPE num to CPU number
71 * to be a "CPU" for the purposes of scheduling, but 71 * in particular on multi-core systems.
72 * exception resources, ASID spaces, etc, are common
73 * to all TCs within the same VPE.
74 */ 72 */
75 int vpe_id; /* Virtual Processor number */ 73 int vpe_id; /* Virtual Processor number */
76#endif 74#endif
77#ifdef CONFIG_MIPS_MT_SMTC
78 int tc_id; /* Thread Context number */
79#endif
80 void *data; /* Additional data */ 75 void *data; /* Additional data */
81 unsigned int watch_reg_count; /* Number that exist */ 76 unsigned int watch_reg_count; /* Number that exist */
82 unsigned int watch_reg_use_cnt; /* Usable by ptrace */ 77 unsigned int watch_reg_use_cnt; /* Usable by ptrace */
@@ -117,7 +112,7 @@ struct proc_cpuinfo_notifier_args {
117 unsigned long n; 112 unsigned long n;
118}; 113};
119 114
120#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) 115#ifdef CONFIG_MIPS_MT_SMP
121# define cpu_vpe_id(cpuinfo) ((cpuinfo)->vpe_id) 116# define cpu_vpe_id(cpuinfo) ((cpuinfo)->vpe_id)
122#else 117#else
123# define cpu_vpe_id(cpuinfo) 0 118# define cpu_vpe_id(cpuinfo) 0
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index 721906130a57..b4e2bd87df50 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -155,9 +155,6 @@ static inline int __pure __get_cpu_type(const int cpu_type)
155 case CPU_RM7000: 155 case CPU_RM7000:
156 case CPU_SR71000: 156 case CPU_SR71000:
157#endif 157#endif
158#ifdef CONFIG_SYS_HAS_CPU_RM9000
159 case CPU_RM9000:
160#endif
161#ifdef CONFIG_SYS_HAS_CPU_SB1 158#ifdef CONFIG_SYS_HAS_CPU_SB1
162 case CPU_SB1: 159 case CPU_SB1:
163 case CPU_SB1A: 160 case CPU_SB1A:
@@ -166,6 +163,7 @@ static inline int __pure __get_cpu_type(const int cpu_type)
166 case CPU_CAVIUM_OCTEON: 163 case CPU_CAVIUM_OCTEON:
167 case CPU_CAVIUM_OCTEON_PLUS: 164 case CPU_CAVIUM_OCTEON_PLUS:
168 case CPU_CAVIUM_OCTEON2: 165 case CPU_CAVIUM_OCTEON2:
166 case CPU_CAVIUM_OCTEON3:
169#endif 167#endif
170 168
171#if defined(CONFIG_SYS_HAS_CPU_BMIPS32_3300) || \ 169#if defined(CONFIG_SYS_HAS_CPU_BMIPS32_3300) || \
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 530eb8b3a68e..129d08701e91 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -201,6 +201,7 @@
201#define PRID_IMP_NETLOGIC_XLP3XX 0x1100 201#define PRID_IMP_NETLOGIC_XLP3XX 0x1100
202#define PRID_IMP_NETLOGIC_XLP2XX 0x1200 202#define PRID_IMP_NETLOGIC_XLP2XX 0x1200
203#define PRID_IMP_NETLOGIC_XLP9XX 0x1500 203#define PRID_IMP_NETLOGIC_XLP9XX 0x1500
204#define PRID_IMP_NETLOGIC_XLP5XX 0x1300
204 205
205/* 206/*
206 * Particular Revision values for bits 7:0 of the PRId register. 207 * Particular Revision values for bits 7:0 of the PRId register.
@@ -281,7 +282,7 @@ enum cpu_type_enum {
281 CPU_R4700, CPU_R5000, CPU_R5500, CPU_NEVADA, CPU_R5432, CPU_R10000, 282 CPU_R4700, CPU_R5000, CPU_R5500, CPU_NEVADA, CPU_R5432, CPU_R10000,
282 CPU_R12000, CPU_R14000, CPU_VR41XX, CPU_VR4111, CPU_VR4121, CPU_VR4122, 283 CPU_R12000, CPU_R14000, CPU_VR41XX, CPU_VR4111, CPU_VR4121, CPU_VR4122,
283 CPU_VR4131, CPU_VR4133, CPU_VR4181, CPU_VR4181A, CPU_RM7000, 284 CPU_VR4131, CPU_VR4133, CPU_VR4181, CPU_VR4181A, CPU_RM7000,
284 CPU_SR71000, CPU_RM9000, CPU_TX49XX, 285 CPU_SR71000, CPU_TX49XX,
285 286
286 /* 287 /*
287 * R8000 class processors 288 * R8000 class processors
diff --git a/arch/mips/include/asm/dec/kn05.h b/arch/mips/include/asm/dec/kn05.h
index 56d22dc8803a..8e14f677e5ef 100644
--- a/arch/mips/include/asm/dec/kn05.h
+++ b/arch/mips/include/asm/dec/kn05.h
@@ -49,12 +49,20 @@
49#define KN4K_RES_15 (15*IOASIC_SLOT_SIZE) /* unused? */ 49#define KN4K_RES_15 (15*IOASIC_SLOT_SIZE) /* unused? */
50 50
51/* 51/*
52 * MB ASIC interrupt bits.
53 */
54#define KN4K_MB_INR_MB 4 /* ??? */
55#define KN4K_MB_INR_MT 3 /* memory, I/O bus read/write errors */
56#define KN4K_MB_INR_RES_2 2 /* unused */
57#define KN4K_MB_INR_RTC 1 /* RTC */
58#define KN4K_MB_INR_TC 0 /* I/O ASIC cascade */
59
60/*
52 * Bits for the MB interrupt register. 61 * Bits for the MB interrupt register.
53 * The register appears read-only. 62 * The register appears read-only.
54 */ 63 */
55#define KN4K_MB_INT_TC (1<<0) /* TURBOchannel? */ 64#define KN4K_MB_INT_IRQ (0x1f<<0) /* CPU Int[4:0] status. */
56#define KN4K_MB_INT_RTC (1<<1) /* RTC? */ 65#define KN4K_MB_INT_IRQ_N(n) (1<<(n)) /* Individual status bits. */
57#define KN4K_MB_INT_MT (1<<3) /* I/O ASIC cascade */
58 66
59/* 67/*
60 * Bits for the MB control & status register. 68 * Bits for the MB control & status register.
@@ -70,6 +78,7 @@
70#define KN4K_MB_CSR_NC (1<<14) /* ??? */ 78#define KN4K_MB_CSR_NC (1<<14) /* ??? */
71#define KN4K_MB_CSR_EE (1<<15) /* (bus) Exception Enable? */ 79#define KN4K_MB_CSR_EE (1<<15) /* (bus) Exception Enable? */
72#define KN4K_MB_CSR_MSK (0x1f<<16) /* CPU Int[4:0] mask */ 80#define KN4K_MB_CSR_MSK (0x1f<<16) /* CPU Int[4:0] mask */
81#define KN4K_MB_CSR_MSK_N(n) (1<<((n)+16)) /* Individual mask bits. */
73#define KN4K_MB_CSR_FW (1<<21) /* ??? */ 82#define KN4K_MB_CSR_FW (1<<21) /* ??? */
74#define KN4K_MB_CSR_W (1<<31) /* ??? */ 83#define KN4K_MB_CSR_W (1<<31) /* ??? */
75 84
diff --git a/arch/mips/include/asm/fixmap.h b/arch/mips/include/asm/fixmap.h
index 8c012af2f451..6842ffafd1e7 100644
--- a/arch/mips/include/asm/fixmap.h
+++ b/arch/mips/include/asm/fixmap.h
@@ -48,11 +48,7 @@
48enum fixed_addresses { 48enum fixed_addresses {
49#define FIX_N_COLOURS 8 49#define FIX_N_COLOURS 8
50 FIX_CMAP_BEGIN, 50 FIX_CMAP_BEGIN,
51#ifdef CONFIG_MIPS_MT_SMTC
52 FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS * 2),
53#else
54 FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * 2), 51 FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * 2),
55#endif
56#ifdef CONFIG_HIGHMEM 52#ifdef CONFIG_HIGHMEM
57 /* reserved pte's for temporary kernel mappings */ 53 /* reserved pte's for temporary kernel mappings */
58 FIX_KMAP_BEGIN = FIX_CMAP_END + 1, 54 FIX_KMAP_BEGIN = FIX_CMAP_END + 1,
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 4d86b72750c7..a939574f8293 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -17,6 +17,7 @@
17#include <asm/mipsregs.h> 17#include <asm/mipsregs.h>
18#include <asm/cpu.h> 18#include <asm/cpu.h>
19#include <asm/cpu-features.h> 19#include <asm/cpu-features.h>
20#include <asm/fpu_emulator.h>
20#include <asm/hazards.h> 21#include <asm/hazards.h>
21#include <asm/processor.h> 22#include <asm/processor.h>
22#include <asm/current.h> 23#include <asm/current.h>
@@ -28,7 +29,6 @@
28struct sigcontext; 29struct sigcontext;
29struct sigcontext32; 30struct sigcontext32;
30 31
31extern void fpu_emulator_init_fpu(void);
32extern void _init_fpu(void); 32extern void _init_fpu(void);
33extern void _save_fp(struct task_struct *); 33extern void _save_fp(struct task_struct *);
34extern void _restore_fp(struct task_struct *); 34extern void _restore_fp(struct task_struct *);
@@ -156,15 +156,16 @@ static inline int init_fpu(void)
156 int ret = 0; 156 int ret = 0;
157 157
158 preempt_disable(); 158 preempt_disable();
159
159 if (cpu_has_fpu) { 160 if (cpu_has_fpu) {
160 ret = __own_fpu(); 161 ret = __own_fpu();
161 if (!ret) 162 if (!ret)
162 _init_fpu(); 163 _init_fpu();
163 } else { 164 } else
164 fpu_emulator_init_fpu(); 165 fpu_emulator_init_fpu();
165 }
166 166
167 preempt_enable(); 167 preempt_enable();
168
168 return ret; 169 return ret;
169} 170}
170 171
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index 2abb587d5ab4..0195745b4b1b 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -23,9 +23,12 @@
23#ifndef _ASM_FPU_EMULATOR_H 23#ifndef _ASM_FPU_EMULATOR_H
24#define _ASM_FPU_EMULATOR_H 24#define _ASM_FPU_EMULATOR_H
25 25
26#include <linux/sched.h>
26#include <asm/break.h> 27#include <asm/break.h>
28#include <asm/thread_info.h>
27#include <asm/inst.h> 29#include <asm/inst.h>
28#include <asm/local.h> 30#include <asm/local.h>
31#include <asm/processor.h>
29 32
30#ifdef CONFIG_DEBUG_FS 33#ifdef CONFIG_DEBUG_FS
31 34
@@ -36,6 +39,11 @@ struct mips_fpu_emulator_stats {
36 local_t cp1ops; 39 local_t cp1ops;
37 local_t cp1xops; 40 local_t cp1xops;
38 local_t errors; 41 local_t errors;
42 local_t ieee754_inexact;
43 local_t ieee754_underflow;
44 local_t ieee754_overflow;
45 local_t ieee754_zerodiv;
46 local_t ieee754_invalidop;
39}; 47};
40 48
41DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats); 49DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
@@ -71,4 +79,17 @@ int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
71 */ 79 */
72#define BREAK_MATH (0x0000000d | (BRK_MEMU << 16)) 80#define BREAK_MATH (0x0000000d | (BRK_MEMU << 16))
73 81
82#define SIGNALLING_NAN 0x7ff800007ff80000LL
83
84static inline void fpu_emulator_init_fpu(void)
85{
86 struct task_struct *t = current;
87 int i;
88
89 t->thread.fpu.fcr31 = 0;
90
91 for (i = 0; i < 32; i++)
92 set_fpr64(&t->thread.fpu.fpr[i], 0, SIGNALLING_NAN);
93}
94
74#endif /* _ASM_FPU_EMULATOR_H */ 95#endif /* _ASM_FPU_EMULATOR_H */
diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h
index 082716690589..10f6a99f92c2 100644
--- a/arch/mips/include/asm/gic.h
+++ b/arch/mips/include/asm/gic.h
@@ -380,6 +380,7 @@ extern unsigned int gic_compare_int (void);
380extern cycle_t gic_read_count(void); 380extern cycle_t gic_read_count(void);
381extern cycle_t gic_read_compare(void); 381extern cycle_t gic_read_compare(void);
382extern void gic_write_compare(cycle_t cnt); 382extern void gic_write_compare(cycle_t cnt);
383extern void gic_write_cpu_compare(cycle_t cnt, int cpu);
383extern void gic_send_ipi(unsigned int intr); 384extern void gic_send_ipi(unsigned int intr);
384extern unsigned int plat_ipi_call_int_xlate(unsigned int); 385extern unsigned int plat_ipi_call_int_xlate(unsigned int);
385extern unsigned int plat_ipi_resched_int_xlate(unsigned int); 386extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
diff --git a/arch/mips/include/asm/gio_device.h b/arch/mips/include/asm/gio_device.h
index 0878701712f8..4be1a57cdbb0 100644
--- a/arch/mips/include/asm/gio_device.h
+++ b/arch/mips/include/asm/gio_device.h
@@ -50,7 +50,7 @@ static inline void gio_device_free(struct gio_device *dev)
50extern int gio_register_driver(struct gio_driver *); 50extern int gio_register_driver(struct gio_driver *);
51extern void gio_unregister_driver(struct gio_driver *); 51extern void gio_unregister_driver(struct gio_driver *);
52 52
53#define gio_get_drvdata(_dev) drv_get_drvdata(&(_dev)->dev) 53#define gio_get_drvdata(_dev) dev_get_drvdata(&(_dev)->dev)
54#define gio_set_drvdata(_dev, data) drv_set_drvdata(&(_dev)->dev, (data)) 54#define gio_set_drvdata(_dev, data) dev_set_drvdata(&(_dev)->dev, (data))
55 55
56extern void gio_set_master(struct gio_device *); 56extern void gio_set_master(struct gio_device *);
diff --git a/arch/mips/include/asm/idle.h b/arch/mips/include/asm/idle.h
index d192158886b1..d9f932de80e9 100644
--- a/arch/mips/include/asm/idle.h
+++ b/arch/mips/include/asm/idle.h
@@ -1,6 +1,7 @@
1#ifndef __ASM_IDLE_H 1#ifndef __ASM_IDLE_H
2#define __ASM_IDLE_H 2#define __ASM_IDLE_H
3 3
4#include <linux/cpuidle.h>
4#include <linux/linkage.h> 5#include <linux/linkage.h>
5 6
6extern void (*cpu_wait)(void); 7extern void (*cpu_wait)(void);
@@ -20,4 +21,17 @@ static inline int address_is_in_r4k_wait_irqoff(unsigned long addr)
20 addr < (unsigned long)__pastwait; 21 addr < (unsigned long)__pastwait;
21} 22}
22 23
24extern int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
25 struct cpuidle_driver *drv, int index);
26
27#define MIPS_CPUIDLE_WAIT_STATE {\
28 .enter = mips_cpuidle_wait_enter,\
29 .exit_latency = 1,\
30 .target_residency = 1,\
31 .power_usage = UINT_MAX,\
32 .flags = CPUIDLE_FLAG_TIME_VALID,\
33 .name = "wait",\
34 .desc = "MIPS wait",\
35}
36
23#endif /* __ASM_IDLE_H */ 37#endif /* __ASM_IDLE_H */
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 7bc2cdb35057..ae1f7b24dd1a 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -26,104 +26,8 @@ static inline int irq_canonicalize(int irq)
26#define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */ 26#define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */
27#endif 27#endif
28 28
29#ifdef CONFIG_MIPS_MT_SMTC
30
31struct irqaction;
32
33extern unsigned long irq_hwmask[];
34extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
35 unsigned long hwmask);
36
37static inline void smtc_im_ack_irq(unsigned int irq)
38{
39 if (irq_hwmask[irq] & ST0_IM)
40 set_c0_status(irq_hwmask[irq] & ST0_IM);
41}
42
43#else
44
45static inline void smtc_im_ack_irq(unsigned int irq)
46{
47}
48
49#endif /* CONFIG_MIPS_MT_SMTC */
50
51#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
52#include <linux/cpumask.h>
53
54extern int plat_set_irq_affinity(struct irq_data *d,
55 const struct cpumask *affinity, bool force);
56extern void smtc_forward_irq(struct irq_data *d);
57
58/*
59 * IRQ affinity hook invoked at the beginning of interrupt dispatch
60 * if option is enabled.
61 *
62 * Up through Linux 2.6.22 (at least) cpumask operations are very
63 * inefficient on MIPS. Initial prototypes of SMTC IRQ affinity
64 * used a "fast path" per-IRQ-descriptor cache of affinity information
65 * to reduce latency. As there is a project afoot to optimize the
66 * cpumask implementations, this version is optimistically assuming
67 * that cpumask.h macro overhead is reasonable during interrupt dispatch.
68 */
69static inline int handle_on_other_cpu(unsigned int irq)
70{
71 struct irq_data *d = irq_get_irq_data(irq);
72
73 if (cpumask_test_cpu(smp_processor_id(), d->affinity))
74 return 0;
75 smtc_forward_irq(d);
76 return 1;
77}
78
79#else /* Not doing SMTC affinity */
80
81static inline int handle_on_other_cpu(unsigned int irq) { return 0; }
82
83#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
84
85#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
86
87static inline void smtc_im_backstop(unsigned int irq)
88{
89 if (irq_hwmask[irq] & 0x0000ff00)
90 write_c0_tccontext(read_c0_tccontext() &
91 ~(irq_hwmask[irq] & 0x0000ff00));
92}
93
94/*
95 * Clear interrupt mask handling "backstop" if irq_hwmask
96 * entry so indicates. This implies that the ack() or end()
97 * functions will take over re-enabling the low-level mask.
98 * Otherwise it will be done on return from exception.
99 */
100static inline int smtc_handle_on_other_cpu(unsigned int irq)
101{
102 int ret = handle_on_other_cpu(irq);
103
104 if (!ret)
105 smtc_im_backstop(irq);
106 return ret;
107}
108
109#else
110
111static inline void smtc_im_backstop(unsigned int irq) { }
112static inline int smtc_handle_on_other_cpu(unsigned int irq)
113{
114 return handle_on_other_cpu(irq);
115}
116
117#endif
118
119extern void do_IRQ(unsigned int irq); 29extern void do_IRQ(unsigned int irq);
120 30
121#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
122
123extern void do_IRQ_no_affinity(unsigned int irq);
124
125#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
126
127extern void arch_init_irq(void); 31extern void arch_init_irq(void);
128extern void spurious_interrupt(void); 32extern void spurious_interrupt(void);
129 33
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 45c00951888b..0fa5fdcd1f01 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -17,7 +17,7 @@
17#include <linux/stringify.h> 17#include <linux/stringify.h>
18#include <asm/hazards.h> 18#include <asm/hazards.h>
19 19
20#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) 20#ifdef CONFIG_CPU_MIPSR2
21 21
22static inline void arch_local_irq_disable(void) 22static inline void arch_local_irq_disable(void)
23{ 23{
@@ -118,30 +118,15 @@ void arch_local_irq_disable(void);
118unsigned long arch_local_irq_save(void); 118unsigned long arch_local_irq_save(void);
119void arch_local_irq_restore(unsigned long flags); 119void arch_local_irq_restore(unsigned long flags);
120void __arch_local_irq_restore(unsigned long flags); 120void __arch_local_irq_restore(unsigned long flags);
121#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */ 121#endif /* CONFIG_CPU_MIPSR2 */
122
123
124extern void smtc_ipi_replay(void);
125 122
126static inline void arch_local_irq_enable(void) 123static inline void arch_local_irq_enable(void)
127{ 124{
128#ifdef CONFIG_MIPS_MT_SMTC
129 /*
130 * SMTC kernel needs to do a software replay of queued
131 * IPIs, at the cost of call overhead on each local_irq_enable()
132 */
133 smtc_ipi_replay();
134#endif
135 __asm__ __volatile__( 125 __asm__ __volatile__(
136 " .set push \n" 126 " .set push \n"
137 " .set reorder \n" 127 " .set reorder \n"
138 " .set noat \n" 128 " .set noat \n"
139#ifdef CONFIG_MIPS_MT_SMTC 129#if defined(CONFIG_CPU_MIPSR2)
140 " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n"
141 " ori $1, 0x400 \n"
142 " xori $1, 0x400 \n"
143 " mtc0 $1, $2, 1 \n"
144#elif defined(CONFIG_CPU_MIPSR2)
145 " ei \n" 130 " ei \n"
146#else 131#else
147 " mfc0 $1,$12 \n" 132 " mfc0 $1,$12 \n"
@@ -163,11 +148,7 @@ static inline unsigned long arch_local_save_flags(void)
163 asm __volatile__( 148 asm __volatile__(
164 " .set push \n" 149 " .set push \n"
165 " .set reorder \n" 150 " .set reorder \n"
166#ifdef CONFIG_MIPS_MT_SMTC
167 " mfc0 %[flags], $2, 1 \n"
168#else
169 " mfc0 %[flags], $12 \n" 151 " mfc0 %[flags], $12 \n"
170#endif
171 " .set pop \n" 152 " .set pop \n"
172 : [flags] "=r" (flags)); 153 : [flags] "=r" (flags));
173 154
@@ -177,14 +158,7 @@ static inline unsigned long arch_local_save_flags(void)
177 158
178static inline int arch_irqs_disabled_flags(unsigned long flags) 159static inline int arch_irqs_disabled_flags(unsigned long flags)
179{ 160{
180#ifdef CONFIG_MIPS_MT_SMTC
181 /*
182 * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU
183 */
184 return flags & 0x400;
185#else
186 return !(flags & 1); 161 return !(flags & 1);
187#endif
188} 162}
189 163
190#endif /* #ifndef __ASSEMBLY__ */ 164#endif /* #ifndef __ASSEMBLY__ */
diff --git a/arch/mips/include/asm/kvm_para.h b/arch/mips/include/asm/kvm_para.h
new file mode 100644
index 000000000000..5a9aa918abe6
--- /dev/null
+++ b/arch/mips/include/asm/kvm_para.h
@@ -0,0 +1,109 @@
1#ifndef _ASM_MIPS_KVM_PARA_H
2#define _ASM_MIPS_KVM_PARA_H
3
4#include <uapi/asm/kvm_para.h>
5
6#define KVM_HYPERCALL ".word 0x42000028"
7
8/*
9 * Hypercalls for KVM.
10 *
11 * Hypercall number is passed in v0.
12 * Return value will be placed in v0.
13 * Up to 3 arguments are passed in a0, a1, and a2.
14 */
15static inline unsigned long kvm_hypercall0(unsigned long num)
16{
17 register unsigned long n asm("v0");
18 register unsigned long r asm("v0");
19
20 n = num;
21 __asm__ __volatile__(
22 KVM_HYPERCALL
23 : "=r" (r) : "r" (n) : "memory"
24 );
25
26 return r;
27}
28
29static inline unsigned long kvm_hypercall1(unsigned long num,
30 unsigned long arg0)
31{
32 register unsigned long n asm("v0");
33 register unsigned long r asm("v0");
34 register unsigned long a0 asm("a0");
35
36 n = num;
37 a0 = arg0;
38 __asm__ __volatile__(
39 KVM_HYPERCALL
40 : "=r" (r) : "r" (n), "r" (a0) : "memory"
41 );
42
43 return r;
44}
45
46static inline unsigned long kvm_hypercall2(unsigned long num,
47 unsigned long arg0, unsigned long arg1)
48{
49 register unsigned long n asm("v0");
50 register unsigned long r asm("v0");
51 register unsigned long a0 asm("a0");
52 register unsigned long a1 asm("a1");
53
54 n = num;
55 a0 = arg0;
56 a1 = arg1;
57 __asm__ __volatile__(
58 KVM_HYPERCALL
59 : "=r" (r) : "r" (n), "r" (a0), "r" (a1) : "memory"
60 );
61
62 return r;
63}
64
65static inline unsigned long kvm_hypercall3(unsigned long num,
66 unsigned long arg0, unsigned long arg1, unsigned long arg2)
67{
68 register unsigned long n asm("v0");
69 register unsigned long r asm("v0");
70 register unsigned long a0 asm("a0");
71 register unsigned long a1 asm("a1");
72 register unsigned long a2 asm("a2");
73
74 n = num;
75 a0 = arg0;
76 a1 = arg1;
77 a2 = arg2;
78 __asm__ __volatile__(
79 KVM_HYPERCALL
80 : "=r" (r) : "r" (n), "r" (a0), "r" (a1), "r" (a2) : "memory"
81 );
82
83 return r;
84}
85
86static inline bool kvm_check_and_clear_guest_paused(void)
87{
88 return false;
89}
90
91static inline unsigned int kvm_arch_para_features(void)
92{
93 return 0;
94}
95
96#ifdef CONFIG_MIPS_PARAVIRT
97static inline bool kvm_para_available(void)
98{
99 return true;
100}
101#else
102static inline bool kvm_para_available(void)
103{
104 return false;
105}
106#endif
107
108
109#endif /* _ASM_MIPS_KVM_PARA_H */
diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
index 94ed063eec92..cf8022872892 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
@@ -22,7 +22,6 @@
22#define cpu_has_3k_cache 0 22#define cpu_has_3k_cache 0
23#define cpu_has_4k_cache 0 23#define cpu_has_4k_cache 0
24#define cpu_has_tx39_cache 0 24#define cpu_has_tx39_cache 0
25#define cpu_has_fpu 0
26#define cpu_has_counter 1 25#define cpu_has_counter 1
27#define cpu_has_watch 1 26#define cpu_has_watch 1
28#define cpu_has_divec 1 27#define cpu_has_divec 1
diff --git a/arch/mips/include/asm/mach-cavium-octeon/irq.h b/arch/mips/include/asm/mach-cavium-octeon/irq.h
index 60fc4c347c44..cceae32a0732 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/irq.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/irq.h
@@ -35,6 +35,8 @@ enum octeon_irq {
35 OCTEON_IRQ_PCI_MSI2, 35 OCTEON_IRQ_PCI_MSI2,
36 OCTEON_IRQ_PCI_MSI3, 36 OCTEON_IRQ_PCI_MSI3,
37 37
38 OCTEON_IRQ_TWSI,
39 OCTEON_IRQ_TWSI2,
38 OCTEON_IRQ_RML, 40 OCTEON_IRQ_RML,
39 OCTEON_IRQ_TIMER0, 41 OCTEON_IRQ_TIMER0,
40 OCTEON_IRQ_TIMER1, 42 OCTEON_IRQ_TIMER1,
diff --git a/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
index 1bcb6421205e..1dfe47453ea4 100644
--- a/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
@@ -39,6 +39,10 @@
39#define cpu_has_nofpuex 0 39#define cpu_has_nofpuex 0
40#define cpu_has_64bits 1 40#define cpu_has_64bits 1
41 41
42#define cpu_has_mips_2 1
43#define cpu_has_mips_3 1
44#define cpu_has_mips_5 0
45
42#define cpu_has_mips32r1 0 46#define cpu_has_mips32r1 0
43#define cpu_has_mips32r2 0 47#define cpu_has_mips32r2 0
44#define cpu_has_mips64r1 0 48#define cpu_has_mips64r1 0
diff --git a/arch/mips/include/asm/mach-malta/kernel-entry-init.h b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
index 7c5e17a17849..77eeda77e73c 100644
--- a/arch/mips/include/asm/mach-malta/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
@@ -80,36 +80,6 @@
80 .endm 80 .endm
81 81
82 .macro kernel_entry_setup 82 .macro kernel_entry_setup
83#ifdef CONFIG_MIPS_MT_SMTC
84 mfc0 t0, CP0_CONFIG
85 bgez t0, 9f
86 mfc0 t0, CP0_CONFIG, 1
87 bgez t0, 9f
88 mfc0 t0, CP0_CONFIG, 2
89 bgez t0, 9f
90 mfc0 t0, CP0_CONFIG, 3
91 and t0, 1<<2
92 bnez t0, 0f
939:
94 /* Assume we came from YAMON... */
95 PTR_LA v0, 0x9fc00534 /* YAMON print */
96 lw v0, (v0)
97 move a0, zero
98 PTR_LA a1, nonmt_processor
99 jal v0
100
101 PTR_LA v0, 0x9fc00520 /* YAMON exit */
102 lw v0, (v0)
103 li a0, 1
104 jal v0
105
1061: b 1b
107
108 __INITDATA
109nonmt_processor:
110 .asciz "SMTC kernel requires the MT ASE to run\n"
111 __FINIT
112#endif
113 83
114#ifdef CONFIG_EVA 84#ifdef CONFIG_EVA
115 sync 85 sync
diff --git a/arch/mips/include/asm/mach-malta/malta-pm.h b/arch/mips/include/asm/mach-malta/malta-pm.h
new file mode 100644
index 000000000000..c2c2e201013d
--- /dev/null
+++ b/arch/mips/include/asm/mach-malta/malta-pm.h
@@ -0,0 +1,37 @@
1/*
2 * Copyright (C) 2014 Imagination Technologies
3 * Author: Paul Burton <paul.burton@imgtec.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 */
10
11#ifndef __ASM_MIPS_MACH_MALTA_PM_H__
12#define __ASM_MIPS_MACH_MALTA_PM_H__
13
14#include <asm/mips-boards/piix4.h>
15
16#ifdef CONFIG_MIPS_MALTA_PM
17
18/**
19 * mips_pm_suspend - enter a suspend state
20 * @state: the state to enter, one of PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_*
21 *
22 * Enters a suspend state via the Malta's PIIX4. If the state to be entered
23 * is one which loses context (eg. SOFF) then this function will never
24 * return.
25 */
26extern int mips_pm_suspend(unsigned state);
27
28#else /* !CONFIG_MIPS_MALTA_PM */
29
30static inline int mips_pm_suspend(unsigned state)
31{
32 return -EINVAL;
33}
34
35#endif /* !CONFIG_MIPS_MALTA_PM */
36
37#endif /* __ASM_MIPS_MACH_MALTA_PM_H__ */
diff --git a/arch/mips/include/asm/mach-netlogic/topology.h b/arch/mips/include/asm/mach-netlogic/topology.h
index 0da99fa11c38..ceeb1f5e7129 100644
--- a/arch/mips/include/asm/mach-netlogic/topology.h
+++ b/arch/mips/include/asm/mach-netlogic/topology.h
@@ -10,10 +10,12 @@
10 10
11#include <asm/mach-netlogic/multi-node.h> 11#include <asm/mach-netlogic/multi-node.h>
12 12
13#ifdef CONFIG_SMP
13#define topology_physical_package_id(cpu) cpu_to_node(cpu) 14#define topology_physical_package_id(cpu) cpu_to_node(cpu)
14#define topology_core_id(cpu) (cpu_logical_map(cpu) / NLM_THREADS_PER_CORE) 15#define topology_core_id(cpu) (cpu_logical_map(cpu) / NLM_THREADS_PER_CORE)
15#define topology_thread_cpumask(cpu) (&cpu_sibling_map[cpu]) 16#define topology_thread_cpumask(cpu) (&cpu_sibling_map[cpu])
16#define topology_core_cpumask(cpu) cpumask_of_node(cpu_to_node(cpu)) 17#define topology_core_cpumask(cpu) cpumask_of_node(cpu_to_node(cpu))
18#endif
17 19
18#include <asm-generic/topology.h> 20#include <asm-generic/topology.h>
19 21
diff --git a/arch/mips/include/asm/mach-paravirt/cpu-feature-overrides.h b/arch/mips/include/asm/mach-paravirt/cpu-feature-overrides.h
new file mode 100644
index 000000000000..725e1ed83f6a
--- /dev/null
+++ b/arch/mips/include/asm/mach-paravirt/cpu-feature-overrides.h
@@ -0,0 +1,36 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2013 Cavium, Inc.
7 */
8#ifndef __ASM_MACH_PARAVIRT_CPU_FEATURE_OVERRIDES_H
9#define __ASM_MACH_PARAVIRT_CPU_FEATURE_OVERRIDES_H
10
11#define cpu_has_4kex 1
12#define cpu_has_3k_cache 0
13#define cpu_has_tx39_cache 0
14#define cpu_has_counter 1
15#define cpu_has_llsc 1
16/*
17 * We Disable LL/SC on non SMP systems as it is faster to disable
18 * interrupts for atomic access than a LL/SC.
19 */
20#ifdef CONFIG_SMP
21# define kernel_uses_llsc 1
22#else
23# define kernel_uses_llsc 0
24#endif
25
26#ifdef CONFIG_CPU_CAVIUM_OCTEON
27#define cpu_dcache_line_size() 128
28#define cpu_icache_line_size() 128
29#define cpu_has_octeon_cache 1
30#define cpu_has_4k_cache 0
31#else
32#define cpu_has_octeon_cache 0
33#define cpu_has_4k_cache 1
34#endif
35
36#endif /* __ASM_MACH_PARAVIRT_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-paravirt/irq.h b/arch/mips/include/asm/mach-paravirt/irq.h
new file mode 100644
index 000000000000..9b4d35eca977
--- /dev/null
+++ b/arch/mips/include/asm/mach-paravirt/irq.h
@@ -0,0 +1,19 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2013 Cavium, Inc.
7 */
8#ifndef __ASM_MACH_PARAVIRT_IRQ_H__
9#define __ASM_MACH_PARAVIRT_IRQ_H__
10
11#define NR_IRQS 64
12#define MIPS_CPU_IRQ_BASE 1
13
14#define MIPS_IRQ_PCIA (MIPS_CPU_IRQ_BASE + 8)
15
16#define MIPS_IRQ_MBOX0 (MIPS_CPU_IRQ_BASE + 32)
17#define MIPS_IRQ_MBOX1 (MIPS_CPU_IRQ_BASE + 33)
18
19#endif /* __ASM_MACH_PARAVIRT_IRQ_H__ */
diff --git a/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
new file mode 100644
index 000000000000..2f82bfa3a773
--- /dev/null
+++ b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
@@ -0,0 +1,50 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2013 Cavium, Inc
7 */
8#ifndef __ASM_MACH_PARAVIRT_KERNEL_ENTRY_H
9#define __ASM_MACH_PARAVIRT_KERNEL_ENTRY_H
10
11#define CP0_EBASE $15, 1
12
13 .macro kernel_entry_setup
14 mfc0 t0, CP0_EBASE
15 andi t0, t0, 0x3ff # CPUNum
16 beqz t0, 1f
17 # CPUs other than zero goto smp_bootstrap
18 j smp_bootstrap
19
201:
21 .endm
22
23/*
24 * Do SMP slave processor setup necessary before we can safely execute
25 * C code.
26 */
27 .macro smp_slave_setup
28 mfc0 t0, CP0_EBASE
29 andi t0, t0, 0x3ff # CPUNum
30 slti t1, t0, NR_CPUS
31 bnez t1, 1f
322:
33 di
34 wait
35 b 2b # Unknown CPU, loop forever.
361:
37 PTR_LA t1, paravirt_smp_sp
38 PTR_SLL t0, PTR_SCALESHIFT
39 PTR_ADDU t1, t1, t0
403:
41 PTR_L sp, 0(t1)
42 beqz sp, 3b # Spin until told to proceed.
43
44 PTR_LA t1, paravirt_smp_gp
45 PTR_ADDU t1, t1, t0
46 sync
47 PTR_L gp, 0(t1)
48 .endm
49
50#endif /* __ASM_MACH_PARAVIRT_KERNEL_ENTRY_H */
diff --git a/arch/mips/include/asm/mach-paravirt/war.h b/arch/mips/include/asm/mach-paravirt/war.h
new file mode 100644
index 000000000000..36d3afb98451
--- /dev/null
+++ b/arch/mips/include/asm/mach-paravirt/war.h
@@ -0,0 +1,25 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
7 * Copyright (C) 2013 Cavium Networks <support@caviumnetworks.com>
8 */
9#ifndef __ASM_MIPS_MACH_PARAVIRT_WAR_H
10#define __ASM_MIPS_MACH_PARAVIRT_WAR_H
11
12#define R4600_V1_INDEX_ICACHEOP_WAR 0
13#define R4600_V1_HIT_CACHEOP_WAR 0
14#define R4600_V2_HIT_CACHEOP_WAR 0
15#define R5432_CP0_INTERRUPT_WAR 0
16#define BCM1250_M3_WAR 0
17#define SIBYTE_1956_WAR 0
18#define MIPS4K_ICACHE_REFILL_WAR 0
19#define MIPS_CACHE_SYNC_WAR 0
20#define TX49XX_ICACHE_INDEX_INV_WAR 0
21#define ICACHE_REFILLS_WORKAROUND_WAR 0
22#define R10000_LLSC_WAR 0
23#define MIPS34K_MISSED_ITLB_WAR 0
24
25#endif /* __ASM_MIPS_MACH_PARAVIRT_WAR_H */
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_usb.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_usb.h
index aa45e6a07126..fe1566f2913e 100644
--- a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_usb.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_usb.h
@@ -25,11 +25,7 @@
25#ifndef MSP_USB_H_ 25#ifndef MSP_USB_H_
26#define MSP_USB_H_ 26#define MSP_USB_H_
27 27
28#ifdef CONFIG_MSP_HAS_DUAL_USB
29#define NUM_USB_DEVS 2
30#else
31#define NUM_USB_DEVS 1 28#define NUM_USB_DEVS 1
32#endif
33 29
34/* Register spaces for USB host 0 */ 30/* Register spaces for USB host 0 */
35#define MSP_USB0_MAB_START (MSP_USB0_BASE + 0x0) 31#define MSP_USB0_MAB_START (MSP_USB0_BASE + 0x0)
diff --git a/arch/mips/include/asm/mach-ralink/war.h b/arch/mips/include/asm/mach-ralink/war.h
index a7b712cf2d28..c074b5dc1f82 100644
--- a/arch/mips/include/asm/mach-ralink/war.h
+++ b/arch/mips/include/asm/mach-ralink/war.h
@@ -17,7 +17,6 @@
17#define MIPS4K_ICACHE_REFILL_WAR 0 17#define MIPS4K_ICACHE_REFILL_WAR 0
18#define MIPS_CACHE_SYNC_WAR 0 18#define MIPS_CACHE_SYNC_WAR 0
19#define TX49XX_ICACHE_INDEX_INV_WAR 0 19#define TX49XX_ICACHE_INDEX_INV_WAR 0
20#define RM9000_CDEX_SMP_WAR 0
21#define ICACHE_REFILLS_WORKAROUND_WAR 0 20#define ICACHE_REFILLS_WORKAROUND_WAR 0
22#define R10000_LLSC_WAR 0 21#define R10000_LLSC_WAR 0
23#define MIPS34K_MISSED_ITLB_WAR 0 22#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-sead3/kernel-entry-init.h b/arch/mips/include/asm/mach-sead3/kernel-entry-init.h
index 3dfbd8e7947f..6cccd4d558d7 100644
--- a/arch/mips/include/asm/mach-sead3/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-sead3/kernel-entry-init.h
@@ -10,37 +10,6 @@
10#define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H 10#define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
11 11
12 .macro kernel_entry_setup 12 .macro kernel_entry_setup
13#ifdef CONFIG_MIPS_MT_SMTC
14 mfc0 t0, CP0_CONFIG
15 bgez t0, 9f
16 mfc0 t0, CP0_CONFIG, 1
17 bgez t0, 9f
18 mfc0 t0, CP0_CONFIG, 2
19 bgez t0, 9f
20 mfc0 t0, CP0_CONFIG, 3
21 and t0, 1<<2
22 bnez t0, 0f
239 :
24 /* Assume we came from YAMON... */
25 PTR_LA v0, 0x9fc00534 /* YAMON print */
26 lw v0, (v0)
27 move a0, zero
28 PTR_LA a1, nonmt_processor
29 jal v0
30
31 PTR_LA v0, 0x9fc00520 /* YAMON exit */
32 lw v0, (v0)
33 li a0, 1
34 jal v0
35
361 : b 1b
37
38 __INITDATA
39nonmt_processor :
40 .asciz "SMTC kernel requires the MT ASE to run\n"
41 __FINIT
420 :
43#endif
44 .endm 13 .endm
45 14
46/* 15/*
diff --git a/arch/mips/include/asm/mips-boards/piix4.h b/arch/mips/include/asm/mips-boards/piix4.h
index 9cf54041d416..9e340be52a50 100644
--- a/arch/mips/include/asm/mips-boards/piix4.h
+++ b/arch/mips/include/asm/mips-boards/piix4.h
@@ -55,4 +55,16 @@
55#define PIIX4_FUNC3_PMREGMISC 0x80 55#define PIIX4_FUNC3_PMREGMISC 0x80
56#define PIIX4_FUNC3_PMREGMISC_EN (1 << 0) 56#define PIIX4_FUNC3_PMREGMISC_EN (1 << 0)
57 57
58/* Power Management IO Space */
59#define PIIX4_FUNC3IO_PMSTS 0x00
60#define PIIX4_FUNC3IO_PMSTS_PWRBTN_STS (1 << 8)
61#define PIIX4_FUNC3IO_PMCNTRL 0x04
62#define PIIX4_FUNC3IO_PMCNTRL_SUS_EN (1 << 13)
63#define PIIX4_FUNC3IO_PMCNTRL_SUS_TYP (0x7 << 10)
64#define PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF (0x0 << 10)
65#define PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_STR (0x1 << 10)
66
67/* Data for magic special PCI cycle */
68#define PIIX4_SUSPEND_MAGIC 0x00120002
69
58#endif /* __ASM_MIPS_BOARDS_PIIX4_H */ 70#endif /* __ASM_MIPS_BOARDS_PIIX4_H */
diff --git a/arch/mips/include/asm/mips-cpc.h b/arch/mips/include/asm/mips-cpc.h
index 988507e46d42..e139a534e0fd 100644
--- a/arch/mips/include/asm/mips-cpc.h
+++ b/arch/mips/include/asm/mips-cpc.h
@@ -72,7 +72,12 @@ static inline bool mips_cpc_present(void)
72#define MIPS_CPC_COCB_OFS 0x4000 72#define MIPS_CPC_COCB_OFS 0x4000
73 73
74/* Macros to ease the creation of register access functions */ 74/* Macros to ease the creation of register access functions */
75#define BUILD_CPC_R_(name, off) \ 75#define BUILD_CPC_R_(name, off) \
76static inline u32 *addr_cpc_##name(void) \
77{ \
78 return (u32 *)(mips_cpc_base + (off)); \
79} \
80 \
76static inline u32 read_cpc_##name(void) \ 81static inline u32 read_cpc_##name(void) \
77{ \ 82{ \
78 return __raw_readl(mips_cpc_base + (off)); \ 83 return __raw_readl(mips_cpc_base + (off)); \
@@ -147,4 +152,31 @@ BUILD_CPC_Cx_RW(other, 0x10)
147#define CPC_Cx_OTHER_CORENUM_SHF 16 152#define CPC_Cx_OTHER_CORENUM_SHF 16
148#define CPC_Cx_OTHER_CORENUM_MSK (_ULCAST_(0xff) << 16) 153#define CPC_Cx_OTHER_CORENUM_MSK (_ULCAST_(0xff) << 16)
149 154
155#ifdef CONFIG_MIPS_CPC
156
157/**
158 * mips_cpc_lock_other - lock access to another core
159 * core: the other core to be accessed
160 *
161 * Call before operating upon a core via the 'other' register region in
162 * order to prevent the region being moved during access. Must be followed
163 * by a call to mips_cpc_unlock_other.
164 */
165extern void mips_cpc_lock_other(unsigned int core);
166
167/**
168 * mips_cpc_unlock_other - unlock access to another core
169 *
170 * Call after operating upon another core via the 'other' register region.
171 * Must be called after mips_cpc_lock_other.
172 */
173extern void mips_cpc_unlock_other(void);
174
175#else /* !CONFIG_MIPS_CPC */
176
177static inline void mips_cpc_lock_other(unsigned int core) { }
178static inline void mips_cpc_unlock_other(void) { }
179
180#endif /* !CONFIG_MIPS_CPC */
181
150#endif /* __MIPS_ASM_MIPS_CPC_H__ */ 182#endif /* __MIPS_ASM_MIPS_CPC_H__ */
diff --git a/arch/mips/include/asm/mips_mt.h b/arch/mips/include/asm/mips_mt.h
index a3df0c3faa0e..f6ba004a7711 100644
--- a/arch/mips/include/asm/mips_mt.h
+++ b/arch/mips/include/asm/mips_mt.h
@@ -1,7 +1,6 @@
1/* 1/*
2 * Definitions and decalrations for MIPS MT support 2 * Definitions and decalrations for MIPS MT support that are common between
3 * that are common between SMTC, VSMP, and/or AP/SP 3 * the VSMP, and AP/SP kernel models.
4 * kernel models.
5 */ 4 */
6#ifndef __ASM_MIPS_MT_H 5#ifndef __ASM_MIPS_MT_H
7#define __ASM_MIPS_MT_H 6#define __ASM_MIPS_MT_H
diff --git a/arch/mips/include/asm/mipsmtregs.h b/arch/mips/include/asm/mipsmtregs.h
index 6efa79a27b6a..5f8052ce43bf 100644
--- a/arch/mips/include/asm/mipsmtregs.h
+++ b/arch/mips/include/asm/mipsmtregs.h
@@ -36,6 +36,8 @@
36 36
37#define read_c0_tcbind() __read_32bit_c0_register($2, 2) 37#define read_c0_tcbind() __read_32bit_c0_register($2, 2)
38 38
39#define write_c0_tchalt(val) __write_32bit_c0_register($2, 4, val)
40
39#define read_c0_tccontext() __read_32bit_c0_register($2, 5) 41#define read_c0_tccontext() __read_32bit_c0_register($2, 5)
40#define write_c0_tccontext(val) __write_32bit_c0_register($2, 5, val) 42#define write_c0_tccontext(val) __write_32bit_c0_register($2, 5, val)
41 43
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 3e025b5311db..98e9754a4b6b 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -709,11 +709,18 @@
709#ifndef __ASSEMBLY__ 709#ifndef __ASSEMBLY__
710 710
711/* 711/*
712 * Macros for handling the ISA mode bit for microMIPS. 712 * Macros for handling the ISA mode bit for MIPS16 and microMIPS.
713 */ 713 */
714#if defined(CONFIG_SYS_SUPPORTS_MIPS16) || \
715 defined(CONFIG_SYS_SUPPORTS_MICROMIPS)
714#define get_isa16_mode(x) ((x) & 0x1) 716#define get_isa16_mode(x) ((x) & 0x1)
715#define msk_isa16_mode(x) ((x) & ~0x1) 717#define msk_isa16_mode(x) ((x) & ~0x1)
716#define set_isa16_mode(x) do { (x) |= 0x1; } while(0) 718#define set_isa16_mode(x) do { (x) |= 0x1; } while(0)
719#else
720#define get_isa16_mode(x) 0
721#define msk_isa16_mode(x) (x)
722#define set_isa16_mode(x) do { } while(0)
723#endif
717 724
718/* 725/*
719 * microMIPS instructions can be 16-bit or 32-bit in length. This 726 * microMIPS instructions can be 16-bit or 32-bit in length. This
@@ -1007,19 +1014,8 @@ do { \
1007#define write_c0_compare3(val) __write_32bit_c0_register($11, 7, val) 1014#define write_c0_compare3(val) __write_32bit_c0_register($11, 7, val)
1008 1015
1009#define read_c0_status() __read_32bit_c0_register($12, 0) 1016#define read_c0_status() __read_32bit_c0_register($12, 0)
1010#ifdef CONFIG_MIPS_MT_SMTC 1017
1011#define write_c0_status(val) \
1012do { \
1013 __write_32bit_c0_register($12, 0, val); \
1014 __ehb(); \
1015} while (0)
1016#else
1017/*
1018 * Legacy non-SMTC code, which may be hazardous
1019 * but which might not support EHB
1020 */
1021#define write_c0_status(val) __write_32bit_c0_register($12, 0, val) 1018#define write_c0_status(val) __write_32bit_c0_register($12, 0, val)
1022#endif /* CONFIG_MIPS_MT_SMTC */
1023 1019
1024#define read_c0_cause() __read_32bit_c0_register($13, 0) 1020#define read_c0_cause() __read_32bit_c0_register($13, 0)
1025#define write_c0_cause(val) __write_32bit_c0_register($13, 0, val) 1021#define write_c0_cause(val) __write_32bit_c0_register($13, 0, val)
@@ -1743,11 +1739,6 @@ static inline void tlb_write_random(void)
1743/* 1739/*
1744 * Manipulate bits in a c0 register. 1740 * Manipulate bits in a c0 register.
1745 */ 1741 */
1746#ifndef CONFIG_MIPS_MT_SMTC
1747/*
1748 * SMTC Linux requires shutting-down microthread scheduling
1749 * during CP0 register read-modify-write sequences.
1750 */
1751#define __BUILD_SET_C0(name) \ 1742#define __BUILD_SET_C0(name) \
1752static inline unsigned int \ 1743static inline unsigned int \
1753set_c0_##name(unsigned int set) \ 1744set_c0_##name(unsigned int set) \
@@ -1786,121 +1777,6 @@ change_c0_##name(unsigned int change, unsigned int val) \
1786 return res; \ 1777 return res; \
1787} 1778}
1788 1779
1789#else /* SMTC versions that manage MT scheduling */
1790
1791#include <linux/irqflags.h>
1792
1793/*
1794 * This is a duplicate of dmt() in mipsmtregs.h to avoid problems with
1795 * header file recursion.
1796 */
1797static inline unsigned int __dmt(void)
1798{
1799 int res;
1800
1801 __asm__ __volatile__(
1802 " .set push \n"
1803 " .set mips32r2 \n"
1804 " .set noat \n"
1805 " .word 0x41610BC1 # dmt $1 \n"
1806 " ehb \n"
1807 " move %0, $1 \n"
1808 " .set pop \n"
1809 : "=r" (res));
1810
1811 instruction_hazard();
1812
1813 return res;
1814}
1815
1816#define __VPECONTROL_TE_SHIFT 15
1817#define __VPECONTROL_TE (1UL << __VPECONTROL_TE_SHIFT)
1818
1819#define __EMT_ENABLE __VPECONTROL_TE
1820
1821static inline void __emt(unsigned int previous)
1822{
1823 if ((previous & __EMT_ENABLE))
1824 __asm__ __volatile__(
1825 " .set mips32r2 \n"
1826 " .word 0x41600be1 # emt \n"
1827 " ehb \n"
1828 " .set mips0 \n");
1829}
1830
1831static inline void __ehb(void)
1832{
1833 __asm__ __volatile__(
1834 " .set mips32r2 \n"
1835 " ehb \n" " .set mips0 \n");
1836}
1837
1838/*
1839 * Note that local_irq_save/restore affect TC-specific IXMT state,
1840 * not Status.IE as in non-SMTC kernel.
1841 */
1842
1843#define __BUILD_SET_C0(name) \
1844static inline unsigned int \
1845set_c0_##name(unsigned int set) \
1846{ \
1847 unsigned int res; \
1848 unsigned int new; \
1849 unsigned int omt; \
1850 unsigned long flags; \
1851 \
1852 local_irq_save(flags); \
1853 omt = __dmt(); \
1854 res = read_c0_##name(); \
1855 new = res | set; \
1856 write_c0_##name(new); \
1857 __emt(omt); \
1858 local_irq_restore(flags); \
1859 \
1860 return res; \
1861} \
1862 \
1863static inline unsigned int \
1864clear_c0_##name(unsigned int clear) \
1865{ \
1866 unsigned int res; \
1867 unsigned int new; \
1868 unsigned int omt; \
1869 unsigned long flags; \
1870 \
1871 local_irq_save(flags); \
1872 omt = __dmt(); \
1873 res = read_c0_##name(); \
1874 new = res & ~clear; \
1875 write_c0_##name(new); \
1876 __emt(omt); \
1877 local_irq_restore(flags); \
1878 \
1879 return res; \
1880} \
1881 \
1882static inline unsigned int \
1883change_c0_##name(unsigned int change, unsigned int newbits) \
1884{ \
1885 unsigned int res; \
1886 unsigned int new; \
1887 unsigned int omt; \
1888 unsigned long flags; \
1889 \
1890 local_irq_save(flags); \
1891 \
1892 omt = __dmt(); \
1893 res = read_c0_##name(); \
1894 new = res & ~change; \
1895 new |= (newbits & change); \
1896 write_c0_##name(new); \
1897 __emt(omt); \
1898 local_irq_restore(flags); \
1899 \
1900 return res; \
1901}
1902#endif
1903
1904__BUILD_SET_C0(status) 1780__BUILD_SET_C0(status)
1905__BUILD_SET_C0(cause) 1781__BUILD_SET_C0(cause)
1906__BUILD_SET_C0(config) 1782__BUILD_SET_C0(config)
@@ -1916,6 +1792,15 @@ __BUILD_SET_C0(brcm_cmt_ctrl)
1916__BUILD_SET_C0(brcm_config) 1792__BUILD_SET_C0(brcm_config)
1917__BUILD_SET_C0(brcm_mode) 1793__BUILD_SET_C0(brcm_mode)
1918 1794
1795/*
1796 * Return low 10 bits of ebase.
1797 * Note that under KVM (MIPSVZ) this returns vcpu id.
1798 */
1799static inline unsigned int get_ebase_cpunum(void)
1800{
1801 return read_c0_ebase() & 0x3ff;
1802}
1803
1919#endif /* !__ASSEMBLY__ */ 1804#endif /* !__ASSEMBLY__ */
1920 1805
1921#endif /* _ASM_MIPSREGS_H */ 1806#endif /* _ASM_MIPSREGS_H */
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index e277bbad2871..2e373da5f8e9 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -18,10 +18,6 @@
18#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
19#include <asm/hazards.h> 19#include <asm/hazards.h>
20#include <asm/tlbflush.h> 20#include <asm/tlbflush.h>
21#ifdef CONFIG_MIPS_MT_SMTC
22#include <asm/mipsmtregs.h>
23#include <asm/smtc.h>
24#endif /* SMTC */
25#include <asm-generic/mm_hooks.h> 21#include <asm-generic/mm_hooks.h>
26 22
27#define TLBMISS_HANDLER_SETUP_PGD(pgd) \ 23#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
@@ -31,11 +27,15 @@ do { \
31} while (0) 27} while (0)
32 28
33#ifdef CONFIG_MIPS_PGD_C0_CONTEXT 29#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
30
31#define TLBMISS_HANDLER_RESTORE() \
32 write_c0_xcontext((unsigned long) smp_processor_id() << \
33 SMP_CPUID_REGSHIFT)
34
34#define TLBMISS_HANDLER_SETUP() \ 35#define TLBMISS_HANDLER_SETUP() \
35 do { \ 36 do { \
36 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir); \ 37 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir); \
37 write_c0_xcontext((unsigned long) smp_processor_id() << \ 38 TLBMISS_HANDLER_RESTORE(); \
38 SMP_CPUID_REGSHIFT); \
39 } while (0) 39 } while (0)
40 40
41#else /* !CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current*/ 41#else /* !CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current*/
@@ -47,9 +47,12 @@ do { \
47 */ 47 */
48extern unsigned long pgd_current[]; 48extern unsigned long pgd_current[];
49 49
50#define TLBMISS_HANDLER_SETUP() \ 50#define TLBMISS_HANDLER_RESTORE() \
51 write_c0_context((unsigned long) smp_processor_id() << \ 51 write_c0_context((unsigned long) smp_processor_id() << \
52 SMP_CPUID_REGSHIFT); \ 52 SMP_CPUID_REGSHIFT)
53
54#define TLBMISS_HANDLER_SETUP() \
55 TLBMISS_HANDLER_RESTORE(); \
53 back_to_back_c0_hazard(); \ 56 back_to_back_c0_hazard(); \
54 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) 57 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
55#endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/ 58#endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
@@ -63,13 +66,6 @@ extern unsigned long pgd_current[];
63#define ASID_INC 0x10 66#define ASID_INC 0x10
64#define ASID_MASK 0xff0 67#define ASID_MASK 0xff0
65 68
66#elif defined(CONFIG_MIPS_MT_SMTC)
67
68#define ASID_INC 0x1
69extern unsigned long smtc_asid_mask;
70#define ASID_MASK (smtc_asid_mask)
71#define HW_ASID_MASK 0xff
72/* End SMTC/34K debug hack */
73#else /* FIXME: not correct for R6000 */ 69#else /* FIXME: not correct for R6000 */
74 70
75#define ASID_INC 0x1 71#define ASID_INC 0x1
@@ -92,7 +88,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
92#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1))) 88#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
93#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1) 89#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
94 90
95#ifndef CONFIG_MIPS_MT_SMTC
96/* Normal, classic MIPS get_new_mmu_context */ 91/* Normal, classic MIPS get_new_mmu_context */
97static inline void 92static inline void
98get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) 93get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
@@ -115,12 +110,6 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
115 cpu_context(cpu, mm) = asid_cache(cpu) = asid; 110 cpu_context(cpu, mm) = asid_cache(cpu) = asid;
116} 111}
117 112
118#else /* CONFIG_MIPS_MT_SMTC */
119
120#define get_new_mmu_context(mm, cpu) smtc_get_new_mmu_context((mm), (cpu))
121
122#endif /* CONFIG_MIPS_MT_SMTC */
123
124/* 113/*
125 * Initialize the context related info for a new mm_struct 114 * Initialize the context related info for a new mm_struct
126 * instance. 115 * instance.
@@ -141,46 +130,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
141{ 130{
142 unsigned int cpu = smp_processor_id(); 131 unsigned int cpu = smp_processor_id();
143 unsigned long flags; 132 unsigned long flags;
144#ifdef CONFIG_MIPS_MT_SMTC
145 unsigned long oldasid;
146 unsigned long mtflags;
147 int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
148 local_irq_save(flags); 133 local_irq_save(flags);
149 mtflags = dvpe();
150#else /* Not SMTC */
151 local_irq_save(flags);
152#endif /* CONFIG_MIPS_MT_SMTC */
153 134
154 /* Check if our ASID is of an older version and thus invalid */ 135 /* Check if our ASID is of an older version and thus invalid */
155 if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) 136 if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
156 get_new_mmu_context(next, cpu); 137 get_new_mmu_context(next, cpu);
157#ifdef CONFIG_MIPS_MT_SMTC
158 /*
159 * If the EntryHi ASID being replaced happens to be
160 * the value flagged at ASID recycling time as having
161 * an extended life, clear the bit showing it being
162 * in use by this "CPU", and if that's the last bit,
163 * free up the ASID value for use and flush any old
164 * instances of it from the TLB.
165 */
166 oldasid = (read_c0_entryhi() & ASID_MASK);
167 if(smtc_live_asid[mytlb][oldasid]) {
168 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
169 if(smtc_live_asid[mytlb][oldasid] == 0)
170 smtc_flush_tlb_asid(oldasid);
171 }
172 /*
173 * Tread softly on EntryHi, and so long as we support
174 * having ASID_MASK smaller than the hardware maximum,
175 * make sure no "soft" bits become "hard"...
176 */
177 write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
178 cpu_asid(cpu, next));
179 ehb(); /* Make sure it propagates to TCStatus */
180 evpe(mtflags);
181#else
182 write_c0_entryhi(cpu_asid(cpu, next)); 138 write_c0_entryhi(cpu_asid(cpu, next));
183#endif /* CONFIG_MIPS_MT_SMTC */
184 TLBMISS_HANDLER_SETUP_PGD(next->pgd); 139 TLBMISS_HANDLER_SETUP_PGD(next->pgd);
185 140
186 /* 141 /*
@@ -213,34 +168,12 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
213 unsigned long flags; 168 unsigned long flags;
214 unsigned int cpu = smp_processor_id(); 169 unsigned int cpu = smp_processor_id();
215 170
216#ifdef CONFIG_MIPS_MT_SMTC
217 unsigned long oldasid;
218 unsigned long mtflags;
219 int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
220#endif /* CONFIG_MIPS_MT_SMTC */
221
222 local_irq_save(flags); 171 local_irq_save(flags);
223 172
224 /* Unconditionally get a new ASID. */ 173 /* Unconditionally get a new ASID. */
225 get_new_mmu_context(next, cpu); 174 get_new_mmu_context(next, cpu);
226 175
227#ifdef CONFIG_MIPS_MT_SMTC
228 /* See comments for similar code above */
229 mtflags = dvpe();
230 oldasid = read_c0_entryhi() & ASID_MASK;
231 if(smtc_live_asid[mytlb][oldasid]) {
232 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
233 if(smtc_live_asid[mytlb][oldasid] == 0)
234 smtc_flush_tlb_asid(oldasid);
235 }
236 /* See comments for similar code above */
237 write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
238 cpu_asid(cpu, next));
239 ehb(); /* Make sure it propagates to TCStatus */
240 evpe(mtflags);
241#else
242 write_c0_entryhi(cpu_asid(cpu, next)); 176 write_c0_entryhi(cpu_asid(cpu, next));
243#endif /* CONFIG_MIPS_MT_SMTC */
244 TLBMISS_HANDLER_SETUP_PGD(next->pgd); 177 TLBMISS_HANDLER_SETUP_PGD(next->pgd);
245 178
246 /* mark mmu ownership change */ 179 /* mark mmu ownership change */
@@ -258,48 +191,15 @@ static inline void
258drop_mmu_context(struct mm_struct *mm, unsigned cpu) 191drop_mmu_context(struct mm_struct *mm, unsigned cpu)
259{ 192{
260 unsigned long flags; 193 unsigned long flags;
261#ifdef CONFIG_MIPS_MT_SMTC
262 unsigned long oldasid;
263 /* Can't use spinlock because called from TLB flush within DVPE */
264 unsigned int prevvpe;
265 int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
266#endif /* CONFIG_MIPS_MT_SMTC */
267 194
268 local_irq_save(flags); 195 local_irq_save(flags);
269 196
270 if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { 197 if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
271 get_new_mmu_context(mm, cpu); 198 get_new_mmu_context(mm, cpu);
272#ifdef CONFIG_MIPS_MT_SMTC
273 /* See comments for similar code above */
274 prevvpe = dvpe();
275 oldasid = (read_c0_entryhi() & ASID_MASK);
276 if (smtc_live_asid[mytlb][oldasid]) {
277 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
278 if(smtc_live_asid[mytlb][oldasid] == 0)
279 smtc_flush_tlb_asid(oldasid);
280 }
281 /* See comments for similar code above */
282 write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
283 | cpu_asid(cpu, mm));
284 ehb(); /* Make sure it propagates to TCStatus */
285 evpe(prevvpe);
286#else /* not CONFIG_MIPS_MT_SMTC */
287 write_c0_entryhi(cpu_asid(cpu, mm)); 199 write_c0_entryhi(cpu_asid(cpu, mm));
288#endif /* CONFIG_MIPS_MT_SMTC */
289 } else { 200 } else {
290 /* will get a new context next time */ 201 /* will get a new context next time */
291#ifndef CONFIG_MIPS_MT_SMTC
292 cpu_context(cpu, mm) = 0; 202 cpu_context(cpu, mm) = 0;
293#else /* SMTC */
294 int i;
295
296 /* SMTC shares the TLB (and ASIDs) across VPEs */
297 for_each_online_cpu(i) {
298 if((smtc_status & SMTC_TLB_SHARED)
299 || (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
300 cpu_context(i, mm) = 0;
301 }
302#endif /* CONFIG_MIPS_MT_SMTC */
303 } 203 }
304 local_irq_restore(flags); 204 local_irq_restore(flags);
305} 205}
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index c2edae382d5d..800fe578dc99 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -144,13 +144,7 @@ search_module_dbetables(unsigned long addr)
144#define MODULE_KERNEL_TYPE "64BIT " 144#define MODULE_KERNEL_TYPE "64BIT "
145#endif 145#endif
146 146
147#ifdef CONFIG_MIPS_MT_SMTC
148#define MODULE_KERNEL_SMTC "MT_SMTC "
149#else
150#define MODULE_KERNEL_SMTC ""
151#endif
152
153#define MODULE_ARCH_VERMAGIC \ 147#define MODULE_ARCH_VERMAGIC \
154 MODULE_PROC_FAMILY MODULE_KERNEL_TYPE MODULE_KERNEL_SMTC 148 MODULE_PROC_FAMILY MODULE_KERNEL_TYPE
155 149
156#endif /* _ASM_MODULE_H */ 150#endif /* _ASM_MODULE_H */
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
index baddc5f600be..538f6d482db8 100644
--- a/arch/mips/include/asm/msa.h
+++ b/arch/mips/include/asm/msa.h
@@ -96,6 +96,13 @@ static inline void write_msa_##name(unsigned int val) \
96 * allow compilation with toolchains that do not support MSA. Once all 96 * allow compilation with toolchains that do not support MSA. Once all
97 * toolchains in use support MSA these can be removed. 97 * toolchains in use support MSA these can be removed.
98 */ 98 */
99#ifdef CONFIG_CPU_MICROMIPS
100#define CFC_MSA_INSN 0x587e0056
101#define CTC_MSA_INSN 0x583e0816
102#else
103#define CFC_MSA_INSN 0x787e0059
104#define CTC_MSA_INSN 0x783e0819
105#endif
99 106
100#define __BUILD_MSA_CTL_REG(name, cs) \ 107#define __BUILD_MSA_CTL_REG(name, cs) \
101static inline unsigned int read_msa_##name(void) \ 108static inline unsigned int read_msa_##name(void) \
@@ -104,7 +111,8 @@ static inline unsigned int read_msa_##name(void) \
104 __asm__ __volatile__( \ 111 __asm__ __volatile__( \
105 " .set push\n" \ 112 " .set push\n" \
106 " .set noat\n" \ 113 " .set noat\n" \
107 " .word 0x787e0059 | (" #cs " << 11)\n" \ 114 " .insn\n" \
115 " .word #CFC_MSA_INSN | (" #cs " << 11)\n" \
108 " move %0, $1\n" \ 116 " move %0, $1\n" \
109 " .set pop\n" \ 117 " .set pop\n" \
110 : "=r"(reg)); \ 118 : "=r"(reg)); \
@@ -117,7 +125,8 @@ static inline void write_msa_##name(unsigned int val) \
117 " .set push\n" \ 125 " .set push\n" \
118 " .set noat\n" \ 126 " .set noat\n" \
119 " move $1, %0\n" \ 127 " move $1, %0\n" \
120 " .word 0x783e0819 | (" #cs " << 6)\n" \ 128 " .insn\n" \
129 " .word #CTC_MSA_INSN | (" #cs " << 6)\n" \
121 " .set pop\n" \ 130 " .set pop\n" \
122 : : "r"(val)); \ 131 : : "r"(val)); \
123} 132}
diff --git a/arch/mips/include/asm/netlogic/mips-extns.h b/arch/mips/include/asm/netlogic/mips-extns.h
index de9aada6f4c1..06f1f75bfa9b 100644
--- a/arch/mips/include/asm/netlogic/mips-extns.h
+++ b/arch/mips/include/asm/netlogic/mips-extns.h
@@ -146,9 +146,10 @@ static inline int hard_smp_processor_id(void)
146 146
147static inline int nlm_nodeid(void) 147static inline int nlm_nodeid(void)
148{ 148{
149 uint32_t prid = read_c0_prid(); 149 uint32_t prid = read_c0_prid() & PRID_IMP_MASK;
150 150
151 if ((prid & 0xff00) == PRID_IMP_NETLOGIC_XLP9XX) 151 if ((prid == PRID_IMP_NETLOGIC_XLP9XX) ||
152 (prid == PRID_IMP_NETLOGIC_XLP5XX))
152 return (__read_32bit_c0_register($15, 1) >> 7) & 0x7; 153 return (__read_32bit_c0_register($15, 1) >> 7) & 0x7;
153 else 154 else
154 return (__read_32bit_c0_register($15, 1) >> 5) & 0x3; 155 return (__read_32bit_c0_register($15, 1) >> 5) & 0x3;
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/iomap.h b/arch/mips/include/asm/netlogic/xlp-hal/iomap.h
index 1f23dfaa7167..805bfd21f33e 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/iomap.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/iomap.h
@@ -74,6 +74,8 @@
74#define XLP_IO_USB_OHCI2_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 4) 74#define XLP_IO_USB_OHCI2_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 4)
75#define XLP_IO_USB_OHCI3_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 5) 75#define XLP_IO_USB_OHCI3_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 5)
76 76
77#define XLP_IO_SATA_OFFSET(node) XLP_HDR_OFFSET(node, 0, 3, 2)
78
77/* XLP2xx has an updated USB block */ 79/* XLP2xx has an updated USB block */
78#define XLP2XX_IO_USB_OFFSET(node, i) XLP_HDR_OFFSET(node, 0, 4, i) 80#define XLP2XX_IO_USB_OFFSET(node, i) XLP_HDR_OFFSET(node, 0, 4, i)
79#define XLP2XX_IO_USB_XHCI0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 4, 1) 81#define XLP2XX_IO_USB_XHCI0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 4, 1)
@@ -103,13 +105,11 @@
103#define XLP_IO_SYS_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 5) 105#define XLP_IO_SYS_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 5)
104#define XLP_IO_JTAG_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 6) 106#define XLP_IO_JTAG_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 6)
105 107
108/* Flash */
106#define XLP_IO_NOR_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 0) 109#define XLP_IO_NOR_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 0)
107#define XLP_IO_NAND_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 1) 110#define XLP_IO_NAND_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 1)
108#define XLP_IO_SPI_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 2) 111#define XLP_IO_SPI_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 2)
109/* SD flash */ 112#define XLP_IO_MMC_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 3)
110#define XLP_IO_SD_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 3)
111#define XLP_IO_MMC_OFFSET(node, slot) \
112 ((XLP_IO_SD_OFFSET(node))+(slot*0x100)+XLP_IO_PCI_HDRSZ)
113 113
114/* Things have changed drastically in XLP 9XX */ 114/* Things have changed drastically in XLP 9XX */
115#define XLP9XX_HDR_OFFSET(n, d, f) \ 115#define XLP9XX_HDR_OFFSET(n, d, f) \
@@ -120,6 +120,8 @@
120#define XLP9XX_IO_UART_OFFSET(node) XLP9XX_HDR_OFFSET(node, 2, 2) 120#define XLP9XX_IO_UART_OFFSET(node) XLP9XX_HDR_OFFSET(node, 2, 2)
121#define XLP9XX_IO_SYS_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 0) 121#define XLP9XX_IO_SYS_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 0)
122#define XLP9XX_IO_FUSE_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 1) 122#define XLP9XX_IO_FUSE_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 1)
123#define XLP9XX_IO_CLOCK_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 2)
124#define XLP9XX_IO_POWER_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 3)
123#define XLP9XX_IO_JTAG_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 4) 125#define XLP9XX_IO_JTAG_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 4)
124 126
125#define XLP9XX_IO_PCIE_OFFSET(node, i) XLP9XX_HDR_OFFSET(node, 1, i) 127#define XLP9XX_IO_PCIE_OFFSET(node, i) XLP9XX_HDR_OFFSET(node, 1, i)
@@ -135,11 +137,11 @@
135/* XLP9XX on-chip SATA controller */ 137/* XLP9XX on-chip SATA controller */
136#define XLP9XX_IO_SATA_OFFSET(node) XLP9XX_HDR_OFFSET(node, 3, 2) 138#define XLP9XX_IO_SATA_OFFSET(node) XLP9XX_HDR_OFFSET(node, 3, 2)
137 139
140/* Flash */
138#define XLP9XX_IO_NOR_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 0) 141#define XLP9XX_IO_NOR_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 0)
139#define XLP9XX_IO_NAND_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 1) 142#define XLP9XX_IO_NAND_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 1)
140#define XLP9XX_IO_SPI_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 2) 143#define XLP9XX_IO_SPI_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 2)
141/* SD flash */ 144#define XLP9XX_IO_MMC_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 3)
142#define XLP9XX_IO_MMCSD_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 3)
143 145
144/* PCI config header register id's */ 146/* PCI config header register id's */
145#define XLP_PCI_CFGREG0 0x00 147#define XLP_PCI_CFGREG0 0x00
@@ -186,8 +188,10 @@
186#define PCI_DEVICE_ID_NLM_NOR 0x1015 188#define PCI_DEVICE_ID_NLM_NOR 0x1015
187#define PCI_DEVICE_ID_NLM_NAND 0x1016 189#define PCI_DEVICE_ID_NLM_NAND 0x1016
188#define PCI_DEVICE_ID_NLM_MMC 0x1018 190#define PCI_DEVICE_ID_NLM_MMC 0x1018
189#define PCI_DEVICE_ID_NLM_XHCI 0x101d 191#define PCI_DEVICE_ID_NLM_SATA 0x101A
192#define PCI_DEVICE_ID_NLM_XHCI 0x101D
190 193
194#define PCI_DEVICE_ID_XLP9XX_MMC 0x9018
191#define PCI_DEVICE_ID_XLP9XX_SATA 0x901A 195#define PCI_DEVICE_ID_XLP9XX_SATA 0x901A
192#define PCI_DEVICE_ID_XLP9XX_XHCI 0x901D 196#define PCI_DEVICE_ID_XLP9XX_XHCI 0x901D
193 197
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h b/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h
index d4deb87ad069..91540f41e1e4 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h
@@ -69,6 +69,20 @@
69#define PCIE_9XX_BYTE_SWAP_IO_BASE 0x25e 69#define PCIE_9XX_BYTE_SWAP_IO_BASE 0x25e
70#define PCIE_9XX_BYTE_SWAP_IO_LIM 0x25f 70#define PCIE_9XX_BYTE_SWAP_IO_LIM 0x25f
71 71
72#define PCIE_9XX_BRIDGE_MSIX_ADDR_BASE 0x264
73#define PCIE_9XX_BRIDGE_MSIX_ADDR_LIMIT 0x265
74#define PCIE_9XX_MSI_STATUS 0x283
75#define PCIE_9XX_MSI_EN 0x284
76/* 128 MSIX vectors available in 9xx */
77#define PCIE_9XX_MSIX_STATUS0 0x286
78#define PCIE_9XX_MSIX_STATUSX(n) (n + 0x286)
79#define PCIE_9XX_MSIX_VEC 0x296
80#define PCIE_9XX_MSIX_VECX(n) (n + 0x296)
81#define PCIE_9XX_INT_STATUS0 0x397
82#define PCIE_9XX_INT_STATUS1 0x398
83#define PCIE_9XX_INT_EN0 0x399
84#define PCIE_9XX_INT_EN1 0x39a
85
72/* other */ 86/* other */
73#define PCIE_NLINKS 4 87#define PCIE_NLINKS 4
74 88
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/pic.h b/arch/mips/include/asm/netlogic/xlp-hal/pic.h
index f10bf3bba58f..41cefe94f0c9 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/pic.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/pic.h
@@ -199,6 +199,10 @@
199#define PIC_IRT_PCIE_LINK_3_INDEX 81 199#define PIC_IRT_PCIE_LINK_3_INDEX 81
200#define PIC_IRT_PCIE_LINK_INDEX(num) ((num) + PIC_IRT_PCIE_LINK_0_INDEX) 200#define PIC_IRT_PCIE_LINK_INDEX(num) ((num) + PIC_IRT_PCIE_LINK_0_INDEX)
201 201
202#define PIC_9XX_IRT_PCIE_LINK_0_INDEX 191
203#define PIC_9XX_IRT_PCIE_LINK_INDEX(num) \
204 ((num) + PIC_9XX_IRT_PCIE_LINK_0_INDEX)
205
202#define PIC_CLOCK_TIMER 7 206#define PIC_CLOCK_TIMER 7
203 207
204#if !defined(LOCORE) && !defined(__ASSEMBLY__) 208#if !defined(LOCORE) && !defined(__ASSEMBLY__)
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/sys.h b/arch/mips/include/asm/netlogic/xlp-hal/sys.h
index d9b107ffca93..bc7bddf25be9 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/sys.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/sys.h
@@ -118,6 +118,10 @@
118#define SYS_SCRTCH3 0x4c 118#define SYS_SCRTCH3 0x4c
119 119
120/* PLL registers XLP2XX */ 120/* PLL registers XLP2XX */
121#define SYS_CPU_PLL_CTRL0(core) (0x1c0 + (core * 4))
122#define SYS_CPU_PLL_CTRL1(core) (0x1c1 + (core * 4))
123#define SYS_CPU_PLL_CTRL2(core) (0x1c2 + (core * 4))
124#define SYS_CPU_PLL_CTRL3(core) (0x1c3 + (core * 4))
121#define SYS_PLL_CTRL0 0x240 125#define SYS_PLL_CTRL0 0x240
122#define SYS_PLL_CTRL1 0x241 126#define SYS_PLL_CTRL1 0x241
123#define SYS_PLL_CTRL2 0x242 127#define SYS_PLL_CTRL2 0x242
@@ -147,6 +151,32 @@
147#define SYS_SYS_PLL_MEM_REQ 0x2a3 151#define SYS_SYS_PLL_MEM_REQ 0x2a3
148#define SYS_PLL_MEM_STAT 0x2a4 152#define SYS_PLL_MEM_STAT 0x2a4
149 153
154/* PLL registers XLP9XX */
155#define SYS_9XX_CPU_PLL_CTRL0(core) (0xc0 + (core * 4))
156#define SYS_9XX_CPU_PLL_CTRL1(core) (0xc1 + (core * 4))
157#define SYS_9XX_CPU_PLL_CTRL2(core) (0xc2 + (core * 4))
158#define SYS_9XX_CPU_PLL_CTRL3(core) (0xc3 + (core * 4))
159#define SYS_9XX_DMC_PLL_CTRL0 0x140
160#define SYS_9XX_DMC_PLL_CTRL1 0x141
161#define SYS_9XX_DMC_PLL_CTRL2 0x142
162#define SYS_9XX_DMC_PLL_CTRL3 0x143
163#define SYS_9XX_PLL_CTRL0 0x144
164#define SYS_9XX_PLL_CTRL1 0x145
165#define SYS_9XX_PLL_CTRL2 0x146
166#define SYS_9XX_PLL_CTRL3 0x147
167
168#define SYS_9XX_PLL_CTRL0_DEVX(x) (0x148 + (x) * 4)
169#define SYS_9XX_PLL_CTRL1_DEVX(x) (0x149 + (x) * 4)
170#define SYS_9XX_PLL_CTRL2_DEVX(x) (0x14a + (x) * 4)
171#define SYS_9XX_PLL_CTRL3_DEVX(x) (0x14b + (x) * 4)
172
173#define SYS_9XX_CPU_PLL_CHG_CTRL 0x188
174#define SYS_9XX_PLL_CHG_CTRL 0x189
175#define SYS_9XX_CLK_DEV_DIS 0x18a
176#define SYS_9XX_CLK_DEV_SEL 0x18b
177#define SYS_9XX_CLK_DEV_DIV 0x18d
178#define SYS_9XX_CLK_DEV_CHG 0x18f
179
150/* Registers changed on 9XX */ 180/* Registers changed on 9XX */
151#define SYS_9XX_POWER_ON_RESET_CFG 0x00 181#define SYS_9XX_POWER_ON_RESET_CFG 0x00
152#define SYS_9XX_CHIP_RESET 0x01 182#define SYS_9XX_CHIP_RESET 0x01
@@ -170,6 +200,11 @@
170#define nlm_get_fuse_regbase(node) \ 200#define nlm_get_fuse_regbase(node) \
171 (nlm_get_fuse_pcibase(node) + XLP_IO_PCI_HDRSZ) 201 (nlm_get_fuse_pcibase(node) + XLP_IO_PCI_HDRSZ)
172 202
203#define nlm_get_clock_pcibase(node) \
204 nlm_pcicfg_base(XLP9XX_IO_CLOCK_OFFSET(node))
205#define nlm_get_clock_regbase(node) \
206 (nlm_get_clock_pcibase(node) + XLP_IO_PCI_HDRSZ)
207
173unsigned int nlm_get_pic_frequency(int node); 208unsigned int nlm_get_pic_frequency(int node);
174#endif 209#endif
175#endif 210#endif
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/xlp.h b/arch/mips/include/asm/netlogic/xlp-hal/xlp.h
index 2b0c9599ebe5..a862b93223cc 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/xlp.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/xlp.h
@@ -58,6 +58,10 @@
58#define PIC_I2C_1_IRQ 31 58#define PIC_I2C_1_IRQ 31
59#define PIC_I2C_2_IRQ 32 59#define PIC_I2C_2_IRQ 32
60#define PIC_I2C_3_IRQ 33 60#define PIC_I2C_3_IRQ 33
61#define PIC_SPI_IRQ 34
62#define PIC_NAND_IRQ 37
63#define PIC_SATA_IRQ 38
64#define PIC_GPIO_IRQ 39
61 65
62#define PIC_PCIE_LINK_MSI_IRQ_BASE 44 /* 44 - 47 MSI IRQ */ 66#define PIC_PCIE_LINK_MSI_IRQ_BASE 44 /* 44 - 47 MSI IRQ */
63#define PIC_PCIE_LINK_MSI_IRQ(i) (44 + (i)) 67#define PIC_PCIE_LINK_MSI_IRQ(i) (44 + (i))
@@ -66,8 +70,9 @@
66#define PIC_PCIE_MSIX_IRQ_BASE 48 /* 48 - 51 MSI-X IRQ */ 70#define PIC_PCIE_MSIX_IRQ_BASE 48 /* 48 - 51 MSI-X IRQ */
67#define PIC_PCIE_MSIX_IRQ(i) (48 + (i)) 71#define PIC_PCIE_MSIX_IRQ(i) (48 + (i))
68 72
69#define NLM_MSIX_VEC_BASE 96 /* 96 - 127 - MSIX mapped */ 73/* XLP9xx and XLP8xx has 128 and 32 MSIX vectors respectively */
70#define NLM_MSI_VEC_BASE 128 /* 128 -255 - MSI mapped */ 74#define NLM_MSIX_VEC_BASE 96 /* 96 - 223 - MSIX mapped */
75#define NLM_MSI_VEC_BASE 224 /* 224 -351 - MSI mapped */
71 76
72#define NLM_PIC_INDIRECT_VEC_BASE 512 77#define NLM_PIC_INDIRECT_VEC_BASE 512
73#define NLM_GPIO_VEC_BASE 768 78#define NLM_GPIO_VEC_BASE 768
@@ -95,17 +100,19 @@ void *xlp_dt_init(void *fdtp);
95 100
96static inline int cpu_is_xlpii(void) 101static inline int cpu_is_xlpii(void)
97{ 102{
98 int chip = read_c0_prid() & 0xff00; 103 int chip = read_c0_prid() & PRID_IMP_MASK;
99 104
100 return chip == PRID_IMP_NETLOGIC_XLP2XX || 105 return chip == PRID_IMP_NETLOGIC_XLP2XX ||
101 chip == PRID_IMP_NETLOGIC_XLP9XX; 106 chip == PRID_IMP_NETLOGIC_XLP9XX ||
107 chip == PRID_IMP_NETLOGIC_XLP5XX;
102} 108}
103 109
104static inline int cpu_is_xlp9xx(void) 110static inline int cpu_is_xlp9xx(void)
105{ 111{
106 int chip = read_c0_prid() & 0xff00; 112 int chip = read_c0_prid() & PRID_IMP_MASK;
107 113
108 return chip == PRID_IMP_NETLOGIC_XLP9XX; 114 return chip == PRID_IMP_NETLOGIC_XLP9XX ||
115 chip == PRID_IMP_NETLOGIC_XLP5XX;
109} 116}
110#endif /* !__ASSEMBLY__ */ 117#endif /* !__ASSEMBLY__ */
111#endif /* _ASM_NLM_XLP_H */ 118#endif /* _ASM_NLM_XLP_H */
diff --git a/arch/mips/include/asm/nile4.h b/arch/mips/include/asm/nile4.h
index 2e2436d0e94e..99e97f8bfbca 100644
--- a/arch/mips/include/asm/nile4.h
+++ b/arch/mips/include/asm/nile4.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * asm-mips/nile4.h -- NEC Vrc-5074 Nile 4 definitions 2 * asm-mips/nile4.h -- NEC Vrc-5074 Nile 4 definitions
3 * 3 *
4 * Copyright (C) 2000 Geert Uytterhoeven <geert@sonycom.com> 4 * Copyright (C) 2000 Geert Uytterhoeven <geert@linux-m68k.org>
5 * Sony Software Development Center Europe (SDCE), Brussels 5 * Sony Software Development Center Europe (SDCE), Brussels
6 * 6 *
7 * This file is based on the following documentation: 7 * This file is based on the following documentation:
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index f5d77b91537f..d781f9e66884 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -211,7 +211,6 @@ union octeon_cvmemctl {
211 211
212extern void octeon_write_lcd(const char *s); 212extern void octeon_write_lcd(const char *s);
213extern void octeon_check_cpu_bist(void); 213extern void octeon_check_cpu_bist(void);
214extern int octeon_get_boot_debug_flag(void);
215extern int octeon_get_boot_uart(void); 214extern int octeon_get_boot_uart(void);
216 215
217struct uart_port; 216struct uart_port;
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 008324d1c261..539ddd148bbb 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -32,6 +32,8 @@ struct vm_area_struct;
32 _page_cachable_default) 32 _page_cachable_default)
33#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ 33#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
34 _PAGE_GLOBAL | _page_cachable_default) 34 _PAGE_GLOBAL | _page_cachable_default)
35#define PAGE_KERNEL_NC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
36 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
35#define PAGE_USERIO __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \ 37#define PAGE_USERIO __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \
36 _page_cachable_default) 38 _page_cachable_default)
37#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \ 39#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
diff --git a/arch/mips/include/asm/pm-cps.h b/arch/mips/include/asm/pm-cps.h
new file mode 100644
index 000000000000..625eda53d571
--- /dev/null
+++ b/arch/mips/include/asm/pm-cps.h
@@ -0,0 +1,51 @@
1/*
2 * Copyright (C) 2014 Imagination Technologies
3 * Author: Paul Burton <paul.burton@imgtec.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 */
10
11#ifndef __MIPS_ASM_PM_CPS_H__
12#define __MIPS_ASM_PM_CPS_H__
13
14/*
15 * The CM & CPC can only handle coherence & power control on a per-core basis,
16 * thus in an MT system the VPEs within each core are coupled and can only
17 * enter or exit states requiring CM or CPC assistance in unison.
18 */
19#ifdef CONFIG_MIPS_MT
20# define coupled_coherence cpu_has_mipsmt
21#else
22# define coupled_coherence 0
23#endif
24
25/* Enumeration of possible PM states */
26enum cps_pm_state {
27 CPS_PM_NC_WAIT, /* MIPS wait instruction, non-coherent */
28 CPS_PM_CLOCK_GATED, /* Core clock gated */
29 CPS_PM_POWER_GATED, /* Core power gated */
30 CPS_PM_STATE_COUNT,
31};
32
33/**
34 * cps_pm_support_state - determine whether the system supports a PM state
35 * @state: the state to test for support
36 *
37 * Returns true if the system supports the given state, otherwise false.
38 */
39extern bool cps_pm_support_state(enum cps_pm_state state);
40
41/**
42 * cps_pm_enter_state - enter a PM state
43 * @state: the state to enter
44 *
45 * Enter the given PM state. If coupled_coherence is non-zero then it is
46 * expected that this function be called at approximately the same time on
47 * each coupled CPU. Returns 0 on successful entry & exit, otherwise -errno.
48 */
49extern int cps_pm_enter_state(enum cps_pm_state state);
50
51#endif /* __MIPS_ASM_PM_CPS_H__ */
diff --git a/arch/mips/include/asm/pm.h b/arch/mips/include/asm/pm.h
new file mode 100644
index 000000000000..7c03469e043f
--- /dev/null
+++ b/arch/mips/include/asm/pm.h
@@ -0,0 +1,159 @@
1/*
2 * Copyright (C) 2014 Imagination Technologies Ltd
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * PM helper macros for CPU power off (e.g. Suspend-to-RAM).
10 */
11
12#ifndef __ASM_PM_H
13#define __ASM_PM_H
14
15#ifdef __ASSEMBLY__
16
17#include <asm/asm-offsets.h>
18#include <asm/asm.h>
19#include <asm/mipsregs.h>
20#include <asm/regdef.h>
21
22/* Save CPU state to stack for suspend to RAM */
23.macro SUSPEND_SAVE_REGS
24 subu sp, PT_SIZE
25 /* Call preserved GPRs */
26 LONG_S $16, PT_R16(sp)
27 LONG_S $17, PT_R17(sp)
28 LONG_S $18, PT_R18(sp)
29 LONG_S $19, PT_R19(sp)
30 LONG_S $20, PT_R20(sp)
31 LONG_S $21, PT_R21(sp)
32 LONG_S $22, PT_R22(sp)
33 LONG_S $23, PT_R23(sp)
34 LONG_S $28, PT_R28(sp)
35 LONG_S $30, PT_R30(sp)
36 LONG_S $31, PT_R31(sp)
37 /* A couple of CP0 registers with space in pt_regs */
38 mfc0 k0, CP0_STATUS
39 LONG_S k0, PT_STATUS(sp)
40.endm
41
42/* Restore CPU state from stack after resume from RAM */
43.macro RESUME_RESTORE_REGS_RETURN
44 .set push
45 .set noreorder
46 /* A couple of CP0 registers with space in pt_regs */
47 LONG_L k0, PT_STATUS(sp)
48 mtc0 k0, CP0_STATUS
49 /* Call preserved GPRs */
50 LONG_L $16, PT_R16(sp)
51 LONG_L $17, PT_R17(sp)
52 LONG_L $18, PT_R18(sp)
53 LONG_L $19, PT_R19(sp)
54 LONG_L $20, PT_R20(sp)
55 LONG_L $21, PT_R21(sp)
56 LONG_L $22, PT_R22(sp)
57 LONG_L $23, PT_R23(sp)
58 LONG_L $28, PT_R28(sp)
59 LONG_L $30, PT_R30(sp)
60 LONG_L $31, PT_R31(sp)
61 /* Pop and return */
62 jr ra
63 addiu sp, PT_SIZE
64 .set pop
65.endm
66
67/* Get address of static suspend state into t1 */
68.macro LA_STATIC_SUSPEND
69 la t1, mips_static_suspend_state
70.endm
71
72/* Save important CPU state for early restoration to global data */
73.macro SUSPEND_SAVE_STATIC
74#ifdef CONFIG_EVA
75 /*
76 * Segment configuration is saved in global data where it can be easily
77 * reloaded without depending on the segment configuration.
78 */
79 mfc0 k0, CP0_PAGEMASK, 2 /* SegCtl0 */
80 LONG_S k0, SSS_SEGCTL0(t1)
81 mfc0 k0, CP0_PAGEMASK, 3 /* SegCtl1 */
82 LONG_S k0, SSS_SEGCTL1(t1)
83 mfc0 k0, CP0_PAGEMASK, 4 /* SegCtl2 */
84 LONG_S k0, SSS_SEGCTL2(t1)
85#endif
86 /* save stack pointer (pointing to GPRs) */
87 LONG_S sp, SSS_SP(t1)
88.endm
89
90/* Restore important CPU state early from global data */
91.macro RESUME_RESTORE_STATIC
92#ifdef CONFIG_EVA
93 /*
94 * Segment configuration must be restored prior to any access to
95 * allocated memory, as it may reside outside of the legacy kernel
96 * segments.
97 */
98 LONG_L k0, SSS_SEGCTL0(t1)
99 mtc0 k0, CP0_PAGEMASK, 2 /* SegCtl0 */
100 LONG_L k0, SSS_SEGCTL1(t1)
101 mtc0 k0, CP0_PAGEMASK, 3 /* SegCtl1 */
102 LONG_L k0, SSS_SEGCTL2(t1)
103 mtc0 k0, CP0_PAGEMASK, 4 /* SegCtl2 */
104 tlbw_use_hazard
105#endif
106 /* restore stack pointer (pointing to GPRs) */
107 LONG_L sp, SSS_SP(t1)
108.endm
109
110/* flush caches to make sure context has reached memory */
111.macro SUSPEND_CACHE_FLUSH
112 .extern __wback_cache_all
113 .set push
114 .set noreorder
115 la t1, __wback_cache_all
116 LONG_L t0, 0(t1)
117 jalr t0
118 nop
119 .set pop
120 .endm
121
122/* Save suspend state and flush data caches to RAM */
123.macro SUSPEND_SAVE
124 SUSPEND_SAVE_REGS
125 LA_STATIC_SUSPEND
126 SUSPEND_SAVE_STATIC
127 SUSPEND_CACHE_FLUSH
128.endm
129
130/* Restore saved state after resume from RAM and return */
131.macro RESUME_RESTORE_RETURN
132 LA_STATIC_SUSPEND
133 RESUME_RESTORE_STATIC
134 RESUME_RESTORE_REGS_RETURN
135.endm
136
137#else /* __ASSEMBLY__ */
138
139/**
140 * struct mips_static_suspend_state - Core saved CPU state across S2R.
141 * @segctl: CP0 Segment control registers.
142 * @sp: Stack frame where GP register context is saved.
143 *
144 * This structure contains minimal CPU state that must be saved in static kernel
145 * data in order to be able to restore the rest of the state. This includes
146 * segmentation configuration in the case of EVA being enabled, as they must be
147 * restored prior to any kmalloc'd memory being referenced (even the stack
148 * pointer).
149 */
150struct mips_static_suspend_state {
151#ifdef CONFIG_EVA
152 unsigned long segctl[3];
153#endif
154 unsigned long sp;
155};
156
157#endif /* !__ASSEMBLY__ */
158
159#endif /* __ASM_PM_HELPERS_H */
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index bf1ac8d35783..7e6e682aece3 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -39,9 +39,6 @@ struct pt_regs {
39 unsigned long cp0_badvaddr; 39 unsigned long cp0_badvaddr;
40 unsigned long cp0_cause; 40 unsigned long cp0_cause;
41 unsigned long cp0_epc; 41 unsigned long cp0_epc;
42#ifdef CONFIG_MIPS_MT_SMTC
43 unsigned long cp0_tcstatus;
44#endif /* CONFIG_MIPS_MT_SMTC */
45#ifdef CONFIG_CPU_CAVIUM_OCTEON 42#ifdef CONFIG_CPU_CAVIUM_OCTEON
46 unsigned long long mpl[3]; /* MTM{0,1,2} */ 43 unsigned long long mpl[3]; /* MTM{0,1,2} */
47 unsigned long long mtp[3]; /* MTP{0,1,2} */ 44 unsigned long long mtp[3]; /* MTP{0,1,2} */
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index ca64cbe44493..0b8bd28a0df1 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -43,11 +43,10 @@
43 : "i" (op), "R" (*(unsigned char *)(addr))) 43 : "i" (op), "R" (*(unsigned char *)(addr)))
44 44
45#ifdef CONFIG_MIPS_MT 45#ifdef CONFIG_MIPS_MT
46
46/* 47/*
47 * Temporary hacks for SMTC debug. Optionally force single-threaded 48 * Optionally force single-threaded execution during I-cache flushes.
48 * execution during I-cache flushes.
49 */ 49 */
50
51#define PROTECT_CACHE_FLUSHES 1 50#define PROTECT_CACHE_FLUSHES 1
52 51
53#ifdef PROTECT_CACHE_FLUSHES 52#ifdef PROTECT_CACHE_FLUSHES
@@ -524,6 +523,8 @@ __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32,
524__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, ) 523__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
525__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, ) 524__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
526__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, ) 525__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
526__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
527__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
527__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, ) 528__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
528 529
529__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, ) 530__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
diff --git a/arch/mips/include/asm/sgi/ip22.h b/arch/mips/include/asm/sgi/ip22.h
index 8db1a3588cf2..87ec9eaa04e3 100644
--- a/arch/mips/include/asm/sgi/ip22.h
+++ b/arch/mips/include/asm/sgi/ip22.h
@@ -69,6 +69,8 @@
69#define SGI_EISA_IRQ SGINT_LOCAL2 + 3 /* EISA interrupts */ 69#define SGI_EISA_IRQ SGINT_LOCAL2 + 3 /* EISA interrupts */
70#define SGI_KEYBD_IRQ SGINT_LOCAL2 + 4 /* keyboard */ 70#define SGI_KEYBD_IRQ SGINT_LOCAL2 + 4 /* keyboard */
71#define SGI_SERIAL_IRQ SGINT_LOCAL2 + 5 /* onboard serial */ 71#define SGI_SERIAL_IRQ SGINT_LOCAL2 + 5 /* onboard serial */
72#define SGI_GIOEXP0_IRQ (SGINT_LOCAL2 + 6) /* Indy GIO EXP0 */
73#define SGI_GIOEXP1_IRQ (SGINT_LOCAL2 + 7) /* Indy GIO EXP1 */
72 74
73#define ip22_is_fullhouse() (sgioc->sysid & SGIOC_SYSID_FULLHOUSE) 75#define ip22_is_fullhouse() (sgioc->sysid & SGIOC_SYSID_FULLHOUSE)
74 76
diff --git a/arch/mips/include/asm/smp-cps.h b/arch/mips/include/asm/smp-cps.h
index d60d1a2180d1..a06a08a9afc6 100644
--- a/arch/mips/include/asm/smp-cps.h
+++ b/arch/mips/include/asm/smp-cps.h
@@ -13,17 +13,28 @@
13 13
14#ifndef __ASSEMBLY__ 14#ifndef __ASSEMBLY__
15 15
16struct boot_config { 16struct vpe_boot_config {
17 unsigned int core;
18 unsigned int vpe;
19 unsigned long pc; 17 unsigned long pc;
20 unsigned long sp; 18 unsigned long sp;
21 unsigned long gp; 19 unsigned long gp;
22}; 20};
23 21
24extern struct boot_config mips_cps_bootcfg; 22struct core_boot_config {
23 atomic_t vpe_mask;
24 struct vpe_boot_config *vpe_config;
25};
26
27extern struct core_boot_config *mips_cps_core_bootcfg;
25 28
26extern void mips_cps_core_entry(void); 29extern void mips_cps_core_entry(void);
30extern void mips_cps_core_init(void);
31
32extern struct vpe_boot_config *mips_cps_boot_vpes(void);
33
34extern bool mips_cps_smp_in_use(void);
35
36extern void mips_cps_pm_save(void);
37extern void mips_cps_pm_restore(void);
27 38
28#else /* __ASSEMBLY__ */ 39#else /* __ASSEMBLY__ */
29 40
diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h
index 73d35b18fb64..6ba1fb8b11e2 100644
--- a/arch/mips/include/asm/smp-ops.h
+++ b/arch/mips/include/asm/smp-ops.h
@@ -26,7 +26,6 @@ struct plat_smp_ops {
26 void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action); 26 void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action);
27 void (*init_secondary)(void); 27 void (*init_secondary)(void);
28 void (*smp_finish)(void); 28 void (*smp_finish)(void);
29 void (*cpus_done)(void);
30 void (*boot_secondary)(int cpu, struct task_struct *idle); 29 void (*boot_secondary)(int cpu, struct task_struct *idle);
31 void (*smp_setup)(void); 30 void (*smp_setup)(void);
32 void (*prepare_cpus)(unsigned int max_cpus); 31 void (*prepare_cpus)(unsigned int max_cpus);
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index efa02acd3dd5..b037334fca22 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -46,6 +46,9 @@ extern int __cpu_logical_map[NR_CPUS];
46 46
47extern volatile cpumask_t cpu_callin_map; 47extern volatile cpumask_t cpu_callin_map;
48 48
49/* Mask of CPUs which are currently definitely operating coherently */
50extern cpumask_t cpu_coherent_mask;
51
49extern void asmlinkage smp_bootstrap(void); 52extern void asmlinkage smp_bootstrap(void);
50 53
51/* 54/*
diff --git a/arch/mips/include/asm/smtc.h b/arch/mips/include/asm/smtc.h
deleted file mode 100644
index e56b439b7871..000000000000
--- a/arch/mips/include/asm/smtc.h
+++ /dev/null
@@ -1,78 +0,0 @@
1#ifndef _ASM_SMTC_MT_H
2#define _ASM_SMTC_MT_H
3
4/*
5 * Definitions for SMTC multitasking on MIPS MT cores
6 */
7
8#include <asm/mips_mt.h>
9#include <asm/smtc_ipi.h>
10
11/*
12 * System-wide SMTC status information
13 */
14
15extern unsigned int smtc_status;
16
17#define SMTC_TLB_SHARED 0x00000001
18#define SMTC_MTC_ACTIVE 0x00000002
19
20/*
21 * TLB/ASID Management information
22 */
23
24#define MAX_SMTC_TLBS 2
25#define MAX_SMTC_ASIDS 256
26#if NR_CPUS <= 8
27typedef char asiduse;
28#else
29#if NR_CPUS <= 16
30typedef short asiduse;
31#else
32typedef long asiduse;
33#endif
34#endif
35
36/*
37 * VPE Management information
38 */
39
40#define MAX_SMTC_VPES MAX_SMTC_TLBS /* FIXME: May not always be true. */
41
42extern asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
43
44struct mm_struct;
45struct task_struct;
46
47void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu);
48void self_ipi(struct smtc_ipi *);
49void smtc_flush_tlb_asid(unsigned long asid);
50extern int smtc_build_cpu_map(int startslot);
51extern void smtc_prepare_cpus(int cpus);
52extern void smtc_smp_finish(void);
53extern void smtc_boot_secondary(int cpu, struct task_struct *t);
54extern void smtc_cpus_done(void);
55extern void smtc_init_secondary(void);
56
57
58/*
59 * Sharing the TLB between multiple VPEs means that the
60 * "random" index selection function is not allowed to
61 * select the current value of the Index register. To
62 * avoid additional TLB pressure, the Index registers
63 * are "parked" with an non-Valid value.
64 */
65
66#define PARKED_INDEX ((unsigned int)0x80000000)
67
68/*
69 * Define low-level interrupt mask for IPIs, if necessary.
70 * By default, use SW interrupt 1, which requires no external
71 * hardware support, but which works only for single-core
72 * MIPS MT systems.
73 */
74#ifndef MIPS_CPU_IPI_IRQ
75#define MIPS_CPU_IPI_IRQ 1
76#endif
77
78#endif /* _ASM_SMTC_MT_H */
diff --git a/arch/mips/include/asm/smtc_ipi.h b/arch/mips/include/asm/smtc_ipi.h
deleted file mode 100644
index 15278dbd7e79..000000000000
--- a/arch/mips/include/asm/smtc_ipi.h
+++ /dev/null
@@ -1,129 +0,0 @@
1/*
2 * Definitions used in MIPS MT SMTC "Interprocessor Interrupt" code.
3 */
4#ifndef __ASM_SMTC_IPI_H
5#define __ASM_SMTC_IPI_H
6
7#include <linux/spinlock.h>
8
9//#define SMTC_IPI_DEBUG
10
11#ifdef SMTC_IPI_DEBUG
12#include <asm/mipsregs.h>
13#include <asm/mipsmtregs.h>
14#endif /* SMTC_IPI_DEBUG */
15
16/*
17 * An IPI "message"
18 */
19
20struct smtc_ipi {
21 struct smtc_ipi *flink;
22 int type;
23 void *arg;
24 int dest;
25#ifdef SMTC_IPI_DEBUG
26 int sender;
27 long stamp;
28#endif /* SMTC_IPI_DEBUG */
29};
30
31/*
32 * Defined IPI Types
33 */
34
35#define LINUX_SMP_IPI 1
36#define SMTC_CLOCK_TICK 2
37#define IRQ_AFFINITY_IPI 3
38
39/*
40 * A queue of IPI messages
41 */
42
43struct smtc_ipi_q {
44 struct smtc_ipi *head;
45 spinlock_t lock;
46 struct smtc_ipi *tail;
47 int depth;
48 int resched_flag; /* reschedule already queued */
49};
50
51static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p)
52{
53 unsigned long flags;
54
55 spin_lock_irqsave(&q->lock, flags);
56 if (q->head == NULL)
57 q->head = q->tail = p;
58 else
59 q->tail->flink = p;
60 p->flink = NULL;
61 q->tail = p;
62 q->depth++;
63#ifdef SMTC_IPI_DEBUG
64 p->sender = read_c0_tcbind();
65 p->stamp = read_c0_count();
66#endif /* SMTC_IPI_DEBUG */
67 spin_unlock_irqrestore(&q->lock, flags);
68}
69
70static inline struct smtc_ipi *__smtc_ipi_dq(struct smtc_ipi_q *q)
71{
72 struct smtc_ipi *p;
73
74 if (q->head == NULL)
75 p = NULL;
76 else {
77 p = q->head;
78 q->head = q->head->flink;
79 q->depth--;
80 /* Arguably unnecessary, but leaves queue cleaner */
81 if (q->head == NULL)
82 q->tail = NULL;
83 }
84
85 return p;
86}
87
88static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
89{
90 unsigned long flags;
91 struct smtc_ipi *p;
92
93 spin_lock_irqsave(&q->lock, flags);
94 p = __smtc_ipi_dq(q);
95 spin_unlock_irqrestore(&q->lock, flags);
96
97 return p;
98}
99
100static inline void smtc_ipi_req(struct smtc_ipi_q *q, struct smtc_ipi *p)
101{
102 unsigned long flags;
103
104 spin_lock_irqsave(&q->lock, flags);
105 if (q->head == NULL) {
106 q->head = q->tail = p;
107 p->flink = NULL;
108 } else {
109 p->flink = q->head;
110 q->head = p;
111 }
112 q->depth++;
113 spin_unlock_irqrestore(&q->lock, flags);
114}
115
116static inline int smtc_ipi_qdepth(struct smtc_ipi_q *q)
117{
118 unsigned long flags;
119 int retval;
120
121 spin_lock_irqsave(&q->lock, flags);
122 retval = q->depth;
123 spin_unlock_irqrestore(&q->lock, flags);
124 return retval;
125}
126
127extern void smtc_send_ipi(int cpu, int type, unsigned int action);
128
129#endif /* __ASM_SMTC_IPI_H */
diff --git a/arch/mips/include/asm/smtc_proc.h b/arch/mips/include/asm/smtc_proc.h
deleted file mode 100644
index 25da651f1f5f..000000000000
--- a/arch/mips/include/asm/smtc_proc.h
+++ /dev/null
@@ -1,23 +0,0 @@
1/*
2 * Definitions for SMTC /proc entries
3 * Copyright(C) 2005 MIPS Technologies Inc.
4 */
5#ifndef __ASM_SMTC_PROC_H
6#define __ASM_SMTC_PROC_H
7
8/*
9 * per-"CPU" statistics
10 */
11
12struct smtc_cpu_proc {
13 unsigned long timerints;
14 unsigned long selfipis;
15};
16
17extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
18
19/* Count of number of recoveries of "stolen" FPU access rights on 34K */
20
21extern atomic_t smtc_fpu_recoveries;
22
23#endif /* __ASM_SMTC_PROC_H */
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index d301e108d5b8..b188c797565c 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -19,22 +19,12 @@
19#include <asm/asm-offsets.h> 19#include <asm/asm-offsets.h>
20#include <asm/thread_info.h> 20#include <asm/thread_info.h>
21 21
22/* 22#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
23 * For SMTC kernel, global IE should be left set, and interrupts
24 * controlled exclusively via IXMT.
25 */
26#ifdef CONFIG_MIPS_MT_SMTC
27#define STATMASK 0x1e
28#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
29#define STATMASK 0x3f 23#define STATMASK 0x3f
30#else 24#else
31#define STATMASK 0x1f 25#define STATMASK 0x1f
32#endif 26#endif
33 27
34#ifdef CONFIG_MIPS_MT_SMTC
35#include <asm/mipsmtregs.h>
36#endif /* CONFIG_MIPS_MT_SMTC */
37
38 .macro SAVE_AT 28 .macro SAVE_AT
39 .set push 29 .set push
40 .set noat 30 .set noat
@@ -186,16 +176,6 @@
186 mfc0 v1, CP0_STATUS 176 mfc0 v1, CP0_STATUS
187 LONG_S $2, PT_R2(sp) 177 LONG_S $2, PT_R2(sp)
188 LONG_S v1, PT_STATUS(sp) 178 LONG_S v1, PT_STATUS(sp)
189#ifdef CONFIG_MIPS_MT_SMTC
190 /*
191 * Ideally, these instructions would be shuffled in
192 * to cover the pipeline delay.
193 */
194 .set mips32
195 mfc0 k0, CP0_TCSTATUS
196 .set mips0
197 LONG_S k0, PT_TCSTATUS(sp)
198#endif /* CONFIG_MIPS_MT_SMTC */
199 LONG_S $4, PT_R4(sp) 179 LONG_S $4, PT_R4(sp)
200 mfc0 v1, CP0_CAUSE 180 mfc0 v1, CP0_CAUSE
201 LONG_S $5, PT_R5(sp) 181 LONG_S $5, PT_R5(sp)
@@ -321,36 +301,6 @@
321 .set push 301 .set push
322 .set reorder 302 .set reorder
323 .set noat 303 .set noat
324#ifdef CONFIG_MIPS_MT_SMTC
325 .set mips32r2
326 /*
327 * We need to make sure the read-modify-write
328 * of Status below isn't perturbed by an interrupt
329 * or cross-TC access, so we need to do at least a DMT,
330 * protected by an interrupt-inhibit. But setting IXMT
331 * also creates a few-cycle window where an IPI could
332 * be queued and not be detected before potentially
333 * returning to a WAIT or user-mode loop. It must be
334 * replayed.
335 *
336 * We're in the middle of a context switch, and
337 * we can't dispatch it directly without trashing
338 * some registers, so we'll try to detect this unlikely
339 * case and program a software interrupt in the VPE,
340 * as would be done for a cross-VPE IPI. To accommodate
341 * the handling of that case, we're doing a DVPE instead
342 * of just a DMT here to protect against other threads.
343 * This is a lot of cruft to cover a tiny window.
344 * If you can find a better design, implement it!
345 *
346 */
347 mfc0 v0, CP0_TCSTATUS
348 ori v0, TCSTATUS_IXMT
349 mtc0 v0, CP0_TCSTATUS
350 _ehb
351 DVPE 5 # dvpe a1
352 jal mips_ihb
353#endif /* CONFIG_MIPS_MT_SMTC */
354 mfc0 a0, CP0_STATUS 304 mfc0 a0, CP0_STATUS
355 ori a0, STATMASK 305 ori a0, STATMASK
356 xori a0, STATMASK 306 xori a0, STATMASK
@@ -362,59 +312,6 @@
362 and v0, v1 312 and v0, v1
363 or v0, a0 313 or v0, a0
364 mtc0 v0, CP0_STATUS 314 mtc0 v0, CP0_STATUS
365#ifdef CONFIG_MIPS_MT_SMTC
366/*
367 * Only after EXL/ERL have been restored to status can we
368 * restore TCStatus.IXMT.
369 */
370 LONG_L v1, PT_TCSTATUS(sp)
371 _ehb
372 mfc0 a0, CP0_TCSTATUS
373 andi v1, TCSTATUS_IXMT
374 bnez v1, 0f
375
376/*
377 * We'd like to detect any IPIs queued in the tiny window
378 * above and request an software interrupt to service them
379 * when we ERET.
380 *
381 * Computing the offset into the IPIQ array of the executing
382 * TC's IPI queue in-line would be tedious. We use part of
383 * the TCContext register to hold 16 bits of offset that we
384 * can add in-line to find the queue head.
385 */
386 mfc0 v0, CP0_TCCONTEXT
387 la a2, IPIQ
388 srl v0, v0, 16
389 addu a2, a2, v0
390 LONG_L v0, 0(a2)
391 beqz v0, 0f
392/*
393 * If we have a queue, provoke dispatch within the VPE by setting C_SW1
394 */
395 mfc0 v0, CP0_CAUSE
396 ori v0, v0, C_SW1
397 mtc0 v0, CP0_CAUSE
3980:
399 /*
400 * This test should really never branch but
401 * let's be prudent here. Having atomized
402 * the shared register modifications, we can
403 * now EVPE, and must do so before interrupts
404 * are potentially re-enabled.
405 */
406 andi a1, a1, MVPCONTROL_EVP
407 beqz a1, 1f
408 evpe
4091:
410 /* We know that TCStatua.IXMT should be set from above */
411 xori a0, a0, TCSTATUS_IXMT
412 or a0, a0, v1
413 mtc0 a0, CP0_TCSTATUS
414 _ehb
415
416 .set mips0
417#endif /* CONFIG_MIPS_MT_SMTC */
418 LONG_L v1, PT_EPC(sp) 315 LONG_L v1, PT_EPC(sp)
419 MTC0 v1, CP0_EPC 316 MTC0 v1, CP0_EPC
420 LONG_L $31, PT_R31(sp) 317 LONG_L $31, PT_R31(sp)
@@ -467,33 +364,11 @@
467 * Set cp0 enable bit as sign that we're running on the kernel stack 364 * Set cp0 enable bit as sign that we're running on the kernel stack
468 */ 365 */
469 .macro CLI 366 .macro CLI
470#if !defined(CONFIG_MIPS_MT_SMTC)
471 mfc0 t0, CP0_STATUS 367 mfc0 t0, CP0_STATUS
472 li t1, ST0_CU0 | STATMASK 368 li t1, ST0_CU0 | STATMASK
473 or t0, t1 369 or t0, t1
474 xori t0, STATMASK 370 xori t0, STATMASK
475 mtc0 t0, CP0_STATUS 371 mtc0 t0, CP0_STATUS
476#else /* CONFIG_MIPS_MT_SMTC */
477 /*
478 * For SMTC, we need to set privilege
479 * and disable interrupts only for the
480 * current TC, using the TCStatus register.
481 */
482 mfc0 t0, CP0_TCSTATUS
483 /* Fortunately CU 0 is in the same place in both registers */
484 /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
485 li t1, ST0_CU0 | 0x08001c00
486 or t0, t1
487 /* Clear TKSU, leave IXMT */
488 xori t0, 0x00001800
489 mtc0 t0, CP0_TCSTATUS
490 _ehb
491 /* We need to leave the global IE bit set, but clear EXL...*/
492 mfc0 t0, CP0_STATUS
493 ori t0, ST0_EXL | ST0_ERL
494 xori t0, ST0_EXL | ST0_ERL
495 mtc0 t0, CP0_STATUS
496#endif /* CONFIG_MIPS_MT_SMTC */
497 irq_disable_hazard 372 irq_disable_hazard
498 .endm 373 .endm
499 374
@@ -502,35 +377,11 @@
502 * Set cp0 enable bit as sign that we're running on the kernel stack 377 * Set cp0 enable bit as sign that we're running on the kernel stack
503 */ 378 */
504 .macro STI 379 .macro STI
505#if !defined(CONFIG_MIPS_MT_SMTC)
506 mfc0 t0, CP0_STATUS 380 mfc0 t0, CP0_STATUS
507 li t1, ST0_CU0 | STATMASK 381 li t1, ST0_CU0 | STATMASK
508 or t0, t1 382 or t0, t1
509 xori t0, STATMASK & ~1 383 xori t0, STATMASK & ~1
510 mtc0 t0, CP0_STATUS 384 mtc0 t0, CP0_STATUS
511#else /* CONFIG_MIPS_MT_SMTC */
512 /*
513 * For SMTC, we need to set privilege
514 * and enable interrupts only for the
515 * current TC, using the TCStatus register.
516 */
517 _ehb
518 mfc0 t0, CP0_TCSTATUS
519 /* Fortunately CU 0 is in the same place in both registers */
520 /* Set TCU0, TKSU (for later inversion) and IXMT */
521 li t1, ST0_CU0 | 0x08001c00
522 or t0, t1
523 /* Clear TKSU *and* IXMT */
524 xori t0, 0x00001c00
525 mtc0 t0, CP0_TCSTATUS
526 _ehb
527 /* We need to leave the global IE bit set, but clear EXL...*/
528 mfc0 t0, CP0_STATUS
529 ori t0, ST0_EXL
530 xori t0, ST0_EXL
531 mtc0 t0, CP0_STATUS
532 /* irq_enable_hazard below should expand to EHB for 24K/34K cpus */
533#endif /* CONFIG_MIPS_MT_SMTC */
534 irq_enable_hazard 385 irq_enable_hazard
535 .endm 386 .endm
536 387
@@ -540,32 +391,6 @@
540 * Set cp0 enable bit as sign that we're running on the kernel stack 391 * Set cp0 enable bit as sign that we're running on the kernel stack
541 */ 392 */
542 .macro KMODE 393 .macro KMODE
543#ifdef CONFIG_MIPS_MT_SMTC
544 /*
545 * This gets baroque in SMTC. We want to
546 * protect the non-atomic clearing of EXL
547 * with DMT/EMT, but we don't want to take
548 * an interrupt while DMT is still in effect.
549 */
550
551 /* KMODE gets invoked from both reorder and noreorder code */
552 .set push
553 .set mips32r2
554 .set noreorder
555 mfc0 v0, CP0_TCSTATUS
556 andi v1, v0, TCSTATUS_IXMT
557 ori v0, TCSTATUS_IXMT
558 mtc0 v0, CP0_TCSTATUS
559 _ehb
560 DMT 2 # dmt v0
561 /*
562 * We don't know a priori if ra is "live"
563 */
564 move t0, ra
565 jal mips_ihb
566 nop /* delay slot */
567 move ra, t0
568#endif /* CONFIG_MIPS_MT_SMTC */
569 mfc0 t0, CP0_STATUS 394 mfc0 t0, CP0_STATUS
570 li t1, ST0_CU0 | (STATMASK & ~1) 395 li t1, ST0_CU0 | (STATMASK & ~1)
571#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) 396#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
@@ -576,25 +401,6 @@
576 or t0, t1 401 or t0, t1
577 xori t0, STATMASK & ~1 402 xori t0, STATMASK & ~1
578 mtc0 t0, CP0_STATUS 403 mtc0 t0, CP0_STATUS
579#ifdef CONFIG_MIPS_MT_SMTC
580 _ehb
581 andi v0, v0, VPECONTROL_TE
582 beqz v0, 2f
583 nop /* delay slot */
584 emt
5852:
586 mfc0 v0, CP0_TCSTATUS
587 /* Clear IXMT, then OR in previous value */
588 ori v0, TCSTATUS_IXMT
589 xori v0, TCSTATUS_IXMT
590 or v0, v1, v0
591 mtc0 v0, CP0_TCSTATUS
592 /*
593 * irq_disable_hazard below should expand to EHB
594 * on 24K/34K CPUS
595 */
596 .set pop
597#endif /* CONFIG_MIPS_MT_SMTC */
598 irq_disable_hazard 404 irq_disable_hazard
599 .endm 405 .endm
600 406
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index d2d961d6cb86..7de865805deb 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -159,11 +159,7 @@ static inline struct thread_info *current_thread_info(void)
159 * We stash processor id into a COP0 register to retrieve it fast 159 * We stash processor id into a COP0 register to retrieve it fast
160 * at kernel exception entry. 160 * at kernel exception entry.
161 */ 161 */
162#if defined(CONFIG_MIPS_MT_SMTC) 162#if defined(CONFIG_MIPS_PGD_C0_CONTEXT)
163#define SMP_CPUID_REG 2, 2 /* TCBIND */
164#define ASM_SMP_CPUID_REG $2, 2
165#define SMP_CPUID_PTRSHIFT 19
166#elif defined(CONFIG_MIPS_PGD_C0_CONTEXT)
167#define SMP_CPUID_REG 20, 0 /* XCONTEXT */ 163#define SMP_CPUID_REG 20, 0 /* XCONTEXT */
168#define ASM_SMP_CPUID_REG $20 164#define ASM_SMP_CPUID_REG $20
169#define SMP_CPUID_PTRSHIFT 48 165#define SMP_CPUID_PTRSHIFT 48
@@ -179,13 +175,8 @@ static inline struct thread_info *current_thread_info(void)
179#define SMP_CPUID_REGSHIFT (SMP_CPUID_PTRSHIFT + 2) 175#define SMP_CPUID_REGSHIFT (SMP_CPUID_PTRSHIFT + 2)
180#endif 176#endif
181 177
182#ifdef CONFIG_MIPS_MT_SMTC
183#define ASM_CPUID_MFC0 mfc0
184#define UASM_i_CPUID_MFC0 uasm_i_mfc0
185#else
186#define ASM_CPUID_MFC0 MFC0 178#define ASM_CPUID_MFC0 MFC0
187#define UASM_i_CPUID_MFC0 UASM_i_MFC0 179#define UASM_i_CPUID_MFC0 UASM_i_MFC0
188#endif
189 180
190#endif /* __KERNEL__ */ 181#endif /* __KERNEL__ */
191#endif /* _ASM_THREAD_INFO_H */ 182#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h
index 24f534a7fbc3..8f3047d611ee 100644
--- a/arch/mips/include/asm/time.h
+++ b/arch/mips/include/asm/time.h
@@ -52,14 +52,11 @@ extern int (*perf_irq)(void);
52 */ 52 */
53extern unsigned int __weak get_c0_compare_int(void); 53extern unsigned int __weak get_c0_compare_int(void);
54extern int r4k_clockevent_init(void); 54extern int r4k_clockevent_init(void);
55extern int smtc_clockevent_init(void);
56extern int gic_clockevent_init(void); 55extern int gic_clockevent_init(void);
57 56
58static inline int mips_clockevent_init(void) 57static inline int mips_clockevent_init(void)
59{ 58{
60#ifdef CONFIG_MIPS_MT_SMTC 59#if defined(CONFIG_CEVT_GIC)
61 return smtc_clockevent_init();
62#elif defined(CONFIG_CEVT_GIC)
63 return (gic_clockevent_init() | r4k_clockevent_init()); 60 return (gic_clockevent_init() | r4k_clockevent_init());
64#elif defined(CONFIG_CEVT_R4K) 61#elif defined(CONFIG_CEVT_R4K)
65 return r4k_clockevent_init(); 62 return r4k_clockevent_init();
diff --git a/arch/mips/include/asm/timex.h b/arch/mips/include/asm/timex.h
index c5424757da65..b05bb70a2e46 100644
--- a/arch/mips/include/asm/timex.h
+++ b/arch/mips/include/asm/timex.h
@@ -4,12 +4,16 @@
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1998, 1999, 2003 by Ralf Baechle 6 * Copyright (C) 1998, 1999, 2003 by Ralf Baechle
7 * Copyright (C) 2014 by Maciej W. Rozycki
7 */ 8 */
8#ifndef _ASM_TIMEX_H 9#ifndef _ASM_TIMEX_H
9#define _ASM_TIMEX_H 10#define _ASM_TIMEX_H
10 11
11#ifdef __KERNEL__ 12#ifdef __KERNEL__
12 13
14#include <linux/compiler.h>
15
16#include <asm/cpu.h>
13#include <asm/cpu-features.h> 17#include <asm/cpu-features.h>
14#include <asm/mipsregs.h> 18#include <asm/mipsregs.h>
15#include <asm/cpu-type.h> 19#include <asm/cpu-type.h>
@@ -45,29 +49,54 @@ typedef unsigned int cycles_t;
45 * However for now the implementaton of this function doesn't get these 49 * However for now the implementaton of this function doesn't get these
46 * fine details right. 50 * fine details right.
47 */ 51 */
48static inline cycles_t get_cycles(void) 52static inline int can_use_mips_counter(unsigned int prid)
49{ 53{
50 switch (boot_cpu_type()) { 54 int comp = (prid & PRID_COMP_MASK) != PRID_COMP_LEGACY;
51 case CPU_R4400PC:
52 case CPU_R4400SC:
53 case CPU_R4400MC:
54 if ((read_c0_prid() & 0xff) >= 0x0050)
55 return read_c0_count();
56 break;
57 55
58 case CPU_R4000PC: 56 if (__builtin_constant_p(cpu_has_counter) && !cpu_has_counter)
59 case CPU_R4000SC: 57 return 0;
60 case CPU_R4000MC: 58 else if (__builtin_constant_p(cpu_has_mips_r) && cpu_has_mips_r)
61 break; 59 return 1;
60 else if (likely(!__builtin_constant_p(cpu_has_mips_r) && comp))
61 return 1;
62 /* Make sure we don't peek at cpu_data[0].options in the fast path! */
63 if (!__builtin_constant_p(cpu_has_counter))
64 asm volatile("" : "=m" (cpu_data[0].options));
65 if (likely(cpu_has_counter &&
66 prid >= (PRID_IMP_R4000 | PRID_REV_ENCODE_44(5, 0))))
67 return 1;
68 else
69 return 0;
70}
62 71
63 default: 72static inline cycles_t get_cycles(void)
64 if (cpu_has_counter) 73{
65 return read_c0_count(); 74 if (can_use_mips_counter(read_c0_prid()))
66 break; 75 return read_c0_count();
67 } 76 else
77 return 0; /* no usable counter */
78}
79
80/*
81 * Like get_cycles - but where c0_count is not available we desperately
82 * use c0_random in an attempt to get at least a little bit of entropy.
83 *
84 * R6000 and R6000A neither have a count register nor a random register.
85 * That leaves no entropy source in the CPU itself.
86 */
87static inline unsigned long random_get_entropy(void)
88{
89 unsigned int prid = read_c0_prid();
90 unsigned int imp = prid & PRID_IMP_MASK;
68 91
69 return 0; /* no usable counter */ 92 if (can_use_mips_counter(prid))
93 return read_c0_count();
94 else if (likely(imp != PRID_IMP_R6000 && imp != PRID_IMP_R6000A))
95 return read_c0_random();
96 else
97 return 0; /* no usable register */
70} 98}
99#define random_get_entropy random_get_entropy
71 100
72#endif /* __KERNEL__ */ 101#endif /* __KERNEL__ */
73 102
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index c33a9564fb41..f8d63b3b40b4 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -55,6 +55,9 @@ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
55#define Ip_u2u1u3(op) \ 55#define Ip_u2u1u3(op) \
56void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c) 56void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
57 57
58#define Ip_u3u2u1(op) \
59void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
60
58#define Ip_u3u1u2(op) \ 61#define Ip_u3u1u2(op) \
59void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c) 62void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
60 63
@@ -74,6 +77,9 @@ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
74#define Ip_u1u2(op) \ 77#define Ip_u1u2(op) \
75void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b) 78void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
76 79
80#define Ip_u2u1(op) \
81void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
82
77#define Ip_u1s2(op) \ 83#define Ip_u1s2(op) \
78void ISAOPC(op)(u32 **buf, unsigned int a, signed int b) 84void ISAOPC(op)(u32 **buf, unsigned int a, signed int b)
79 85
@@ -99,6 +105,7 @@ Ip_u2u1s3(_daddiu);
99Ip_u3u1u2(_daddu); 105Ip_u3u1u2(_daddu);
100Ip_u2u1msbu3(_dins); 106Ip_u2u1msbu3(_dins);
101Ip_u2u1msbu3(_dinsm); 107Ip_u2u1msbu3(_dinsm);
108Ip_u1u2(_divu);
102Ip_u1u2u3(_dmfc0); 109Ip_u1u2u3(_dmfc0);
103Ip_u1u2u3(_dmtc0); 110Ip_u1u2u3(_dmtc0);
104Ip_u2u1u3(_drotr); 111Ip_u2u1u3(_drotr);
@@ -114,16 +121,22 @@ Ip_u2u1msbu3(_ext);
114Ip_u2u1msbu3(_ins); 121Ip_u2u1msbu3(_ins);
115Ip_u1(_j); 122Ip_u1(_j);
116Ip_u1(_jal); 123Ip_u1(_jal);
124Ip_u2u1(_jalr);
117Ip_u1(_jr); 125Ip_u1(_jr);
126Ip_u2s3u1(_lb);
118Ip_u2s3u1(_ld); 127Ip_u2s3u1(_ld);
119Ip_u3u1u2(_ldx); 128Ip_u3u1u2(_ldx);
129Ip_u2s3u1(_lh);
120Ip_u2s3u1(_ll); 130Ip_u2s3u1(_ll);
121Ip_u2s3u1(_lld); 131Ip_u2s3u1(_lld);
122Ip_u1s2(_lui); 132Ip_u1s2(_lui);
123Ip_u2s3u1(_lw); 133Ip_u2s3u1(_lw);
124Ip_u3u1u2(_lwx); 134Ip_u3u1u2(_lwx);
125Ip_u1u2u3(_mfc0); 135Ip_u1u2u3(_mfc0);
136Ip_u1(_mfhi);
137Ip_u1(_mflo);
126Ip_u1u2u3(_mtc0); 138Ip_u1u2u3(_mtc0);
139Ip_u3u1u2(_mul);
127Ip_u3u1u2(_or); 140Ip_u3u1u2(_or);
128Ip_u2u1u3(_ori); 141Ip_u2u1u3(_ori);
129Ip_u2s3u1(_pref); 142Ip_u2s3u1(_pref);
@@ -133,17 +146,25 @@ Ip_u2s3u1(_sc);
133Ip_u2s3u1(_scd); 146Ip_u2s3u1(_scd);
134Ip_u2s3u1(_sd); 147Ip_u2s3u1(_sd);
135Ip_u2u1u3(_sll); 148Ip_u2u1u3(_sll);
149Ip_u3u2u1(_sllv);
150Ip_u2u1s3(_sltiu);
151Ip_u3u1u2(_sltu);
136Ip_u2u1u3(_sra); 152Ip_u2u1u3(_sra);
137Ip_u2u1u3(_srl); 153Ip_u2u1u3(_srl);
154Ip_u3u2u1(_srlv);
138Ip_u3u1u2(_subu); 155Ip_u3u1u2(_subu);
139Ip_u2s3u1(_sw); 156Ip_u2s3u1(_sw);
157Ip_u1(_sync);
140Ip_u1(_syscall); 158Ip_u1(_syscall);
141Ip_0(_tlbp); 159Ip_0(_tlbp);
142Ip_0(_tlbr); 160Ip_0(_tlbr);
143Ip_0(_tlbwi); 161Ip_0(_tlbwi);
144Ip_0(_tlbwr); 162Ip_0(_tlbwr);
163Ip_u1(_wait);
164Ip_u2u1(_wsbh);
145Ip_u3u1u2(_xor); 165Ip_u3u1u2(_xor);
146Ip_u2u1u3(_xori); 166Ip_u2u1u3(_xori);
167Ip_u2u1(_yield);
147 168
148 169
149/* Handle labels. */ 170/* Handle labels. */
@@ -264,6 +285,8 @@ void uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
264 unsigned int bit, int lid); 285 unsigned int bit, int lid);
265void uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg, 286void uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
266 unsigned int bit, int lid); 287 unsigned int bit, int lid);
288void uasm_il_beq(u32 **p, struct uasm_reloc **r, unsigned int r1,
289 unsigned int r2, int lid);
267void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); 290void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
268void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); 291void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
269void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); 292void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
diff --git a/arch/mips/include/uapi/asm/Kbuild b/arch/mips/include/uapi/asm/Kbuild
index be7196eacb88..96fe7395ed8d 100644
--- a/arch/mips/include/uapi/asm/Kbuild
+++ b/arch/mips/include/uapi/asm/Kbuild
@@ -4,6 +4,7 @@ include include/uapi/asm-generic/Kbuild.asm
4generic-y += auxvec.h 4generic-y += auxvec.h
5generic-y += ipcbuf.h 5generic-y += ipcbuf.h
6 6
7header-y += bitfield.h
7header-y += bitsperlong.h 8header-y += bitsperlong.h
8header-y += break.h 9header-y += break.h
9header-y += byteorder.h 10header-y += byteorder.h
diff --git a/arch/mips/include/uapi/asm/bitfield.h b/arch/mips/include/uapi/asm/bitfield.h
new file mode 100644
index 000000000000..ad9861359cea
--- /dev/null
+++ b/arch/mips/include/uapi/asm/bitfield.h
@@ -0,0 +1,29 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2014 by Ralf Baechle <ralf@linux-mips.org>
7 */
8#ifndef __UAPI_ASM_BITFIELD_H
9#define __UAPI_ASM_BITFIELD_H
10
11/*
12 * * Damn ... bitfields depend from byteorder :-(
13 * */
14#ifdef __MIPSEB__
15#define __BITFIELD_FIELD(field, more) \
16 field; \
17 more
18
19#elif defined(__MIPSEL__)
20
21#define __BITFIELD_FIELD(field, more) \
22 more \
23 field;
24
25#else /* !defined (__MIPSEB__) && !defined (__MIPSEL__) */
26#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
27#endif
28
29#endif /* __UAPI_ASM_BITFIELD_H */
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 3125797f2a88..4b7160259292 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -13,6 +13,8 @@
13#ifndef _UAPI_ASM_INST_H 13#ifndef _UAPI_ASM_INST_H
14#define _UAPI_ASM_INST_H 14#define _UAPI_ASM_INST_H
15 15
16#include <asm/bitfield.h>
17
16/* 18/*
17 * Major opcodes; before MIPS IV cop1x was called cop3. 19 * Major opcodes; before MIPS IV cop1x was called cop3.
18 */ 20 */
@@ -74,16 +76,17 @@ enum spec2_op {
74enum spec3_op { 76enum spec3_op {
75 ext_op, dextm_op, dextu_op, dext_op, 77 ext_op, dextm_op, dextu_op, dext_op,
76 ins_op, dinsm_op, dinsu_op, dins_op, 78 ins_op, dinsm_op, dinsu_op, dins_op,
77 lx_op = 0x0a, lwle_op = 0x19, 79 yield_op = 0x09, lx_op = 0x0a,
78 lwre_op = 0x1a, cachee_op = 0x1b, 80 lwle_op = 0x19, lwre_op = 0x1a,
79 sbe_op = 0x1c, she_op = 0x1d, 81 cachee_op = 0x1b, sbe_op = 0x1c,
80 sce_op = 0x1e, swe_op = 0x1f, 82 she_op = 0x1d, sce_op = 0x1e,
81 bshfl_op = 0x20, swle_op = 0x21, 83 swe_op = 0x1f, bshfl_op = 0x20,
82 swre_op = 0x22, prefe_op = 0x23, 84 swle_op = 0x21, swre_op = 0x22,
83 dbshfl_op = 0x24, lbue_op = 0x28, 85 prefe_op = 0x23, dbshfl_op = 0x24,
84 lhue_op = 0x29, lbe_op = 0x2c, 86 lbue_op = 0x28, lhue_op = 0x29,
85 lhe_op = 0x2d, lle_op = 0x2e, 87 lbe_op = 0x2c, lhe_op = 0x2d,
86 lwe_op = 0x2f, rdhwr_op = 0x3b 88 lle_op = 0x2e, lwe_op = 0x2f,
89 rdhwr_op = 0x3b
87}; 90};
88 91
89/* 92/*
@@ -125,7 +128,8 @@ enum bcop_op {
125enum cop0_coi_func { 128enum cop0_coi_func {
126 tlbr_op = 0x01, tlbwi_op = 0x02, 129 tlbr_op = 0x01, tlbwi_op = 0x02,
127 tlbwr_op = 0x06, tlbp_op = 0x08, 130 tlbwr_op = 0x06, tlbp_op = 0x08,
128 rfe_op = 0x10, eret_op = 0x18 131 rfe_op = 0x10, eret_op = 0x18,
132 wait_op = 0x20,
129}; 133};
130 134
131/* 135/*
@@ -202,6 +206,16 @@ enum lx_func {
202}; 206};
203 207
204/* 208/*
209 * BSHFL opcodes
210 */
211enum bshfl_func {
212 wsbh_op = 0x2,
213 dshd_op = 0x5,
214 seb_op = 0x10,
215 seh_op = 0x18,
216};
217
218/*
205 * (microMIPS) Major opcodes. 219 * (microMIPS) Major opcodes.
206 */ 220 */
207enum mm_major_op { 221enum mm_major_op {
@@ -244,17 +258,22 @@ enum mm_32i_minor_op {
244enum mm_32a_minor_op { 258enum mm_32a_minor_op {
245 mm_sll32_op = 0x000, 259 mm_sll32_op = 0x000,
246 mm_ins_op = 0x00c, 260 mm_ins_op = 0x00c,
261 mm_sllv32_op = 0x010,
247 mm_ext_op = 0x02c, 262 mm_ext_op = 0x02c,
248 mm_pool32axf_op = 0x03c, 263 mm_pool32axf_op = 0x03c,
249 mm_srl32_op = 0x040, 264 mm_srl32_op = 0x040,
250 mm_sra_op = 0x080, 265 mm_sra_op = 0x080,
266 mm_srlv32_op = 0x090,
251 mm_rotr_op = 0x0c0, 267 mm_rotr_op = 0x0c0,
252 mm_lwxs_op = 0x118, 268 mm_lwxs_op = 0x118,
253 mm_addu32_op = 0x150, 269 mm_addu32_op = 0x150,
254 mm_subu32_op = 0x1d0, 270 mm_subu32_op = 0x1d0,
271 mm_wsbh_op = 0x1ec,
272 mm_mul_op = 0x210,
255 mm_and_op = 0x250, 273 mm_and_op = 0x250,
256 mm_or32_op = 0x290, 274 mm_or32_op = 0x290,
257 mm_xor32_op = 0x310, 275 mm_xor32_op = 0x310,
276 mm_sltu_op = 0x390,
258}; 277};
259 278
260/* 279/*
@@ -294,15 +313,20 @@ enum mm_32axf_minor_op {
294 mm_mfc0_op = 0x003, 313 mm_mfc0_op = 0x003,
295 mm_mtc0_op = 0x00b, 314 mm_mtc0_op = 0x00b,
296 mm_tlbp_op = 0x00d, 315 mm_tlbp_op = 0x00d,
316 mm_mfhi32_op = 0x035,
297 mm_jalr_op = 0x03c, 317 mm_jalr_op = 0x03c,
298 mm_tlbr_op = 0x04d, 318 mm_tlbr_op = 0x04d,
319 mm_mflo32_op = 0x075,
299 mm_jalrhb_op = 0x07c, 320 mm_jalrhb_op = 0x07c,
300 mm_tlbwi_op = 0x08d, 321 mm_tlbwi_op = 0x08d,
301 mm_tlbwr_op = 0x0cd, 322 mm_tlbwr_op = 0x0cd,
302 mm_jalrs_op = 0x13c, 323 mm_jalrs_op = 0x13c,
303 mm_jalrshb_op = 0x17c, 324 mm_jalrshb_op = 0x17c,
325 mm_sync_op = 0x1ad,
304 mm_syscall_op = 0x22d, 326 mm_syscall_op = 0x22d,
327 mm_wait_op = 0x24d,
305 mm_eret_op = 0x3cd, 328 mm_eret_op = 0x3cd,
329 mm_divu_op = 0x5dc,
306}; 330};
307 331
308/* 332/*
@@ -480,24 +504,6 @@ enum MIPS6e_i8_func {
480 */ 504 */
481#define MM_NOP16 0x0c00 505#define MM_NOP16 0x0c00
482 506
483/*
484 * Damn ... bitfields depend from byteorder :-(
485 */
486#ifdef __MIPSEB__
487#define __BITFIELD_FIELD(field, more) \
488 field; \
489 more
490
491#elif defined(__MIPSEL__)
492
493#define __BITFIELD_FIELD(field, more) \
494 more \
495 field;
496
497#else /* !defined (__MIPSEB__) && !defined (__MIPSEL__) */
498#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
499#endif
500
501struct j_format { 507struct j_format {
502 __BITFIELD_FIELD(unsigned int opcode : 6, /* Jump format */ 508 __BITFIELD_FIELD(unsigned int opcode : 6, /* Jump format */
503 __BITFIELD_FIELD(unsigned int target : 26, 509 __BITFIELD_FIELD(unsigned int target : 26,
diff --git a/arch/mips/include/uapi/asm/kvm_para.h b/arch/mips/include/uapi/asm/kvm_para.h
index 14fab8f0b957..7e16d7c42e65 100644
--- a/arch/mips/include/uapi/asm/kvm_para.h
+++ b/arch/mips/include/uapi/asm/kvm_para.h
@@ -1 +1,5 @@
1#include <asm-generic/kvm_para.h> 1#ifndef _UAPI_ASM_MIPS_KVM_PARA_H
2#define _UAPI_ASM_MIPS_KVM_PARA_H
3
4
5#endif /* _UAPI_ASM_MIPS_KVM_PARA_H */
diff --git a/arch/mips/include/uapi/asm/types.h b/arch/mips/include/uapi/asm/types.h
index 7ac9d0baad84..f3dd9ff0cc0c 100644
--- a/arch/mips/include/uapi/asm/types.h
+++ b/arch/mips/include/uapi/asm/types.h
@@ -14,9 +14,12 @@
14/* 14/*
15 * We don't use int-l64.h for the kernel anymore but still use it for 15 * We don't use int-l64.h for the kernel anymore but still use it for
16 * userspace to avoid code changes. 16 * userspace to avoid code changes.
17 *
18 * However, some user programs (e.g. perf) may not want this. They can
19 * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here.
17 */ 20 */
18#ifndef __KERNEL__ 21#ifndef __KERNEL__
19# if _MIPS_SZLONG == 64 22# if _MIPS_SZLONG == 64 && !defined(__SANE_USERSPACE_TYPES__)
20# include <asm-generic/int-l64.h> 23# include <asm-generic/int-l64.h>
21# else 24# else
22# include <asm-generic/int-ll64.h> 25# include <asm-generic/int-ll64.h>
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 277dab301cea..008a2fed0584 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -17,7 +17,6 @@ endif
17 17
18obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o 18obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
19obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o 19obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
20obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o
21obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o 20obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o
22obj-$(CONFIG_CEVT_GIC) += cevt-gic.o 21obj-$(CONFIG_CEVT_GIC) += cevt-gic.o
23obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o 22obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
@@ -42,7 +41,7 @@ obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o r4k_switch.o
42obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o 41obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o
43obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o 42obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o
44obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o 43obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o
45obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o 44obj-$(CONFIG_CPU_CAVIUM_OCTEON) += r4k_fpu.o octeon_switch.o
46 45
47obj-$(CONFIG_SMP) += smp.o 46obj-$(CONFIG_SMP) += smp.o
48obj-$(CONFIG_SMP_UP) += smp-up.o 47obj-$(CONFIG_SMP_UP) += smp-up.o
@@ -50,7 +49,6 @@ obj-$(CONFIG_CPU_BMIPS) += smp-bmips.o bmips_vec.o
50 49
51obj-$(CONFIG_MIPS_MT) += mips-mt.o 50obj-$(CONFIG_MIPS_MT) += mips-mt.o
52obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o 51obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o
53obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o
54obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o 52obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
55obj-$(CONFIG_MIPS_CMP) += smp-cmp.o 53obj-$(CONFIG_MIPS_CMP) += smp-cmp.o
56obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o 54obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o
@@ -107,6 +105,9 @@ obj-$(CONFIG_JUMP_LABEL) += jump_label.o
107obj-$(CONFIG_MIPS_CM) += mips-cm.o 105obj-$(CONFIG_MIPS_CM) += mips-cm.o
108obj-$(CONFIG_MIPS_CPC) += mips-cpc.o 106obj-$(CONFIG_MIPS_CPC) += mips-cpc.o
109 107
108obj-$(CONFIG_CPU_PM) += pm.o
109obj-$(CONFIG_MIPS_CPS_PM) += pm-cps.o
110
110# 111#
111# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not 112# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not
112# safe to unconditionnaly use the assembler -mdsp / -mdspr2 switches 113# safe to unconditionnaly use the assembler -mdsp / -mdspr2 switches
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 0ea75c244b48..02f075df8f2e 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -14,6 +14,7 @@
14#include <linux/mm.h> 14#include <linux/mm.h>
15#include <linux/kbuild.h> 15#include <linux/kbuild.h>
16#include <linux/suspend.h> 16#include <linux/suspend.h>
17#include <asm/pm.h>
17#include <asm/ptrace.h> 18#include <asm/ptrace.h>
18#include <asm/processor.h> 19#include <asm/processor.h>
19#include <asm/smp-cps.h> 20#include <asm/smp-cps.h>
@@ -64,9 +65,6 @@ void output_ptreg_defines(void)
64 OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr); 65 OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr);
65 OFFSET(PT_STATUS, pt_regs, cp0_status); 66 OFFSET(PT_STATUS, pt_regs, cp0_status);
66 OFFSET(PT_CAUSE, pt_regs, cp0_cause); 67 OFFSET(PT_CAUSE, pt_regs, cp0_cause);
67#ifdef CONFIG_MIPS_MT_SMTC
68 OFFSET(PT_TCSTATUS, pt_regs, cp0_tcstatus);
69#endif /* CONFIG_MIPS_MT_SMTC */
70#ifdef CONFIG_CPU_CAVIUM_OCTEON 68#ifdef CONFIG_CPU_CAVIUM_OCTEON
71 OFFSET(PT_MPL, pt_regs, mpl); 69 OFFSET(PT_MPL, pt_regs, mpl);
72 OFFSET(PT_MTP, pt_regs, mtp); 70 OFFSET(PT_MTP, pt_regs, mtp);
@@ -404,6 +402,20 @@ void output_pbe_defines(void)
404} 402}
405#endif 403#endif
406 404
405#ifdef CONFIG_CPU_PM
406void output_pm_defines(void)
407{
408 COMMENT(" PM offsets. ");
409#ifdef CONFIG_EVA
410 OFFSET(SSS_SEGCTL0, mips_static_suspend_state, segctl[0]);
411 OFFSET(SSS_SEGCTL1, mips_static_suspend_state, segctl[1]);
412 OFFSET(SSS_SEGCTL2, mips_static_suspend_state, segctl[2]);
413#endif
414 OFFSET(SSS_SP, mips_static_suspend_state, sp);
415 BLANK();
416}
417#endif
418
407void output_kvm_defines(void) 419void output_kvm_defines(void)
408{ 420{
409 COMMENT(" KVM/MIPS Specfic offsets. "); 421 COMMENT(" KVM/MIPS Specfic offsets. ");
@@ -472,10 +484,14 @@ void output_kvm_defines(void)
472void output_cps_defines(void) 484void output_cps_defines(void)
473{ 485{
474 COMMENT(" MIPS CPS offsets. "); 486 COMMENT(" MIPS CPS offsets. ");
475 OFFSET(BOOTCFG_CORE, boot_config, core); 487
476 OFFSET(BOOTCFG_VPE, boot_config, vpe); 488 OFFSET(COREBOOTCFG_VPEMASK, core_boot_config, vpe_mask);
477 OFFSET(BOOTCFG_PC, boot_config, pc); 489 OFFSET(COREBOOTCFG_VPECONFIG, core_boot_config, vpe_config);
478 OFFSET(BOOTCFG_SP, boot_config, sp); 490 DEFINE(COREBOOTCFG_SIZE, sizeof(struct core_boot_config));
479 OFFSET(BOOTCFG_GP, boot_config, gp); 491
492 OFFSET(VPEBOOTCFG_PC, vpe_boot_config, pc);
493 OFFSET(VPEBOOTCFG_SP, vpe_boot_config, sp);
494 OFFSET(VPEBOOTCFG_GP, vpe_boot_config, gp);
495 DEFINE(VPEBOOTCFG_SIZE, sizeof(struct vpe_boot_config));
480} 496}
481#endif 497#endif
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 76122ff5cb5e..7b2df224f041 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -48,6 +48,202 @@ int __isa_exception_epc(struct pt_regs *regs)
48 return epc; 48 return epc;
49} 49}
50 50
51/* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. */
52static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7};
53
54int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
55 unsigned long *contpc)
56{
57 union mips_instruction insn = (union mips_instruction)dec_insn.insn;
58 int bc_false = 0;
59 unsigned int fcr31;
60 unsigned int bit;
61
62 if (!cpu_has_mmips)
63 return 0;
64
65 switch (insn.mm_i_format.opcode) {
66 case mm_pool32a_op:
67 if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) ==
68 mm_pool32axf_op) {
69 switch (insn.mm_i_format.simmediate >>
70 MM_POOL32A_MINOR_SHIFT) {
71 case mm_jalr_op:
72 case mm_jalrhb_op:
73 case mm_jalrs_op:
74 case mm_jalrshb_op:
75 if (insn.mm_i_format.rt != 0) /* Not mm_jr */
76 regs->regs[insn.mm_i_format.rt] =
77 regs->cp0_epc +
78 dec_insn.pc_inc +
79 dec_insn.next_pc_inc;
80 *contpc = regs->regs[insn.mm_i_format.rs];
81 return 1;
82 }
83 }
84 break;
85 case mm_pool32i_op:
86 switch (insn.mm_i_format.rt) {
87 case mm_bltzals_op:
88 case mm_bltzal_op:
89 regs->regs[31] = regs->cp0_epc +
90 dec_insn.pc_inc +
91 dec_insn.next_pc_inc;
92 /* Fall through */
93 case mm_bltz_op:
94 if ((long)regs->regs[insn.mm_i_format.rs] < 0)
95 *contpc = regs->cp0_epc +
96 dec_insn.pc_inc +
97 (insn.mm_i_format.simmediate << 1);
98 else
99 *contpc = regs->cp0_epc +
100 dec_insn.pc_inc +
101 dec_insn.next_pc_inc;
102 return 1;
103 case mm_bgezals_op:
104 case mm_bgezal_op:
105 regs->regs[31] = regs->cp0_epc +
106 dec_insn.pc_inc +
107 dec_insn.next_pc_inc;
108 /* Fall through */
109 case mm_bgez_op:
110 if ((long)regs->regs[insn.mm_i_format.rs] >= 0)
111 *contpc = regs->cp0_epc +
112 dec_insn.pc_inc +
113 (insn.mm_i_format.simmediate << 1);
114 else
115 *contpc = regs->cp0_epc +
116 dec_insn.pc_inc +
117 dec_insn.next_pc_inc;
118 return 1;
119 case mm_blez_op:
120 if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
121 *contpc = regs->cp0_epc +
122 dec_insn.pc_inc +
123 (insn.mm_i_format.simmediate << 1);
124 else
125 *contpc = regs->cp0_epc +
126 dec_insn.pc_inc +
127 dec_insn.next_pc_inc;
128 return 1;
129 case mm_bgtz_op:
130 if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
131 *contpc = regs->cp0_epc +
132 dec_insn.pc_inc +
133 (insn.mm_i_format.simmediate << 1);
134 else
135 *contpc = regs->cp0_epc +
136 dec_insn.pc_inc +
137 dec_insn.next_pc_inc;
138 return 1;
139 case mm_bc2f_op:
140 case mm_bc1f_op:
141 bc_false = 1;
142 /* Fall through */
143 case mm_bc2t_op:
144 case mm_bc1t_op:
145 preempt_disable();
146 if (is_fpu_owner())
147 asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
148 else
149 fcr31 = current->thread.fpu.fcr31;
150 preempt_enable();
151
152 if (bc_false)
153 fcr31 = ~fcr31;
154
155 bit = (insn.mm_i_format.rs >> 2);
156 bit += (bit != 0);
157 bit += 23;
158 if (fcr31 & (1 << bit))
159 *contpc = regs->cp0_epc +
160 dec_insn.pc_inc +
161 (insn.mm_i_format.simmediate << 1);
162 else
163 *contpc = regs->cp0_epc +
164 dec_insn.pc_inc + dec_insn.next_pc_inc;
165 return 1;
166 }
167 break;
168 case mm_pool16c_op:
169 switch (insn.mm_i_format.rt) {
170 case mm_jalr16_op:
171 case mm_jalrs16_op:
172 regs->regs[31] = regs->cp0_epc +
173 dec_insn.pc_inc + dec_insn.next_pc_inc;
174 /* Fall through */
175 case mm_jr16_op:
176 *contpc = regs->regs[insn.mm_i_format.rs];
177 return 1;
178 }
179 break;
180 case mm_beqz16_op:
181 if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0)
182 *contpc = regs->cp0_epc +
183 dec_insn.pc_inc +
184 (insn.mm_b1_format.simmediate << 1);
185 else
186 *contpc = regs->cp0_epc +
187 dec_insn.pc_inc + dec_insn.next_pc_inc;
188 return 1;
189 case mm_bnez16_op:
190 if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0)
191 *contpc = regs->cp0_epc +
192 dec_insn.pc_inc +
193 (insn.mm_b1_format.simmediate << 1);
194 else
195 *contpc = regs->cp0_epc +
196 dec_insn.pc_inc + dec_insn.next_pc_inc;
197 return 1;
198 case mm_b16_op:
199 *contpc = regs->cp0_epc + dec_insn.pc_inc +
200 (insn.mm_b0_format.simmediate << 1);
201 return 1;
202 case mm_beq32_op:
203 if (regs->regs[insn.mm_i_format.rs] ==
204 regs->regs[insn.mm_i_format.rt])
205 *contpc = regs->cp0_epc +
206 dec_insn.pc_inc +
207 (insn.mm_i_format.simmediate << 1);
208 else
209 *contpc = regs->cp0_epc +
210 dec_insn.pc_inc +
211 dec_insn.next_pc_inc;
212 return 1;
213 case mm_bne32_op:
214 if (regs->regs[insn.mm_i_format.rs] !=
215 regs->regs[insn.mm_i_format.rt])
216 *contpc = regs->cp0_epc +
217 dec_insn.pc_inc +
218 (insn.mm_i_format.simmediate << 1);
219 else
220 *contpc = regs->cp0_epc +
221 dec_insn.pc_inc + dec_insn.next_pc_inc;
222 return 1;
223 case mm_jalx32_op:
224 regs->regs[31] = regs->cp0_epc +
225 dec_insn.pc_inc + dec_insn.next_pc_inc;
226 *contpc = regs->cp0_epc + dec_insn.pc_inc;
227 *contpc >>= 28;
228 *contpc <<= 28;
229 *contpc |= (insn.j_format.target << 2);
230 return 1;
231 case mm_jals32_op:
232 case mm_jal32_op:
233 regs->regs[31] = regs->cp0_epc +
234 dec_insn.pc_inc + dec_insn.next_pc_inc;
235 /* Fall through */
236 case mm_j32_op:
237 *contpc = regs->cp0_epc + dec_insn.pc_inc;
238 *contpc >>= 27;
239 *contpc <<= 27;
240 *contpc |= (insn.j_format.target << 1);
241 set_isa16_mode(*contpc);
242 return 1;
243 }
244 return 0;
245}
246
51/* 247/*
52 * Compute return address and emulate branch in microMIPS mode after an 248 * Compute return address and emulate branch in microMIPS mode after an
53 * exception only. It does not handle compact branches/jumps and cannot 249 * exception only. It does not handle compact branches/jumps and cannot
@@ -366,7 +562,11 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
366 case cop1_op: 562 case cop1_op:
367 preempt_disable(); 563 preempt_disable();
368 if (is_fpu_owner()) 564 if (is_fpu_owner())
369 asm volatile("cfc1\t%0,$31" : "=r" (fcr31)); 565 asm volatile(
566 ".set push\n"
567 "\t.set mips1\n"
568 "\tcfc1\t%0,$31\n"
569 "\t.set pop" : "=r" (fcr31));
370 else 570 else
371 fcr31 = current->thread.fpu.fcr31; 571 fcr31 = current->thread.fpu.fcr31;
372 preempt_enable(); 572 preempt_enable();
diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c
index 594cbbf16d62..6093716980b9 100644
--- a/arch/mips/kernel/cevt-gic.c
+++ b/arch/mips/kernel/cevt-gic.c
@@ -26,7 +26,7 @@ static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
26 26
27 cnt = gic_read_count(); 27 cnt = gic_read_count();
28 cnt += (u64)delta; 28 cnt += (u64)delta;
29 gic_write_compare(cnt); 29 gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask));
30 res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0; 30 res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
31 return res; 31 return res;
32} 32}
@@ -73,7 +73,8 @@ int gic_clockevent_init(void)
73 cd = &per_cpu(gic_clockevent_device, cpu); 73 cd = &per_cpu(gic_clockevent_device, cpu);
74 74
75 cd->name = "MIPS GIC"; 75 cd->name = "MIPS GIC";
76 cd->features = CLOCK_EVT_FEAT_ONESHOT; 76 cd->features = CLOCK_EVT_FEAT_ONESHOT |
77 CLOCK_EVT_FEAT_C3STOP;
77 78
78 clockevent_set_clock(cd, gic_frequency); 79 clockevent_set_clock(cd, gic_frequency);
79 80
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 50d3f5a8d6bb..bc127e22fdab 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -12,17 +12,10 @@
12#include <linux/smp.h> 12#include <linux/smp.h>
13#include <linux/irq.h> 13#include <linux/irq.h>
14 14
15#include <asm/smtc_ipi.h>
16#include <asm/time.h> 15#include <asm/time.h>
17#include <asm/cevt-r4k.h> 16#include <asm/cevt-r4k.h>
18#include <asm/gic.h> 17#include <asm/gic.h>
19 18
20/*
21 * The SMTC Kernel for the 34K, 1004K, et. al. replaces several
22 * of these routines with SMTC-specific variants.
23 */
24
25#ifndef CONFIG_MIPS_MT_SMTC
26static int mips_next_event(unsigned long delta, 19static int mips_next_event(unsigned long delta,
27 struct clock_event_device *evt) 20 struct clock_event_device *evt)
28{ 21{
@@ -36,8 +29,6 @@ static int mips_next_event(unsigned long delta,
36 return res; 29 return res;
37} 30}
38 31
39#endif /* CONFIG_MIPS_MT_SMTC */
40
41void mips_set_clock_mode(enum clock_event_mode mode, 32void mips_set_clock_mode(enum clock_event_mode mode,
42 struct clock_event_device *evt) 33 struct clock_event_device *evt)
43{ 34{
@@ -47,7 +38,6 @@ void mips_set_clock_mode(enum clock_event_mode mode,
47DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); 38DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
48int cp0_timer_irq_installed; 39int cp0_timer_irq_installed;
49 40
50#ifndef CONFIG_MIPS_MT_SMTC
51irqreturn_t c0_compare_interrupt(int irq, void *dev_id) 41irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
52{ 42{
53 const int r2 = cpu_has_mips_r2; 43 const int r2 = cpu_has_mips_r2;
@@ -72,9 +62,6 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
72 /* Clear Count/Compare Interrupt */ 62 /* Clear Count/Compare Interrupt */
73 write_c0_compare(read_c0_compare()); 63 write_c0_compare(read_c0_compare());
74 cd = &per_cpu(mips_clockevent_device, cpu); 64 cd = &per_cpu(mips_clockevent_device, cpu);
75#ifdef CONFIG_CEVT_GIC
76 if (!gic_present)
77#endif
78 cd->event_handler(cd); 65 cd->event_handler(cd);
79 } 66 }
80 67
@@ -82,8 +69,6 @@ out:
82 return IRQ_HANDLED; 69 return IRQ_HANDLED;
83} 70}
84 71
85#endif /* Not CONFIG_MIPS_MT_SMTC */
86
87struct irqaction c0_compare_irqaction = { 72struct irqaction c0_compare_irqaction = {
88 .handler = c0_compare_interrupt, 73 .handler = c0_compare_interrupt,
89 .flags = IRQF_PERCPU | IRQF_TIMER, 74 .flags = IRQF_PERCPU | IRQF_TIMER,
@@ -170,7 +155,6 @@ int c0_compare_int_usable(void)
170 return 1; 155 return 1;
171} 156}
172 157
173#ifndef CONFIG_MIPS_MT_SMTC
174int r4k_clockevent_init(void) 158int r4k_clockevent_init(void)
175{ 159{
176 unsigned int cpu = smp_processor_id(); 160 unsigned int cpu = smp_processor_id();
@@ -195,7 +179,9 @@ int r4k_clockevent_init(void)
195 cd = &per_cpu(mips_clockevent_device, cpu); 179 cd = &per_cpu(mips_clockevent_device, cpu);
196 180
197 cd->name = "MIPS"; 181 cd->name = "MIPS";
198 cd->features = CLOCK_EVT_FEAT_ONESHOT; 182 cd->features = CLOCK_EVT_FEAT_ONESHOT |
183 CLOCK_EVT_FEAT_C3STOP |
184 CLOCK_EVT_FEAT_PERCPU;
199 185
200 clockevent_set_clock(cd, mips_hpt_frequency); 186 clockevent_set_clock(cd, mips_hpt_frequency);
201 187
@@ -210,9 +196,6 @@ int r4k_clockevent_init(void)
210 cd->set_mode = mips_set_clock_mode; 196 cd->set_mode = mips_set_clock_mode;
211 cd->event_handler = mips_event_handler; 197 cd->event_handler = mips_event_handler;
212 198
213#ifdef CONFIG_CEVT_GIC
214 if (!gic_present)
215#endif
216 clockevents_register_device(cd); 199 clockevents_register_device(cd);
217 200
218 if (cp0_timer_irq_installed) 201 if (cp0_timer_irq_installed)
@@ -225,4 +208,3 @@ int r4k_clockevent_init(void)
225 return 0; 208 return 0;
226} 209}
227 210
228#endif /* Not CONFIG_MIPS_MT_SMTC */
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
deleted file mode 100644
index b6cf0a60d896..000000000000
--- a/arch/mips/kernel/cevt-smtc.c
+++ /dev/null
@@ -1,324 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2007 MIPS Technologies, Inc.
7 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
8 * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
9 */
10#include <linux/clockchips.h>
11#include <linux/interrupt.h>
12#include <linux/percpu.h>
13#include <linux/smp.h>
14#include <linux/irq.h>
15
16#include <asm/smtc_ipi.h>
17#include <asm/time.h>
18#include <asm/cevt-r4k.h>
19
20/*
21 * Variant clock event timer support for SMTC on MIPS 34K, 1004K
22 * or other MIPS MT cores.
23 *
24 * Notes on SMTC Support:
25 *
26 * SMTC has multiple microthread TCs pretending to be Linux CPUs.
27 * But there's only one Count/Compare pair per VPE, and Compare
28 * interrupts are taken opportunisitically by available TCs
29 * bound to the VPE with the Count register. The new timer
30 * framework provides for global broadcasts, but we really
31 * want VPE-level multicasts for best behavior. So instead
32 * of invoking the high-level clock-event broadcast code,
33 * this version of SMTC support uses the historical SMTC
34 * multicast mechanisms "under the hood", appearing to the
35 * generic clock layer as if the interrupts are per-CPU.
36 *
37 * The approach taken here is to maintain a set of NR_CPUS
38 * virtual timers, and track which "CPU" needs to be alerted
39 * at each event.
40 *
41 * It's unlikely that we'll see a MIPS MT core with more than
42 * 2 VPEs, but we *know* that we won't need to handle more
43 * VPEs than we have "CPUs". So NCPUs arrays of NCPUs elements
44 * is always going to be overkill, but always going to be enough.
45 */
46
47unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
48static int smtc_nextinvpe[NR_CPUS];
49
50/*
51 * Timestamps stored are absolute values to be programmed
52 * into Count register. Valid timestamps will never be zero.
53 * If a Zero Count value is actually calculated, it is converted
54 * to be a 1, which will introduce 1 or two CPU cycles of error
55 * roughly once every four billion events, which at 1000 HZ means
56 * about once every 50 days. If that's actually a problem, one
57 * could alternate squashing 0 to 1 and to -1.
58 */
59
60#define MAKEVALID(x) (((x) == 0L) ? 1L : (x))
61#define ISVALID(x) ((x) != 0L)
62
63/*
64 * Time comparison is subtle, as it's really truncated
65 * modular arithmetic.
66 */
67
68#define IS_SOONER(a, b, reference) \
69 (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))
70
71/*
72 * CATCHUP_INCREMENT, used when the function falls behind the counter.
73 * Could be an increasing function instead of a constant;
74 */
75
76#define CATCHUP_INCREMENT 64
77
78static int mips_next_event(unsigned long delta,
79 struct clock_event_device *evt)
80{
81 unsigned long flags;
82 unsigned int mtflags;
83 unsigned long timestamp, reference, previous;
84 unsigned long nextcomp = 0L;
85 int vpe = current_cpu_data.vpe_id;
86 int cpu = smp_processor_id();
87 local_irq_save(flags);
88 mtflags = dmt();
89
90 /*
91 * Maintain the per-TC virtual timer
92 * and program the per-VPE shared Count register
93 * as appropriate here...
94 */
95 reference = (unsigned long)read_c0_count();
96 timestamp = MAKEVALID(reference + delta);
97 /*
98 * To really model the clock, we have to catch the case
99 * where the current next-in-VPE timestamp is the old
100 * timestamp for the calling CPE, but the new value is
101 * in fact later. In that case, we have to do a full
102 * scan and discover the new next-in-VPE CPU id and
103 * timestamp.
104 */
105 previous = smtc_nexttime[vpe][cpu];
106 if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
107 && IS_SOONER(previous, timestamp, reference)) {
108 int i;
109 int soonest = cpu;
110
111 /*
112 * Update timestamp array here, so that new
113 * value gets considered along with those of
114 * other virtual CPUs on the VPE.
115 */
116 smtc_nexttime[vpe][cpu] = timestamp;
117 for_each_online_cpu(i) {
118 if (ISVALID(smtc_nexttime[vpe][i])
119 && IS_SOONER(smtc_nexttime[vpe][i],
120 smtc_nexttime[vpe][soonest], reference)) {
121 soonest = i;
122 }
123 }
124 smtc_nextinvpe[vpe] = soonest;
125 nextcomp = smtc_nexttime[vpe][soonest];
126 /*
127 * Otherwise, we don't have to process the whole array rank,
128 * we just have to see if the event horizon has gotten closer.
129 */
130 } else {
131 if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
132 IS_SOONER(timestamp,
133 smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
134 smtc_nextinvpe[vpe] = cpu;
135 nextcomp = timestamp;
136 }
137 /*
138 * Since next-in-VPE may me the same as the executing
139 * virtual CPU, we update the array *after* checking
140 * its value.
141 */
142 smtc_nexttime[vpe][cpu] = timestamp;
143 }
144
145 /*
146 * It may be that, in fact, we don't need to update Compare,
147 * but if we do, we want to make sure we didn't fall into
148 * a crack just behind Count.
149 */
150 if (ISVALID(nextcomp)) {
151 write_c0_compare(nextcomp);
152 ehb();
153 /*
154 * We never return an error, we just make sure
155 * that we trigger the handlers as quickly as
156 * we can if we fell behind.
157 */
158 while ((nextcomp - (unsigned long)read_c0_count())
159 > (unsigned long)LONG_MAX) {
160 nextcomp += CATCHUP_INCREMENT;
161 write_c0_compare(nextcomp);
162 ehb();
163 }
164 }
165 emt(mtflags);
166 local_irq_restore(flags);
167 return 0;
168}
169
170
171void smtc_distribute_timer(int vpe)
172{
173 unsigned long flags;
174 unsigned int mtflags;
175 int cpu;
176 struct clock_event_device *cd;
177 unsigned long nextstamp;
178 unsigned long reference;
179
180
181repeat:
182 nextstamp = 0L;
183 for_each_online_cpu(cpu) {
184 /*
185 * Find virtual CPUs within the current VPE who have
186 * unserviced timer requests whose time is now past.
187 */
188 local_irq_save(flags);
189 mtflags = dmt();
190 if (cpu_data[cpu].vpe_id == vpe &&
191 ISVALID(smtc_nexttime[vpe][cpu])) {
192 reference = (unsigned long)read_c0_count();
193 if ((smtc_nexttime[vpe][cpu] - reference)
194 > (unsigned long)LONG_MAX) {
195 smtc_nexttime[vpe][cpu] = 0L;
196 emt(mtflags);
197 local_irq_restore(flags);
198 /*
199 * We don't send IPIs to ourself.
200 */
201 if (cpu != smp_processor_id()) {
202 smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
203 } else {
204 cd = &per_cpu(mips_clockevent_device, cpu);
205 cd->event_handler(cd);
206 }
207 } else {
208 /* Local to VPE but Valid Time not yet reached. */
209 if (!ISVALID(nextstamp) ||
210 IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
211 reference)) {
212 smtc_nextinvpe[vpe] = cpu;
213 nextstamp = smtc_nexttime[vpe][cpu];
214 }
215 emt(mtflags);
216 local_irq_restore(flags);
217 }
218 } else {
219 emt(mtflags);
220 local_irq_restore(flags);
221
222 }
223 }
224 /* Reprogram for interrupt at next soonest timestamp for VPE */
225 if (ISVALID(nextstamp)) {
226 write_c0_compare(nextstamp);
227 ehb();
228 if ((nextstamp - (unsigned long)read_c0_count())
229 > (unsigned long)LONG_MAX)
230 goto repeat;
231 }
232}
233
234
235irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
236{
237 int cpu = smp_processor_id();
238
239 /* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
240 handle_perf_irq(1);
241
242 if (read_c0_cause() & (1 << 30)) {
243 /* Clear Count/Compare Interrupt */
244 write_c0_compare(read_c0_compare());
245 smtc_distribute_timer(cpu_data[cpu].vpe_id);
246 }
247 return IRQ_HANDLED;
248}
249
250
251int smtc_clockevent_init(void)
252{
253 uint64_t mips_freq = mips_hpt_frequency;
254 unsigned int cpu = smp_processor_id();
255 struct clock_event_device *cd;
256 unsigned int irq;
257 int i;
258 int j;
259
260 if (!cpu_has_counter || !mips_hpt_frequency)
261 return -ENXIO;
262 if (cpu == 0) {
263 for (i = 0; i < num_possible_cpus(); i++) {
264 smtc_nextinvpe[i] = 0;
265 for (j = 0; j < num_possible_cpus(); j++)
266 smtc_nexttime[i][j] = 0L;
267 }
268 /*
269 * SMTC also can't have the usablility test
270 * run by secondary TCs once Compare is in use.
271 */
272 if (!c0_compare_int_usable())
273 return -ENXIO;
274 }
275
276 /*
277 * With vectored interrupts things are getting platform specific.
278 * get_c0_compare_int is a hook to allow a platform to return the
279 * interrupt number of it's liking.
280 */
281 irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
282 if (get_c0_compare_int)
283 irq = get_c0_compare_int();
284
285 cd = &per_cpu(mips_clockevent_device, cpu);
286
287 cd->name = "MIPS";
288 cd->features = CLOCK_EVT_FEAT_ONESHOT;
289
290 /* Calculate the min / max delta */
291 cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
292 cd->shift = 32;
293 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
294 cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
295
296 cd->rating = 300;
297 cd->irq = irq;
298 cd->cpumask = cpumask_of(cpu);
299 cd->set_next_event = mips_next_event;
300 cd->set_mode = mips_set_clock_mode;
301 cd->event_handler = mips_event_handler;
302
303 clockevents_register_device(cd);
304
305 /*
306 * On SMTC we only want to do the data structure
307 * initialization and IRQ setup once.
308 */
309 if (cpu)
310 return 0;
311 /*
312 * And we need the hwmask associated with the c0_compare
313 * vector to be initialized.
314 */
315 irq_hwmask[irq] = (0x100 << cp0_compare_irq);
316 if (cp0_timer_irq_installed)
317 return 0;
318
319 cp0_timer_irq_installed = 1;
320
321 setup_irq(irq, &c0_compare_irqaction);
322
323 return 0;
324}
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index f7a46db4b161..6f4f739dad96 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -14,19 +14,43 @@
14#include <asm/asmmacro.h> 14#include <asm/asmmacro.h>
15#include <asm/cacheops.h> 15#include <asm/cacheops.h>
16#include <asm/mipsregs.h> 16#include <asm/mipsregs.h>
17#include <asm/mipsmtregs.h>
18#include <asm/pm.h>
17 19
18#define GCR_CL_COHERENCE_OFS 0x2008 20#define GCR_CL_COHERENCE_OFS 0x2008
21#define GCR_CL_ID_OFS 0x2028
22
23.extern mips_cm_base
24
25.set noreorder
26
27 /*
28 * Set dest to non-zero if the core supports the MT ASE, else zero. If
29 * MT is not supported then branch to nomt.
30 */
31 .macro has_mt dest, nomt
32 mfc0 \dest, CP0_CONFIG
33 bgez \dest, \nomt
34 mfc0 \dest, CP0_CONFIG, 1
35 bgez \dest, \nomt
36 mfc0 \dest, CP0_CONFIG, 2
37 bgez \dest, \nomt
38 mfc0 \dest, CP0_CONFIG, 3
39 andi \dest, \dest, MIPS_CONF3_MT
40 beqz \dest, \nomt
41 .endm
19 42
20.section .text.cps-vec 43.section .text.cps-vec
21.balign 0x1000 44.balign 0x1000
22.set noreorder
23 45
24LEAF(mips_cps_core_entry) 46LEAF(mips_cps_core_entry)
25 /* 47 /*
26 * These first 8 bytes will be patched by cps_smp_setup to load the 48 * These first 12 bytes will be patched by cps_smp_setup to load the
27 * base address of the CM GCRs into register v1. 49 * base address of the CM GCRs into register v1 and the CCA to use into
50 * register s0.
28 */ 51 */
29 .quad 0 52 .quad 0
53 .word 0
30 54
31 /* Check whether we're here due to an NMI */ 55 /* Check whether we're here due to an NMI */
32 mfc0 k0, CP0_STATUS 56 mfc0 k0, CP0_STATUS
@@ -117,10 +141,11 @@ icache_done:
117 add a0, a0, t0 141 add a0, a0, t0
118dcache_done: 142dcache_done:
119 143
120 /* Set Kseg0 cacheable, coherent, write-back, write-allocate */ 144 /* Set Kseg0 CCA to that in s0 */
121 mfc0 t0, CP0_CONFIG 145 mfc0 t0, CP0_CONFIG
122 ori t0, 0x7 146 ori t0, 0x7
123 xori t0, 0x2 147 xori t0, 0x7
148 or t0, t0, s0
124 mtc0 t0, CP0_CONFIG 149 mtc0 t0, CP0_CONFIG
125 ehb 150 ehb
126 151
@@ -134,21 +159,24 @@ dcache_done:
134 jr t0 159 jr t0
135 nop 160 nop
136 161
1371: /* We're up, cached & coherent */ 162 /*
163 * We're up, cached & coherent. Perform any further required core-level
164 * initialisation.
165 */
1661: jal mips_cps_core_init
167 nop
138 168
139 /* 169 /*
140 * TODO: We should check the VPE number we intended to boot here, and 170 * Boot any other VPEs within this core that should be online, and
141 * if non-zero we should start that VPE and stop this one. For 171 * deactivate this VPE if it should be offline.
142 * the moment this doesn't matter since CPUs are brought up
143 * sequentially and in order, but once hotplug is implemented
144 * this will need revisiting.
145 */ 172 */
173 jal mips_cps_boot_vpes
174 nop
146 175
147 /* Off we go! */ 176 /* Off we go! */
148 la t0, mips_cps_bootcfg 177 lw t1, VPEBOOTCFG_PC(v0)
149 lw t1, BOOTCFG_PC(t0) 178 lw gp, VPEBOOTCFG_GP(v0)
150 lw gp, BOOTCFG_GP(t0) 179 lw sp, VPEBOOTCFG_SP(v0)
151 lw sp, BOOTCFG_SP(t0)
152 jr t1 180 jr t1
153 nop 181 nop
154 END(mips_cps_core_entry) 182 END(mips_cps_core_entry)
@@ -189,3 +217,271 @@ LEAF(excep_ejtag)
189 jr k0 217 jr k0
190 nop 218 nop
191 END(excep_ejtag) 219 END(excep_ejtag)
220
221LEAF(mips_cps_core_init)
222#ifdef CONFIG_MIPS_MT
223 /* Check that the core implements the MT ASE */
224 has_mt t0, 3f
225 nop
226
227 .set push
228 .set mt
229
230 /* Only allow 1 TC per VPE to execute... */
231 dmt
232
233 /* ...and for the moment only 1 VPE */
234 dvpe
235 la t1, 1f
236 jr.hb t1
237 nop
238
239 /* Enter VPE configuration state */
2401: mfc0 t0, CP0_MVPCONTROL
241 ori t0, t0, MVPCONTROL_VPC
242 mtc0 t0, CP0_MVPCONTROL
243
244 /* Retrieve the number of VPEs within the core */
245 mfc0 t0, CP0_MVPCONF0
246 srl t0, t0, MVPCONF0_PVPE_SHIFT
247 andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
248 addi t7, t0, 1
249
250 /* If there's only 1, we're done */
251 beqz t0, 2f
252 nop
253
254 /* Loop through each VPE within this core */
255 li t5, 1
256
2571: /* Operate on the appropriate TC */
258 mtc0 t5, CP0_VPECONTROL
259 ehb
260
261 /* Bind TC to VPE (1:1 TC:VPE mapping) */
262 mttc0 t5, CP0_TCBIND
263
264 /* Set exclusive TC, non-active, master */
265 li t0, VPECONF0_MVP
266 sll t1, t5, VPECONF0_XTC_SHIFT
267 or t0, t0, t1
268 mttc0 t0, CP0_VPECONF0
269
270 /* Set TC non-active, non-allocatable */
271 mttc0 zero, CP0_TCSTATUS
272
273 /* Set TC halted */
274 li t0, TCHALT_H
275 mttc0 t0, CP0_TCHALT
276
277 /* Next VPE */
278 addi t5, t5, 1
279 slt t0, t5, t7
280 bnez t0, 1b
281 nop
282
283 /* Leave VPE configuration state */
2842: mfc0 t0, CP0_MVPCONTROL
285 xori t0, t0, MVPCONTROL_VPC
286 mtc0 t0, CP0_MVPCONTROL
287
2883: .set pop
289#endif
290 jr ra
291 nop
292 END(mips_cps_core_init)
293
294LEAF(mips_cps_boot_vpes)
295 /* Retrieve CM base address */
296 la t0, mips_cm_base
297 lw t0, 0(t0)
298
299 /* Calculate a pointer to this cores struct core_boot_config */
300 lw t0, GCR_CL_ID_OFS(t0)
301 li t1, COREBOOTCFG_SIZE
302 mul t0, t0, t1
303 la t1, mips_cps_core_bootcfg
304 lw t1, 0(t1)
305 addu t0, t0, t1
306
307 /* Calculate this VPEs ID. If the core doesn't support MT use 0 */
308 has_mt t6, 1f
309 li t9, 0
310
311 /* Find the number of VPEs present in the core */
312 mfc0 t1, CP0_MVPCONF0
313 srl t1, t1, MVPCONF0_PVPE_SHIFT
314 andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
315 addi t1, t1, 1
316
317 /* Calculate a mask for the VPE ID from EBase.CPUNum */
318 clz t1, t1
319 li t2, 31
320 subu t1, t2, t1
321 li t2, 1
322 sll t1, t2, t1
323 addiu t1, t1, -1
324
325 /* Retrieve the VPE ID from EBase.CPUNum */
326 mfc0 t9, $15, 1
327 and t9, t9, t1
328
3291: /* Calculate a pointer to this VPEs struct vpe_boot_config */
330 li t1, VPEBOOTCFG_SIZE
331 mul v0, t9, t1
332 lw t7, COREBOOTCFG_VPECONFIG(t0)
333 addu v0, v0, t7
334
335#ifdef CONFIG_MIPS_MT
336
337 /* If the core doesn't support MT then return */
338 bnez t6, 1f
339 nop
340 jr ra
341 nop
342
343 .set push
344 .set mt
345
3461: /* Enter VPE configuration state */
347 dvpe
348 la t1, 1f
349 jr.hb t1
350 nop
3511: mfc0 t1, CP0_MVPCONTROL
352 ori t1, t1, MVPCONTROL_VPC
353 mtc0 t1, CP0_MVPCONTROL
354 ehb
355
356 /* Loop through each VPE */
357 lw t6, COREBOOTCFG_VPEMASK(t0)
358 move t8, t6
359 li t5, 0
360
361 /* Check whether the VPE should be running. If not, skip it */
3621: andi t0, t6, 1
363 beqz t0, 2f
364 nop
365
366 /* Operate on the appropriate TC */
367 mfc0 t0, CP0_VPECONTROL
368 ori t0, t0, VPECONTROL_TARGTC
369 xori t0, t0, VPECONTROL_TARGTC
370 or t0, t0, t5
371 mtc0 t0, CP0_VPECONTROL
372 ehb
373
374 /* Skip the VPE if its TC is not halted */
375 mftc0 t0, CP0_TCHALT
376 beqz t0, 2f
377 nop
378
379 /* Calculate a pointer to the VPEs struct vpe_boot_config */
380 li t0, VPEBOOTCFG_SIZE
381 mul t0, t0, t5
382 addu t0, t0, t7
383
384 /* Set the TC restart PC */
385 lw t1, VPEBOOTCFG_PC(t0)
386 mttc0 t1, CP0_TCRESTART
387
388 /* Set the TC stack pointer */
389 lw t1, VPEBOOTCFG_SP(t0)
390 mttgpr t1, sp
391
392 /* Set the TC global pointer */
393 lw t1, VPEBOOTCFG_GP(t0)
394 mttgpr t1, gp
395
396 /* Copy config from this VPE */
397 mfc0 t0, CP0_CONFIG
398 mttc0 t0, CP0_CONFIG
399
400 /* Ensure no software interrupts are pending */
401 mttc0 zero, CP0_CAUSE
402 mttc0 zero, CP0_STATUS
403
404 /* Set TC active, not interrupt exempt */
405 mftc0 t0, CP0_TCSTATUS
406 li t1, ~TCSTATUS_IXMT
407 and t0, t0, t1
408 ori t0, t0, TCSTATUS_A
409 mttc0 t0, CP0_TCSTATUS
410
411 /* Clear the TC halt bit */
412 mttc0 zero, CP0_TCHALT
413
414 /* Set VPE active */
415 mftc0 t0, CP0_VPECONF0
416 ori t0, t0, VPECONF0_VPA
417 mttc0 t0, CP0_VPECONF0
418
419 /* Next VPE */
4202: srl t6, t6, 1
421 addi t5, t5, 1
422 bnez t6, 1b
423 nop
424
425 /* Leave VPE configuration state */
426 mfc0 t1, CP0_MVPCONTROL
427 xori t1, t1, MVPCONTROL_VPC
428 mtc0 t1, CP0_MVPCONTROL
429 ehb
430 evpe
431
432 /* Check whether this VPE is meant to be running */
433 li t0, 1
434 sll t0, t0, t9
435 and t0, t0, t8
436 bnez t0, 2f
437 nop
438
439 /* This VPE should be offline, halt the TC */
440 li t0, TCHALT_H
441 mtc0 t0, CP0_TCHALT
442 la t0, 1f
4431: jr.hb t0
444 nop
445
4462: .set pop
447
448#endif /* CONFIG_MIPS_MT */
449
450 /* Return */
451 jr ra
452 nop
453 END(mips_cps_boot_vpes)
454
455#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
456
457 /* Calculate a pointer to this CPUs struct mips_static_suspend_state */
458 .macro psstate dest
459 .set push
460 .set noat
461 lw $1, TI_CPU(gp)
462 sll $1, $1, LONGLOG
463 la \dest, __per_cpu_offset
464 addu $1, $1, \dest
465 lw $1, 0($1)
466 la \dest, cps_cpu_state
467 addu \dest, \dest, $1
468 .set pop
469 .endm
470
471LEAF(mips_cps_pm_save)
472 /* Save CPU state */
473 SUSPEND_SAVE_REGS
474 psstate t1
475 SUSPEND_SAVE_STATIC
476 jr v0
477 nop
478 END(mips_cps_pm_save)
479
480LEAF(mips_cps_pm_restore)
481 /* Restore CPU state */
482 psstate t1
483 RESUME_RESTORE_STATIC
484 RESUME_RESTORE_REGS_RETURN
485 END(mips_cps_pm_restore)
486
487#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 6e8fb85ce7c3..d74f957c561e 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -62,7 +62,7 @@ static inline void check_errata(void)
62 case CPU_34K: 62 case CPU_34K:
63 /* 63 /*
64 * Erratum "RPS May Cause Incorrect Instruction Execution" 64 * Erratum "RPS May Cause Incorrect Instruction Execution"
65 * This code only handles VPE0, any SMP/SMTC/RTOS code 65 * This code only handles VPE0, any SMP/RTOS code
66 * making use of VPE1 will be responsable for that VPE. 66 * making use of VPE1 will be responsable for that VPE.
67 */ 67 */
68 if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2) 68 if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2)
@@ -423,7 +423,7 @@ static void decode_configs(struct cpuinfo_mips *c)
423 423
424#ifndef CONFIG_MIPS_CPS 424#ifndef CONFIG_MIPS_CPS
425 if (cpu_has_mips_r2) { 425 if (cpu_has_mips_r2) {
426 c->core = read_c0_ebase() & 0x3ff; 426 c->core = get_ebase_cpunum();
427 if (cpu_has_mipsmt) 427 if (cpu_has_mipsmt)
428 c->core >>= fls(core_nvpes()) - 1; 428 c->core >>= fls(core_nvpes()) - 1;
429 } 429 }
@@ -684,21 +684,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
684 */ 684 */
685 c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48; 685 c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48;
686 break; 686 break;
687 case PRID_IMP_RM9000:
688 c->cputype = CPU_RM9000;
689 __cpu_name[cpu] = "RM9000";
690 set_isa(c, MIPS_CPU_ISA_IV);
691 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
692 MIPS_CPU_LLSC;
693 /*
694 * Bit 29 in the info register of the RM9000
695 * indicates if the TLB has 48 or 64 entries.
696 *
697 * 29 1 => 64 entry JTLB
698 * 0 => 48 entry JTLB
699 */
700 c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48;
701 break;
702 case PRID_IMP_R8000: 687 case PRID_IMP_R8000:
703 c->cputype = CPU_R8000; 688 c->cputype = CPU_R8000;
704 __cpu_name[cpu] = "RM8000"; 689 __cpu_name[cpu] = "RM8000";
@@ -1041,6 +1026,7 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
1041 decode_configs(c); 1026 decode_configs(c);
1042 /* JZRISC does not implement the CP0 counter. */ 1027 /* JZRISC does not implement the CP0 counter. */
1043 c->options &= ~MIPS_CPU_COUNTER; 1028 c->options &= ~MIPS_CPU_COUNTER;
1029 BUG_ON(!__builtin_constant_p(cpu_has_counter) || cpu_has_counter);
1044 switch (c->processor_id & PRID_IMP_MASK) { 1030 switch (c->processor_id & PRID_IMP_MASK) {
1045 case PRID_IMP_JZRISC: 1031 case PRID_IMP_JZRISC:
1046 c->cputype = CPU_JZRISC; 1032 c->cputype = CPU_JZRISC;
@@ -1074,6 +1060,7 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
1074 switch (c->processor_id & PRID_IMP_MASK) { 1060 switch (c->processor_id & PRID_IMP_MASK) {
1075 case PRID_IMP_NETLOGIC_XLP2XX: 1061 case PRID_IMP_NETLOGIC_XLP2XX:
1076 case PRID_IMP_NETLOGIC_XLP9XX: 1062 case PRID_IMP_NETLOGIC_XLP9XX:
1063 case PRID_IMP_NETLOGIC_XLP5XX:
1077 c->cputype = CPU_XLP; 1064 c->cputype = CPU_XLP;
1078 __cpu_name[cpu] = "Broadcom XLPII"; 1065 __cpu_name[cpu] = "Broadcom XLPII";
1079 break; 1066 break;
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index e5786858cdb6..4353d323f017 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -16,9 +16,6 @@
16#include <asm/isadep.h> 16#include <asm/isadep.h>
17#include <asm/thread_info.h> 17#include <asm/thread_info.h>
18#include <asm/war.h> 18#include <asm/war.h>
19#ifdef CONFIG_MIPS_MT_SMTC
20#include <asm/mipsmtregs.h>
21#endif
22 19
23#ifndef CONFIG_PREEMPT 20#ifndef CONFIG_PREEMPT
24#define resume_kernel restore_all 21#define resume_kernel restore_all
@@ -89,41 +86,6 @@ FEXPORT(syscall_exit)
89 bnez t0, syscall_exit_work 86 bnez t0, syscall_exit_work
90 87
91restore_all: # restore full frame 88restore_all: # restore full frame
92#ifdef CONFIG_MIPS_MT_SMTC
93#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
94/* Re-arm any temporarily masked interrupts not explicitly "acked" */
95 mfc0 v0, CP0_TCSTATUS
96 ori v1, v0, TCSTATUS_IXMT
97 mtc0 v1, CP0_TCSTATUS
98 andi v0, TCSTATUS_IXMT
99 _ehb
100 mfc0 t0, CP0_TCCONTEXT
101 DMT 9 # dmt t1
102 jal mips_ihb
103 mfc0 t2, CP0_STATUS
104 andi t3, t0, 0xff00
105 or t2, t2, t3
106 mtc0 t2, CP0_STATUS
107 _ehb
108 andi t1, t1, VPECONTROL_TE
109 beqz t1, 1f
110 EMT
1111:
112 mfc0 v1, CP0_TCSTATUS
113 /* We set IXMT above, XOR should clear it here */
114 xori v1, v1, TCSTATUS_IXMT
115 or v1, v0, v1
116 mtc0 v1, CP0_TCSTATUS
117 _ehb
118 xor t0, t0, t3
119 mtc0 t0, CP0_TCCONTEXT
120#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
121/* Detect and execute deferred IPI "interrupts" */
122 LONG_L s0, TI_REGS($28)
123 LONG_S sp, TI_REGS($28)
124 jal deferred_smtc_ipi
125 LONG_S s0, TI_REGS($28)
126#endif /* CONFIG_MIPS_MT_SMTC */
127 .set noat 89 .set noat
128 RESTORE_TEMP 90 RESTORE_TEMP
129 RESTORE_AT 91 RESTORE_AT
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index a9ce3408be25..ac35e12cb1f3 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -21,20 +21,6 @@
21#include <asm/war.h> 21#include <asm/war.h>
22#include <asm/thread_info.h> 22#include <asm/thread_info.h>
23 23
24#ifdef CONFIG_MIPS_MT_SMTC
25#define PANIC_PIC(msg) \
26 .set push; \
27 .set nomicromips; \
28 .set reorder; \
29 PTR_LA a0,8f; \
30 .set noat; \
31 PTR_LA AT, panic; \
32 jr AT; \
339: b 9b; \
34 .set pop; \
35 TEXT(msg)
36#endif
37
38 __INIT 24 __INIT
39 25
40/* 26/*
@@ -251,15 +237,6 @@ NESTED(except_vec_vi, 0, sp)
251 SAVE_AT 237 SAVE_AT
252 .set push 238 .set push
253 .set noreorder 239 .set noreorder
254#ifdef CONFIG_MIPS_MT_SMTC
255 /*
256 * To keep from blindly blocking *all* interrupts
257 * during service by SMTC kernel, we also want to
258 * pass the IM value to be cleared.
259 */
260FEXPORT(except_vec_vi_mori)
261 ori a0, $0, 0
262#endif /* CONFIG_MIPS_MT_SMTC */
263 PTR_LA v1, except_vec_vi_handler 240 PTR_LA v1, except_vec_vi_handler
264FEXPORT(except_vec_vi_lui) 241FEXPORT(except_vec_vi_lui)
265 lui v0, 0 /* Patched */ 242 lui v0, 0 /* Patched */
@@ -277,37 +254,10 @@ EXPORT(except_vec_vi_end)
277NESTED(except_vec_vi_handler, 0, sp) 254NESTED(except_vec_vi_handler, 0, sp)
278 SAVE_TEMP 255 SAVE_TEMP
279 SAVE_STATIC 256 SAVE_STATIC
280#ifdef CONFIG_MIPS_MT_SMTC
281 /*
282 * SMTC has an interesting problem that interrupts are level-triggered,
283 * and the CLI macro will clear EXL, potentially causing a duplicate
284 * interrupt service invocation. So we need to clear the associated
285 * IM bit of Status prior to doing CLI, and restore it after the
286 * service routine has been invoked - we must assume that the
287 * service routine will have cleared the state, and any active
288 * level represents a new or otherwised unserviced event...
289 */
290 mfc0 t1, CP0_STATUS
291 and t0, a0, t1
292#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
293 mfc0 t2, CP0_TCCONTEXT
294 or t2, t0, t2
295 mtc0 t2, CP0_TCCONTEXT
296#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
297 xor t1, t1, t0
298 mtc0 t1, CP0_STATUS
299 _ehb
300#endif /* CONFIG_MIPS_MT_SMTC */
301 CLI 257 CLI
302#ifdef CONFIG_TRACE_IRQFLAGS 258#ifdef CONFIG_TRACE_IRQFLAGS
303 move s0, v0 259 move s0, v0
304#ifdef CONFIG_MIPS_MT_SMTC
305 move s1, a0
306#endif
307 TRACE_IRQS_OFF 260 TRACE_IRQS_OFF
308#ifdef CONFIG_MIPS_MT_SMTC
309 move a0, s1
310#endif
311 move v0, s0 261 move v0, s0
312#endif 262#endif
313 263
@@ -496,9 +446,6 @@ NESTED(nmi_handler, PT_SIZE, sp)
496 446
497 .align 5 447 .align 5
498 LEAF(handle_ri_rdhwr_vivt) 448 LEAF(handle_ri_rdhwr_vivt)
499#ifdef CONFIG_MIPS_MT_SMTC
500 PANIC_PIC("handle_ri_rdhwr_vivt called")
501#else
502 .set push 449 .set push
503 .set noat 450 .set noat
504 .set noreorder 451 .set noreorder
@@ -517,7 +464,6 @@ NESTED(nmi_handler, PT_SIZE, sp)
517 .set pop 464 .set pop
518 bltz k1, handle_ri /* slow path */ 465 bltz k1, handle_ri /* slow path */
519 /* fall thru */ 466 /* fall thru */
520#endif
521 END(handle_ri_rdhwr_vivt) 467 END(handle_ri_rdhwr_vivt)
522 468
523 LEAF(handle_ri_rdhwr) 469 LEAF(handle_ri_rdhwr)
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index e712dcf18b2d..95afd663cd45 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -35,33 +35,12 @@
35 */ 35 */
36 .macro setup_c0_status set clr 36 .macro setup_c0_status set clr
37 .set push 37 .set push
38#ifdef CONFIG_MIPS_MT_SMTC
39 /*
40 * For SMTC, we need to set privilege and disable interrupts only for
41 * the current TC, using the TCStatus register.
42 */
43 mfc0 t0, CP0_TCSTATUS
44 /* Fortunately CU 0 is in the same place in both registers */
45 /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
46 li t1, ST0_CU0 | 0x08001c00
47 or t0, t1
48 /* Clear TKSU, leave IXMT */
49 xori t0, 0x00001800
50 mtc0 t0, CP0_TCSTATUS
51 _ehb
52 /* We need to leave the global IE bit set, but clear EXL...*/
53 mfc0 t0, CP0_STATUS
54 or t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr
55 xor t0, ST0_EXL | ST0_ERL | \clr
56 mtc0 t0, CP0_STATUS
57#else
58 mfc0 t0, CP0_STATUS 38 mfc0 t0, CP0_STATUS
59 or t0, ST0_CU0|\set|0x1f|\clr 39 or t0, ST0_CU0|\set|0x1f|\clr
60 xor t0, 0x1f|\clr 40 xor t0, 0x1f|\clr
61 mtc0 t0, CP0_STATUS 41 mtc0 t0, CP0_STATUS
62 .set noreorder 42 .set noreorder
63 sll zero,3 # ehb 43 sll zero,3 # ehb
64#endif
65 .set pop 44 .set pop
66 .endm 45 .endm
67 46
@@ -115,24 +94,6 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
115 jr t0 94 jr t0
1160: 950:
117 96
118#ifdef CONFIG_MIPS_MT_SMTC
119 /*
120 * In SMTC kernel, "CLI" is thread-specific, in TCStatus.
121 * We still need to enable interrupts globally in Status,
122 * and clear EXL/ERL.
123 *
124 * TCContext is used to track interrupt levels under
125 * service in SMTC kernel. Clear for boot TC before
126 * allowing any interrupts.
127 */
128 mtc0 zero, CP0_TCCONTEXT
129
130 mfc0 t0, CP0_STATUS
131 ori t0, t0, 0xff1f
132 xori t0, t0, 0x001e
133 mtc0 t0, CP0_STATUS
134#endif /* CONFIG_MIPS_MT_SMTC */
135
136 PTR_LA t0, __bss_start # clear .bss 97 PTR_LA t0, __bss_start # clear .bss
137 LONG_S zero, (t0) 98 LONG_S zero, (t0)
138 PTR_LA t1, __bss_stop - LONGSIZE 99 PTR_LA t1, __bss_stop - LONGSIZE
@@ -164,25 +125,8 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
164 * function after setting up the stack and gp registers. 125 * function after setting up the stack and gp registers.
165 */ 126 */
166NESTED(smp_bootstrap, 16, sp) 127NESTED(smp_bootstrap, 16, sp)
167#ifdef CONFIG_MIPS_MT_SMTC
168 /*
169 * Read-modify-writes of Status must be atomic, and this
170 * is one case where CLI is invoked without EXL being
171 * necessarily set. The CLI and setup_c0_status will
172 * in fact be redundant for all but the first TC of
173 * each VPE being booted.
174 */
175 DMT 10 # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */
176 jal mips_ihb
177#endif /* CONFIG_MIPS_MT_SMTC */
178 smp_slave_setup 128 smp_slave_setup
179 setup_c0_status_sec 129 setup_c0_status_sec
180#ifdef CONFIG_MIPS_MT_SMTC
181 andi t2, t2, VPECONTROL_TE
182 beqz t2, 2f
183 EMT # emt
1842:
185#endif /* CONFIG_MIPS_MT_SMTC */
186 j start_secondary 130 j start_secondary
187 END(smp_bootstrap) 131 END(smp_bootstrap)
188#endif /* CONFIG_SMP */ 132#endif /* CONFIG_SMP */
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 2b91fe80c436..50b364897dda 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -42,9 +42,6 @@ static struct irq_chip i8259A_chip = {
42 .irq_disable = disable_8259A_irq, 42 .irq_disable = disable_8259A_irq,
43 .irq_unmask = enable_8259A_irq, 43 .irq_unmask = enable_8259A_irq,
44 .irq_mask_ack = mask_and_ack_8259A, 44 .irq_mask_ack = mask_and_ack_8259A,
45#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
46 .irq_set_affinity = plat_set_irq_affinity,
47#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
48}; 45};
49 46
50/* 47/*
@@ -180,7 +177,6 @@ handle_real_irq:
180 outb(cached_master_mask, PIC_MASTER_IMR); 177 outb(cached_master_mask, PIC_MASTER_IMR);
181 outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */ 178 outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */
182 } 179 }
183 smtc_im_ack_irq(irq);
184 raw_spin_unlock_irqrestore(&i8259A_lock, flags); 180 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
185 return; 181 return;
186 182
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 837ff27950bc..09ce45980758 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -224,29 +224,26 @@ void __init check_wait(void)
224 cpu_wait = r4k_wait; 224 cpu_wait = r4k_wait;
225 */ 225 */
226 break; 226 break;
227 case CPU_RM9000:
228 if ((c->processor_id & 0x00ff) >= 0x40)
229 cpu_wait = r4k_wait;
230 break;
231 default: 227 default:
232 break; 228 break;
233 } 229 }
234} 230}
235 231
236static void smtc_idle_hook(void)
237{
238#ifdef CONFIG_MIPS_MT_SMTC
239 void smtc_idle_loop_hook(void);
240
241 smtc_idle_loop_hook();
242#endif
243}
244
245void arch_cpu_idle(void) 232void arch_cpu_idle(void)
246{ 233{
247 smtc_idle_hook();
248 if (cpu_wait) 234 if (cpu_wait)
249 cpu_wait(); 235 cpu_wait();
250 else 236 else
251 local_irq_enable(); 237 local_irq_enable();
252} 238}
239
240#ifdef CONFIG_CPU_IDLE
241
242int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
243 struct cpuidle_driver *drv, int index)
244{
245 arch_cpu_idle();
246 return index;
247}
248
249#endif
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index 8520dad6d4e3..88e4c323382c 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -54,6 +54,21 @@ void gic_write_compare(cycle_t cnt)
54 (int)(cnt & 0xffffffff)); 54 (int)(cnt & 0xffffffff));
55} 55}
56 56
/*
 * Program the GIC compare register of another VPE (@cpu) with the 64-bit
 * count @cnt. The write to GIC_VPE_OTHER_ADDR selects which VPE the
 * following VPE_OTHER accesses target, so interrupts are kept disabled for
 * the whole select-then-write sequence to keep it atomic on this CPU.
 *
 * NOTE(review): local_irq_save() does not serialise against other CPUs
 * using the OTHER_ADDR window concurrently -- confirm callers provide any
 * cross-CPU exclusion that is required.
 */
void gic_write_cpu_compare(cycle_t cnt, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
	GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
		 (int)(cnt >> 32));
	GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
		 (int)(cnt & 0xffffffff));

	local_irq_restore(flags);
}
71
57cycle_t gic_read_compare(void) 72cycle_t gic_read_compare(void)
58{ 73{
59 unsigned int hi, lo; 74 unsigned int hi, lo;
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index fab40f7d2e03..4858642d543d 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -53,13 +53,9 @@ static inline void unmask_msc_irq(struct irq_data *d)
53 */ 53 */
54static void level_mask_and_ack_msc_irq(struct irq_data *d) 54static void level_mask_and_ack_msc_irq(struct irq_data *d)
55{ 55{
56 unsigned int irq = d->irq;
57
58 mask_msc_irq(d); 56 mask_msc_irq(d);
59 if (!cpu_has_veic) 57 if (!cpu_has_veic)
60 MSCIC_WRITE(MSC01_IC_EOI, 0); 58 MSCIC_WRITE(MSC01_IC_EOI, 0);
61 /* This actually needs to be a call into platform code */
62 smtc_im_ack_irq(irq);
63} 59}
64 60
65/* 61/*
@@ -78,7 +74,6 @@ static void edge_mask_and_ack_msc_irq(struct irq_data *d)
78 MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT); 74 MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
79 MSCIC_WRITE(MSC01_IC_SUP+irq*8, r); 75 MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
80 } 76 }
81 smtc_im_ack_irq(irq);
82} 77}
83 78
84/* 79/*
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index d1fea7a054be..5024fa39b861 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -73,7 +73,6 @@ void free_irqno(unsigned int irq)
73 */ 73 */
74void ack_bad_irq(unsigned int irq) 74void ack_bad_irq(unsigned int irq)
75{ 75{
76 smtc_im_ack_irq(irq);
77 printk("unexpected IRQ # %d\n", irq); 76 printk("unexpected IRQ # %d\n", irq);
78} 77}
79 78
@@ -142,23 +141,7 @@ void __irq_entry do_IRQ(unsigned int irq)
142{ 141{
143 irq_enter(); 142 irq_enter();
144 check_stack_overflow(); 143 check_stack_overflow();
145 if (!smtc_handle_on_other_cpu(irq))
146 generic_handle_irq(irq);
147 irq_exit();
148}
149
150#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
151/*
152 * To avoid inefficient and in some cases pathological re-checking of
153 * IRQ affinity, we have this variant that skips the affinity check.
154 */
155
156void __irq_entry do_IRQ_no_affinity(unsigned int irq)
157{
158 irq_enter();
159 smtc_im_backstop(irq);
160 generic_handle_irq(irq); 144 generic_handle_irq(irq);
161 irq_exit(); 145 irq_exit();
162} 146}
163 147
164#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index c9dc67402969..ba473608a347 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -9,12 +9,18 @@
9 */ 9 */
10 10
11#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/percpu.h>
13#include <linux/spinlock.h>
12 14
13#include <asm/mips-cm.h> 15#include <asm/mips-cm.h>
14#include <asm/mips-cpc.h> 16#include <asm/mips-cpc.h>
15 17
16void __iomem *mips_cpc_base; 18void __iomem *mips_cpc_base;
17 19
20static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
21
22static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
23
18phys_t __weak mips_cpc_phys_base(void) 24phys_t __weak mips_cpc_phys_base(void)
19{ 25{
20 u32 cpc_base; 26 u32 cpc_base;
@@ -39,6 +45,10 @@ phys_t __weak mips_cpc_phys_base(void)
39int mips_cpc_probe(void) 45int mips_cpc_probe(void)
40{ 46{
41 phys_t addr; 47 phys_t addr;
48 unsigned cpu;
49
50 for_each_possible_cpu(cpu)
51 spin_lock_init(&per_cpu(cpc_core_lock, cpu));
42 52
43 addr = mips_cpc_phys_base(); 53 addr = mips_cpc_phys_base();
44 if (!addr) 54 if (!addr)
@@ -50,3 +60,21 @@ int mips_cpc_probe(void)
50 60
51 return 0; 61 return 0;
52} 62}
63
/*
 * Acquire exclusive use of the CPC "other core" register window and point
 * it at @core. Preemption is disabled so the caller remains on the same
 * core until the matching mips_cpc_unlock_other() call, which is why the
 * saved IRQ flags live in a per-CPU variable rather than on the caller's
 * stack: lock and unlock happen in separate functions.
 */
void mips_cpc_lock_other(unsigned int core)
{
	unsigned curr_core;
	preempt_disable();
	curr_core = current_cpu_data.core;
	spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
			  per_cpu(cpc_core_lock_flags, curr_core));
	write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF);
}
73
/*
 * Release the CPC "other core" window lock taken by mips_cpc_lock_other()
 * on this core, restoring the IRQ flags stashed in the per-CPU variable,
 * then re-enable preemption.
 */
void mips_cpc_unlock_other(void)
{
	unsigned curr_core = current_cpu_data.core;
	spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
			       per_cpu(cpc_core_lock_flags, curr_core));
	preempt_enable();
}
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index cb098628aee8..362bb3707e62 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels 2 * General MIPS MT support routines, usable in AP/SP and SMVP.
3 * Copyright (C) 2005 Mips Technologies, Inc 3 * Copyright (C) 2005 Mips Technologies, Inc
4 */ 4 */
5#include <linux/cpu.h> 5#include <linux/cpu.h>
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index 6ded9bd1489c..88b1ef5f868a 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels 2 * General MIPS MT support routines, usable in AP/SP and SMVP.
3 * Copyright (C) 2005 Mips Technologies, Inc 3 * Copyright (C) 2005 Mips Technologies, Inc
4 */ 4 */
5 5
@@ -57,9 +57,6 @@ void mips_mt_regdump(unsigned long mvpctl)
57 int tc; 57 int tc;
58 unsigned long haltval; 58 unsigned long haltval;
59 unsigned long tcstatval; 59 unsigned long tcstatval;
60#ifdef CONFIG_MIPS_MT_SMTC
61 void smtc_soft_dump(void);
62#endif /* CONFIG_MIPT_MT_SMTC */
63 60
64 local_irq_save(flags); 61 local_irq_save(flags);
65 vpflags = dvpe(); 62 vpflags = dvpe();
@@ -116,9 +113,6 @@ void mips_mt_regdump(unsigned long mvpctl)
116 if (!haltval) 113 if (!haltval)
117 write_tc_c0_tchalt(0); 114 write_tc_c0_tchalt(0);
118 } 115 }
119#ifdef CONFIG_MIPS_MT_SMTC
120 smtc_soft_dump();
121#endif /* CONFIG_MIPT_MT_SMTC */
122 printk("===========================\n"); 116 printk("===========================\n");
123 evpe(vpflags); 117 evpe(vpflags);
124 local_irq_restore(flags); 118 local_irq_restore(flags);
@@ -295,21 +289,11 @@ void mips_mt_set_cpuoptions(void)
295 289
296void mt_cflush_lockdown(void) 290void mt_cflush_lockdown(void)
297{ 291{
298#ifdef CONFIG_MIPS_MT_SMTC
299 void smtc_cflush_lockdown(void);
300
301 smtc_cflush_lockdown();
302#endif /* CONFIG_MIPS_MT_SMTC */
303 /* FILL IN VSMP and AP/SP VERSIONS HERE */ 292 /* FILL IN VSMP and AP/SP VERSIONS HERE */
304} 293}
305 294
306void mt_cflush_release(void) 295void mt_cflush_release(void)
307{ 296{
308#ifdef CONFIG_MIPS_MT_SMTC
309 void smtc_cflush_release(void);
310
311 smtc_cflush_release();
312#endif /* CONFIG_MIPS_MT_SMTC */
313 /* FILL IN VSMP and AP/SP VERSIONS HERE */ 297 /* FILL IN VSMP and AP/SP VERSIONS HERE */
314} 298}
315 299
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
index 029e002a4ea0..f6547680c81c 100644
--- a/arch/mips/kernel/octeon_switch.S
+++ b/arch/mips/kernel/octeon_switch.S
@@ -10,24 +10,12 @@
10 * Copyright (C) 2000 MIPS Technologies, Inc. 10 * Copyright (C) 2000 MIPS Technologies, Inc.
11 * written by Carsten Langgaard, carstenl@mips.com 11 * written by Carsten Langgaard, carstenl@mips.com
12 */ 12 */
13#include <asm/asm.h>
14#include <asm/cachectl.h>
15#include <asm/fpregdef.h>
16#include <asm/mipsregs.h>
17#include <asm/asm-offsets.h>
18#include <asm/pgtable-bits.h>
19#include <asm/regdef.h>
20#include <asm/stackframe.h>
21#include <asm/thread_info.h>
22
23#include <asm/asmmacro.h>
24
25/*
26 * Offset to the current process status flags, the first 32 bytes of the
27 * stack are not used.
28 */
29#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
30 13
14#define USE_ALTERNATE_RESUME_IMPL 1
15 .set push
16 .set arch=mips64r2
17#include "r4k_switch.S"
18 .set pop
31/* 19/*
32 * task_struct *resume(task_struct *prev, task_struct *next, 20 * task_struct *resume(task_struct *prev, task_struct *next,
33 * struct thread_info *next_ti, int usedfpu) 21 * struct thread_info *next_ti, int usedfpu)
@@ -40,6 +28,61 @@
40 cpu_save_nonscratch a0 28 cpu_save_nonscratch a0
41 LONG_S ra, THREAD_REG31(a0) 29 LONG_S ra, THREAD_REG31(a0)
42 30
31 /*
32 * check if we need to save FPU registers
33 */
34 PTR_L t3, TASK_THREAD_INFO(a0)
35 LONG_L t0, TI_FLAGS(t3)
36 li t1, _TIF_USEDFPU
37 and t2, t0, t1
38 beqz t2, 1f
39 nor t1, zero, t1
40
41 and t0, t0, t1
42 LONG_S t0, TI_FLAGS(t3)
43
44 /*
45 * clear saved user stack CU1 bit
46 */
47 LONG_L t0, ST_OFF(t3)
48 li t1, ~ST0_CU1
49 and t0, t0, t1
50 LONG_S t0, ST_OFF(t3)
51
52 .set push
53 .set arch=mips64r2
54 fpu_save_double a0 t0 t1 # c0_status passed in t0
55 # clobbers t1
56 .set pop
571:
58
59 /* check if we need to save COP2 registers */
60 PTR_L t2, TASK_THREAD_INFO(a0)
61 LONG_L t0, ST_OFF(t2)
62 bbit0 t0, 30, 1f
63
64 /* Disable COP2 in the stored process state */
65 li t1, ST0_CU2
66 xor t0, t1
67 LONG_S t0, ST_OFF(t2)
68
69 /* Enable COP2 so we can save it */
70 mfc0 t0, CP0_STATUS
71 or t0, t1
72 mtc0 t0, CP0_STATUS
73
74 /* Save COP2 */
75 daddu a0, THREAD_CP2
76 jal octeon_cop2_save
77 dsubu a0, THREAD_CP2
78
79 /* Disable COP2 now that we are done */
80 mfc0 t0, CP0_STATUS
81 li t1, ST0_CU2
82 xor t0, t1
83 mtc0 t0, CP0_STATUS
84
851:
43#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 86#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
44 /* Check if we need to store CVMSEG state */ 87 /* Check if we need to store CVMSEG state */
45 mfc0 t0, $11,7 /* CvmMemCtl */ 88 mfc0 t0, $11,7 /* CvmMemCtl */
@@ -85,12 +128,7 @@
85 move $28, a2 128 move $28, a2
86 cpu_restore_nonscratch a1 129 cpu_restore_nonscratch a1
87 130
88#if (_THREAD_SIZE - 32) < 0x8000 131 PTR_ADDU t0, $28, _THREAD_SIZE - 32
89 PTR_ADDIU t0, $28, _THREAD_SIZE - 32
90#else
91 PTR_LI t0, _THREAD_SIZE - 32
92 PTR_ADDU t0, $28
93#endif
94 set_saved_sp t0, t1, t2 132 set_saved_sp t0, t1, t2
95 133
96 mfc0 t1, CP0_STATUS /* Do we really need this? */ 134 mfc0 t1, CP0_STATUS /* Do we really need this? */
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
new file mode 100644
index 000000000000..5aa4c6f8cf83
--- /dev/null
+++ b/arch/mips/kernel/pm-cps.c
@@ -0,0 +1,716 @@
1/*
2 * Copyright (C) 2014 Imagination Technologies
3 * Author: Paul Burton <paul.burton@imgtec.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 */
10
11#include <linux/init.h>
12#include <linux/percpu.h>
13#include <linux/slab.h>
14
15#include <asm/asm-offsets.h>
16#include <asm/cacheflush.h>
17#include <asm/cacheops.h>
18#include <asm/idle.h>
19#include <asm/mips-cm.h>
20#include <asm/mips-cpc.h>
21#include <asm/mipsmtregs.h>
22#include <asm/pm.h>
23#include <asm/pm-cps.h>
24#include <asm/smp-cps.h>
25#include <asm/uasm.h>
26
27/*
28 * cps_nc_entry_fn - type of a generated non-coherent state entry function
29 * @online: the count of online coupled VPEs
30 * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count
31 *
32 * The code entering & exiting non-coherent states is generated at runtime
33 * using uasm, in order to ensure that the compiler cannot insert a stray
34 * memory access at an unfortunate time and to allow the generation of optimal
35 * core-specific code particularly for cache routines. If coupled_coherence
36 * is non-zero and this is the entry function for the CPS_PM_NC_WAIT state,
37 * returns the number of VPEs that were in the wait state at the point this
38 * VPE left it. Returns garbage if coupled_coherence is zero or this is not
39 * the entry function for CPS_PM_NC_WAIT.
40 */
41typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count);
42
43/*
44 * The entry point of the generated non-coherent idle state entry/exit
45 * functions. Actually per-core rather than per-CPU.
46 */
47static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT],
48 nc_asm_enter);
49
50/* Bitmap indicating which states are supported by the system */
51DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
52
53/*
54 * Indicates the number of coupled VPEs ready to operate in a non-coherent
55 * state. Actually per-core rather than per-CPU.
56 */
57static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
58static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);
59
60/* Indicates online CPUs coupled with the current CPU */
61static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
62
63/*
64 * Used to synchronize entry to deep idle states. Actually per-core rather
65 * than per-CPU.
66 */
67static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);
68
69/* Saved CPU state across the CPS_PM_POWER_GATED state */
70DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);
71
72/* A somewhat arbitrary number of labels & relocs for uasm */
73static struct uasm_label labels[32] __initdata;
74static struct uasm_reloc relocs[32] __initdata;
75
76/* CPU dependant sync types */
77static unsigned stype_intervention;
78static unsigned stype_memory;
79static unsigned stype_ordering;
80
/*
 * MIPS general purpose register names in ABI order, valued 0-31 so they
 * can be passed directly as register operands to the uasm_* generators.
 */
enum mips_reg {
	zero, at, v0, v1, a0, a1, a2, a3,
	t0, t1, t2, t3, t4, t5, t6, t7,
	s0, s1, s2, s3, s4, s5, s6, s7,
	t8, t9, k0, k1, gp, sp, fp, ra,
};
87
88bool cps_pm_support_state(enum cps_pm_state state)
89{
90 return test_bit(state, state_support);
91}
92
/*
 * Rendezvous @online coupled VPEs at a barrier before proceeding. Two-phase
 * scheme: each VPE bumps the counter and spins until all @online arrive,
 * then bumps it again; the last arrival (counter == 2 * online) resets the
 * counter to zero for the next use while the others spin until that reset.
 */
static void coupled_barrier(atomic_t *a, unsigned online)
{
	/*
	 * This function is effectively the same as
	 * cpuidle_coupled_parallel_barrier, which can't be used here since
	 * there's no cpuidle device.
	 */

	/* Nothing to synchronise with when coupled coherence isn't in use */
	if (!coupled_coherence)
		return;

	/* Phase 1: arrive, ordered after any prior stores */
	smp_mb__before_atomic_inc();
	atomic_inc(a);

	while (atomic_read(a) < online)
		cpu_relax();

	/* Phase 2: last VPE through resets the barrier for reuse */
	if (atomic_inc_return(a) == online * 2) {
		atomic_set(a, 0);
		return;
	}

	/* Everyone else waits for the reset (counter falls to zero) */
	while (atomic_read(a) > online)
		cpu_relax();
}
118
/*
 * cps_pm_enter_state - enter a low power state on the calling CPU
 * @state: the CPS power state to enter
 *
 * Coordinates entry with any coupled (sibling) VPEs on the same core, then
 * runs the uasm-generated entry function for @state. Returns 0 on success,
 * or -EINVAL if no entry function was generated for @state (i.e. the state
 * is unsupported on this core).
 */
int cps_pm_enter_state(enum cps_pm_state state)
{
	unsigned cpu = smp_processor_id();
	unsigned core = current_cpu_data.core;
	unsigned online, left;
	cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
	u32 *core_ready_count, *nc_core_ready_count;
	void *nc_addr;
	cps_nc_entry_fn entry;
	struct core_boot_config *core_cfg;
	struct vpe_boot_config *vpe_cfg;

	/* Check that there is an entry function for this state */
	entry = per_cpu(nc_asm_enter, core)[state];
	if (!entry)
		return -EINVAL;

	/* Calculate which coupled CPUs (VPEs) are online */
#ifdef CONFIG_MIPS_MT
	if (cpu_online(cpu)) {
		cpumask_and(coupled_mask, cpu_online_mask,
			    &cpu_sibling_map[cpu]);
		online = cpumask_weight(coupled_mask);
		/* Exclude ourselves: coupled_mask holds only the siblings */
		cpumask_clear_cpu(cpu, coupled_mask);
	} else
#endif
	{
		cpumask_clear(coupled_mask);
		online = 1;
	}

	/* Setup the VPE to run mips_cps_pm_restore when started again */
	if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
		core_cfg = &mips_cps_core_bootcfg[core];
		vpe_cfg = &core_cfg->vpe_config[current_cpu_data.vpe_id];
		vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
		vpe_cfg->gp = (unsigned long)current_thread_info();
		vpe_cfg->sp = 0;
	}

	/* Indicate that this CPU might not be coherent */
	cpumask_clear_cpu(cpu, &cpu_coherent_mask);
	smp_mb__after_clear_bit();

	/*
	 * Create a non-coherent mapping of the core ready_count, so it can
	 * still be accessed once the core has left the coherent domain.
	 */
	core_ready_count = per_cpu(ready_count, core);
	nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
				   (unsigned long)core_ready_count);
	nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
	nc_core_ready_count = nc_addr;

	/* Ensure ready_count is zero-initialised before the assembly runs */
	ACCESS_ONCE(*nc_core_ready_count) = 0;
	coupled_barrier(&per_cpu(pm_barrier, core), online);

	/* Run the generated entry code */
	left = entry(online, nc_core_ready_count);

	/* Remove the non-coherent mapping of ready_count */
	kunmap_noncoherent();

	/* Indicate that this CPU is definitely coherent */
	cpumask_set_cpu(cpu, &cpu_coherent_mask);

	/*
	 * If this VPE is the first to leave the non-coherent wait state then
	 * it needs to wake up any coupled VPEs still running their wait
	 * instruction so that they return to cpuidle, which can then complete
	 * coordination between the coupled VPEs & provide the governor with
	 * a chance to reflect on the length of time the VPEs were in the
	 * idle state.
	 */
	if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online))
		arch_send_call_function_ipi_mask(coupled_mask);

	return 0;
}
196
197static void __init cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
198 struct uasm_reloc **pr,
199 const struct cache_desc *cache,
200 unsigned op, int lbl)
201{
202 unsigned cache_size = cache->ways << cache->waybit;
203 unsigned i;
204 const unsigned unroll_lines = 32;
205
206 /* If the cache isn't present this function has it easy */
207 if (cache->flags & MIPS_CACHE_NOT_PRESENT)
208 return;
209
210 /* Load base address */
211 UASM_i_LA(pp, t0, (long)CKSEG0);
212
213 /* Calculate end address */
214 if (cache_size < 0x8000)
215 uasm_i_addiu(pp, t1, t0, cache_size);
216 else
217 UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size));
218
219 /* Start of cache op loop */
220 uasm_build_label(pl, *pp, lbl);
221
222 /* Generate the cache ops */
223 for (i = 0; i < unroll_lines; i++)
224 uasm_i_cache(pp, op, i * cache->linesz, t0);
225
226 /* Update the base address */
227 uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);
228
229 /* Loop if we haven't reached the end address yet */
230 uasm_il_bne(pp, pr, t0, t1, lbl);
231 uasm_i_nop(pp);
232}
233
/*
 * cps_gen_flush_fsb() - Generate a fill/store buffer (FSB) flush sequence.
 * @pp: In/out pointer into the instruction buffer being emitted to.
 * @pl: uasm label list.
 * @pr: uasm relocation list.
 * @cpu_info: CPU information used to select the workaround & line size.
 * @lbl: Label number for the flush retry loop.
 *
 * Emits code which repeatedly loads & invalidates dcache lines until a
 * performance counter observes a pipeline stall caused by the FSB being
 * full, proving the FSB has been exercised & drained.
 *
 * Return: 0 on success (including CPUs which need no flush), -1 when the
 * workaround is required but cannot be generated for this CPU.
 */
static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
				    struct uasm_reloc **pr,
				    const struct cpuinfo_mips *cpu_info,
				    int lbl)
{
	unsigned i, fsb_size = 8;
	/* Load 1.5x the FSB's capacity worth of cache lines */
	unsigned num_loads = (fsb_size * 3) / 2;
	/* Touch every other line so consecutive loads miss */
	unsigned line_stride = 2;
	unsigned line_size = cpu_info->dcache.linesz;
	unsigned perf_counter, perf_event;
	unsigned revision = cpu_info->processor_id & PRID_REV_MASK;

	/*
	 * Determine whether this CPU requires an FSB flush, and if so which
	 * performance counter/event reflect stalls due to a full FSB.
	 */
	switch (__get_cpu_type(cpu_info->cputype)) {
	case CPU_INTERAPTIV:
		perf_counter = 1;
		perf_event = 51;
		break;

	case CPU_PROAPTIV:
		/* Newer proAptiv cores don't require this workaround */
		if (revision >= PRID_REV_ENCODE_332(1, 1, 0))
			return 0;

		/* On older ones it's unavailable */
		return -1;

	/* CPUs which do not require the workaround */
	case CPU_P5600:
		return 0;

	default:
		WARN_ONCE(1, "pm-cps: FSB flush unsupported for this CPU\n");
		return -1;
	}

	/*
	 * Ensure that the fill/store buffer (FSB) is not holding the results
	 * of a prefetch, since if it is then the CPC sequencer may become
	 * stuck in the D3 (ClrBus) state whilst entering a low power state.
	 */

	/* Preserve perf counter setup */
	uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
	uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */

	/* Setup perf counter to count FSB full pipeline stalls */
	uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf);
	uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */
	uasm_i_ehb(pp);
	uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */
	uasm_i_ehb(pp);

	/* Base address for loads */
	UASM_i_LA(pp, t0, (long)CKSEG0);

	/* Start of clear loop */
	uasm_build_label(pl, *pp, lbl);

	/* Perform some loads to fill the FSB */
	for (i = 0; i < num_loads; i++)
		uasm_i_lw(pp, zero, i * line_size * line_stride, t0);

	/*
	 * Invalidate the new D-cache entries so that the cache will need
	 * refilling (via the FSB) if the loop is executed again.
	 */
	for (i = 0; i < num_loads; i++) {
		uasm_i_cache(pp, Hit_Invalidate_D,
			     i * line_size * line_stride, t0);
		uasm_i_cache(pp, Hit_Writeback_Inv_SD,
			     i * line_size * line_stride, t0);
	}

	/* Completion barrier */
	uasm_i_sync(pp, stype_memory);
	uasm_i_ehb(pp);

	/* Check whether the pipeline stalled due to the FSB being full */
	uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */

	/* Loop if it didn't */
	uasm_il_beqz(pp, pr, t1, lbl);
	uasm_i_nop(pp);

	/* Restore perf counter 1. The count may well now be wrong... */
	uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
	uasm_i_ehb(pp);
	uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
	uasm_i_ehb(pp);

	return 0;
}
330
/*
 * cps_gen_set_top_bit() - Generate code to atomically set a word's top bit.
 * @pp: In/out pointer into the instruction buffer being emitted to.
 * @pl: uasm label list.
 * @pr: uasm relocation list.
 * @r_addr: Register holding the address of the word to modify.
 * @lbl: Label number for the LL/SC retry loop.
 *
 * Emits an LL/SC loop which ORs bit 31 into the 32-bit word at *r_addr,
 * retrying until the SC succeeds.
 */
static void __init cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
				       struct uasm_reloc **pr,
				       unsigned r_addr, int lbl)
{
	/* Load the bit 31 mask once, outside of the retry loop */
	uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
	uasm_build_label(pl, *pp, lbl);
	uasm_i_ll(pp, t1, 0, r_addr);
	uasm_i_or(pp, t1, t1, t0);
	uasm_i_sc(pp, t1, 0, r_addr);
	/* SC writes 0 to t1 on failure - retry the LL/SC sequence */
	uasm_il_beqz(pp, pr, t1, lbl);
	uasm_i_nop(pp);
}
343
/*
 * cps_gen_entry_code() - Generate low power state entry code for a CPU.
 * @cpu: The CPU the code is generated for.
 * @state: The low power state the code will enter.
 *
 * Uses uasm to generate the assembly routine which takes @cpu's core into
 * @state: coordinating coupled VPEs via ready_count, flushing the L1
 * caches, disabling coherence, optionally issuing a CPC command, and (for
 * states which resume here) re-enabling coherence before returning.
 *
 * Calling convention of the generated code (see register constants below):
 * a0 = number of online VPEs, a1 = address of the core's ready_count.
 *
 * Return: pointer to a kcalloc'd buffer holding the code, or NULL on
 * failure. The buffer is not freed on success.
 */
static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
{
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *buf, *p;
	/* Register aliases used by the generated code */
	const unsigned r_online = a0;
	const unsigned r_nc_count = a1;
	const unsigned r_pcohctl = t7;
	const unsigned max_instrs = 256;
	unsigned cpc_cmd;
	int err;
	/* uasm label numbers used within the generated routine */
	enum {
		lbl_incready = 1,
		lbl_poll_cont,
		lbl_secondary_hang,
		lbl_disable_coherence,
		lbl_flush_fsb,
		lbl_invicache,
		lbl_flushdcache,
		lbl_hang,
		lbl_set_cont,
		lbl_secondary_cont,
		lbl_decready,
	};

	/* Allocate a buffer to hold the generated code */
	p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
	if (!buf)
		return NULL;

	/* Clear labels & relocs ready for (re)use */
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
		/*
		 * Save CPU state. Note the non-standard calling convention
		 * with the return address placed in v0 to avoid clobbering
		 * the ra register before it is saved.
		 */
		UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
		uasm_i_jalr(&p, v0, t0);
		uasm_i_nop(&p);
	}

	/*
	 * Load addresses of required CM & CPC registers. This is done early
	 * because they're needed in both the enable & disable coherence steps
	 * but in the coupled case the enable step will only run on one VPE.
	 */
	UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence());

	if (coupled_coherence) {
		/* Increment ready_count with an LL/SC loop */
		uasm_i_sync(&p, stype_ordering);
		uasm_build_label(&l, p, lbl_incready);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, 1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_incready);
		/* Branch delay slot: t1 = incremented value on success */
		uasm_i_addiu(&p, t1, t1, 1);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);

		/*
		 * If this is the last VPE to become ready for non-coherence
		 * then it should branch below.
		 */
		uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
		uasm_i_nop(&p);

		if (state < CPS_PM_POWER_GATED) {
			/*
			 * Otherwise this is not the last VPE to become ready
			 * for non-coherence. It needs to wait until coherence
			 * has been disabled before proceeding, which it will do
			 * by polling for the top bit of ready_count being set.
			 */
			uasm_i_addiu(&p, t1, zero, -1);
			uasm_build_label(&l, p, lbl_poll_cont);
			uasm_i_lw(&p, t0, 0, r_nc_count);
			/* Top bit set => ready_count reads negative */
			uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
			uasm_i_ehb(&p);
			uasm_i_yield(&p, zero, t1);
			uasm_il_b(&p, &r, lbl_poll_cont);
			uasm_i_nop(&p);
		} else {
			/*
			 * The core will lose power & this VPE will not continue
			 * so it can simply halt here.
			 */
			uasm_i_addiu(&p, t0, zero, TCHALT_H);
			uasm_i_mtc0(&p, t0, 2, 4);
			uasm_build_label(&l, p, lbl_secondary_hang);
			uasm_il_b(&p, &r, lbl_secondary_hang);
			uasm_i_nop(&p);
		}
	}

	/*
	 * This is the point of no return - this VPE will now proceed to
	 * disable coherence. At this point we *must* be sure that no other
	 * VPE within the core will interfere with the L1 dcache.
	 */
	uasm_build_label(&l, p, lbl_disable_coherence);

	/* Invalidate the L1 icache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
			      Index_Invalidate_I, lbl_invicache);

	/* Writeback & invalidate the L1 dcache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
			      Index_Writeback_Inv_D, lbl_flushdcache);

	/* Completion barrier */
	uasm_i_sync(&p, stype_memory);
	uasm_i_ehb(&p);

	/*
	 * Disable all but self interventions. The load from COHCTL is defined
	 * by the interAptiv & proAptiv SUMs as ensuring that the operation
	 * resulting from the preceeding store is complete.
	 */
	uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
	uasm_i_sw(&p, t0, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	/* Sync to ensure previous interventions are complete */
	uasm_i_sync(&p, stype_intervention);
	uasm_i_ehb(&p);

	/* Disable coherence */
	uasm_i_sw(&p, zero, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	if (state >= CPS_PM_CLOCK_GATED) {
		err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
					lbl_flush_fsb);
		if (err)
			goto out_err;

		/* Determine the CPC command to issue */
		switch (state) {
		case CPS_PM_CLOCK_GATED:
			cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
			break;
		case CPS_PM_POWER_GATED:
			cpc_cmd = CPC_Cx_CMD_PWRDOWN;
			break;
		default:
			BUG();
			goto out_err;
		}

		/* Issue the CPC command */
		UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
		uasm_i_addiu(&p, t1, zero, cpc_cmd);
		uasm_i_sw(&p, t1, 0, t0);

		if (state == CPS_PM_POWER_GATED) {
			/* If anything goes wrong just hang */
			uasm_build_label(&l, p, lbl_hang);
			uasm_il_b(&p, &r, lbl_hang);
			uasm_i_nop(&p);

			/*
			 * There's no point generating more code, the core is
			 * powered down & if powered back up will run from the
			 * reset vector not from here.
			 */
			goto gen_done;
		}

		/* Completion barrier */
		uasm_i_sync(&p, stype_memory);
		uasm_i_ehb(&p);
	}

	if (state == CPS_PM_NC_WAIT) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		if (coupled_coherence)
			cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
					    lbl_set_cont);

		/*
		 * VPEs which did not disable coherence will continue
		 * executing, after coherence has been disabled, from this
		 * point.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Now perform our wait */
		uasm_i_wait(&p, 0);
	}

	/*
	 * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
	 * will run this. The first will actually re-enable coherence & the
	 * rest will just be performing a rather unusual nop.
	 */
	uasm_i_addiu(&p, t0, zero, CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK);
	uasm_i_sw(&p, t0, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	/* Completion barrier */
	uasm_i_sync(&p, stype_memory);
	uasm_i_ehb(&p);

	if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
		/* Decrement ready_count with an LL/SC loop */
		uasm_build_label(&l, p, lbl_decready);
		uasm_i_sync(&p, stype_ordering);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, -1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_decready);
		/* Delay slot: mask off the top bit to recover the VPE count */
		uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);
	}

	if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);

		/*
		 * This core will be reliant upon another core sending a
		 * power-up command to the CPC in order to resume operation.
		 * Thus an arbitrary VPE can't trigger the core leaving the
		 * idle state and the one that disables coherence might as well
		 * be the one to re-enable it. The rest will continue from here
		 * after that has been done.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);
	}

	/* The core is coherent, time to return to C code */
	uasm_i_jr(&p, ra);
	uasm_i_nop(&p);

gen_done:
	/* Ensure the code didn't exceed the resources allocated for it */
	BUG_ON((p - buf) > max_instrs);
	BUG_ON((l - labels) > ARRAY_SIZE(labels));
	BUG_ON((r - relocs) > ARRAY_SIZE(relocs));

	/* Patch branch offsets */
	uasm_resolve_relocs(relocs, labels);

	/* Flush the icache */
	local_flush_icache_range((unsigned long)buf, (unsigned long)p);

	return buf;
out_err:
	kfree(buf);
	return NULL;
}
614
/*
 * cps_gen_core_entries() - Generate low power entry code for a CPU's core.
 * @cpu: The CPU whose core the code is generated for.
 *
 * Generates entry code for each supported low power state which does not
 * already have code generated for this core, and allocates the core's
 * ready_count word aligned to a dcache line. States whose code generation
 * fails are cleared from state_support system-wide.
 *
 * Return: 0 on success, -ENOMEM if the ready_count allocation fails.
 */
static int __init cps_gen_core_entries(unsigned cpu)
{
	enum cps_pm_state state;
	unsigned core = cpu_data[cpu].core;
	unsigned dlinesz = cpu_data[cpu].dcache.linesz;
	void *entry_fn, *core_rc;

	for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
		/* Skip states already generated for this core */
		if (per_cpu(nc_asm_enter, core)[state])
			continue;
		/* Skip states not supported by the system */
		if (!test_bit(state, state_support))
			continue;

		entry_fn = cps_gen_entry_code(cpu, state);
		if (!entry_fn) {
			pr_err("Failed to generate core %u state %u entry\n",
			       core, state);
			/* Disable this state; a NULL entry_fn is stored */
			clear_bit(state, state_support);
		}

		per_cpu(nc_asm_enter, core)[state] = entry_fn;
	}

	if (!per_cpu(ready_count, core)) {
		/* Over-allocate so the count can be aligned below */
		core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
		if (!core_rc) {
			pr_err("Failed allocate core %u ready_count\n", core);
			return -ENOMEM;
		}
		/* Keep the unaligned pointer so the buffer could be freed */
		per_cpu(ready_count_alloc, core) = core_rc;

		/* Ensure ready_count is aligned to a cacheline boundary */
		core_rc += dlinesz - 1;
		core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
		per_cpu(ready_count, core) = core_rc;
	}

	return 0;
}
654
/*
 * cps_pm_init() - Initialise Coherent Processing System power management.
 *
 * Detects the SYNC stype values suitable for this CPU, determines which
 * low power states the system can support (non-coherent wait, clock
 * gating, power gating) and generates the entry code for each present
 * CPU's core.
 *
 * Return: 0 on success, or the error from cps_gen_core_entries().
 */
static int __init cps_pm_init(void)
{
	unsigned cpu;
	int err;

	/* Detect appropriate sync types for the system */
	switch (current_cpu_data.cputype) {
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_M5150:
	case CPU_P5600:
		/* These cores define lightweight SYNC stype variants */
		stype_intervention = 0x2;
		stype_memory = 0x3;
		stype_ordering = 0x10;
		break;

	default:
		/* Leave the stype_* defaults: heavyweight sync 0 */
		pr_warn("Power management is using heavyweight sync 0\n");
	}

	/* A CM is required for all non-coherent states */
	if (!mips_cm_present()) {
		pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
		goto out;
	}

	/*
	 * If interrupts were enabled whilst running a wait instruction on a
	 * non-coherent core then the VPE may end up processing interrupts
	 * whilst non-coherent. That would be bad.
	 */
	if (cpu_wait == r4k_wait_irqoff)
		set_bit(CPS_PM_NC_WAIT, state_support);
	else
		pr_warn("pm-cps: non-coherent wait unavailable\n");

	/* Detect whether a CPC is present */
	if (mips_cpc_present()) {
		/* Detect whether clock gating is implemented */
		if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK)
			set_bit(CPS_PM_CLOCK_GATED, state_support);
		else
			pr_warn("pm-cps: CPC does not support clock gating\n");

		/* Power gating is available with CPS SMP & any CPC */
		if (mips_cps_smp_in_use())
			set_bit(CPS_PM_POWER_GATED, state_support);
		else
			pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n");
	} else {
		pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
	}

	/*
	 * NOTE(review): an error here returns with entries already generated
	 * for earlier cores left in place - confirm that is acceptable.
	 */
	for_each_present_cpu(cpu) {
		err = cps_gen_core_entries(cpu);
		if (err)
			return err;
	}
out:
	return 0;
}
arch_initcall(cps_pm_init);
diff --git a/arch/mips/kernel/pm.c b/arch/mips/kernel/pm.c
new file mode 100644
index 000000000000..fefdf39d3df3
--- /dev/null
+++ b/arch/mips/kernel/pm.c
@@ -0,0 +1,99 @@
1/*
2 * Copyright (C) 2014 Imagination Technologies Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * CPU PM notifiers for saving/restoring general CPU state.
10 */
11
12#include <linux/cpu_pm.h>
13#include <linux/init.h>
14
15#include <asm/dsp.h>
16#include <asm/fpu.h>
17#include <asm/mmu_context.h>
18#include <asm/pm.h>
19#include <asm/watch.h>
20
/*
 * Used by PM helper macros in asm/pm.h to provide static storage for
 * suspend/resume state.
 */
struct mips_static_suspend_state mips_static_suspend_state;
23
24/**
25 * mips_cpu_save() - Save general CPU state.
26 * Ensures that general CPU context is saved, notably FPU and DSP.
27 */
28static int mips_cpu_save(void)
29{
30 /* Save FPU state */
31 lose_fpu(1);
32
33 /* Save DSP state */
34 save_dsp(current);
35
36 return 0;
37}
38
39/**
40 * mips_cpu_restore() - Restore general CPU state.
41 * Restores important CPU context.
42 */
43static void mips_cpu_restore(void)
44{
45 unsigned int cpu = smp_processor_id();
46
47 /* Restore ASID */
48 if (current->mm)
49 write_c0_entryhi(cpu_asid(cpu, current->mm));
50
51 /* Restore DSP state */
52 restore_dsp(current);
53
54 /* Restore UserLocal */
55 if (cpu_has_userlocal)
56 write_c0_userlocal(current_thread_info()->tp_value);
57
58 /* Restore watch registers */
59 __restore_watch();
60}
61
62/**
63 * mips_pm_notifier() - Notifier for preserving general CPU context.
64 * @self: Notifier block.
65 * @cmd: CPU PM event.
66 * @v: Private data (unused).
67 *
68 * This is called when a CPU power management event occurs, and is used to
69 * ensure that important CPU context is preserved across a CPU power down.
70 */
71static int mips_pm_notifier(struct notifier_block *self, unsigned long cmd,
72 void *v)
73{
74 int ret;
75
76 switch (cmd) {
77 case CPU_PM_ENTER:
78 ret = mips_cpu_save();
79 if (ret)
80 return NOTIFY_STOP;
81 break;
82 case CPU_PM_ENTER_FAILED:
83 case CPU_PM_EXIT:
84 mips_cpu_restore();
85 break;
86 }
87
88 return NOTIFY_OK;
89}
90
/* Notifier block registered with the CPU PM framework at boot */
static struct notifier_block mips_pm_notifier_block = {
	.notifier_call = mips_pm_notifier,
};
94
95static int __init mips_pm_init(void)
96{
97 return cpu_pm_register_notifier(&mips_pm_notifier_block);
98}
99arch_initcall(mips_pm_init);
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 60e39dc7f1eb..0a1ec0f3beff 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -140,13 +140,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
140 */ 140 */
141 childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); 141 childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
142 142
143#ifdef CONFIG_MIPS_MT_SMTC
144 /*
145 * SMTC restores TCStatus after Status, and the CU bits
146 * are aliased there.
147 */
148 childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
149#endif
150 clear_tsk_thread_flag(p, TIF_USEDFPU); 143 clear_tsk_thread_flag(p, TIF_USEDFPU);
151 144
152#ifdef CONFIG_MIPS_MT_FPAFF 145#ifdef CONFIG_MIPS_MT_FPAFF
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index abacac7c33ef..81ca3f70fe29 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -28,6 +28,7 @@
28 */ 28 */
29#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS) 29#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
30 30
31#ifndef USE_ALTERNATE_RESUME_IMPL
31/* 32/*
32 * task_struct *resume(task_struct *prev, task_struct *next, 33 * task_struct *resume(task_struct *prev, task_struct *next,
33 * struct thread_info *next_ti, s32 fp_save) 34 * struct thread_info *next_ti, s32 fp_save)
@@ -87,18 +88,6 @@
87 88
88 PTR_ADDU t0, $28, _THREAD_SIZE - 32 89 PTR_ADDU t0, $28, _THREAD_SIZE - 32
89 set_saved_sp t0, t1, t2 90 set_saved_sp t0, t1, t2
90#ifdef CONFIG_MIPS_MT_SMTC
91 /* Read-modify-writes of Status must be atomic on a VPE */
92 mfc0 t2, CP0_TCSTATUS
93 ori t1, t2, TCSTATUS_IXMT
94 mtc0 t1, CP0_TCSTATUS
95 andi t2, t2, TCSTATUS_IXMT
96 _ehb
97 DMT 8 # dmt t0
98 move t1,ra
99 jal mips_ihb
100 move ra,t1
101#endif /* CONFIG_MIPS_MT_SMTC */
102 mfc0 t1, CP0_STATUS /* Do we really need this? */ 91 mfc0 t1, CP0_STATUS /* Do we really need this? */
103 li a3, 0xff01 92 li a3, 0xff01
104 and t1, a3 93 and t1, a3
@@ -107,22 +96,12 @@
107 and a2, a3 96 and a2, a3
108 or a2, t1 97 or a2, t1
109 mtc0 a2, CP0_STATUS 98 mtc0 a2, CP0_STATUS
110#ifdef CONFIG_MIPS_MT_SMTC
111 _ehb
112 andi t0, t0, VPECONTROL_TE
113 beqz t0, 1f
114 emt
1151:
116 mfc0 t1, CP0_TCSTATUS
117 xori t1, t1, TCSTATUS_IXMT
118 or t1, t1, t2
119 mtc0 t1, CP0_TCSTATUS
120 _ehb
121#endif /* CONFIG_MIPS_MT_SMTC */
122 move v0, a0 99 move v0, a0
123 jr ra 100 jr ra
124 END(resume) 101 END(resume)
125 102
103#endif /* USE_ALTERNATE_RESUME_IMPL */
104
126/* 105/*
127 * Save a thread's fp context. 106 * Save a thread's fp context.
128 */ 107 */
@@ -176,19 +155,10 @@ LEAF(_restore_msa)
176#define FPU_DEFAULT 0x00000000 155#define FPU_DEFAULT 0x00000000
177 156
178LEAF(_init_fpu) 157LEAF(_init_fpu)
179#ifdef CONFIG_MIPS_MT_SMTC
180 /* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */
181 mfc0 t0, CP0_TCSTATUS
182 /* Bit position is the same for Status, TCStatus */
183 li t1, ST0_CU1
184 or t0, t1
185 mtc0 t0, CP0_TCSTATUS
186#else /* Normal MIPS CU1 enable */
187 mfc0 t0, CP0_STATUS 158 mfc0 t0, CP0_STATUS
188 li t1, ST0_CU1 159 li t1, ST0_CU1
189 or t0, t1 160 or t0, t1
190 mtc0 t0, CP0_STATUS 161 mtc0 t0, CP0_STATUS
191#endif /* CONFIG_MIPS_MT_SMTC */
192 enable_fpu_hazard 162 enable_fpu_hazard
193 163
194 li t1, FPU_DEFAULT 164 li t1, FPU_DEFAULT
diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c
index 9c1aca00fd54..5a66b975989e 100644
--- a/arch/mips/kernel/rtlx-mt.c
+++ b/arch/mips/kernel/rtlx-mt.c
@@ -36,7 +36,6 @@ static irqreturn_t rtlx_interrupt(int irq, void *dev_id)
36 unsigned long flags; 36 unsigned long flags;
37 int i; 37 int i;
38 38
39 /* Ought not to be strictly necessary for SMTC builds */
40 local_irq_save(flags); 39 local_irq_save(flags);
41 vpeflags = dvpe(); 40 vpeflags = dvpe();
42 set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ); 41 set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ);
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index ea4c2dc31692..df9e2bd9b2c2 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -281,13 +281,6 @@ static void bmips_smp_finish(void)
281} 281}
282 282
283/* 283/*
284 * Runs on CPU0 after all CPUs have been booted
285 */
286static void bmips_cpus_done(void)
287{
288}
289
290/*
291 * BMIPS5000 raceless IPIs 284 * BMIPS5000 raceless IPIs
292 * 285 *
293 * Each CPU has two inbound SW IRQs which are independent of all other CPUs. 286 * Each CPU has two inbound SW IRQs which are independent of all other CPUs.
@@ -434,7 +427,6 @@ struct plat_smp_ops bmips43xx_smp_ops = {
434 .boot_secondary = bmips_boot_secondary, 427 .boot_secondary = bmips_boot_secondary,
435 .smp_finish = bmips_smp_finish, 428 .smp_finish = bmips_smp_finish,
436 .init_secondary = bmips_init_secondary, 429 .init_secondary = bmips_init_secondary,
437 .cpus_done = bmips_cpus_done,
438 .send_ipi_single = bmips43xx_send_ipi_single, 430 .send_ipi_single = bmips43xx_send_ipi_single,
439 .send_ipi_mask = bmips43xx_send_ipi_mask, 431 .send_ipi_mask = bmips43xx_send_ipi_mask,
440#ifdef CONFIG_HOTPLUG_CPU 432#ifdef CONFIG_HOTPLUG_CPU
@@ -449,7 +441,6 @@ struct plat_smp_ops bmips5000_smp_ops = {
449 .boot_secondary = bmips_boot_secondary, 441 .boot_secondary = bmips_boot_secondary,
450 .smp_finish = bmips_smp_finish, 442 .smp_finish = bmips_smp_finish,
451 .init_secondary = bmips_init_secondary, 443 .init_secondary = bmips_init_secondary,
452 .cpus_done = bmips_cpus_done,
453 .send_ipi_single = bmips5000_send_ipi_single, 444 .send_ipi_single = bmips5000_send_ipi_single,
454 .send_ipi_mask = bmips5000_send_ipi_mask, 445 .send_ipi_mask = bmips5000_send_ipi_mask,
455#ifdef CONFIG_HOTPLUG_CPU 446#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index 3ef55fb7ac03..fc8a51553426 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -49,14 +49,11 @@ static void cmp_init_secondary(void)
49 49
50 /* Enable per-cpu interrupts: platform specific */ 50 /* Enable per-cpu interrupts: platform specific */
51 51
52#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) 52#ifdef CONFIG_MIPS_MT_SMP
53 if (cpu_has_mipsmt) 53 if (cpu_has_mipsmt)
54 c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & 54 c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) &
55 TCBIND_CURVPE; 55 TCBIND_CURVPE;
56#endif 56#endif
57#ifdef CONFIG_MIPS_MT_SMTC
58 c->tc_id = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
59#endif
60} 57}
61 58
62static void cmp_smp_finish(void) 59static void cmp_smp_finish(void)
@@ -75,11 +72,6 @@ static void cmp_smp_finish(void)
75 local_irq_enable(); 72 local_irq_enable();
76} 73}
77 74
78static void cmp_cpus_done(void)
79{
80 pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
81}
82
83/* 75/*
84 * Setup the PC, SP, and GP of a secondary processor and start it running 76 * Setup the PC, SP, and GP of a secondary processor and start it running
85 * smp_bootstrap is the place to resume from 77 * smp_bootstrap is the place to resume from
@@ -135,10 +127,6 @@ void __init cmp_smp_setup(void)
135 unsigned int mvpconf0 = read_c0_mvpconf0(); 127 unsigned int mvpconf0 = read_c0_mvpconf0();
136 128
137 nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; 129 nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
138#elif defined(CONFIG_MIPS_MT_SMTC)
139 unsigned int mvpconf0 = read_c0_mvpconf0();
140
141 nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
142#endif 130#endif
143 smp_num_siblings = nvpe; 131 smp_num_siblings = nvpe;
144 } 132 }
@@ -165,7 +153,6 @@ struct plat_smp_ops cmp_smp_ops = {
165 .send_ipi_mask = gic_send_ipi_mask, 153 .send_ipi_mask = gic_send_ipi_mask,
166 .init_secondary = cmp_init_secondary, 154 .init_secondary = cmp_init_secondary,
167 .smp_finish = cmp_smp_finish, 155 .smp_finish = cmp_smp_finish,
168 .cpus_done = cmp_cpus_done,
169 .boot_secondary = cmp_boot_secondary, 156 .boot_secondary = cmp_boot_secondary,
170 .smp_setup = cmp_smp_setup, 157 .smp_setup = cmp_smp_setup,
171 .prepare_cpus = cmp_prepare_cpus, 158 .prepare_cpus = cmp_prepare_cpus,
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 536eec0d21b6..df0598d9bfdd 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -20,104 +20,43 @@
20#include <asm/mips-cpc.h> 20#include <asm/mips-cpc.h>
21#include <asm/mips_mt.h> 21#include <asm/mips_mt.h>
22#include <asm/mipsregs.h> 22#include <asm/mipsregs.h>
23#include <asm/pm-cps.h>
23#include <asm/smp-cps.h> 24#include <asm/smp-cps.h>
24#include <asm/time.h> 25#include <asm/time.h>
25#include <asm/uasm.h> 26#include <asm/uasm.h>
26 27
27static DECLARE_BITMAP(core_power, NR_CPUS); 28static DECLARE_BITMAP(core_power, NR_CPUS);
28 29
29struct boot_config mips_cps_bootcfg; 30struct core_boot_config *mips_cps_core_bootcfg;
30 31
31static void init_core(void) 32static unsigned core_vpe_count(unsigned core)
32{ 33{
33 unsigned int nvpes, t; 34 unsigned cfg;
34 u32 mvpconf0, vpeconf0, vpecontrol, tcstatus, tcbind, status;
35 35
36 if (!cpu_has_mipsmt) 36 if (!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
37 return; 37 return 1;
38
39 /* Enter VPE configuration state */
40 dvpe();
41 set_c0_mvpcontrol(MVPCONTROL_VPC);
42
43 /* Retrieve the count of VPEs in this core */
44 mvpconf0 = read_c0_mvpconf0();
45 nvpes = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
46 smp_num_siblings = nvpes;
47
48 for (t = 1; t < nvpes; t++) {
49 /* Use a 1:1 mapping of TC index to VPE index */
50 settc(t);
51
52 /* Bind 1 TC to this VPE */
53 tcbind = read_tc_c0_tcbind();
54 tcbind &= ~TCBIND_CURVPE;
55 tcbind |= t << TCBIND_CURVPE_SHIFT;
56 write_tc_c0_tcbind(tcbind);
57
58 /* Set exclusive TC, non-active, master */
59 vpeconf0 = read_vpe_c0_vpeconf0();
60 vpeconf0 &= ~(VPECONF0_XTC | VPECONF0_VPA);
61 vpeconf0 |= t << VPECONF0_XTC_SHIFT;
62 vpeconf0 |= VPECONF0_MVP;
63 write_vpe_c0_vpeconf0(vpeconf0);
64
65 /* Declare TC non-active, non-allocatable & interrupt exempt */
66 tcstatus = read_tc_c0_tcstatus();
67 tcstatus &= ~(TCSTATUS_A | TCSTATUS_DA);
68 tcstatus |= TCSTATUS_IXMT;
69 write_tc_c0_tcstatus(tcstatus);
70
71 /* Halt the TC */
72 write_tc_c0_tchalt(TCHALT_H);
73
74 /* Allow only 1 TC to execute */
75 vpecontrol = read_vpe_c0_vpecontrol();
76 vpecontrol &= ~VPECONTROL_TE;
77 write_vpe_c0_vpecontrol(vpecontrol);
78
79 /* Copy (most of) Status from VPE 0 */
80 status = read_c0_status();
81 status &= ~(ST0_IM | ST0_IE | ST0_KSU);
82 status |= ST0_CU0;
83 write_vpe_c0_status(status);
84
85 /* Copy Config from VPE 0 */
86 write_vpe_c0_config(read_c0_config());
87 write_vpe_c0_config7(read_c0_config7());
88
89 /* Ensure no software interrupts are pending */
90 write_vpe_c0_cause(0);
91
92 /* Sync Count */
93 write_vpe_c0_count(read_c0_count());
94 }
95 38
96 /* Leave VPE configuration state */ 39 write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);
97 clear_c0_mvpcontrol(MVPCONTROL_VPC); 40 cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
41 return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
98} 42}
99 43
100static void __init cps_smp_setup(void) 44static void __init cps_smp_setup(void)
101{ 45{
102 unsigned int ncores, nvpes, core_vpes; 46 unsigned int ncores, nvpes, core_vpes;
103 int c, v; 47 int c, v;
104 u32 core_cfg, *entry_code;
105 48
106 /* Detect & record VPE topology */ 49 /* Detect & record VPE topology */
107 ncores = mips_cm_numcores(); 50 ncores = mips_cm_numcores();
108 pr_info("VPE topology "); 51 pr_info("VPE topology ");
109 for (c = nvpes = 0; c < ncores; c++) { 52 for (c = nvpes = 0; c < ncores; c++) {
110 if (cpu_has_mipsmt && config_enabled(CONFIG_MIPS_MT_SMP)) { 53 core_vpes = core_vpe_count(c);
111 write_gcr_cl_other(c << CM_GCR_Cx_OTHER_CORENUM_SHF);
112 core_cfg = read_gcr_co_config();
113 core_vpes = ((core_cfg & CM_GCR_Cx_CONFIG_PVPE_MSK) >>
114 CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
115 } else {
116 core_vpes = 1;
117 }
118
119 pr_cont("%c%u", c ? ',' : '{', core_vpes); 54 pr_cont("%c%u", c ? ',' : '{', core_vpes);
120 55
56 /* Use the number of VPEs in core 0 for smp_num_siblings */
57 if (!c)
58 smp_num_siblings = core_vpes;
59
121 for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) { 60 for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
122 cpu_data[nvpes + v].core = c; 61 cpu_data[nvpes + v].core = c;
123#ifdef CONFIG_MIPS_MT_SMP 62#ifdef CONFIG_MIPS_MT_SMP
@@ -137,19 +76,14 @@ static void __init cps_smp_setup(void)
137 __cpu_logical_map[v] = v; 76 __cpu_logical_map[v] = v;
138 } 77 }
139 78
79 /* Set a coherent default CCA (CWB) */
80 change_c0_config(CONF_CM_CMASK, 0x5);
81
140 /* Core 0 is powered up (we're running on it) */ 82 /* Core 0 is powered up (we're running on it) */
141 bitmap_set(core_power, 0, 1); 83 bitmap_set(core_power, 0, 1);
142 84
143 /* Disable MT - we only want to run 1 TC per VPE */
144 if (cpu_has_mipsmt)
145 dmt();
146
147 /* Initialise core 0 */ 85 /* Initialise core 0 */
148 init_core(); 86 mips_cps_core_init();
149
150 /* Patch the start of mips_cps_core_entry to provide the CM base */
151 entry_code = (u32 *)&mips_cps_core_entry;
152 UASM_i_LA(&entry_code, 3, (long)mips_cm_base);
153 87
154 /* Make core 0 coherent with everything */ 88 /* Make core 0 coherent with everything */
155 write_gcr_cl_coherence(0xff); 89 write_gcr_cl_coherence(0xff);
@@ -157,15 +91,99 @@ static void __init cps_smp_setup(void)
157 91
158static void __init cps_prepare_cpus(unsigned int max_cpus) 92static void __init cps_prepare_cpus(unsigned int max_cpus)
159{ 93{
94 unsigned ncores, core_vpes, c, cca;
95 bool cca_unsuitable;
96 u32 *entry_code;
97
160 mips_mt_set_cpuoptions(); 98 mips_mt_set_cpuoptions();
99
100 /* Detect whether the CCA is unsuited to multi-core SMP */
101 cca = read_c0_config() & CONF_CM_CMASK;
102 switch (cca) {
103 case 0x4: /* CWBE */
104 case 0x5: /* CWB */
105 /* The CCA is coherent, multi-core is fine */
106 cca_unsuitable = false;
107 break;
108
109 default:
110 /* CCA is not coherent, multi-core is not usable */
111 cca_unsuitable = true;
112 }
113
114 /* Warn the user if the CCA prevents multi-core */
115 ncores = mips_cm_numcores();
116 if (cca_unsuitable && ncores > 1) {
117 pr_warn("Using only one core due to unsuitable CCA 0x%x\n",
118 cca);
119
120 for_each_present_cpu(c) {
121 if (cpu_data[c].core)
122 set_cpu_present(c, false);
123 }
124 }
125
126 /*
127 * Patch the start of mips_cps_core_entry to provide:
128 *
129 * v0 = CM base address
130 * s0 = kseg0 CCA
131 */
132 entry_code = (u32 *)&mips_cps_core_entry;
133 UASM_i_LA(&entry_code, 3, (long)mips_cm_base);
134 uasm_i_addiu(&entry_code, 16, 0, cca);
135 dma_cache_wback_inv((unsigned long)&mips_cps_core_entry,
136 (void *)entry_code - (void *)&mips_cps_core_entry);
137
138 /* Allocate core boot configuration structs */
139 mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
140 GFP_KERNEL);
141 if (!mips_cps_core_bootcfg) {
142 pr_err("Failed to allocate boot config for %u cores\n", ncores);
143 goto err_out;
144 }
145
146 /* Allocate VPE boot configuration structs */
147 for (c = 0; c < ncores; c++) {
148 core_vpes = core_vpe_count(c);
149 mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
150 sizeof(*mips_cps_core_bootcfg[c].vpe_config),
151 GFP_KERNEL);
152 if (!mips_cps_core_bootcfg[c].vpe_config) {
153 pr_err("Failed to allocate %u VPE boot configs\n",
154 core_vpes);
155 goto err_out;
156 }
157 }
158
159 /* Mark this CPU as booted */
160 atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask,
161 1 << cpu_vpe_id(&current_cpu_data));
162
163 return;
164err_out:
165 /* Clean up allocations */
166 if (mips_cps_core_bootcfg) {
167 for (c = 0; c < ncores; c++)
168 kfree(mips_cps_core_bootcfg[c].vpe_config);
169 kfree(mips_cps_core_bootcfg);
170 mips_cps_core_bootcfg = NULL;
171 }
172
173 /* Effectively disable SMP by declaring CPUs not present */
174 for_each_possible_cpu(c) {
175 if (c == 0)
176 continue;
177 set_cpu_present(c, false);
178 }
161} 179}
162 180
163static void boot_core(struct boot_config *cfg) 181static void boot_core(unsigned core)
164{ 182{
165 u32 access; 183 u32 access;
166 184
167 /* Select the appropriate core */ 185 /* Select the appropriate core */
168 write_gcr_cl_other(cfg->core << CM_GCR_Cx_OTHER_CORENUM_SHF); 186 write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);
169 187
170 /* Set its reset vector */ 188 /* Set its reset vector */
171 write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry)); 189 write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));
@@ -175,104 +193,74 @@ static void boot_core(struct boot_config *cfg)
175 193
176 /* Ensure the core can access the GCRs */ 194 /* Ensure the core can access the GCRs */
177 access = read_gcr_access(); 195 access = read_gcr_access();
178 access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + cfg->core); 196 access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core);
179 write_gcr_access(access); 197 write_gcr_access(access);
180 198
181 /* Copy cfg */
182 mips_cps_bootcfg = *cfg;
183
184 if (mips_cpc_present()) { 199 if (mips_cpc_present()) {
185 /* Select the appropriate core */
186 write_cpc_cl_other(cfg->core << CPC_Cx_OTHER_CORENUM_SHF);
187
188 /* Reset the core */ 200 /* Reset the core */
201 mips_cpc_lock_other(core);
189 write_cpc_co_cmd(CPC_Cx_CMD_RESET); 202 write_cpc_co_cmd(CPC_Cx_CMD_RESET);
203 mips_cpc_unlock_other();
190 } else { 204 } else {
191 /* Take the core out of reset */ 205 /* Take the core out of reset */
192 write_gcr_co_reset_release(0); 206 write_gcr_co_reset_release(0);
193 } 207 }
194 208
195 /* The core is now powered up */ 209 /* The core is now powered up */
196 bitmap_set(core_power, cfg->core, 1); 210 bitmap_set(core_power, core, 1);
197} 211}
198 212
199static void boot_vpe(void *info) 213static void remote_vpe_boot(void *dummy)
200{ 214{
201 struct boot_config *cfg = info; 215 mips_cps_boot_vpes();
202 u32 tcstatus, vpeconf0;
203
204 /* Enter VPE configuration state */
205 dvpe();
206 set_c0_mvpcontrol(MVPCONTROL_VPC);
207
208 settc(cfg->vpe);
209
210 /* Set the TC restart PC */
211 write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
212
213 /* Activate the TC, allow interrupts */
214 tcstatus = read_tc_c0_tcstatus();
215 tcstatus &= ~TCSTATUS_IXMT;
216 tcstatus |= TCSTATUS_A;
217 write_tc_c0_tcstatus(tcstatus);
218
219 /* Clear the TC halt bit */
220 write_tc_c0_tchalt(0);
221
222 /* Activate the VPE */
223 vpeconf0 = read_vpe_c0_vpeconf0();
224 vpeconf0 |= VPECONF0_VPA;
225 write_vpe_c0_vpeconf0(vpeconf0);
226
227 /* Set the stack & global pointer registers */
228 write_tc_gpr_sp(cfg->sp);
229 write_tc_gpr_gp(cfg->gp);
230
231 /* Leave VPE configuration state */
232 clear_c0_mvpcontrol(MVPCONTROL_VPC);
233
234 /* Enable other VPEs to execute */
235 evpe(EVPE_ENABLE);
236} 216}
237 217
238static void cps_boot_secondary(int cpu, struct task_struct *idle) 218static void cps_boot_secondary(int cpu, struct task_struct *idle)
239{ 219{
240 struct boot_config cfg; 220 unsigned core = cpu_data[cpu].core;
221 unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
222 struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
223 struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
241 unsigned int remote; 224 unsigned int remote;
242 int err; 225 int err;
243 226
244 cfg.core = cpu_data[cpu].core; 227 vpe_cfg->pc = (unsigned long)&smp_bootstrap;
245 cfg.vpe = cpu_vpe_id(&cpu_data[cpu]); 228 vpe_cfg->sp = __KSTK_TOS(idle);
246 cfg.pc = (unsigned long)&smp_bootstrap; 229 vpe_cfg->gp = (unsigned long)task_thread_info(idle);
247 cfg.sp = __KSTK_TOS(idle); 230
248 cfg.gp = (unsigned long)task_thread_info(idle); 231 atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);
249 232
250 if (!test_bit(cfg.core, core_power)) { 233 preempt_disable();
234
235 if (!test_bit(core, core_power)) {
251 /* Boot a VPE on a powered down core */ 236 /* Boot a VPE on a powered down core */
252 boot_core(&cfg); 237 boot_core(core);
253 return; 238 goto out;
254 } 239 }
255 240
256 if (cfg.core != current_cpu_data.core) { 241 if (core != current_cpu_data.core) {
257 /* Boot a VPE on another powered up core */ 242 /* Boot a VPE on another powered up core */
258 for (remote = 0; remote < NR_CPUS; remote++) { 243 for (remote = 0; remote < NR_CPUS; remote++) {
259 if (cpu_data[remote].core != cfg.core) 244 if (cpu_data[remote].core != core)
260 continue; 245 continue;
261 if (cpu_online(remote)) 246 if (cpu_online(remote))
262 break; 247 break;
263 } 248 }
264 BUG_ON(remote >= NR_CPUS); 249 BUG_ON(remote >= NR_CPUS);
265 250
266 err = smp_call_function_single(remote, boot_vpe, &cfg, 1); 251 err = smp_call_function_single(remote, remote_vpe_boot,
252 NULL, 1);
267 if (err) 253 if (err)
268 panic("Failed to call remote CPU\n"); 254 panic("Failed to call remote CPU\n");
269 return; 255 goto out;
270 } 256 }
271 257
272 BUG_ON(!cpu_has_mipsmt); 258 BUG_ON(!cpu_has_mipsmt);
273 259
274 /* Boot a VPE on this core */ 260 /* Boot a VPE on this core */
275 boot_vpe(&cfg); 261 mips_cps_boot_vpes();
262out:
263 preempt_enable();
276} 264}
277 265
278static void cps_init_secondary(void) 266static void cps_init_secondary(void)
@@ -281,10 +269,6 @@ static void cps_init_secondary(void)
281 if (cpu_has_mipsmt) 269 if (cpu_has_mipsmt)
282 dmt(); 270 dmt();
283 271
284 /* TODO: revisit this assumption once hotplug is implemented */
285 if (cpu_vpe_id(&current_cpu_data) == 0)
286 init_core();
287
288 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | 272 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
289 STATUSF_IP6 | STATUSF_IP7); 273 STATUSF_IP6 | STATUSF_IP7);
290} 274}
@@ -302,10 +286,148 @@ static void cps_smp_finish(void)
302 local_irq_enable(); 286 local_irq_enable();
303} 287}
304 288
305static void cps_cpus_done(void) 289#ifdef CONFIG_HOTPLUG_CPU
290
291static int cps_cpu_disable(void)
292{
293 unsigned cpu = smp_processor_id();
294 struct core_boot_config *core_cfg;
295
296 if (!cpu)
297 return -EBUSY;
298
299 if (!cps_pm_support_state(CPS_PM_POWER_GATED))
300 return -EINVAL;
301
302 core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
303 atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
304 smp_mb__after_atomic_dec();
305 set_cpu_online(cpu, false);
306 cpu_clear(cpu, cpu_callin_map);
307
308 return 0;
309}
310
311static DECLARE_COMPLETION(cpu_death_chosen);
312static unsigned cpu_death_sibling;
313static enum {
314 CPU_DEATH_HALT,
315 CPU_DEATH_POWER,
316} cpu_death;
317
318void play_dead(void)
319{
320 unsigned cpu, core;
321
322 local_irq_disable();
323 idle_task_exit();
324 cpu = smp_processor_id();
325 cpu_death = CPU_DEATH_POWER;
326
327 if (cpu_has_mipsmt) {
328 core = cpu_data[cpu].core;
329
330 /* Look for another online VPE within the core */
331 for_each_online_cpu(cpu_death_sibling) {
332 if (cpu_data[cpu_death_sibling].core != core)
333 continue;
334
335 /*
336 * There is an online VPE within the core. Just halt
337 * this TC and leave the core alone.
338 */
339 cpu_death = CPU_DEATH_HALT;
340 break;
341 }
342 }
343
344 /* This CPU has chosen its way out */
345 complete(&cpu_death_chosen);
346
347 if (cpu_death == CPU_DEATH_HALT) {
348 /* Halt this TC */
349 write_c0_tchalt(TCHALT_H);
350 instruction_hazard();
351 } else {
352 /* Power down the core */
353 cps_pm_enter_state(CPS_PM_POWER_GATED);
354 }
355
356 /* This should never be reached */
357 panic("Failed to offline CPU %u", cpu);
358}
359
360static void wait_for_sibling_halt(void *ptr_cpu)
306{ 361{
362 unsigned cpu = (unsigned)ptr_cpu;
363 unsigned vpe_id = cpu_data[cpu].vpe_id;
364 unsigned halted;
365 unsigned long flags;
366
367 do {
368 local_irq_save(flags);
369 settc(vpe_id);
370 halted = read_tc_c0_tchalt();
371 local_irq_restore(flags);
372 } while (!(halted & TCHALT_H));
373}
374
375static void cps_cpu_die(unsigned int cpu)
376{
377 unsigned core = cpu_data[cpu].core;
378 unsigned stat;
379 int err;
380
381 /* Wait for the cpu to choose its way out */
382 if (!wait_for_completion_timeout(&cpu_death_chosen,
383 msecs_to_jiffies(5000))) {
384 pr_err("CPU%u: didn't offline\n", cpu);
385 return;
386 }
387
388 /*
389 * Now wait for the CPU to actually offline. Without doing this that
390 * offlining may race with one or more of:
391 *
392 * - Onlining the CPU again.
393 * - Powering down the core if another VPE within it is offlined.
394 * - A sibling VPE entering a non-coherent state.
395 *
396 * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
397 * with which we could race, so do nothing.
398 */
399 if (cpu_death == CPU_DEATH_POWER) {
400 /*
401 * Wait for the core to enter a powered down or clock gated
402 * state, the latter happening when a JTAG probe is connected
403 * in which case the CPC will refuse to power down the core.
404 */
405 do {
406 mips_cpc_lock_other(core);
407 stat = read_cpc_co_stat_conf();
408 stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
409 mips_cpc_unlock_other();
410 } while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
411 stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
412 stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);
413
414 /* Indicate the core is powered off */
415 bitmap_clear(core_power, core, 1);
416 } else if (cpu_has_mipsmt) {
417 /*
418 * Have a CPU with access to the offlined CPUs registers wait
419 * for its TC to halt.
420 */
421 err = smp_call_function_single(cpu_death_sibling,
422 wait_for_sibling_halt,
423 (void *)cpu, 1);
424 if (err)
425 panic("Failed to call remote sibling CPU\n");
426 }
307} 427}
308 428
429#endif /* CONFIG_HOTPLUG_CPU */
430
309static struct plat_smp_ops cps_smp_ops = { 431static struct plat_smp_ops cps_smp_ops = {
310 .smp_setup = cps_smp_setup, 432 .smp_setup = cps_smp_setup,
311 .prepare_cpus = cps_prepare_cpus, 433 .prepare_cpus = cps_prepare_cpus,
@@ -314,9 +436,18 @@ static struct plat_smp_ops cps_smp_ops = {
314 .smp_finish = cps_smp_finish, 436 .smp_finish = cps_smp_finish,
315 .send_ipi_single = gic_send_ipi_single, 437 .send_ipi_single = gic_send_ipi_single,
316 .send_ipi_mask = gic_send_ipi_mask, 438 .send_ipi_mask = gic_send_ipi_mask,
317 .cpus_done = cps_cpus_done, 439#ifdef CONFIG_HOTPLUG_CPU
440 .cpu_disable = cps_cpu_disable,
441 .cpu_die = cps_cpu_die,
442#endif
318}; 443};
319 444
445bool mips_cps_smp_in_use(void)
446{
447 extern struct plat_smp_ops *mp_ops;
448 return mp_ops == &cps_smp_ops;
449}
450
320int register_cps_smp_ops(void) 451int register_cps_smp_ops(void)
321{ 452{
322 if (!mips_cm_present()) { 453 if (!mips_cm_present()) {
diff --git a/arch/mips/kernel/smp-gic.c b/arch/mips/kernel/smp-gic.c
index 3bb1f92ab525..3b21a96d1ccb 100644
--- a/arch/mips/kernel/smp-gic.c
+++ b/arch/mips/kernel/smp-gic.c
@@ -15,12 +15,14 @@
15#include <linux/printk.h> 15#include <linux/printk.h>
16 16
17#include <asm/gic.h> 17#include <asm/gic.h>
18#include <asm/mips-cpc.h>
18#include <asm/smp-ops.h> 19#include <asm/smp-ops.h>
19 20
20void gic_send_ipi_single(int cpu, unsigned int action) 21void gic_send_ipi_single(int cpu, unsigned int action)
21{ 22{
22 unsigned long flags; 23 unsigned long flags;
23 unsigned int intr; 24 unsigned int intr;
25 unsigned int core = cpu_data[cpu].core;
24 26
25 pr_debug("CPU%d: %s cpu %d action %u status %08x\n", 27 pr_debug("CPU%d: %s cpu %d action %u status %08x\n",
26 smp_processor_id(), __func__, cpu, action, read_c0_status()); 28 smp_processor_id(), __func__, cpu, action, read_c0_status());
@@ -41,6 +43,15 @@ void gic_send_ipi_single(int cpu, unsigned int action)
41 } 43 }
42 44
43 gic_send_ipi(intr); 45 gic_send_ipi(intr);
46
47 if (mips_cpc_present() && (core != current_cpu_data.core)) {
48 while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
49 mips_cpc_lock_other(core);
50 write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
51 mips_cpc_unlock_other();
52 }
53 }
54
44 local_irq_restore(flags); 55 local_irq_restore(flags);
45} 56}
46 57
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index f8e13149604d..3babf6e4f894 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -183,10 +183,6 @@ static void vsmp_smp_finish(void)
183 local_irq_enable(); 183 local_irq_enable();
184} 184}
185 185
186static void vsmp_cpus_done(void)
187{
188}
189
190/* 186/*
191 * Setup the PC, SP, and GP of a secondary processor and start it 187 * Setup the PC, SP, and GP of a secondary processor and start it
192 * running! 188 * running!
@@ -287,7 +283,6 @@ struct plat_smp_ops vsmp_smp_ops = {
287 .send_ipi_mask = vsmp_send_ipi_mask, 283 .send_ipi_mask = vsmp_send_ipi_mask,
288 .init_secondary = vsmp_init_secondary, 284 .init_secondary = vsmp_init_secondary,
289 .smp_finish = vsmp_smp_finish, 285 .smp_finish = vsmp_smp_finish,
290 .cpus_done = vsmp_cpus_done,
291 .boot_secondary = vsmp_boot_secondary, 286 .boot_secondary = vsmp_boot_secondary,
292 .smp_setup = vsmp_smp_setup, 287 .smp_setup = vsmp_smp_setup,
293 .prepare_cpus = vsmp_prepare_cpus, 288 .prepare_cpus = vsmp_prepare_cpus,
diff --git a/arch/mips/kernel/smp-up.c b/arch/mips/kernel/smp-up.c
index 7fde3e4d978f..17878d71ef2b 100644
--- a/arch/mips/kernel/smp-up.c
+++ b/arch/mips/kernel/smp-up.c
@@ -36,11 +36,6 @@ static void up_smp_finish(void)
36{ 36{
37} 37}
38 38
39/* Hook for after all CPUs are online */
40static void up_cpus_done(void)
41{
42}
43
44/* 39/*
45 * Firmware CPU startup hook 40 * Firmware CPU startup hook
46 */ 41 */
@@ -73,7 +68,6 @@ struct plat_smp_ops up_smp_ops = {
73 .send_ipi_mask = up_send_ipi_mask, 68 .send_ipi_mask = up_send_ipi_mask,
74 .init_secondary = up_init_secondary, 69 .init_secondary = up_init_secondary,
75 .smp_finish = up_smp_finish, 70 .smp_finish = up_smp_finish,
76 .cpus_done = up_cpus_done,
77 .boot_secondary = up_boot_secondary, 71 .boot_secondary = up_boot_secondary,
78 .smp_setup = up_smp_setup, 72 .smp_setup = up_smp_setup,
79 .prepare_cpus = up_prepare_cpus, 73 .prepare_cpus = up_prepare_cpus,
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 0a022ee33b2a..9bad52ede903 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -43,10 +43,6 @@
43#include <asm/time.h> 43#include <asm/time.h>
44#include <asm/setup.h> 44#include <asm/setup.h>
45 45
46#ifdef CONFIG_MIPS_MT_SMTC
47#include <asm/mipsmtregs.h>
48#endif /* CONFIG_MIPS_MT_SMTC */
49
50volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ 46volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
51 47
52int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ 48int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
@@ -66,6 +62,8 @@ EXPORT_SYMBOL(cpu_sibling_map);
66/* representing cpus for which sibling maps can be computed */ 62/* representing cpus for which sibling maps can be computed */
67static cpumask_t cpu_sibling_setup_map; 63static cpumask_t cpu_sibling_setup_map;
68 64
65cpumask_t cpu_coherent_mask;
66
69static inline void set_cpu_sibling_map(int cpu) 67static inline void set_cpu_sibling_map(int cpu)
70{ 68{
71 int i; 69 int i;
@@ -102,12 +100,6 @@ asmlinkage void start_secondary(void)
102{ 100{
103 unsigned int cpu; 101 unsigned int cpu;
104 102
105#ifdef CONFIG_MIPS_MT_SMTC
106 /* Only do cpu_probe for first TC of CPU */
107 if ((read_c0_tcbind() & TCBIND_CURTC) != 0)
108 __cpu_name[smp_processor_id()] = __cpu_name[0];
109 else
110#endif /* CONFIG_MIPS_MT_SMTC */
111 cpu_probe(); 103 cpu_probe();
112 cpu_report(); 104 cpu_report();
113 per_cpu_trap_init(false); 105 per_cpu_trap_init(false);
@@ -124,6 +116,7 @@ asmlinkage void start_secondary(void)
124 cpu = smp_processor_id(); 116 cpu = smp_processor_id();
125 cpu_data[cpu].udelay_val = loops_per_jiffy; 117 cpu_data[cpu].udelay_val = loops_per_jiffy;
126 118
119 cpu_set(cpu, cpu_coherent_mask);
127 notify_cpu_starting(cpu); 120 notify_cpu_starting(cpu);
128 121
129 set_cpu_online(cpu, true); 122 set_cpu_online(cpu, true);
@@ -173,7 +166,6 @@ void smp_send_stop(void)
173 166
174void __init smp_cpus_done(unsigned int max_cpus) 167void __init smp_cpus_done(unsigned int max_cpus)
175{ 168{
176 mp_ops->cpus_done();
177} 169}
178 170
179/* called from main before smp_init() */ 171/* called from main before smp_init() */
@@ -186,6 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
186#ifndef CONFIG_HOTPLUG_CPU 178#ifndef CONFIG_HOTPLUG_CPU
187 init_cpu_present(cpu_possible_mask); 179 init_cpu_present(cpu_possible_mask);
188#endif 180#endif
181 cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
189} 182}
190 183
191/* preload SMP state for boot cpu */ 184/* preload SMP state for boot cpu */
@@ -238,13 +231,10 @@ static void flush_tlb_mm_ipi(void *mm)
238 * o collapses to normal function call on UP kernels 231 * o collapses to normal function call on UP kernels
239 * o collapses to normal function call on systems with a single shared 232 * o collapses to normal function call on systems with a single shared
240 * primary cache. 233 * primary cache.
241 * o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
242 */ 234 */
243static inline void smp_on_other_tlbs(void (*func) (void *info), void *info) 235static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
244{ 236{
245#ifndef CONFIG_MIPS_MT_SMTC
246 smp_call_function(func, info, 1); 237 smp_call_function(func, info, 1);
247#endif
248} 238}
249 239
250static inline void smp_on_each_tlb(void (*func) (void *info), void *info) 240static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
@@ -404,3 +394,46 @@ void dump_send_ipi(void (*dump_ipi_callback)(void *))
404} 394}
405EXPORT_SYMBOL(dump_send_ipi); 395EXPORT_SYMBOL(dump_send_ipi);
406#endif 396#endif
397
398#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
399
400static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
401static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);
402
403void tick_broadcast(const struct cpumask *mask)
404{
405 atomic_t *count;
406 struct call_single_data *csd;
407 int cpu;
408
409 for_each_cpu(cpu, mask) {
410 count = &per_cpu(tick_broadcast_count, cpu);
411 csd = &per_cpu(tick_broadcast_csd, cpu);
412
413 if (atomic_inc_return(count) == 1)
414 smp_call_function_single_async(cpu, csd);
415 }
416}
417
418static void tick_broadcast_callee(void *info)
419{
420 int cpu = smp_processor_id();
421 tick_receive_broadcast();
422 atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
423}
424
425static int __init tick_broadcast_init(void)
426{
427 struct call_single_data *csd;
428 int cpu;
429
430 for (cpu = 0; cpu < NR_CPUS; cpu++) {
431 csd = &per_cpu(tick_broadcast_csd, cpu);
432 csd->func = tick_broadcast_callee;
433 }
434
435 return 0;
436}
437early_initcall(tick_broadcast_init);
438
439#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */
diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S
deleted file mode 100644
index 2866863a39df..000000000000
--- a/arch/mips/kernel/smtc-asm.S
+++ /dev/null
@@ -1,133 +0,0 @@
1/*
2 * Assembly Language Functions for MIPS MT SMTC support
3 */
4
5/*
6 * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. */
7
8#include <asm/regdef.h>
9#include <asm/asmmacro.h>
10#include <asm/stackframe.h>
11#include <asm/irqflags.h>
12
13/*
14 * "Software Interrupt" linkage.
15 *
16 * This is invoked when an "Interrupt" is sent from one TC to another,
17 * where the TC to be interrupted is halted, has it's Restart address
18 * and Status values saved by the "remote control" thread, then modified
19 * to cause execution to begin here, in kenel mode. This code then
20 * disguises the TC state as that of an exception and transfers
21 * control to the general exception or vectored interrupt handler.
22 */
23 .set noreorder
24
25/*
26The __smtc_ipi_vector would use k0 and k1 as temporaries and
271) Set EXL (this is per-VPE, so this can't be done by proxy!)
282) Restore the K/CU and IXMT bits to the pre "exception" state
29 (EXL means no interrupts and access to the kernel map).
303) Set EPC to be the saved value of TCRestart.
314) Jump to the exception handler entry point passed by the sender.
32
33CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED??
34*/
35
36/*
37 * Reviled and slandered vision: Set EXL and restore K/CU/IXMT
38 * state of pre-halt thread, then save everything and call
39 * thought some function pointer to imaginary_exception, which
40 * will parse a register value or memory message queue to
41 * deliver things like interprocessor interrupts. On return
42 * from that function, jump to the global ret_from_irq code
43 * to invoke the scheduler and return as appropriate.
44 */
45
46#define PT_PADSLOT4 (PT_R0-8)
47#define PT_PADSLOT5 (PT_R0-4)
48
49 .text
50 .align 5
51FEXPORT(__smtc_ipi_vector)
52#ifdef CONFIG_CPU_MICROMIPS
53 nop
54#endif
55 .set noat
56 /* Disable thread scheduling to make Status update atomic */
57 DMT 27 # dmt k1
58 _ehb
59 /* Set EXL */
60 mfc0 k0,CP0_STATUS
61 ori k0,k0,ST0_EXL
62 mtc0 k0,CP0_STATUS
63 _ehb
64 /* Thread scheduling now inhibited by EXL. Restore TE state. */
65 andi k1,k1,VPECONTROL_TE
66 beqz k1,1f
67 emt
681:
69 /*
70 * The IPI sender has put some information on the anticipated
71 * kernel stack frame. If we were in user mode, this will be
72 * built above the saved kernel SP. If we were already in the
73 * kernel, it will be built above the current CPU SP.
74 *
75 * Were we in kernel mode, as indicated by CU0?
76 */
77 sll k1,k0,3
78 .set noreorder
79 bltz k1,2f
80 move k1,sp
81 .set reorder
82 /*
83 * If previously in user mode, set CU0 and use kernel stack.
84 */
85 li k1,ST0_CU0
86 or k1,k1,k0
87 mtc0 k1,CP0_STATUS
88 _ehb
89 get_saved_sp
90 /* Interrupting TC will have pre-set values in slots in the new frame */
912: subu k1,k1,PT_SIZE
92 /* Load TCStatus Value */
93 lw k0,PT_TCSTATUS(k1)
94 /* Write it to TCStatus to restore CU/KSU/IXMT state */
95 mtc0 k0,$2,1
96 _ehb
97 lw k0,PT_EPC(k1)
98 mtc0 k0,CP0_EPC
99 /* Save all will redundantly recompute the SP, but use it for now */
100 SAVE_ALL
101 CLI
102 TRACE_IRQS_OFF
103 /* Function to be invoked passed stack pad slot 5 */
104 lw t0,PT_PADSLOT5(sp)
105 /* Argument from sender passed in stack pad slot 4 */
106 lw a0,PT_PADSLOT4(sp)
107 LONG_L s0, TI_REGS($28)
108 LONG_S sp, TI_REGS($28)
109 PTR_LA ra, ret_from_irq
110 jr t0
111
112/*
113 * Called from idle loop to provoke processing of queued IPIs
114 * First IPI message in queue passed as argument.
115 */
116
117LEAF(self_ipi)
118 /* Before anything else, block interrupts */
119 mfc0 t0,CP0_TCSTATUS
120 ori t1,t0,TCSTATUS_IXMT
121 mtc0 t1,CP0_TCSTATUS
122 _ehb
123 /* We know we're in kernel mode, so prepare stack frame */
124 subu t1,sp,PT_SIZE
125 sw ra,PT_EPC(t1)
126 sw a0,PT_PADSLOT4(t1)
127 la t2,ipi_decode
128 sw t2,PT_PADSLOT5(t1)
129 /* Save pre-disable value of TCStatus */
130 sw t0,PT_TCSTATUS(t1)
131 j __smtc_ipi_vector
132 nop
133END(self_ipi)
diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c
deleted file mode 100644
index 38635a996cbf..000000000000
--- a/arch/mips/kernel/smtc-proc.c
+++ /dev/null
@@ -1,102 +0,0 @@
1/*
2 * /proc hooks for SMTC kernel
3 * Copyright (C) 2005 Mips Technologies, Inc
4 */
5
6#include <linux/kernel.h>
7#include <linux/sched.h>
8#include <linux/cpumask.h>
9#include <linux/interrupt.h>
10
11#include <asm/cpu.h>
12#include <asm/processor.h>
13#include <linux/atomic.h>
14#include <asm/hardirq.h>
15#include <asm/mmu_context.h>
16#include <asm/mipsregs.h>
17#include <asm/cacheflush.h>
18#include <linux/proc_fs.h>
19#include <linux/seq_file.h>
20
21#include <asm/smtc_proc.h>
22
23/*
24 * /proc diagnostic and statistics hooks
25 */
26
27/*
28 * Statistics gathered
29 */
30unsigned long selfipis[NR_CPUS];
31
32struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
33
34atomic_t smtc_fpu_recoveries;
35
36static int smtc_proc_show(struct seq_file *m, void *v)
37{
38 int i;
39 extern unsigned long ebase;
40
41 seq_printf(m, "SMTC Status Word: 0x%08x\n", smtc_status);
42 seq_printf(m, "Config7: 0x%08x\n", read_c0_config7());
43 seq_printf(m, "EBASE: 0x%08lx\n", ebase);
44 seq_printf(m, "Counter Interrupts taken per CPU (TC)\n");
45 for (i=0; i < NR_CPUS; i++)
46 seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].timerints);
47 seq_printf(m, "Self-IPIs by CPU:\n");
48 for(i = 0; i < NR_CPUS; i++)
49 seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
50 seq_printf(m, "%d Recoveries of \"stolen\" FPU\n",
51 atomic_read(&smtc_fpu_recoveries));
52 return 0;
53}
54
55static int smtc_proc_open(struct inode *inode, struct file *file)
56{
57 return single_open(file, smtc_proc_show, NULL);
58}
59
60static const struct file_operations smtc_proc_fops = {
61 .open = smtc_proc_open,
62 .read = seq_read,
63 .llseek = seq_lseek,
64 .release = single_release,
65};
66
67void init_smtc_stats(void)
68{
69 int i;
70
71 for (i=0; i<NR_CPUS; i++) {
72 smtc_cpu_stats[i].timerints = 0;
73 smtc_cpu_stats[i].selfipis = 0;
74 }
75
76 atomic_set(&smtc_fpu_recoveries, 0);
77
78 proc_create("smtc", 0444, NULL, &smtc_proc_fops);
79}
80
81static int proc_cpuinfo_chain_call(struct notifier_block *nfb,
82 unsigned long action_unused, void *data)
83{
84 struct proc_cpuinfo_notifier_args *pcn = data;
85 struct seq_file *m = pcn->m;
86 unsigned long n = pcn->n;
87
88 if (!cpu_has_mipsmt)
89 return NOTIFY_OK;
90
91 seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
92 seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id);
93
94 return NOTIFY_OK;
95}
96
97static int __init proc_cpuinfo_notifier_init(void)
98{
99 return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0);
100}
101
102subsys_initcall(proc_cpuinfo_notifier_init);
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
deleted file mode 100644
index c1681d65dd5c..000000000000
--- a/arch/mips/kernel/smtc.c
+++ /dev/null
@@ -1,1528 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version 2
5 * of the License, or (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
15 *
16 * Copyright (C) 2004 Mips Technologies, Inc
17 * Copyright (C) 2008 Kevin D. Kissell
18 */
19
20#include <linux/clockchips.h>
21#include <linux/kernel.h>
22#include <linux/sched.h>
23#include <linux/smp.h>
24#include <linux/cpumask.h>
25#include <linux/interrupt.h>
26#include <linux/kernel_stat.h>
27#include <linux/module.h>
28#include <linux/ftrace.h>
29#include <linux/slab.h>
30
31#include <asm/cpu.h>
32#include <asm/processor.h>
33#include <linux/atomic.h>
34#include <asm/hardirq.h>
35#include <asm/hazards.h>
36#include <asm/irq.h>
37#include <asm/idle.h>
38#include <asm/mmu_context.h>
39#include <asm/mipsregs.h>
40#include <asm/cacheflush.h>
41#include <asm/time.h>
42#include <asm/addrspace.h>
43#include <asm/smtc.h>
44#include <asm/smtc_proc.h>
45#include <asm/setup.h>
46
47/*
48 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
49 * in do_IRQ. These are passed in setup_irq_smtc() and stored
50 * in this table.
51 */
52unsigned long irq_hwmask[NR_IRQS];
53
54#define LOCK_MT_PRA() \
55 local_irq_save(flags); \
56 mtflags = dmt()
57
58#define UNLOCK_MT_PRA() \
59 emt(mtflags); \
60 local_irq_restore(flags)
61
62#define LOCK_CORE_PRA() \
63 local_irq_save(flags); \
64 mtflags = dvpe()
65
66#define UNLOCK_CORE_PRA() \
67 evpe(mtflags); \
68 local_irq_restore(flags)
69
70/*
71 * Data structures purely associated with SMTC parallelism
72 */
73
74
75/*
76 * Table for tracking ASIDs whose lifetime is prolonged.
77 */
78
79asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
80
81/*
82 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
83 */
84
85#define IPIBUF_PER_CPU 4
86
87struct smtc_ipi_q IPIQ[NR_CPUS];
88static struct smtc_ipi_q freeIPIq;
89
90
91/*
92 * Number of FPU contexts for each VPE
93 */
94
95static int smtc_nconf1[MAX_SMTC_VPES];
96
97
98/* Forward declarations */
99
100void ipi_decode(struct smtc_ipi *);
101static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
102static void setup_cross_vpe_interrupts(unsigned int nvpe);
103void init_smtc_stats(void);
104
105/* Global SMTC Status */
106
107unsigned int smtc_status;
108
109/* Boot command line configuration overrides */
110
111static int vpe0limit;
112static int ipibuffers;
113static int nostlb;
114static int asidmask;
115unsigned long smtc_asid_mask = 0xff;
116
117static int __init vpe0tcs(char *str)
118{
119 get_option(&str, &vpe0limit);
120
121 return 1;
122}
123
124static int __init ipibufs(char *str)
125{
126 get_option(&str, &ipibuffers);
127 return 1;
128}
129
130static int __init stlb_disable(char *s)
131{
132 nostlb = 1;
133 return 1;
134}
135
136static int __init asidmask_set(char *str)
137{
138 get_option(&str, &asidmask);
139 switch (asidmask) {
140 case 0x1:
141 case 0x3:
142 case 0x7:
143 case 0xf:
144 case 0x1f:
145 case 0x3f:
146 case 0x7f:
147 case 0xff:
148 smtc_asid_mask = (unsigned long)asidmask;
149 break;
150 default:
151 printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
152 }
153 return 1;
154}
155
156__setup("vpe0tcs=", vpe0tcs);
157__setup("ipibufs=", ipibufs);
158__setup("nostlb", stlb_disable);
159__setup("asidmask=", asidmask_set);
160
161#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
162
163static int hang_trig;
164
165static int __init hangtrig_enable(char *s)
166{
167 hang_trig = 1;
168 return 1;
169}
170
171
172__setup("hangtrig", hangtrig_enable);
173
174#define DEFAULT_BLOCKED_IPI_LIMIT 32
175
176static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;
177
178static int __init tintq(char *str)
179{
180 get_option(&str, &timerq_limit);
181 return 1;
182}
183
184__setup("tintq=", tintq);
185
186static int imstuckcount[MAX_SMTC_VPES][8];
187/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
188static int vpemask[MAX_SMTC_VPES][8] = {
189 {0, 0, 1, 0, 0, 0, 0, 1},
190 {0, 0, 0, 0, 0, 0, 0, 1}
191};
192int tcnoprog[NR_CPUS];
193static atomic_t idle_hook_initialized = ATOMIC_INIT(0);
194static int clock_hang_reported[NR_CPUS];
195
196#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
197
198/*
199 * Configure shared TLB - VPC configuration bit must be set by caller
200 */
201
202static void smtc_configure_tlb(void)
203{
204 int i, tlbsiz, vpes;
205 unsigned long mvpconf0;
206 unsigned long config1val;
207
208 /* Set up ASID preservation table */
209 for (vpes=0; vpes<MAX_SMTC_TLBS; vpes++) {
210 for(i = 0; i < MAX_SMTC_ASIDS; i++) {
211 smtc_live_asid[vpes][i] = 0;
212 }
213 }
214 mvpconf0 = read_c0_mvpconf0();
215
216 if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
217 >> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
218 /* If we have multiple VPEs, try to share the TLB */
219 if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
220 /*
221 * If TLB sizing is programmable, shared TLB
222 * size is the total available complement.
223 * Otherwise, we have to take the sum of all
224 * static VPE TLB entries.
225 */
226 if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
227 >> MVPCONF0_PTLBE_SHIFT)) == 0) {
228 /*
229 * If there's more than one VPE, there had better
230 * be more than one TC, because we need one to bind
231 * to each VPE in turn to be able to read
232 * its configuration state!
233 */
234 settc(1);
235 /* Stop the TC from doing anything foolish */
236 write_tc_c0_tchalt(TCHALT_H);
237 mips_ihb();
238 /* No need to un-Halt - that happens later anyway */
239 for (i=0; i < vpes; i++) {
240 write_tc_c0_tcbind(i);
241 /*
242 * To be 100% sure we're really getting the right
243 * information, we exit the configuration state
244 * and do an IHB after each rebinding.
245 */
246 write_c0_mvpcontrol(
247 read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
248 mips_ihb();
249 /*
250 * Only count if the MMU Type indicated is TLB
251 */
252 if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
253 config1val = read_vpe_c0_config1();
254 tlbsiz += ((config1val >> 25) & 0x3f) + 1;
255 }
256
257 /* Put core back in configuration state */
258 write_c0_mvpcontrol(
259 read_c0_mvpcontrol() | MVPCONTROL_VPC );
260 mips_ihb();
261 }
262 }
263 write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
264 ehb();
265
266 /*
267 * Setup kernel data structures to use software total,
268 * rather than read the per-VPE Config1 value. The values
269 * for "CPU 0" gets copied to all the other CPUs as part
270 * of their initialization in smtc_cpu_setup().
271 */
272
273 /* MIPS32 limits TLB indices to 64 */
274 if (tlbsiz > 64)
275 tlbsiz = 64;
276 cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
277 smtc_status |= SMTC_TLB_SHARED;
278 local_flush_tlb_all();
279
280 printk("TLB of %d entry pairs shared by %d VPEs\n",
281 tlbsiz, vpes);
282 } else {
283 printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
284 }
285 }
286}
287
288
289/*
290 * Incrementally build the CPU map out of constituent MIPS MT cores,
291 * using the specified available VPEs and TCs. Plaform code needs
292 * to ensure that each MIPS MT core invokes this routine on reset,
293 * one at a time(!).
294 *
295 * This version of the build_cpu_map and prepare_cpus routines assumes
296 * that *all* TCs of a MIPS MT core will be used for Linux, and that
297 * they will be spread across *all* available VPEs (to minimise the
298 * loss of efficiency due to exception service serialization).
299 * An improved version would pick up configuration information and
300 * possibly leave some TCs/VPEs as "slave" processors.
301 *
302 * Use c0_MVPConf0 to find out how many TCs are available, setting up
303 * cpu_possible_mask and the logical/physical mappings.
304 */
305
306int __init smtc_build_cpu_map(int start_cpu_slot)
307{
308 int i, ntcs;
309
310 /*
311 * The CPU map isn't actually used for anything at this point,
312 * so it's not clear what else we should do apart from set
313 * everything up so that "logical" = "physical".
314 */
315 ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
316 for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
317 set_cpu_possible(i, true);
318 __cpu_number_map[i] = i;
319 __cpu_logical_map[i] = i;
320 }
321#ifdef CONFIG_MIPS_MT_FPAFF
322 /* Initialize map of CPUs with FPUs */
323 cpus_clear(mt_fpu_cpumask);
324#endif
325
326 /* One of those TC's is the one booting, and not a secondary... */
327 printk("%i available secondary CPU TC(s)\n", i - 1);
328
329 return i;
330}
331
332/*
333 * Common setup before any secondaries are started
334 * Make sure all CPUs are in a sensible state before we boot any of the
335 * secondaries.
336 *
337 * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
338 * as possible across the available VPEs.
339 */
340
/*
 * Initialize one TC: halt it, activate it, bind it to its VPE, and
 * populate cpu_data[cpu] (including whether a CP1/FPU context remains
 * available for it).  Called while the core is in MVPE configuration
 * state; the TC is released from Halt later, by smtc_boot_secondary().
 */
static void smtc_tc_setup(int vpe, int tc, int cpu)
{
	static int cp1contexts[MAX_SMTC_VPES];

	/*
	 * Make a local copy of the available FPU contexts in order
	 * to keep track of TCs that can have one.
	 */
	if (tc == 1)
	{
		/*
		 * FIXME: Multi-core SMTC hasn't been tested and the
		 * maximum number of VPEs may change.
		 */
		/* The "- 1" presumably accounts for the context TC0 already uses. */
		cp1contexts[0] = smtc_nconf1[0] - 1;
		cp1contexts[1] = smtc_nconf1[1];
	}

	/* Halt the TC while its state is being modified. */
	settc(tc);
	write_tc_c0_tchalt(TCHALT_H);
	mips_ihb();
	/* Activate the TC; clear kernel/user select, dynamic-alloc and IXMT. */
	write_tc_c0_tcstatus((read_tc_c0_tcstatus()
			& ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
			| TCSTATUS_A);
	/*
	 * TCContext gets an offset from the base of the IPIQ array
	 * to be used in low-level code to detect the presence of
	 * an active IPI queue.
	 */
	write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);

	/* Bind TC to VPE. */
	write_tc_c0_tcbind(vpe);

	/* In general, all TCs should have the same cpu_data indications. */
	memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));

	/* Check to see if there is a FPU context available for this TC. */
	if (!cp1contexts[vpe])
		cpu_data[cpu].options &= ~MIPS_CPU_FPU;
	else
		cp1contexts[vpe]--;

	/* Store the TC and VPE into the cpu_data structure. */
	cpu_data[cpu].vpe_id = vpe;
	cpu_data[cpu].tc_id = tc;

	/* FIXME: Multi-core SMTC hasn't been tested, but be prepared. */
	cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff;
}
391
392/*
393 * Tweak to get Count registers synced as closely as possible. The
394 * value seems good for 34K-class cores.
395 */
396
397#define CP0_SKEW 8
398
/*
 * Configure the whole MT core for SMTC operation: initialize the IPI
 * queues and free-message pool, distribute the available TCs across
 * the available VPEs (honouring the vpelimit/tclimit/vpe0limit command
 * line overrides), set up the shared TLB, program every secondary TC
 * via smtc_tc_setup(), and enable each VPE — leaving all secondary TCs
 * Halted until smtc_boot_secondary() releases them.
 */
void smtc_prepare_cpus(int cpus)
{
	int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
	unsigned long flags;
	unsigned long val;
	int nipi;
	struct smtc_ipi *pipi;

	/* disable interrupts so we can disable MT */
	local_irq_save(flags);
	/* disable MT so we can configure */
	dvpe();
	dmt();

	spin_lock_init(&freeIPIq.lock);

	/*
	 * We probably don't have as many VPEs as we do SMP "CPUs",
	 * but it's possible - and in any case we'll never use more!
	 */
	for (i=0; i<NR_CPUS; i++) {
		IPIQ[i].head = IPIQ[i].tail = NULL;
		spin_lock_init(&IPIQ[i].lock);
		IPIQ[i].depth = 0;
		IPIQ[i].resched_flag = 0; /* No reschedules queued initially */
	}

	/* cpu_data index starts at zero */
	cpu = 0;
	cpu_data[cpu].vpe_id = 0;
	cpu_data[cpu].tc_id = 0;
	cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff;
	cpu++;

	/* Report on boot-time options */
	mips_mt_set_cpuoptions();
	if (vpelimit > 0)
		printk("Limit of %d VPEs set\n", vpelimit);
	if (tclimit > 0)
		printk("Limit of %d TCs set\n", tclimit);
	if (nostlb) {
		printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
	}
	if (asidmask)
		printk("ASID mask value override to 0x%x\n", asidmask);

	/* Temporary */
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	if (hang_trig)
		printk("Logic Analyser Trigger on suspected TC hang\n");
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

	/* Put MVPE's into 'configuration state' */
	write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );

	/* Discover how many VPEs and TCs the hardware provides. */
	val = read_c0_mvpconf0();
	nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	if (vpelimit > 0 && nvpe > vpelimit)
		nvpe = vpelimit;
	ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	if (ntc > NR_CPUS)
		ntc = NR_CPUS;
	if (tclimit > 0 && ntc > tclimit)
		ntc = tclimit;
	/*
	 * Spread TCs as evenly as possible over the VPEs; the first
	 * "slop" VPEs get one extra TC each to absorb the remainder.
	 */
	slop = ntc % nvpe;
	for (i = 0; i < nvpe; i++) {
		tcpervpe[i] = ntc / nvpe;
		if (slop) {
			if((slop - i) > 0) tcpervpe[i]++;
		}
	}
	/* Handle command line override for VPE0 */
	if (vpe0limit > ntc) vpe0limit = ntc;
	if (vpe0limit > 0) {
		int slopslop;
		if (vpe0limit < tcpervpe[0]) {
			/* Reducing TC count - distribute to others */
			slop = tcpervpe[0] - vpe0limit;
			slopslop = slop % (nvpe - 1);
			tcpervpe[0] = vpe0limit;
			for (i = 1; i < nvpe; i++) {
				tcpervpe[i] += slop / (nvpe - 1);
				if(slopslop && ((slopslop - (i - 1) > 0)))
					tcpervpe[i]++;
			}
		} else if (vpe0limit > tcpervpe[0]) {
			/* Increasing TC count - steal from others */
			slop = vpe0limit - tcpervpe[0];
			slopslop = slop % (nvpe - 1);
			tcpervpe[0] = vpe0limit;
			for (i = 1; i < nvpe; i++) {
				tcpervpe[i] -= slop / (nvpe - 1);
				if(slopslop && ((slopslop - (i - 1) > 0)))
					tcpervpe[i]--;
			}
		}
	}

	/* Set up shared TLB */
	smtc_configure_tlb();

	for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
		/* Get number of CP1 contexts for each VPE. */
		if (tc == 0)
		{
			/*
			 * Do not call settc() for TC0 or the FPU context
			 * value will be incorrect. Besides, we know that
			 * we are TC0 anyway.
			 */
			smtc_nconf1[0] = ((read_vpe_c0_vpeconf1() &
				VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT);
			if (nvpe == 2)
			{
				settc(1);
				smtc_nconf1[1] = ((read_vpe_c0_vpeconf1() &
					VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT);
				settc(0);
			}
		}
		if (tcpervpe[vpe] == 0)
			continue;
		if (vpe != 0)
			printk(", ");
		printk("VPE %d: TC", vpe);
		for (i = 0; i < tcpervpe[vpe]; i++) {
			/*
			 * TC 0 is bound to VPE 0 at reset,
			 * and is presumably executing this
			 * code. Leave it alone!
			 */
			if (tc != 0) {
				smtc_tc_setup(vpe, tc, cpu);
				if (vpe != 0) {
					/*
					 * Set MVP bit (possibly again). Do it
					 * here to catch CPUs that have no TCs
					 * bound to the VPE at reset. In that
					 * case, a TC must be bound to the VPE
					 * before we can set VPEControl[MVP]
					 */
					write_vpe_c0_vpeconf0(
						read_vpe_c0_vpeconf0() |
						VPECONF0_MVP);
				}
				cpu++;
			}
			printk(" %d", tc);
			tc++;
		}
		if (vpe != 0) {
			/*
			 * Allow this VPE to control others.
			 */
			write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() |
					VPECONF0_MVP);

			/*
			 * Clear any stale software interrupts from VPE's Cause
			 */
			write_vpe_c0_cause(0);

			/*
			 * Clear ERL/EXL of VPEs other than 0
			 * and set restricted interrupt enable/mask.
			 */
			write_vpe_c0_status((read_vpe_c0_status()
				& ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
				| (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
				| ST0_IE));
			/*
			 * set config to be the same as vpe0,
			 * particularly kseg0 coherency alg
			 */
			write_vpe_c0_config(read_c0_config());
			/* Clear any pending timer interrupt */
			write_vpe_c0_compare(0);
			/* Propagate Config7 */
			write_vpe_c0_config7(read_c0_config7());
			/* Skew the Count register to compensate for write latency. */
			write_vpe_c0_count(read_c0_count() + CP0_SKEW);
			ehb();
		}
		/* enable multi-threading within VPE */
		write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
		/* enable the VPE */
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
	}

	/*
	 * Pull any physically present but unused TCs out of circulation.
	 */
	while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
		set_cpu_possible(tc, false);
		set_cpu_present(tc, false);
		tc++;
	}

	/* release config state */
	write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );

	printk("\n");

	/* Set up coprocessor affinity CPU mask(s) */

#ifdef CONFIG_MIPS_MT_FPAFF
	for (tc = 0; tc < ntc; tc++) {
		if (cpu_data[tc].options & MIPS_CPU_FPU)
			cpu_set(tc, mt_fpu_cpumask);
	}
#endif

	/* set up ipi interrupts... */

	/* If we have multiple VPEs running, set up the cross-VPE interrupt */

	setup_cross_vpe_interrupts(nvpe);

	/* Set up queue of free IPI "messages". */
	nipi = NR_CPUS * IPIBUF_PER_CPU;
	if (ipibuffers > 0)
		nipi = ipibuffers;

	pipi = kmalloc(nipi *sizeof(struct smtc_ipi), GFP_KERNEL);
	if (pipi == NULL)
		panic("kmalloc of IPI message buffers failed");
	else
		printk("IPI buffer pool of %d buffers\n", nipi);
	for (i = 0; i < nipi; i++) {
		smtc_ipi_nq(&freeIPIq, pipi);
		pipi++;
	}

	/* Arm multithreading and enable other VPEs - but all TCs are Halted */
	emt(EMT_ENABLE);
	evpe(EVPE_ENABLE);
	local_irq_restore(flags);
	/* Initialize SMTC /proc statistics/diagnostics */
	init_smtc_stats();
}
638
639
640/*
641 * Setup the PC, SP, and GP of a secondary processor and start it
642 * running!
643 * smp_bootstrap is the place to resume from
644 * __KSTK_TOS(idle) is apparently the stack pointer
645 * (unsigned long)idle->thread_info the gp
646 *
647 */
/*
 * Program the target TC's PC, SP and GP and release it from Halt so
 * it resumes at smp_bootstrap on the idle task's kernel stack.
 */
void smtc_boot_secondary(int cpu, struct task_struct *idle)
{
	extern u32 kernelsp[NR_CPUS];
	unsigned long flags;
	int mtflags;

	LOCK_MT_PRA();
	/* If the target TC lives on a different VPE, quiesce all VPEs first. */
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		dvpe();
	}
	settc(cpu_data[cpu].tc_id);

	/* pc */
	write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);

	/* stack pointer */
	kernelsp[cpu] = __KSTK_TOS(idle);
	write_tc_gpr_sp(__KSTK_TOS(idle));

	/* global pointer */
	write_tc_gpr_gp((unsigned long)task_thread_info(idle));

	smtc_status |= SMTC_MTC_ACTIVE;
	/* Release the TC from Halt; it begins executing at smp_bootstrap. */
	write_tc_c0_tchalt(0);
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		evpe(EVPE_ENABLE);
	}
	UNLOCK_MT_PRA();
}
677
/* Per-secondary-CPU init hook: nothing to do under SMTC. */
void smtc_init_secondary(void)
{
}
681
/*
 * Final per-CPU bring-up: arm the first timer tick on the lowest TC of
 * each VPE, then enable interrupts and announce the CPU as on-line.
 */
void smtc_smp_finish(void)
{
	int cpu = smp_processor_id();

	/*
	 * Lowest-numbered CPU per VPE starts a clock tick.
	 * Like per_cpu_trap_init() hack, this assumes that
	 * SMTC init code assigns TCs consecutively and
	 * in ascending order across available VPEs.
	 */
	if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
		write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);

	local_irq_enable();

	printk("TC %d going on-line as CPU %d\n",
		cpu_data[smp_processor_id()].tc_id, smp_processor_id());
}
700
/* SMP bring-up completion hook: nothing further needed under SMTC. */
void smtc_cpus_done(void)
{
}
704
705/*
706 * Support for SMTC-optimized driver IRQ registration
707 */
708
709/*
710 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
711 * in do_IRQ. These are passed in setup_irq_smtc() and stored
712 * in this table.
713 */
714
/*
 * SMTC-aware IRQ registration: record the low-level CPU interrupt mask
 * bits for "irq" (consulted by do_IRQ) before handing off to setup_irq().
 */
int setup_irq_smtc(unsigned int irq, struct irqaction * new,
			unsigned long hwmask)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	unsigned int vpe = current_cpu_data.vpe_id;

	/* Remember which VPE expects this IRQ, for the idle-hook IM check. */
	vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1;
#endif
	/* Must be recorded before the IRQ can fire. */
	irq_hwmask[irq] = hwmask;

	return setup_irq(irq, new);
}
727
728#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
729/*
730 * Support for IRQ affinity to TCs
731 */
732
/* Hook called on IRQ affinity changes; intentionally a no-op for now. */
void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
{
	/*
	 * If a "fast path" cache of quickly decodable affinity state
	 * is maintained, this is where it gets done, on a call up
	 * from the platform affinity code.
	 */
}
741
/*
 * Redirect an IRQ taken by an ineligible TC to the first CPU in the
 * IRQ's affinity mask via an IRQ_AFFINITY_IPI; if the mask selects no
 * valid CPU, service the IRQ locally instead.
 */
void smtc_forward_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;
	int target;

	/*
	 * OK wise guy, now figure out how to get the IRQ
	 * to be serviced on an authorized "CPU".
	 *
	 * Ideally, to handle the situation where an IRQ has multiple
	 * eligible CPUS, we would maintain state per IRQ that would
	 * allow a fair distribution of service requests. Since the
	 * expected use model is any-or-only-one, for simplicity
	 * and efficiency, we just pick the easiest one to find.
	 */

	target = cpumask_first(d->affinity);

	/*
	 * We depend on the platform code to have correctly processed
	 * IRQ affinity change requests to ensure that the IRQ affinity
	 * mask has been purged of bits corresponding to nonexistent and
	 * offline "CPUs", and to TCs bound to VPEs other than the VPE
	 * connected to the physical interrupt input for the interrupt
	 * in question. Otherwise we have a nasty problem with interrupt
	 * mask management. This is best handled in non-performance-critical
	 * platform IRQ affinity setting code, to minimize interrupt-time
	 * checks.
	 */

	/* If no one is eligible, service locally */
	if (target >= NR_CPUS)
		do_IRQ_no_affinity(irq);
	else
		smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
}
778
779#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
780
781/*
782 * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
783 * Within a VPE one TC can interrupt another by different approaches.
784 * The easiest to get right would probably be to make all TCs except
785 * the target IXMT and set a software interrupt, but an IXMT-based
786 * scheme requires that a handler must run before a new IPI could
787 * be sent, which would break the "broadcast" loops in MIPS MT.
788 * A more gonzo approach within a VPE is to halt the TC, extract
789 * its Restart, Status, and a couple of GPRs, and program the Restart
790 * address to emulate an interrupt.
791 *
792 * Within a VPE, one can be confident that the target TC isn't in
793 * a critical EXL state when halted, since the write to the Halt
794 * register could not have issued on the writing thread if the
795 * halting thread had EXL set. So k0 and k1 of the target TC
796 * can be used by the injection code. Across VPEs, one can't
797 * be certain that the target TC isn't in a critical exception
798 * state. So we try a two-step process of sending a software
799 * interrupt to the target VPE, which either handles the event
800 * itself (if it was the target) or injects the event within
801 * the VPE.
802 */
803
804static void smtc_ipi_qdump(void)
805{
806 int i;
807 struct smtc_ipi *temp;
808
809 for (i = 0; i < NR_CPUS ;i++) {
810 pr_info("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
811 i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
812 IPIQ[i].depth);
813 temp = IPIQ[i].head;
814
815 while (temp != IPIQ[i].tail) {
816 pr_debug("%d %d %d: ", temp->type, temp->dest,
817 (int)temp->arg);
818#ifdef SMTC_IPI_DEBUG
819 pr_debug("%u %lu\n", temp->sender, temp->stamp);
820#else
821 pr_debug("\n");
822#endif
823 temp = temp->flink;
824 }
825 }
826}
827
828/*
829 * The standard atomic.h primitives don't quite do what we want
830 * here: We need an atomic add-and-return-previous-value (which
831 * could be done with atomic_add_return and a decrement) and an
832 * atomic set/zero-and-return-previous-value (which can't really
833 * be done with the atomic.h primitives). And since this is
834 * MIPS MT, we can assume that we have LL/SC.
835 */
/*
 * Atomically increment *v and return its *previous* value, using an
 * LL/SC loop (guaranteed available on MIPS MT). See the comment above
 * for why the stock atomic.h primitives don't fit.
 */
static inline int atomic_postincrement(atomic_t *v)
{
	unsigned long result;

	unsigned long temp;

	__asm__ __volatile__(
	"1:	ll	%0, %2					\n"
	"	addu	%1, %0, 1				\n"
	"	sc	%1, %2					\n"
	"	beqz	%1, 1b					\n"	/* retry if SC failed */
	__WEAK_LLSC_MB
	: "=&r" (result), "=&r" (temp), "=m" (v->counter)
	: "m" (v->counter)
	: "memory");

	return result;
}
854
/*
 * Send an IPI of "type"/"action" to "cpu".  A cross-VPE target has its
 * message queued and is signalled with software interrupt 1; a same-VPE
 * target is halted and either gets a direct "interrupt" posted (PC
 * redirection via post_direct_ipi()) or, if it is interrupt-exempt,
 * has the message queued for pickup at its next local_irq_restore().
 */
void smtc_send_ipi(int cpu, int type, unsigned int action)
{
	int tcstatus;
	struct smtc_ipi *pipi;
	unsigned long flags;
	int mtflags;
	unsigned long tcrestart;
	/* Reschedule IPIs are coalesced: at most one outstanding per CPU. */
	int set_resched_flag = (type == LINUX_SMP_IPI &&
				action == SMP_RESCHEDULE_YOURSELF);

	if (cpu == smp_processor_id()) {
		printk("Cannot Send IPI to self!\n");
		return;
	}
	if (set_resched_flag && IPIQ[cpu].resched_flag != 0)
		return; /* There is a reschedule queued already */

	/* Set up a descriptor, to be delivered either promptly or queued */
	pipi = smtc_ipi_dq(&freeIPIq);
	if (pipi == NULL) {
		bust_spinlocks(1);
		mips_mt_regdump(dvpe());
		panic("IPI Msg. Buffers Depleted");
	}
	pipi->type = type;
	pipi->arg = (void *)action;
	pipi->dest = cpu;
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		/* If not on same VPE, enqueue and send cross-VPE interrupt */
		IPIQ[cpu].resched_flag |= set_resched_flag;
		smtc_ipi_nq(&IPIQ[cpu], pipi);
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
		UNLOCK_CORE_PRA();
	} else {
		/*
		 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
		 * since ASID shootdown on the other VPE may
		 * collide with this operation.
		 */
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		/* Halt the targeted TC */
		write_tc_c0_tchalt(TCHALT_H);
		mips_ihb();

		/*
		 * Inspect TCStatus - if IXMT is set, we have to queue
		 * a message. Otherwise, we set up the "interrupt"
		 * of the other TC
		 */
		tcstatus = read_tc_c0_tcstatus();

		if ((tcstatus & TCSTATUS_IXMT) != 0) {
			/*
			 * If we're in the irq-off version of the wait
			 * loop, we need to force exit from the wait and
			 * do a direct post of the IPI.
			 */
			if (cpu_wait == r4k_wait_irqoff) {
				tcrestart = read_tc_c0_tcrestart();
				if (address_is_in_r4k_wait_irqoff(tcrestart)) {
					write_tc_c0_tcrestart(__pastwait);
					tcstatus &= ~TCSTATUS_IXMT;
					write_tc_c0_tcstatus(tcstatus);
					goto postdirect;
				}
			}
			/*
			 * Otherwise we queue the message for the target TC
			 * to pick up when he does a local_irq_restore()
			 */
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
			IPIQ[cpu].resched_flag |= set_resched_flag;
			smtc_ipi_nq(&IPIQ[cpu], pipi);
		} else {
postdirect:
			post_direct_ipi(cpu, pipi);
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
		}
	}
}
940
941/*
942 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
943 */
/*
 * Send IPI message to Halted TC, TargTC/TargVPE already having been set.
 * Forges a pt_regs frame on the target TC's kernel stack recording its
 * current Restart PC and TCStatus, then redirects the TC to
 * __smtc_ipi_vector, which will call ipi_decode(pipi) and return.
 */
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
{
	struct pt_regs *kstack;
	unsigned long tcstatus;
	unsigned long tcrestart;
	extern u32 kernelsp[NR_CPUS];
	extern void __smtc_ipi_vector(void);

	/* Extract Status, EPC from halted TC */
	tcstatus = read_tc_c0_tcstatus();
	tcrestart = read_tc_c0_tcrestart();
	/* If TCRestart indicates a WAIT instruction, advance the PC */
	/* (0x80000000 restricts the peek to kernel-segment addresses) */
	if ((tcrestart & 0x80000000)
	    && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
		tcrestart += 4;
	}
	/*
	 * Save on TC's future kernel stack
	 *
	 * CU bit of Status is indicator that TC was
	 * already running on a kernel stack...
	 */
	if (tcstatus & ST0_CU0) {
		/* Note that this "- 1" is pointer arithmetic */
		kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
	} else {
		kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
	}

	kstack->cp0_epc = (long)tcrestart;
	/* Save TCStatus */
	kstack->cp0_tcstatus = tcstatus;
	/* Pass token of operation to be performed kernel stack pad area */
	kstack->pad0[4] = (unsigned long)pipi;
	/* Pass address of function to be called likewise */
	kstack->pad0[5] = (unsigned long)&ipi_decode;
	/* Set interrupt exempt and kernel mode */
	tcstatus |= TCSTATUS_IXMT;
	tcstatus &= ~TCSTATUS_TKSU;
	write_tc_c0_tcstatus(tcstatus);
	ehb();
	/* Set TC Restart address to be SMTC IPI vector */
	write_tc_c0_tcrestart(__smtc_ipi_vector);
}
989
/* Handle a reschedule IPI: just notify the scheduler. */
static void ipi_resched_interrupt(void)
{
	scheduler_ipi();
}
994
/* Handle a function-call IPI. */
static void ipi_call_interrupt(void)
{
	/* Invoke generic function invocation code in smp.c */
	smp_call_function_interrupt();
}
1000
DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);

/*
 * Deliver a clock-tick IPI: account it against the timer IRQ and run
 * this CPU's clockevent handler within an irq_enter()/irq_exit() pair.
 */
static void __irq_entry smtc_clock_tick_interrupt(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	/* NOTE(review): assumes the timer IRQ is MIPS_CPU_IRQ_BASE + 1 — confirm */
	int irq = MIPS_CPU_IRQ_BASE + 1;

	irq_enter();
	kstat_incr_irq_this_cpu(irq);
	cd = &per_cpu(mips_clockevent_device, cpu);
	cd->event_handler(cd);
	irq_exit();
}
1015
1016void ipi_decode(struct smtc_ipi *pipi)
1017{
1018 void *arg_copy = pipi->arg;
1019 int type_copy = pipi->type;
1020
1021 smtc_ipi_nq(&freeIPIq, pipi);
1022
1023 switch (type_copy) {
1024 case SMTC_CLOCK_TICK:
1025 smtc_clock_tick_interrupt();
1026 break;
1027
1028 case LINUX_SMP_IPI:
1029 switch ((int)arg_copy) {
1030 case SMP_RESCHEDULE_YOURSELF:
1031 ipi_resched_interrupt();
1032 break;
1033 case SMP_CALL_FUNCTION:
1034 ipi_call_interrupt();
1035 break;
1036 default:
1037 printk("Impossible SMTC IPI Argument %p\n", arg_copy);
1038 break;
1039 }
1040 break;
1041#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
1042 case IRQ_AFFINITY_IPI:
1043 /*
1044 * Accept a "forwarded" interrupt that was initially
1045 * taken by a TC who doesn't have affinity for the IRQ.
1046 */
1047 do_IRQ_no_affinity((int)arg_copy);
1048 break;
1049#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
1050 default:
1051 printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
1052 break;
1053 }
1054}
1055
1056/*
1057 * Similar to smtc_ipi_replay(), but invoked from context restore,
1058 * so it reuses the current exception frame rather than set up a
1059 * new one with self_ipi.
1060 */
1061
/*
 * Drain this CPU's pending IPI queue from context restore, decoding
 * each message in the current exception frame (unlike smtc_ipi_replay,
 * which sets up a new frame via self_ipi).
 */
void deferred_smtc_ipi(void)
{
	int cpu = smp_processor_id();

	/*
	 * Test is not atomic, but much faster than a dequeue,
	 * and the vast majority of invocations will have a null queue.
	 * If irq_disabled when this was called, then any IPIs queued
	 * after we test last will be taken on the next irq_enable/restore.
	 * If interrupts were enabled, then any IPIs added after the
	 * last test will be taken directly.
	 */

	while (IPIQ[cpu].head != NULL) {
		struct smtc_ipi_q *q = &IPIQ[cpu];
		struct smtc_ipi *pipi;
		unsigned long flags;

		/*
		 * It may be possible we'll come in with interrupts
		 * already enabled.
		 */
		local_irq_save(flags);
		spin_lock(&q->lock);
		pipi = __smtc_ipi_dq(q);
		spin_unlock(&q->lock);
		if (pipi != NULL) {
			/* A dequeued reschedule clears the coalescing flag. */
			if (pipi->type == LINUX_SMP_IPI &&
			    (int)pipi->arg == SMP_RESCHEDULE_YOURSELF)
				IPIQ[cpu].resched_flag = 0;
			ipi_decode(pipi);
		}
		/*
		 * The use of the __raw_local restore isn't
		 * as obviously necessary here as in smtc_ipi_replay(),
		 * but it's more efficient, given that we're already
		 * running down the IPI queue.
		 */
		__arch_local_irq_restore(flags);
	}
}
1103
1104/*
1105 * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
1106 * set via cross-VPE MTTR manipulation of the Cause register. It would be
1107 * in some regards preferable to have external logic for "doorbell" hardware
1108 * interrupts.
1109 */
1110
/* Linux IRQ number used for the cross-VPE IPI software interrupt. */
static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;
1112
/*
 * Cross-VPE IPI handler: acknowledge and re-enable the IPI software
 * interrupt, then try to deliver IPIs queued for TCs on this VPE.
 * Messages for the current TC are decoded directly; other TCs get a
 * direct post via post_direct_ipi() unless they are interrupt-exempt,
 * in which case the message is requeued for later pickup.
 * (dev_idm is the unused irqaction cookie.)
 */
static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
{
	int my_vpe = cpu_data[smp_processor_id()].vpe_id;
	int my_tc = cpu_data[smp_processor_id()].tc_id;
	int cpu;
	struct smtc_ipi *pipi;
	unsigned long tcstatus;
	int sent;
	unsigned long flags;
	unsigned int mtflags;
	unsigned int vpflags;

	/*
	 * So long as cross-VPE interrupts are done via
	 * MFTR/MTTR read-modify-writes of Cause, we need
	 * to stop other VPEs whenever the local VPE does
	 * anything similar.
	 */
	local_irq_save(flags);
	vpflags = dvpe();
	clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
	set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
	irq_enable_hazard();
	evpe(vpflags);
	local_irq_restore(flags);

	/*
	 * Cross-VPE Interrupt handler: Try to directly deliver IPIs
	 * queued for TCs on this VPE other than the current one.
	 * Return-from-interrupt should cause us to drain the queue
	 * for the current TC, so we ought not to have to do it explicitly here.
	 */

	for_each_online_cpu(cpu) {
		if (cpu_data[cpu].vpe_id != my_vpe)
			continue;

		pipi = smtc_ipi_dq(&IPIQ[cpu]);
		if (pipi != NULL) {
			if (cpu_data[cpu].tc_id != my_tc) {
				sent = 0;
				LOCK_MT_PRA();
				settc(cpu_data[cpu].tc_id);
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				tcstatus = read_tc_c0_tcstatus();
				/* Only post directly if the target can take interrupts. */
				if ((tcstatus & TCSTATUS_IXMT) == 0) {
					post_direct_ipi(cpu, pipi);
					sent = 1;
				}
				write_tc_c0_tchalt(0);
				UNLOCK_MT_PRA();
				if (!sent) {
					/* Requeue for pickup at the TC's next irq-restore. */
					smtc_ipi_req(&IPIQ[cpu], pipi);
				}
			} else {
				/*
				 * ipi_decode() should be called
				 * with interrupts off
				 */
				local_irq_save(flags);
				if (pipi->type == LINUX_SMP_IPI &&
				    (int)pipi->arg == SMP_RESCHEDULE_YOURSELF)
					IPIQ[cpu].resched_flag = 0;
				ipi_decode(pipi);
				local_irq_restore(flags);
			}
		}
	}

	return IRQ_HANDLED;
}
1185
/* Vectored-interrupt trampoline for the cross-VPE IPI software IRQ. */
static void ipi_irq_dispatch(void)
{
	do_IRQ(cpu_ipi_irq);
}
1190
/* Cross-VPE IPI interrupt action: per-CPU, serviced by ipi_interrupt(). */
static struct irqaction irq_ipi = {
	.handler	= ipi_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "SMTC_IPI"
};
1196
/*
 * Install the cross-VPE IPI interrupt: vectored dispatch for
 * MIPS_CPU_IPI_IRQ, its hardware mask recorded via setup_irq_smtc(),
 * and per-CPU flow handling. Requires vectored-interrupt support.
 */
static void setup_cross_vpe_interrupts(unsigned int nvpe)
{
	if (nvpe < 1)
		return;

	if (!cpu_has_vint)
		panic("SMTC Kernel requires Vectored Interrupt support");

	set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);

	setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));

	irq_set_handler(cpu_ipi_irq, handle_percpu_irq);
}
1211
1212/*
1213 * SMTC-specific hacks invoked from elsewhere in the kernel.
1214 */
1215
1216 /*
1217 * smtc_ipi_replay is called from raw_local_irq_restore
1218 */
1219
/*
 * Replay IPIs deferred while interrupts were off: dequeue each pending
 * message for this CPU and deliver it to ourselves via self_ipi().
 * Called from raw_local_irq_restore.
 */
void smtc_ipi_replay(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * To the extent that we've ever turned interrupts off,
	 * we may have accumulated deferred IPIs. This is subtle.
	 * we should be OK: If we pick up something and dispatch
	 * it here, that's great. If we see nothing, but concurrent
	 * with this operation, another TC sends us an IPI, IXMT
	 * is clear, and we'll handle it as a real pseudo-interrupt
	 * and not a pseudo-pseudo interrupt. The important thing
	 * is to do the last check for queued message *after* the
	 * re-enabling of interrupts.
	 */
	while (IPIQ[cpu].head != NULL) {
		struct smtc_ipi_q *q = &IPIQ[cpu];
		struct smtc_ipi *pipi;
		unsigned long flags;

		/*
		 * It's just possible we'll come in with interrupts
		 * already enabled.
		 */
		local_irq_save(flags);

		spin_lock(&q->lock);
		pipi = __smtc_ipi_dq(q);
		spin_unlock(&q->lock);
		/*
		** But use a raw restore here to avoid recursion.
		*/
		__arch_local_irq_restore(flags);

		if (pipi) {
			self_ipi(pipi);
			smtc_cpu_stats[cpu].selfipis++;
		}
	}
}

EXPORT_SYMBOL(smtc_ipi_replay);
1262
/*
 * Idle-loop hook: with CONFIG_SMTC_IDLE_HOOK_DEBUG, repairs "stuck"
 * state (dangling IXMT, interrupt-mask bits left off for too long);
 * in all configurations it finishes by replaying any deferred IPIs.
 */
void smtc_idle_loop_hook(void)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	int im;
	int flags;
	int mtflags;
	int bit;
	int vpe;
	int tc;
	int hook_ntcs;
	/*
	 * printk within DMT-protected regions can deadlock,
	 * so buffer diagnostic messages for later output.
	 */
	char *pdb_msg;
	char id_ho_db_msg[768]; /* worst-case use should be less than 700 */

	if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
		if (atomic_add_return(1, &idle_hook_initialized) == 1) {
			int mvpconf0;
			/* Tedious stuff to just do once */
			mvpconf0 = read_c0_mvpconf0();
			hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
			if (hook_ntcs > NR_CPUS)
				hook_ntcs = NR_CPUS;
			for (tc = 0; tc < hook_ntcs; tc++) {
				tcnoprog[tc] = 0;
				clock_hang_reported[tc] = 0;
			}
			for (vpe = 0; vpe < 2; vpe++)
				for (im = 0; im < 8; im++)
					imstuckcount[vpe][im] = 0;
			printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
			atomic_set(&idle_hook_initialized, 1000);
		} else {
			/* Someone else is initializing in parallel - let 'em finish */
			while (atomic_read(&idle_hook_initialized) < 1000)
				;
		}
	}

	/* Have we stupidly left IXMT set somewhere? */
	/* (0x400 is the TCStatus bit reported as IXMT by the printk below) */
	if (read_c0_tcstatus() & 0x400) {
		write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
		ehb();
		printk("Dangling IXMT in cpu_idle()\n");
	}

	/* Have we stupidly left an IM bit turned off? */
#define IM_LIMIT 2000
	local_irq_save(flags);
	mtflags = dmt();
	pdb_msg = &id_ho_db_msg[0];
	im = read_c0_status();
	vpe = current_cpu_data.vpe_id;
	for (bit = 0; bit < 8; bit++) {
		/*
		 * In current prototype, I/O interrupts
		 * are masked for VPE > 0
		 */
		if (vpemask[vpe][bit]) {
			/* Count consecutive idle passes with the IM bit off. */
			if (!(im & (0x100 << bit)))
				imstuckcount[vpe][bit]++;
			else
				imstuckcount[vpe][bit] = 0;
			if (imstuckcount[vpe][bit] > IM_LIMIT) {
				set_c0_status(0x100 << bit);
				ehb();
				imstuckcount[vpe][bit] = 0;
				pdb_msg += sprintf(pdb_msg,
					"Dangling IM %d fixed for VPE %d\n", bit,
					vpe);
			}
		}
	}

	emt(mtflags);
	local_irq_restore(flags);
	if (pdb_msg != &id_ho_db_msg[0])
		printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

	smtc_ipi_replay();
}
1347
1348void smtc_soft_dump(void)
1349{
1350 int i;
1351
1352 printk("Counter Interrupts taken per CPU (TC)\n");
1353 for (i=0; i < NR_CPUS; i++) {
1354 printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
1355 }
1356 printk("Self-IPI invocations:\n");
1357 for (i=0; i < NR_CPUS; i++) {
1358 printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
1359 }
1360 smtc_ipi_qdump();
1361 printk("%d Recoveries of \"stolen\" FPU\n",
1362 atomic_read(&smtc_fpu_recoveries));
1363}
1364
1365
1366/*
1367 * TLB management routines special to SMTC
1368 */
1369
/*
 * Allocate a new ASID for "mm" on "cpu", skipping values still live in
 * any TC that shares this TLB.  On ASID-space wraparound, the live-ASID
 * table is rebuilt by halting and inspecting each sharing TC, and the
 * TLB is flushed to start a new ASID cycle.  Runs under DVPE when the
 * TLB is shared across VPEs, otherwise under DMT.
 */
void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	unsigned long flags, mtflags, tcstat, prevhalt, asid;
	int tlb, i;

	/*
	 * It would be nice to be able to use a spinlock here,
	 * but this is invoked from within TLB flush routines
	 * that protect themselves with DVPE, so if a lock is
	 * held by another TC, it'll never be freed.
	 *
	 * DVPE/DMT must not be done with interrupts enabled,
	 * so even so most callers will already have disabled
	 * them, let's be really careful...
	 */

	local_irq_save(flags);
	if (smtc_status & SMTC_TLB_SHARED) {
		mtflags = dvpe();
		tlb = 0;
	} else {
		mtflags = dmt();
		tlb = cpu_data[cpu].vpe_id;
	}
	asid = asid_cache(cpu);

	do {
		/* Wrapped around the ASID number space? */
		if (!((asid += ASID_INC) & ASID_MASK) ) {
			if (cpu_has_vtag_icache)
				flush_icache_all();
			/* Traverse all online CPUs (hack requires contiguous range) */
			for_each_online_cpu(i) {
				/*
				 * We don't need to worry about our own CPU, nor those of
				 * CPUs who don't share our TLB.
				 */
				if ((i != smp_processor_id()) &&
				    ((smtc_status & SMTC_TLB_SHARED) ||
				     (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
					settc(cpu_data[i].tc_id);
					/* Halt only TCs that weren't already halted. */
					prevhalt = read_tc_c0_tchalt() & TCHALT_H;
					if (!prevhalt) {
						write_tc_c0_tchalt(TCHALT_H);
						mips_ihb();
					}
					/* Mark the TC's current ASID as in use by CPU i. */
					tcstat = read_tc_c0_tcstatus();
					smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
					if (!prevhalt)
						write_tc_c0_tchalt(0);
				}
			}
			if (!asid)		/* fix version if needed */
				asid = ASID_FIRST_VERSION;
			local_flush_tlb_all();	/* start new asid cycle */
		}
	} while (smtc_live_asid[tlb][(asid & ASID_MASK)]);

	/*
	 * SMTC shares the TLB within VPEs and possibly across all VPEs.
	 */
	for_each_online_cpu(i) {
		if ((smtc_status & SMTC_TLB_SHARED) ||
		    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
			cpu_context(i, mm) = asid_cache(i) = asid;
	}

	if (smtc_status & SMTC_TLB_SHARED)
		evpe(mtflags);
	else
		emt(mtflags);
	local_irq_restore(flags);
}
1442
1443/*
1444 * Invoked from macros defined in mmu_context.h
1445 * which must already have disabled interrupts
1446 * and done a DVPE or DMT as appropriate.
1447 */
1448
/*
 * Invalidate every non-wired TLB entry whose EntryHi ASID matches
 * @asid.  Each matching entry is rewritten with zero EntryLo values
 * and a unique, non-mapped CKSEG0-based EntryHi so that no two
 * entries can conflict.  Index is parked afterwards so a stray
 * tlbwi cannot clobber a live entry.
 */
void smtc_flush_tlb_asid(unsigned long asid)
{
	int entry;
	unsigned long ehi;

	/* Skip wired entries; they are never ASID-tagged victims here. */
	entry = read_c0_wired();

	/* Traverse all non-wired entries */
	while (entry < current_cpu_data.tlbsize) {
		write_c0_index(entry);
		ehb();		/* Index write must land before tlb_read() */
		tlb_read();
		ehb();		/* tlb_read() must complete before reading EntryHi */
		ehi = read_c0_entryhi();
		if ((ehi & ASID_MASK) == asid) {
			/*
			 * Invalidate only entries with specified ASID,
			 * making sure all entries differ.
			 */
			write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		entry++;
	}
	write_c0_index(PARKED_INDEX);
	tlbw_use_hazard();
}
1479
1480/*
1481 * Support for single-threading cache flush operations.
1482 */
1483
1484static int halt_state_save[NR_CPUS];
1485
1486/*
1487 * To really, really be sure that nothing is being done
1488 * by other TCs, halt them all. This code assumes that
1489 * a DVPE has already been done, so while their Halted
1490 * state is theoretically architecturally unstable, in
1491 * practice, it's not going to change while we're looking
1492 * at it.
1493 */
1494
1495void smtc_cflush_lockdown(void)
1496{
1497 int cpu;
1498
1499 for_each_online_cpu(cpu) {
1500 if (cpu != smp_processor_id()) {
1501 settc(cpu_data[cpu].tc_id);
1502 halt_state_save[cpu] = read_tc_c0_tchalt();
1503 write_tc_c0_tchalt(TCHALT_H);
1504 }
1505 }
1506 mips_ihb();
1507}
1508
1509/* It would be cheating to change the cpu_online states during a flush! */
1510
1511void smtc_cflush_release(void)
1512{
1513 int cpu;
1514
1515 /*
1516 * Start with a hazard barrier to ensure
1517 * that all CACHE ops have played through.
1518 */
1519 mips_ihb();
1520
1521 for_each_online_cpu(cpu) {
1522 if (cpu != smp_processor_id()) {
1523 settc(cpu_data[cpu].tc_id);
1524 write_tc_c0_tchalt(halt_state_save[cpu]);
1525 }
1526 }
1527 mips_ihb();
1528}
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index c24ad5f4b324..2242bdd4370e 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -6,8 +6,6 @@
6 * not have done anything significant (but they may have had interrupts 6 * not have done anything significant (but they may have had interrupts
7 * enabled briefly - prom_smp_finish() should not be responsible for enabling 7 * enabled briefly - prom_smp_finish() should not be responsible for enabling
8 * interrupts...) 8 * interrupts...)
9 *
10 * FIXME: broken for SMTC
11 */ 9 */
12 10
13#include <linux/kernel.h> 11#include <linux/kernel.h>
@@ -33,14 +31,6 @@ void synchronise_count_master(int cpu)
33 unsigned long flags; 31 unsigned long flags;
34 unsigned int initcount; 32 unsigned int initcount;
35 33
36#ifdef CONFIG_MIPS_MT_SMTC
37 /*
38 * SMTC needs to synchronise per VPE, not per CPU
39 * ignore for now
40 */
41 return;
42#endif
43
44 printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu); 34 printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);
45 35
46 local_irq_save(flags); 36 local_irq_save(flags);
@@ -110,14 +100,6 @@ void synchronise_count_slave(int cpu)
110 int i; 100 int i;
111 unsigned int initcount; 101 unsigned int initcount;
112 102
113#ifdef CONFIG_MIPS_MT_SMTC
114 /*
115 * SMTC needs to synchronise per VPE, not per CPU
116 * ignore for now
117 */
118 return;
119#endif
120
121 /* 103 /*
122 * Not every cpu is online at the time this gets called, 104 * Not every cpu is online at the time this gets called,
123 * so we first wait for the master to say everyone is ready 105 * so we first wait for the master to say everyone is ready
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index dcb8e5d3bb8a..8d0170969e22 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -26,7 +26,6 @@
26#include <asm/cpu-features.h> 26#include <asm/cpu-features.h>
27#include <asm/cpu-type.h> 27#include <asm/cpu-type.h>
28#include <asm/div64.h> 28#include <asm/div64.h>
29#include <asm/smtc_ipi.h>
30#include <asm/time.h> 29#include <asm/time.h>
31 30
32/* 31/*
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index d57fc10df773..51706d6dd5b0 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -15,6 +15,7 @@
15#include <linux/bug.h> 15#include <linux/bug.h>
16#include <linux/compiler.h> 16#include <linux/compiler.h>
17#include <linux/context_tracking.h> 17#include <linux/context_tracking.h>
18#include <linux/cpu_pm.h>
18#include <linux/kexec.h> 19#include <linux/kexec.h>
19#include <linux/init.h> 20#include <linux/init.h>
20#include <linux/kernel.h> 21#include <linux/kernel.h>
@@ -370,9 +371,6 @@ void __noreturn die(const char *str, struct pt_regs *regs)
370{ 371{
371 static int die_counter; 372 static int die_counter;
372 int sig = SIGSEGV; 373 int sig = SIGSEGV;
373#ifdef CONFIG_MIPS_MT_SMTC
374 unsigned long dvpret;
375#endif /* CONFIG_MIPS_MT_SMTC */
376 374
377 oops_enter(); 375 oops_enter();
378 376
@@ -382,13 +380,7 @@ void __noreturn die(const char *str, struct pt_regs *regs)
382 380
383 console_verbose(); 381 console_verbose();
384 raw_spin_lock_irq(&die_lock); 382 raw_spin_lock_irq(&die_lock);
385#ifdef CONFIG_MIPS_MT_SMTC
386 dvpret = dvpe();
387#endif /* CONFIG_MIPS_MT_SMTC */
388 bust_spinlocks(1); 383 bust_spinlocks(1);
389#ifdef CONFIG_MIPS_MT_SMTC
390 mips_mt_regdump(dvpret);
391#endif /* CONFIG_MIPS_MT_SMTC */
392 384
393 printk("%s[#%d]:\n", str, ++die_counter); 385 printk("%s[#%d]:\n", str, ++die_counter);
394 show_registers(regs); 386 show_registers(regs);
@@ -1761,19 +1753,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1761 extern char rollback_except_vec_vi; 1753 extern char rollback_except_vec_vi;
1762 char *vec_start = using_rollback_handler() ? 1754 char *vec_start = using_rollback_handler() ?
1763 &rollback_except_vec_vi : &except_vec_vi; 1755 &rollback_except_vec_vi : &except_vec_vi;
1764#ifdef CONFIG_MIPS_MT_SMTC
1765 /*
1766 * We need to provide the SMTC vectored interrupt handler
1767 * not only with the address of the handler, but with the
1768 * Status.IM bit to be masked before going there.
1769 */
1770 extern char except_vec_vi_mori;
1771#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
1772 const int mori_offset = &except_vec_vi_mori - vec_start + 2;
1773#else
1774 const int mori_offset = &except_vec_vi_mori - vec_start;
1775#endif
1776#endif /* CONFIG_MIPS_MT_SMTC */
1777#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) 1756#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
1778 const int lui_offset = &except_vec_vi_lui - vec_start + 2; 1757 const int lui_offset = &except_vec_vi_lui - vec_start + 2;
1779 const int ori_offset = &except_vec_vi_ori - vec_start + 2; 1758 const int ori_offset = &except_vec_vi_ori - vec_start + 2;
@@ -1797,12 +1776,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1797#else 1776#else
1798 handler_len); 1777 handler_len);
1799#endif 1778#endif
1800#ifdef CONFIG_MIPS_MT_SMTC
1801 BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */
1802
1803 h = (u16 *)(b + mori_offset);
1804 *h = (0x100 << n);
1805#endif /* CONFIG_MIPS_MT_SMTC */
1806 h = (u16 *)(b + lui_offset); 1779 h = (u16 *)(b + lui_offset);
1807 *h = (handler >> 16) & 0xffff; 1780 *h = (handler >> 16) & 0xffff;
1808 h = (u16 *)(b + ori_offset); 1781 h = (u16 *)(b + ori_offset);
@@ -1867,32 +1840,16 @@ static int __init ulri_disable(char *s)
1867} 1840}
1868__setup("noulri", ulri_disable); 1841__setup("noulri", ulri_disable);
1869 1842
1870void per_cpu_trap_init(bool is_boot_cpu) 1843/* configure STATUS register */
1844static void configure_status(void)
1871{ 1845{
1872 unsigned int cpu = smp_processor_id();
1873 unsigned int status_set = ST0_CU0;
1874 unsigned int hwrena = cpu_hwrena_impl_bits;
1875#ifdef CONFIG_MIPS_MT_SMTC
1876 int secondaryTC = 0;
1877 int bootTC = (cpu == 0);
1878
1879 /*
1880 * Only do per_cpu_trap_init() for first TC of Each VPE.
1881 * Note that this hack assumes that the SMTC init code
1882 * assigns TCs consecutively and in ascending order.
1883 */
1884
1885 if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
1886 ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
1887 secondaryTC = 1;
1888#endif /* CONFIG_MIPS_MT_SMTC */
1889
1890 /* 1846 /*
1891 * Disable coprocessors and select 32-bit or 64-bit addressing 1847 * Disable coprocessors and select 32-bit or 64-bit addressing
1892 * and the 16/32 or 32/32 FPR register model. Reset the BEV 1848 * and the 16/32 or 32/32 FPR register model. Reset the BEV
1893 * flag that some firmware may have left set and the TS bit (for 1849 * flag that some firmware may have left set and the TS bit (for
1894 * IP27). Set XX for ISA IV code to work. 1850 * IP27). Set XX for ISA IV code to work.
1895 */ 1851 */
1852 unsigned int status_set = ST0_CU0;
1896#ifdef CONFIG_64BIT 1853#ifdef CONFIG_64BIT
1897 status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; 1854 status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
1898#endif 1855#endif
@@ -1903,6 +1860,12 @@ void per_cpu_trap_init(bool is_boot_cpu)
1903 1860
1904 change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, 1861 change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
1905 status_set); 1862 status_set);
1863}
1864
1865/* configure HWRENA register */
1866static void configure_hwrena(void)
1867{
1868 unsigned int hwrena = cpu_hwrena_impl_bits;
1906 1869
1907 if (cpu_has_mips_r2) 1870 if (cpu_has_mips_r2)
1908 hwrena |= 0x0000000f; 1871 hwrena |= 0x0000000f;
@@ -1912,11 +1875,10 @@ void per_cpu_trap_init(bool is_boot_cpu)
1912 1875
1913 if (hwrena) 1876 if (hwrena)
1914 write_c0_hwrena(hwrena); 1877 write_c0_hwrena(hwrena);
1878}
1915 1879
1916#ifdef CONFIG_MIPS_MT_SMTC 1880static void configure_exception_vector(void)
1917 if (!secondaryTC) { 1881{
1918#endif /* CONFIG_MIPS_MT_SMTC */
1919
1920 if (cpu_has_veic || cpu_has_vint) { 1882 if (cpu_has_veic || cpu_has_vint) {
1921 unsigned long sr = set_c0_status(ST0_BEV); 1883 unsigned long sr = set_c0_status(ST0_BEV);
1922 write_c0_ebase(ebase); 1884 write_c0_ebase(ebase);
@@ -1932,6 +1894,16 @@ void per_cpu_trap_init(bool is_boot_cpu)
1932 } else 1894 } else
1933 set_c0_cause(CAUSEF_IV); 1895 set_c0_cause(CAUSEF_IV);
1934 } 1896 }
1897}
1898
1899void per_cpu_trap_init(bool is_boot_cpu)
1900{
1901 unsigned int cpu = smp_processor_id();
1902
1903 configure_status();
1904 configure_hwrena();
1905
1906 configure_exception_vector();
1935 1907
1936 /* 1908 /*
1937 * Before R2 both interrupt numbers were fixed to 7, so on R2 only: 1909 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
@@ -1951,10 +1923,6 @@ void per_cpu_trap_init(bool is_boot_cpu)
1951 cp0_perfcount_irq = -1; 1923 cp0_perfcount_irq = -1;
1952 } 1924 }
1953 1925
1954#ifdef CONFIG_MIPS_MT_SMTC
1955 }
1956#endif /* CONFIG_MIPS_MT_SMTC */
1957
1958 if (!cpu_data[cpu].asid_cache) 1926 if (!cpu_data[cpu].asid_cache)
1959 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; 1927 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
1960 1928
@@ -1963,23 +1931,10 @@ void per_cpu_trap_init(bool is_boot_cpu)
1963 BUG_ON(current->mm); 1931 BUG_ON(current->mm);
1964 enter_lazy_tlb(&init_mm, current); 1932 enter_lazy_tlb(&init_mm, current);
1965 1933
1966#ifdef CONFIG_MIPS_MT_SMTC
1967 if (bootTC) {
1968#endif /* CONFIG_MIPS_MT_SMTC */
1969 /* Boot CPU's cache setup in setup_arch(). */ 1934 /* Boot CPU's cache setup in setup_arch(). */
1970 if (!is_boot_cpu) 1935 if (!is_boot_cpu)
1971 cpu_cache_init(); 1936 cpu_cache_init();
1972 tlb_init(); 1937 tlb_init();
1973#ifdef CONFIG_MIPS_MT_SMTC
1974 } else if (!secondaryTC) {
1975 /*
1976 * First TC in non-boot VPE must do subset of tlb_init()
1977 * for MMU countrol registers.
1978 */
1979 write_c0_pagemask(PM_DEFAULT_MASK);
1980 write_c0_wired(0);
1981 }
1982#endif /* CONFIG_MIPS_MT_SMTC */
1983 TLBMISS_HANDLER_SETUP(); 1938 TLBMISS_HANDLER_SETUP();
1984} 1939}
1985 1940
@@ -2187,3 +2142,32 @@ void __init trap_init(void)
2187 2142
2188 cu2_notifier(default_cu2_call, 0x80000000); /* Run last */ 2143 cu2_notifier(default_cu2_call, 0x80000000); /* Run last */
2189} 2144}
2145
2146static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
2147 void *v)
2148{
2149 switch (cmd) {
2150 case CPU_PM_ENTER_FAILED:
2151 case CPU_PM_EXIT:
2152 configure_status();
2153 configure_hwrena();
2154 configure_exception_vector();
2155
2156 /* Restore register with CPU number for TLB handlers */
2157 TLBMISS_HANDLER_RESTORE();
2158
2159 break;
2160 }
2161
2162 return NOTIFY_OK;
2163}
2164
2165static struct notifier_block trap_pm_notifier_block = {
2166 .notifier_call = trap_pm_notifier,
2167};
2168
2169static int __init trap_pm_init(void)
2170{
2171 return cpu_pm_register_notifier(&trap_pm_notifier_block);
2172}
2173arch_initcall(trap_pm_init);
diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c
index 949ae0e17018..2e003b11a098 100644
--- a/arch/mips/kernel/vpe-mt.c
+++ b/arch/mips/kernel/vpe-mt.c
@@ -127,9 +127,8 @@ int vpe_run(struct vpe *v)
127 clear_c0_mvpcontrol(MVPCONTROL_VPC); 127 clear_c0_mvpcontrol(MVPCONTROL_VPC);
128 128
129 /* 129 /*
130 * SMTC/SMVP kernels manage VPE enable independently, 130 * SMVP kernels manage VPE enable independently, but uniprocessor
131 * but uniprocessor kernels need to turn it on, even 131 * kernels need to turn it on, even if that wasn't the pre-dvpe() state.
132 * if that wasn't the pre-dvpe() state.
133 */ 132 */
134#ifdef CONFIG_SMP 133#ifdef CONFIG_SMP
135 evpe(vpeflags); 134 evpe(vpeflags);
@@ -454,12 +453,11 @@ int __init vpe_module_init(void)
454 453
455 settc(tc); 454 settc(tc);
456 455
457 /* Any TC that is bound to VPE0 gets left as is - in 456 /*
458 * case we are running SMTC on VPE0. A TC that is bound 457 * A TC that is bound to any other VPE gets bound to
459 * to any other VPE gets bound to VPE0, ideally I'd like 458 * VPE0, ideally I'd like to make it homeless but it
460 * to make it homeless but it doesn't appear to let me 459 * doesn't appear to let me bind a TC to a non-existent
461 * bind a TC to a non-existent VPE. Which is perfectly 460 * VPE. Which is perfectly reasonable.
462 * reasonable.
463 * 461 *
464 * The (un)bound state is visible to an EJTAG probe so 462 * The (un)bound state is visible to an EJTAG probe so
465 * may notify GDB... 463 * may notify GDB...
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 85685e1cdb89..030568a70ac4 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -61,7 +61,7 @@
61/* we have a cascade of 8 irqs */ 61/* we have a cascade of 8 irqs */
62#define MIPS_CPU_IRQ_CASCADE 8 62#define MIPS_CPU_IRQ_CASCADE 8
63 63
64#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) 64#ifdef CONFIG_MIPS_MT_SMP
65int gic_present; 65int gic_present;
66#endif 66#endif
67 67
@@ -440,7 +440,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
440 arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ, &irq_call); 440 arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ, &irq_call);
441#endif 441#endif
442 442
443#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) 443#ifndef CONFIG_MIPS_MT_SMP
444 set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | 444 set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
445 IE_IRQ3 | IE_IRQ4 | IE_IRQ5); 445 IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
446#else 446#else
diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c
index 705cfb7c1a74..21d27c6819a2 100644
--- a/arch/mips/lib/delay.c
+++ b/arch/mips/lib/delay.c
@@ -11,7 +11,9 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/param.h> 12#include <linux/param.h>
13#include <linux/smp.h> 13#include <linux/smp.h>
14#include <linux/stringify.h>
14 15
16#include <asm/asm.h>
15#include <asm/compiler.h> 17#include <asm/compiler.h>
16#include <asm/war.h> 18#include <asm/war.h>
17 19
@@ -27,11 +29,7 @@ void __delay(unsigned long loops)
27 " .set noreorder \n" 29 " .set noreorder \n"
28 " .align 3 \n" 30 " .align 3 \n"
29 "1: bnez %0, 1b \n" 31 "1: bnez %0, 1b \n"
30#if BITS_PER_LONG == 32 32 " " __stringify(LONG_SUBU) " %0, %1 \n"
31 " subu %0, %1 \n"
32#else
33 " dsubu %0, %1 \n"
34#endif
35 " .set reorder \n" 33 " .set reorder \n"
36 : "=r" (loops) 34 : "=r" (loops)
37 : GCC_DADDI_IMM_ASM() (1), "0" (loops)); 35 : GCC_DADDI_IMM_ASM() (1), "0" (loops));
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
index 6807f7172eaf..57bcdaf1f1c8 100644
--- a/arch/mips/lib/mips-atomic.c
+++ b/arch/mips/lib/mips-atomic.c
@@ -15,7 +15,7 @@
15#include <linux/export.h> 15#include <linux/export.h>
16#include <linux/stringify.h> 16#include <linux/stringify.h>
17 17
18#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) 18#ifndef CONFIG_CPU_MIPSR2
19 19
20/* 20/*
21 * For cli() we have to insert nops to make sure that the new value 21 * For cli() we have to insert nops to make sure that the new value
@@ -42,12 +42,7 @@ notrace void arch_local_irq_disable(void)
42 __asm__ __volatile__( 42 __asm__ __volatile__(
43 " .set push \n" 43 " .set push \n"
44 " .set noat \n" 44 " .set noat \n"
45#ifdef CONFIG_MIPS_MT_SMTC 45#if defined(CONFIG_CPU_MIPSR2)
46 " mfc0 $1, $2, 1 \n"
47 " ori $1, 0x400 \n"
48 " .set noreorder \n"
49 " mtc0 $1, $2, 1 \n"
50#elif defined(CONFIG_CPU_MIPSR2)
51 /* see irqflags.h for inline function */ 46 /* see irqflags.h for inline function */
52#else 47#else
53 " mfc0 $1,$12 \n" 48 " mfc0 $1,$12 \n"
@@ -77,13 +72,7 @@ notrace unsigned long arch_local_irq_save(void)
77 " .set push \n" 72 " .set push \n"
78 " .set reorder \n" 73 " .set reorder \n"
79 " .set noat \n" 74 " .set noat \n"
80#ifdef CONFIG_MIPS_MT_SMTC 75#if defined(CONFIG_CPU_MIPSR2)
81 " mfc0 %[flags], $2, 1 \n"
82 " ori $1, %[flags], 0x400 \n"
83 " .set noreorder \n"
84 " mtc0 $1, $2, 1 \n"
85 " andi %[flags], %[flags], 0x400 \n"
86#elif defined(CONFIG_CPU_MIPSR2)
87 /* see irqflags.h for inline function */ 76 /* see irqflags.h for inline function */
88#else 77#else
89 " mfc0 %[flags], $12 \n" 78 " mfc0 %[flags], $12 \n"
@@ -108,29 +97,13 @@ notrace void arch_local_irq_restore(unsigned long flags)
108{ 97{
109 unsigned long __tmp1; 98 unsigned long __tmp1;
110 99
111#ifdef CONFIG_MIPS_MT_SMTC
112 /*
113 * SMTC kernel needs to do a software replay of queued
114 * IPIs, at the cost of branch and call overhead on each
115 * local_irq_restore()
116 */
117 if (unlikely(!(flags & 0x0400)))
118 smtc_ipi_replay();
119#endif
120 preempt_disable(); 100 preempt_disable();
121 101
122 __asm__ __volatile__( 102 __asm__ __volatile__(
123 " .set push \n" 103 " .set push \n"
124 " .set noreorder \n" 104 " .set noreorder \n"
125 " .set noat \n" 105 " .set noat \n"
126#ifdef CONFIG_MIPS_MT_SMTC 106#if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
127 " mfc0 $1, $2, 1 \n"
128 " andi %[flags], 0x400 \n"
129 " ori $1, 0x400 \n"
130 " xori $1, 0x400 \n"
131 " or %[flags], $1 \n"
132 " mtc0 %[flags], $2, 1 \n"
133#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
134 /* see irqflags.h for inline function */ 107 /* see irqflags.h for inline function */
135#elif defined(CONFIG_CPU_MIPSR2) 108#elif defined(CONFIG_CPU_MIPSR2)
136 /* see irqflags.h for inline function */ 109 /* see irqflags.h for inline function */
@@ -163,14 +136,7 @@ notrace void __arch_local_irq_restore(unsigned long flags)
163 " .set push \n" 136 " .set push \n"
164 " .set noreorder \n" 137 " .set noreorder \n"
165 " .set noat \n" 138 " .set noat \n"
166#ifdef CONFIG_MIPS_MT_SMTC 139#if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
167 " mfc0 $1, $2, 1 \n"
168 " andi %[flags], 0x400 \n"
169 " ori $1, 0x400 \n"
170 " xori $1, 0x400 \n"
171 " or %[flags], $1 \n"
172 " mtc0 %[flags], $2, 1 \n"
173#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
174 /* see irqflags.h for inline function */ 140 /* see irqflags.h for inline function */
175#elif defined(CONFIG_CPU_MIPSR2) 141#elif defined(CONFIG_CPU_MIPSR2)
176 /* see irqflags.h for inline function */ 142 /* see irqflags.h for inline function */
@@ -192,4 +158,4 @@ notrace void __arch_local_irq_restore(unsigned long flags)
192} 158}
193EXPORT_SYMBOL(__arch_local_irq_restore); 159EXPORT_SYMBOL(__arch_local_irq_restore);
194 160
195#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */ 161#endif /* !CONFIG_CPU_MIPSR2 */
diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig
index 603d79a95f47..e6a86ccc4421 100644
--- a/arch/mips/loongson/Kconfig
+++ b/arch/mips/loongson/Kconfig
@@ -95,10 +95,11 @@ config CS5536
95 95
96config CS5536_MFGPT 96config CS5536_MFGPT
97 bool "CS5536 MFGPT Timer" 97 bool "CS5536 MFGPT Timer"
98 depends on CS5536 98 depends on CS5536 && !HIGH_RES_TIMERS
99 select MIPS_EXTERNAL_TIMER 99 select MIPS_EXTERNAL_TIMER
100 help 100 help
101 This option enables the mfgpt0 timer of AMD CS5536. 101 This option enables the mfgpt0 timer of AMD CS5536. With this timer
102 switched on you can not use high resolution timers.
102 103
103 If you want to enable the Loongson2 CPUFreq Driver, Please enable 104 If you want to enable the Loongson2 CPUFreq Driver, Please enable
104 this option at first, otherwise, You will get wrong system time. 105 this option at first, otherwise, You will get wrong system time.
diff --git a/arch/mips/loongson/loongson-3/smp.c b/arch/mips/loongson/loongson-3/smp.c
index c665fe16d4c9..1e8894020ea5 100644
--- a/arch/mips/loongson/loongson-3/smp.c
+++ b/arch/mips/loongson/loongson-3/smp.c
@@ -279,13 +279,6 @@ static void loongson3_boot_secondary(int cpu, struct task_struct *idle)
279 loongson3_ipi_write64(startargs[0], (void *)(ipi_mailbox_buf[cpu]+0x0)); 279 loongson3_ipi_write64(startargs[0], (void *)(ipi_mailbox_buf[cpu]+0x0));
280} 280}
281 281
282/*
283 * Final cleanup after all secondaries booted
284 */
285static void __init loongson3_cpus_done(void)
286{
287}
288
289#ifdef CONFIG_HOTPLUG_CPU 282#ifdef CONFIG_HOTPLUG_CPU
290 283
291static int loongson3_cpu_disable(void) 284static int loongson3_cpu_disable(void)
@@ -432,7 +425,6 @@ struct plat_smp_ops loongson3_smp_ops = {
432 .send_ipi_mask = loongson3_send_ipi_mask, 425 .send_ipi_mask = loongson3_send_ipi_mask,
433 .init_secondary = loongson3_init_secondary, 426 .init_secondary = loongson3_init_secondary,
434 .smp_finish = loongson3_smp_finish, 427 .smp_finish = loongson3_smp_finish,
435 .cpus_done = loongson3_cpus_done,
436 .boot_secondary = loongson3_boot_secondary, 428 .boot_secondary = loongson3_boot_secondary,
437 .smp_setup = loongson3_smp_setup, 429 .smp_setup = loongson3_smp_setup,
438 .prepare_cpus = loongson3_prepare_cpus, 430 .prepare_cpus = loongson3_prepare_cpus,
diff --git a/arch/mips/loongson1/Kconfig b/arch/mips/loongson1/Kconfig
index fbf75f635798..e23c25d09963 100644
--- a/arch/mips/loongson1/Kconfig
+++ b/arch/mips/loongson1/Kconfig
@@ -14,6 +14,7 @@ config LOONGSON1_LS1B
14 select SYS_SUPPORTS_32BIT_KERNEL 14 select SYS_SUPPORTS_32BIT_KERNEL
15 select SYS_SUPPORTS_LITTLE_ENDIAN 15 select SYS_SUPPORTS_LITTLE_ENDIAN
16 select SYS_SUPPORTS_HIGHMEM 16 select SYS_SUPPORTS_HIGHMEM
17 select SYS_SUPPORTS_MIPS16
17 select SYS_HAS_EARLY_PRINTK 18 select SYS_HAS_EARLY_PRINTK
18 select COMMON_CLK 19 select COMMON_CLK
19 20
diff --git a/arch/mips/math-emu/Makefile b/arch/mips/math-emu/Makefile
index 121a848a3594..619cfc1a2442 100644
--- a/arch/mips/math-emu/Makefile
+++ b/arch/mips/math-emu/Makefile
@@ -2,10 +2,12 @@
2# Makefile for the Linux/MIPS kernel FPU emulation. 2# Makefile for the Linux/MIPS kernel FPU emulation.
3# 3#
4 4
5obj-y := cp1emu.o ieee754m.o ieee754d.o ieee754dp.o ieee754sp.o ieee754.o \ 5obj-y += cp1emu.o ieee754dp.o ieee754sp.o ieee754.o dp_div.o dp_mul.o \
6 ieee754xcpt.o dp_frexp.o dp_modf.o dp_div.o dp_mul.o dp_sub.o \ 6 dp_sub.o dp_add.o dp_fsp.o dp_cmp.o dp_simple.o dp_tint.o \
7 dp_add.o dp_fsp.o dp_cmp.o dp_logb.o dp_scalb.o dp_simple.o \ 7 dp_fint.o dp_tlong.o dp_flong.o sp_div.o sp_mul.o sp_sub.o \
8 dp_tint.o dp_fint.o dp_tlong.o dp_flong.o sp_frexp.o sp_modf.o \ 8 sp_add.o sp_fdp.o sp_cmp.o sp_simple.o sp_tint.o sp_fint.o \
9 sp_div.o sp_mul.o sp_sub.o sp_add.o sp_fdp.o sp_cmp.o sp_logb.o \ 9 sp_tlong.o sp_flong.o dsemul.o
10 sp_scalb.o sp_simple.o sp_tint.o sp_fint.o sp_tlong.o sp_flong.o \ 10
11 dp_sqrt.o sp_sqrt.o kernel_linkage.o dsemul.o 11lib-y += ieee754d.o dp_sqrt.o sp_sqrt.o
12
13obj-$(CONFIG_DEBUG_FS) += me-debugfs.o
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 7b3c9acae689..736c17a226e9 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * cp1emu.c: a MIPS coprocessor 1 (fpu) instruction emulator 2 * cp1emu.c: a MIPS coprocessor 1 (FPU) instruction emulator
3 * 3 *
4 * MIPS floating point support 4 * MIPS floating point support
5 * Copyright (C) 1994-2000 Algorithmics Ltd. 5 * Copyright (C) 1994-2000 Algorithmics Ltd.
@@ -18,61 +18,46 @@
18 * 18 *
19 * You should have received a copy of the GNU General Public License along 19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 21 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 * 22 *
23 * A complete emulator for MIPS coprocessor 1 instructions. This is 23 * A complete emulator for MIPS coprocessor 1 instructions. This is
24 * required for #float(switch) or #float(trap), where it catches all 24 * required for #float(switch) or #float(trap), where it catches all
25 * COP1 instructions via the "CoProcessor Unusable" exception. 25 * COP1 instructions via the "CoProcessor Unusable" exception.
26 * 26 *
27 * More surprisingly it is also required for #float(ieee), to help out 27 * More surprisingly it is also required for #float(ieee), to help out
28 * the hardware fpu at the boundaries of the IEEE-754 representation 28 * the hardware FPU at the boundaries of the IEEE-754 representation
29 * (denormalised values, infinities, underflow, etc). It is made 29 * (denormalised values, infinities, underflow, etc). It is made
30 * quite nasty because emulation of some non-COP1 instructions is 30 * quite nasty because emulation of some non-COP1 instructions is
31 * required, e.g. in branch delay slots. 31 * required, e.g. in branch delay slots.
32 * 32 *
33 * Note if you know that you won't have an fpu, then you'll get much 33 * Note if you know that you won't have an FPU, then you'll get much
34 * better performance by compiling with -msoft-float! 34 * better performance by compiling with -msoft-float!
35 */ 35 */
36#include <linux/sched.h> 36#include <linux/sched.h>
37#include <linux/module.h>
38#include <linux/debugfs.h> 37#include <linux/debugfs.h>
38#include <linux/kconfig.h>
39#include <linux/percpu-defs.h>
39#include <linux/perf_event.h> 40#include <linux/perf_event.h>
40 41
42#include <asm/branch.h>
41#include <asm/inst.h> 43#include <asm/inst.h>
42#include <asm/bootinfo.h>
43#include <asm/processor.h>
44#include <asm/ptrace.h> 44#include <asm/ptrace.h>
45#include <asm/signal.h> 45#include <asm/signal.h>
46#include <asm/mipsregs.h> 46#include <asm/uaccess.h>
47
48#include <asm/processor.h>
47#include <asm/fpu_emulator.h> 49#include <asm/fpu_emulator.h>
48#include <asm/fpu.h> 50#include <asm/fpu.h>
49#include <asm/uaccess.h>
50#include <asm/branch.h>
51 51
52#include "ieee754.h" 52#include "ieee754.h"
53 53
54/* Strap kernel emulator for full MIPS IV emulation */
55
56#ifdef __mips
57#undef __mips
58#endif
59#define __mips 4
60
61/* Function which emulates a floating point instruction. */ 54/* Function which emulates a floating point instruction. */
62 55
63static int fpu_emu(struct pt_regs *, struct mips_fpu_struct *, 56static int fpu_emu(struct pt_regs *, struct mips_fpu_struct *,
64 mips_instruction); 57 mips_instruction);
65 58
66#if __mips >= 4 && __mips != 32
67static int fpux_emu(struct pt_regs *, 59static int fpux_emu(struct pt_regs *,
68 struct mips_fpu_struct *, mips_instruction, void *__user *); 60 struct mips_fpu_struct *, mips_instruction, void *__user *);
69#endif
70
71/* Further private data for which no space exists in mips_fpu_struct */
72
73#ifdef CONFIG_DEBUG_FS
74DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
75#endif
76 61
77/* Control registers */ 62/* Control registers */
78 63
@@ -82,27 +67,6 @@ DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
82/* Determine rounding mode from the RM bits of the FCSR */ 67/* Determine rounding mode from the RM bits of the FCSR */
83#define modeindex(v) ((v) & FPU_CSR_RM) 68#define modeindex(v) ((v) & FPU_CSR_RM)
84 69
85/* microMIPS bitfields */
86#define MM_POOL32A_MINOR_MASK 0x3f
87#define MM_POOL32A_MINOR_SHIFT 0x6
88#define MM_MIPS32_COND_FC 0x30
89
90/* Convert Mips rounding mode (0..3) to IEEE library modes. */
91static const unsigned char ieee_rm[4] = {
92 [FPU_CSR_RN] = IEEE754_RN,
93 [FPU_CSR_RZ] = IEEE754_RZ,
94 [FPU_CSR_RU] = IEEE754_RU,
95 [FPU_CSR_RD] = IEEE754_RD,
96};
97/* Convert IEEE library modes to Mips rounding mode (0..3). */
98static const unsigned char mips_rm[4] = {
99 [IEEE754_RN] = FPU_CSR_RN,
100 [IEEE754_RZ] = FPU_CSR_RZ,
101 [IEEE754_RD] = FPU_CSR_RD,
102 [IEEE754_RU] = FPU_CSR_RU,
103};
104
105#if __mips >= 4
106/* convert condition code register number to csr bit */ 70/* convert condition code register number to csr bit */
107static const unsigned int fpucondbit[8] = { 71static const unsigned int fpucondbit[8] = {
108 FPU_CSR_COND0, 72 FPU_CSR_COND0,
@@ -114,10 +78,6 @@ static const unsigned int fpucondbit[8] = {
114 FPU_CSR_COND6, 78 FPU_CSR_COND6,
115 FPU_CSR_COND7 79 FPU_CSR_COND7
116}; 80};
117#endif
118
119/* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. */
120static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7};
121 81
122/* (microMIPS) Convert certain microMIPS instructions to MIPS32 format. */ 82/* (microMIPS) Convert certain microMIPS instructions to MIPS32 format. */
123static const int sd_format[] = {16, 17, 0, 0, 0, 0, 0, 0}; 83static const int sd_format[] = {16, 17, 0, 0, 0, 0, 0, 0};
@@ -466,199 +426,6 @@ static int microMIPS32_to_MIPS32(union mips_instruction *insn_ptr)
466 return 0; 426 return 0;
467} 427}
468 428
469int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
470 unsigned long *contpc)
471{
472 union mips_instruction insn = (union mips_instruction)dec_insn.insn;
473 int bc_false = 0;
474 unsigned int fcr31;
475 unsigned int bit;
476
477 if (!cpu_has_mmips)
478 return 0;
479
480 switch (insn.mm_i_format.opcode) {
481 case mm_pool32a_op:
482 if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) ==
483 mm_pool32axf_op) {
484 switch (insn.mm_i_format.simmediate >>
485 MM_POOL32A_MINOR_SHIFT) {
486 case mm_jalr_op:
487 case mm_jalrhb_op:
488 case mm_jalrs_op:
489 case mm_jalrshb_op:
490 if (insn.mm_i_format.rt != 0) /* Not mm_jr */
491 regs->regs[insn.mm_i_format.rt] =
492 regs->cp0_epc +
493 dec_insn.pc_inc +
494 dec_insn.next_pc_inc;
495 *contpc = regs->regs[insn.mm_i_format.rs];
496 return 1;
497 }
498 }
499 break;
500 case mm_pool32i_op:
501 switch (insn.mm_i_format.rt) {
502 case mm_bltzals_op:
503 case mm_bltzal_op:
504 regs->regs[31] = regs->cp0_epc +
505 dec_insn.pc_inc +
506 dec_insn.next_pc_inc;
507 /* Fall through */
508 case mm_bltz_op:
509 if ((long)regs->regs[insn.mm_i_format.rs] < 0)
510 *contpc = regs->cp0_epc +
511 dec_insn.pc_inc +
512 (insn.mm_i_format.simmediate << 1);
513 else
514 *contpc = regs->cp0_epc +
515 dec_insn.pc_inc +
516 dec_insn.next_pc_inc;
517 return 1;
518 case mm_bgezals_op:
519 case mm_bgezal_op:
520 regs->regs[31] = regs->cp0_epc +
521 dec_insn.pc_inc +
522 dec_insn.next_pc_inc;
523 /* Fall through */
524 case mm_bgez_op:
525 if ((long)regs->regs[insn.mm_i_format.rs] >= 0)
526 *contpc = regs->cp0_epc +
527 dec_insn.pc_inc +
528 (insn.mm_i_format.simmediate << 1);
529 else
530 *contpc = regs->cp0_epc +
531 dec_insn.pc_inc +
532 dec_insn.next_pc_inc;
533 return 1;
534 case mm_blez_op:
535 if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
536 *contpc = regs->cp0_epc +
537 dec_insn.pc_inc +
538 (insn.mm_i_format.simmediate << 1);
539 else
540 *contpc = regs->cp0_epc +
541 dec_insn.pc_inc +
542 dec_insn.next_pc_inc;
543 return 1;
544 case mm_bgtz_op:
545 if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
546 *contpc = regs->cp0_epc +
547 dec_insn.pc_inc +
548 (insn.mm_i_format.simmediate << 1);
549 else
550 *contpc = regs->cp0_epc +
551 dec_insn.pc_inc +
552 dec_insn.next_pc_inc;
553 return 1;
554 case mm_bc2f_op:
555 case mm_bc1f_op:
556 bc_false = 1;
557 /* Fall through */
558 case mm_bc2t_op:
559 case mm_bc1t_op:
560 preempt_disable();
561 if (is_fpu_owner())
562 asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
563 else
564 fcr31 = current->thread.fpu.fcr31;
565 preempt_enable();
566
567 if (bc_false)
568 fcr31 = ~fcr31;
569
570 bit = (insn.mm_i_format.rs >> 2);
571 bit += (bit != 0);
572 bit += 23;
573 if (fcr31 & (1 << bit))
574 *contpc = regs->cp0_epc +
575 dec_insn.pc_inc +
576 (insn.mm_i_format.simmediate << 1);
577 else
578 *contpc = regs->cp0_epc +
579 dec_insn.pc_inc + dec_insn.next_pc_inc;
580 return 1;
581 }
582 break;
583 case mm_pool16c_op:
584 switch (insn.mm_i_format.rt) {
585 case mm_jalr16_op:
586 case mm_jalrs16_op:
587 regs->regs[31] = regs->cp0_epc +
588 dec_insn.pc_inc + dec_insn.next_pc_inc;
589 /* Fall through */
590 case mm_jr16_op:
591 *contpc = regs->regs[insn.mm_i_format.rs];
592 return 1;
593 }
594 break;
595 case mm_beqz16_op:
596 if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0)
597 *contpc = regs->cp0_epc +
598 dec_insn.pc_inc +
599 (insn.mm_b1_format.simmediate << 1);
600 else
601 *contpc = regs->cp0_epc +
602 dec_insn.pc_inc + dec_insn.next_pc_inc;
603 return 1;
604 case mm_bnez16_op:
605 if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0)
606 *contpc = regs->cp0_epc +
607 dec_insn.pc_inc +
608 (insn.mm_b1_format.simmediate << 1);
609 else
610 *contpc = regs->cp0_epc +
611 dec_insn.pc_inc + dec_insn.next_pc_inc;
612 return 1;
613 case mm_b16_op:
614 *contpc = regs->cp0_epc + dec_insn.pc_inc +
615 (insn.mm_b0_format.simmediate << 1);
616 return 1;
617 case mm_beq32_op:
618 if (regs->regs[insn.mm_i_format.rs] ==
619 regs->regs[insn.mm_i_format.rt])
620 *contpc = regs->cp0_epc +
621 dec_insn.pc_inc +
622 (insn.mm_i_format.simmediate << 1);
623 else
624 *contpc = regs->cp0_epc +
625 dec_insn.pc_inc +
626 dec_insn.next_pc_inc;
627 return 1;
628 case mm_bne32_op:
629 if (regs->regs[insn.mm_i_format.rs] !=
630 regs->regs[insn.mm_i_format.rt])
631 *contpc = regs->cp0_epc +
632 dec_insn.pc_inc +
633 (insn.mm_i_format.simmediate << 1);
634 else
635 *contpc = regs->cp0_epc +
636 dec_insn.pc_inc + dec_insn.next_pc_inc;
637 return 1;
638 case mm_jalx32_op:
639 regs->regs[31] = regs->cp0_epc +
640 dec_insn.pc_inc + dec_insn.next_pc_inc;
641 *contpc = regs->cp0_epc + dec_insn.pc_inc;
642 *contpc >>= 28;
643 *contpc <<= 28;
644 *contpc |= (insn.j_format.target << 2);
645 return 1;
646 case mm_jals32_op:
647 case mm_jal32_op:
648 regs->regs[31] = regs->cp0_epc +
649 dec_insn.pc_inc + dec_insn.next_pc_inc;
650 /* Fall through */
651 case mm_j32_op:
652 *contpc = regs->cp0_epc + dec_insn.pc_inc;
653 *contpc >>= 27;
654 *contpc <<= 27;
655 *contpc |= (insn.j_format.target << 1);
656 set_isa16_mode(*contpc);
657 return 1;
658 }
659 return 0;
660}
661
662/* 429/*
663 * Redundant with logic already in kernel/branch.c, 430 * Redundant with logic already in kernel/branch.c,
664 * embedded in compute_return_epc. At some point, 431 * embedded in compute_return_epc. At some point,
@@ -817,7 +584,11 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
817 if (insn.i_format.rs == bc_op) { 584 if (insn.i_format.rs == bc_op) {
818 preempt_disable(); 585 preempt_disable();
819 if (is_fpu_owner()) 586 if (is_fpu_owner())
820 asm volatile("cfc1\t%0,$31" : "=r" (fcr31)); 587 asm volatile(
588 ".set push\n"
589 "\t.set mips1\n"
590 "\tcfc1\t%0,$31\n"
591 "\t.set pop" : "=r" (fcr31));
821 else 592 else
822 fcr31 = current->thread.fpu.fcr31; 593 fcr31 = current->thread.fpu.fcr31;
823 preempt_enable(); 594 preempt_enable();
@@ -867,23 +638,25 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
867 */ 638 */
868static inline int cop1_64bit(struct pt_regs *xcp) 639static inline int cop1_64bit(struct pt_regs *xcp)
869{ 640{
870#if defined(CONFIG_64BIT) && !defined(CONFIG_MIPS32_O32) 641 if (config_enabled(CONFIG_64BIT) && !config_enabled(CONFIG_MIPS32_O32))
871 return 1; 642 return 1;
872#elif defined(CONFIG_32BIT) && !defined(CONFIG_MIPS_O32_FP64_SUPPORT) 643 else if (config_enabled(CONFIG_32BIT) &&
873 return 0; 644 !config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
874#else 645 return 0;
646
875 return !test_thread_flag(TIF_32BIT_FPREGS); 647 return !test_thread_flag(TIF_32BIT_FPREGS);
876#endif
877} 648}
878 649
879#define SIFROMREG(si, x) do { \ 650#define SIFROMREG(si, x) \
651do { \
880 if (cop1_64bit(xcp)) \ 652 if (cop1_64bit(xcp)) \
881 (si) = get_fpr32(&ctx->fpr[x], 0); \ 653 (si) = get_fpr32(&ctx->fpr[x], 0); \
882 else \ 654 else \
883 (si) = get_fpr32(&ctx->fpr[(x) & ~1], (x) & 1); \ 655 (si) = get_fpr32(&ctx->fpr[(x) & ~1], (x) & 1); \
884} while (0) 656} while (0)
885 657
886#define SITOREG(si, x) do { \ 658#define SITOREG(si, x) \
659do { \
887 if (cop1_64bit(xcp)) { \ 660 if (cop1_64bit(xcp)) { \
888 unsigned i; \ 661 unsigned i; \
889 set_fpr32(&ctx->fpr[x], 0, si); \ 662 set_fpr32(&ctx->fpr[x], 0, si); \
@@ -896,17 +669,19 @@ static inline int cop1_64bit(struct pt_regs *xcp)
896 669
897#define SIFROMHREG(si, x) ((si) = get_fpr32(&ctx->fpr[x], 1)) 670#define SIFROMHREG(si, x) ((si) = get_fpr32(&ctx->fpr[x], 1))
898 671
899#define SITOHREG(si, x) do { \ 672#define SITOHREG(si, x) \
673do { \
900 unsigned i; \ 674 unsigned i; \
901 set_fpr32(&ctx->fpr[x], 1, si); \ 675 set_fpr32(&ctx->fpr[x], 1, si); \
902 for (i = 2; i < ARRAY_SIZE(ctx->fpr[x].val32); i++) \ 676 for (i = 2; i < ARRAY_SIZE(ctx->fpr[x].val32); i++) \
903 set_fpr32(&ctx->fpr[x], i, 0); \ 677 set_fpr32(&ctx->fpr[x], i, 0); \
904} while (0) 678} while (0)
905 679
906#define DIFROMREG(di, x) \ 680#define DIFROMREG(di, x) \
907 ((di) = get_fpr64(&ctx->fpr[(x) & ~(cop1_64bit(xcp) == 0)], 0)) 681 ((di) = get_fpr64(&ctx->fpr[(x) & ~(cop1_64bit(xcp) == 0)], 0))
908 682
909#define DITOREG(di, x) do { \ 683#define DITOREG(di, x) \
684do { \
910 unsigned fpr, i; \ 685 unsigned fpr, i; \
911 fpr = (x) & ~(cop1_64bit(xcp) == 0); \ 686 fpr = (x) & ~(cop1_64bit(xcp) == 0); \
912 set_fpr64(&ctx->fpr[fpr], 0, di); \ 687 set_fpr64(&ctx->fpr[fpr], 0, di); \
@@ -927,23 +702,36 @@ static inline int cop1_64bit(struct pt_regs *xcp)
927static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, 702static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
928 struct mm_decoded_insn dec_insn, void *__user *fault_addr) 703 struct mm_decoded_insn dec_insn, void *__user *fault_addr)
929{ 704{
930 mips_instruction ir;
931 unsigned long contpc = xcp->cp0_epc + dec_insn.pc_inc; 705 unsigned long contpc = xcp->cp0_epc + dec_insn.pc_inc;
932 unsigned int cond; 706 unsigned int cond, cbit;
933 int pc_inc; 707 mips_instruction ir;
708 int likely, pc_inc;
709 u32 __user *wva;
710 u64 __user *dva;
711 u32 value;
712 u32 wval;
713 u64 dval;
714 int sig;
715
716 /*
717 * These are giving gcc a gentle hint about what to expect in
718 * dec_inst in order to do better optimization.
719 */
720 if (!cpu_has_mmips && dec_insn.micro_mips_mode)
721 unreachable();
934 722
935 /* XXX NEC Vr54xx bug workaround */ 723 /* XXX NEC Vr54xx bug workaround */
936 if (xcp->cp0_cause & CAUSEF_BD) { 724 if (delay_slot(xcp)) {
937 if (dec_insn.micro_mips_mode) { 725 if (dec_insn.micro_mips_mode) {
938 if (!mm_isBranchInstr(xcp, dec_insn, &contpc)) 726 if (!mm_isBranchInstr(xcp, dec_insn, &contpc))
939 xcp->cp0_cause &= ~CAUSEF_BD; 727 clear_delay_slot(xcp);
940 } else { 728 } else {
941 if (!isBranchInstr(xcp, dec_insn, &contpc)) 729 if (!isBranchInstr(xcp, dec_insn, &contpc))
942 xcp->cp0_cause &= ~CAUSEF_BD; 730 clear_delay_slot(xcp);
943 } 731 }
944 } 732 }
945 733
946 if (xcp->cp0_cause & CAUSEF_BD) { 734 if (delay_slot(xcp)) {
947 /* 735 /*
948 * The instruction to be emulated is in a branch delay slot 736 * The instruction to be emulated is in a branch delay slot
949 * which means that we have to emulate the branch instruction 737 * which means that we have to emulate the branch instruction
@@ -985,96 +773,85 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
985 return SIGILL; 773 return SIGILL;
986 } 774 }
987 775
988 emul: 776emul:
989 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, xcp, 0); 777 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, xcp, 0);
990 MIPS_FPU_EMU_INC_STATS(emulated); 778 MIPS_FPU_EMU_INC_STATS(emulated);
991 switch (MIPSInst_OPCODE(ir)) { 779 switch (MIPSInst_OPCODE(ir)) {
992 case ldc1_op:{ 780 case ldc1_op:
993 u64 __user *va = (u64 __user *) (xcp->regs[MIPSInst_RS(ir)] + 781 dva = (u64 __user *) (xcp->regs[MIPSInst_RS(ir)] +
994 MIPSInst_SIMM(ir)); 782 MIPSInst_SIMM(ir));
995 u64 val;
996
997 MIPS_FPU_EMU_INC_STATS(loads); 783 MIPS_FPU_EMU_INC_STATS(loads);
998 784
999 if (!access_ok(VERIFY_READ, va, sizeof(u64))) { 785 if (!access_ok(VERIFY_READ, dva, sizeof(u64))) {
1000 MIPS_FPU_EMU_INC_STATS(errors); 786 MIPS_FPU_EMU_INC_STATS(errors);
1001 *fault_addr = va; 787 *fault_addr = dva;
1002 return SIGBUS; 788 return SIGBUS;
1003 } 789 }
1004 if (__get_user(val, va)) { 790 if (__get_user(dval, dva)) {
1005 MIPS_FPU_EMU_INC_STATS(errors); 791 MIPS_FPU_EMU_INC_STATS(errors);
1006 *fault_addr = va; 792 *fault_addr = dva;
1007 return SIGSEGV; 793 return SIGSEGV;
1008 } 794 }
1009 DITOREG(val, MIPSInst_RT(ir)); 795 DITOREG(dval, MIPSInst_RT(ir));
1010 break; 796 break;
1011 }
1012
1013 case sdc1_op:{
1014 u64 __user *va = (u64 __user *) (xcp->regs[MIPSInst_RS(ir)] +
1015 MIPSInst_SIMM(ir));
1016 u64 val;
1017 797
798 case sdc1_op:
799 dva = (u64 __user *) (xcp->regs[MIPSInst_RS(ir)] +
800 MIPSInst_SIMM(ir));
1018 MIPS_FPU_EMU_INC_STATS(stores); 801 MIPS_FPU_EMU_INC_STATS(stores);
1019 DIFROMREG(val, MIPSInst_RT(ir)); 802 DIFROMREG(dval, MIPSInst_RT(ir));
1020 if (!access_ok(VERIFY_WRITE, va, sizeof(u64))) { 803 if (!access_ok(VERIFY_WRITE, dva, sizeof(u64))) {
1021 MIPS_FPU_EMU_INC_STATS(errors); 804 MIPS_FPU_EMU_INC_STATS(errors);
1022 *fault_addr = va; 805 *fault_addr = dva;
1023 return SIGBUS; 806 return SIGBUS;
1024 } 807 }
1025 if (__put_user(val, va)) { 808 if (__put_user(dval, dva)) {
1026 MIPS_FPU_EMU_INC_STATS(errors); 809 MIPS_FPU_EMU_INC_STATS(errors);
1027 *fault_addr = va; 810 *fault_addr = dva;
1028 return SIGSEGV; 811 return SIGSEGV;
1029 } 812 }
1030 break; 813 break;
1031 }
1032
1033 case lwc1_op:{
1034 u32 __user *va = (u32 __user *) (xcp->regs[MIPSInst_RS(ir)] +
1035 MIPSInst_SIMM(ir));
1036 u32 val;
1037 814
815 case lwc1_op:
816 wva = (u32 __user *) (xcp->regs[MIPSInst_RS(ir)] +
817 MIPSInst_SIMM(ir));
1038 MIPS_FPU_EMU_INC_STATS(loads); 818 MIPS_FPU_EMU_INC_STATS(loads);
1039 if (!access_ok(VERIFY_READ, va, sizeof(u32))) { 819 if (!access_ok(VERIFY_READ, wva, sizeof(u32))) {
1040 MIPS_FPU_EMU_INC_STATS(errors); 820 MIPS_FPU_EMU_INC_STATS(errors);
1041 *fault_addr = va; 821 *fault_addr = wva;
1042 return SIGBUS; 822 return SIGBUS;
1043 } 823 }
1044 if (__get_user(val, va)) { 824 if (__get_user(wval, wva)) {
1045 MIPS_FPU_EMU_INC_STATS(errors); 825 MIPS_FPU_EMU_INC_STATS(errors);
1046 *fault_addr = va; 826 *fault_addr = wva;
1047 return SIGSEGV; 827 return SIGSEGV;
1048 } 828 }
1049 SITOREG(val, MIPSInst_RT(ir)); 829 SITOREG(wval, MIPSInst_RT(ir));
1050 break; 830 break;
1051 }
1052
1053 case swc1_op:{
1054 u32 __user *va = (u32 __user *) (xcp->regs[MIPSInst_RS(ir)] +
1055 MIPSInst_SIMM(ir));
1056 u32 val;
1057 831
832 case swc1_op:
833 wva = (u32 __user *) (xcp->regs[MIPSInst_RS(ir)] +
834 MIPSInst_SIMM(ir));
1058 MIPS_FPU_EMU_INC_STATS(stores); 835 MIPS_FPU_EMU_INC_STATS(stores);
1059 SIFROMREG(val, MIPSInst_RT(ir)); 836 SIFROMREG(wval, MIPSInst_RT(ir));
1060 if (!access_ok(VERIFY_WRITE, va, sizeof(u32))) { 837 if (!access_ok(VERIFY_WRITE, wva, sizeof(u32))) {
1061 MIPS_FPU_EMU_INC_STATS(errors); 838 MIPS_FPU_EMU_INC_STATS(errors);
1062 *fault_addr = va; 839 *fault_addr = wva;
1063 return SIGBUS; 840 return SIGBUS;
1064 } 841 }
1065 if (__put_user(val, va)) { 842 if (__put_user(wval, wva)) {
1066 MIPS_FPU_EMU_INC_STATS(errors); 843 MIPS_FPU_EMU_INC_STATS(errors);
1067 *fault_addr = va; 844 *fault_addr = wva;
1068 return SIGSEGV; 845 return SIGSEGV;
1069 } 846 }
1070 break; 847 break;
1071 }
1072 848
1073 case cop1_op: 849 case cop1_op:
1074 switch (MIPSInst_RS(ir)) { 850 switch (MIPSInst_RS(ir)) {
1075
1076#if defined(__mips64)
1077 case dmfc_op: 851 case dmfc_op:
852 if (!cpu_has_mips_3_4_5 && !cpu_has_mips64)
853 return SIGILL;
854
1078 /* copregister fs -> gpr[rt] */ 855 /* copregister fs -> gpr[rt] */
1079 if (MIPSInst_RT(ir) != 0) { 856 if (MIPSInst_RT(ir) != 0) {
1080 DIFROMREG(xcp->regs[MIPSInst_RT(ir)], 857 DIFROMREG(xcp->regs[MIPSInst_RT(ir)],
@@ -1083,10 +860,12 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1083 break; 860 break;
1084 861
1085 case dmtc_op: 862 case dmtc_op:
863 if (!cpu_has_mips_3_4_5 && !cpu_has_mips64)
864 return SIGILL;
865
1086 /* copregister fs <- rt */ 866 /* copregister fs <- rt */
1087 DITOREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir)); 867 DITOREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir));
1088 break; 868 break;
1089#endif
1090 869
1091 case mfhc_op: 870 case mfhc_op:
1092 if (!cpu_has_mips_r2) 871 if (!cpu_has_mips_r2)
@@ -1120,19 +899,14 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1120 SITOREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir)); 899 SITOREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir));
1121 break; 900 break;
1122 901
1123 case cfc_op:{ 902 case cfc_op:
1124 /* cop control register rd -> gpr[rt] */ 903 /* cop control register rd -> gpr[rt] */
1125 u32 value;
1126
1127 if (MIPSInst_RD(ir) == FPCREG_CSR) { 904 if (MIPSInst_RD(ir) == FPCREG_CSR) {
1128 value = ctx->fcr31; 905 value = ctx->fcr31;
1129 value = (value & ~FPU_CSR_RM) | 906 value = (value & ~FPU_CSR_RM) | modeindex(value);
1130 mips_rm[modeindex(value)]; 907 pr_debug("%p gpr[%d]<-csr=%08x\n",
1131#ifdef CSRTRACE 908 (void *) (xcp->cp0_epc),
1132 printk("%p gpr[%d]<-csr=%08x\n", 909 MIPSInst_RT(ir), value);
1133 (void *) (xcp->cp0_epc),
1134 MIPSInst_RT(ir), value);
1135#endif
1136 } 910 }
1137 else if (MIPSInst_RD(ir) == FPCREG_RID) 911 else if (MIPSInst_RD(ir) == FPCREG_RID)
1138 value = 0; 912 value = 0;
@@ -1141,12 +915,9 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1141 if (MIPSInst_RT(ir)) 915 if (MIPSInst_RT(ir))
1142 xcp->regs[MIPSInst_RT(ir)] = value; 916 xcp->regs[MIPSInst_RT(ir)] = value;
1143 break; 917 break;
1144 }
1145 918
1146 case ctc_op:{ 919 case ctc_op:
1147 /* copregister rd <- rt */ 920 /* copregister rd <- rt */
1148 u32 value;
1149
1150 if (MIPSInst_RT(ir) == 0) 921 if (MIPSInst_RT(ir) == 0)
1151 value = 0; 922 value = 0;
1152 else 923 else
@@ -1155,37 +926,33 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1155 /* we only have one writable control reg 926 /* we only have one writable control reg
1156 */ 927 */
1157 if (MIPSInst_RD(ir) == FPCREG_CSR) { 928 if (MIPSInst_RD(ir) == FPCREG_CSR) {
1158#ifdef CSRTRACE 929 pr_debug("%p gpr[%d]->csr=%08x\n",
1159 printk("%p gpr[%d]->csr=%08x\n", 930 (void *) (xcp->cp0_epc),
1160 (void *) (xcp->cp0_epc), 931 MIPSInst_RT(ir), value);
1161 MIPSInst_RT(ir), value);
1162#endif
1163 932
1164 /* 933 /*
1165 * Don't write reserved bits, 934 * Don't write reserved bits,
1166 * and convert to ieee library modes 935 * and convert to ieee library modes
1167 */ 936 */
1168 ctx->fcr31 = (value & 937 ctx->fcr31 = (value & ~(FPU_CSR_RSVD | FPU_CSR_RM)) |
1169 ~(FPU_CSR_RSVD | FPU_CSR_RM)) | 938 modeindex(value);
1170 ieee_rm[modeindex(value)];
1171 } 939 }
1172 if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) { 940 if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
1173 return SIGFPE; 941 return SIGFPE;
1174 } 942 }
1175 break; 943 break;
1176 }
1177 944
1178 case bc_op:{ 945 case bc_op:
1179 int likely = 0; 946 if (delay_slot(xcp))
1180
1181 if (xcp->cp0_cause & CAUSEF_BD)
1182 return SIGILL; 947 return SIGILL;
1183 948
1184#if __mips >= 4 949 if (cpu_has_mips_4_5_r)
1185 cond = ctx->fcr31 & fpucondbit[MIPSInst_RT(ir) >> 2]; 950 cbit = fpucondbit[MIPSInst_RT(ir) >> 2];
1186#else 951 else
1187 cond = ctx->fcr31 & FPU_CSR_COND; 952 cbit = FPU_CSR_COND;
1188#endif 953 cond = ctx->fcr31 & cbit;
954
955 likely = 0;
1189 switch (MIPSInst_RT(ir) & 3) { 956 switch (MIPSInst_RT(ir) & 3) {
1190 case bcfl_op: 957 case bcfl_op:
1191 likely = 1; 958 likely = 1;
@@ -1201,10 +968,10 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1201 return SIGILL; 968 return SIGILL;
1202 } 969 }
1203 970
1204 xcp->cp0_cause |= CAUSEF_BD; 971 set_delay_slot(xcp);
1205 if (cond) { 972 if (cond) {
1206 /* branch taken: emulate dslot 973 /*
1207 * instruction 974 * Branch taken: emulate dslot instruction
1208 */ 975 */
1209 xcp->cp0_epc += dec_insn.pc_inc; 976 xcp->cp0_epc += dec_insn.pc_inc;
1210 977
@@ -1238,23 +1005,37 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1238 1005
1239 switch (MIPSInst_OPCODE(ir)) { 1006 switch (MIPSInst_OPCODE(ir)) {
1240 case lwc1_op: 1007 case lwc1_op:
1008 goto emul;
1009
1241 case swc1_op: 1010 case swc1_op:
1242#if (__mips >= 2 || defined(__mips64)) 1011 goto emul;
1012
1243 case ldc1_op: 1013 case ldc1_op:
1244 case sdc1_op: 1014 case sdc1_op:
1245#endif 1015 if (cpu_has_mips_2_3_4_5 ||
1016 cpu_has_mips64)
1017 goto emul;
1018
1019 return SIGILL;
1020 goto emul;
1021
1246 case cop1_op: 1022 case cop1_op:
1247#if __mips >= 4 && __mips != 32
1248 case cop1x_op:
1249#endif
1250 /* its one of ours */
1251 goto emul; 1023 goto emul;
1252#if __mips >= 4 1024
1025 case cop1x_op:
1026 if (cpu_has_mips_4_5 || cpu_has_mips64)
1027 /* its one of ours */
1028 goto emul;
1029
1030 return SIGILL;
1031
1253 case spec_op: 1032 case spec_op:
1033 if (!cpu_has_mips_4_5_r)
1034 return SIGILL;
1035
1254 if (MIPSInst_FUNC(ir) == movc_op) 1036 if (MIPSInst_FUNC(ir) == movc_op)
1255 goto emul; 1037 goto emul;
1256 break; 1038 break;
1257#endif
1258 } 1039 }
1259 1040
1260 /* 1041 /*
@@ -1262,10 +1043,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1262 * instruction in the dslot 1043 * instruction in the dslot
1263 */ 1044 */
1264 return mips_dsemul(xcp, ir, contpc); 1045 return mips_dsemul(xcp, ir, contpc);
1265 } 1046 } else if (likely) { /* branch not taken */
1266 else {
1267 /* branch not taken */
1268 if (likely) {
1269 /* 1047 /*
1270 * branch likely nullifies 1048 * branch likely nullifies
1271 * dslot if not taken 1049 * dslot if not taken
@@ -1277,34 +1055,31 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1277 * dslot as normal insn 1055 * dslot as normal insn
1278 */ 1056 */
1279 } 1057 }
1280 }
1281 break; 1058 break;
1282 }
1283 1059
1284 default: 1060 default:
1285 if (!(MIPSInst_RS(ir) & 0x10)) 1061 if (!(MIPSInst_RS(ir) & 0x10))
1286 return SIGILL; 1062 return SIGILL;
1287 {
1288 int sig;
1289 1063
1290 /* a real fpu computation instruction */ 1064 /* a real fpu computation instruction */
1291 if ((sig = fpu_emu(xcp, ctx, ir))) 1065 if ((sig = fpu_emu(xcp, ctx, ir)))
1292 return sig; 1066 return sig;
1293 }
1294 } 1067 }
1295 break; 1068 break;
1296 1069
1297#if __mips >= 4 && __mips != 32 1070 case cop1x_op:
1298 case cop1x_op:{ 1071 if (!cpu_has_mips_4_5 && !cpu_has_mips64)
1299 int sig = fpux_emu(xcp, ctx, ir, fault_addr); 1072 return SIGILL;
1073
1074 sig = fpux_emu(xcp, ctx, ir, fault_addr);
1300 if (sig) 1075 if (sig)
1301 return sig; 1076 return sig;
1302 break; 1077 break;
1303 }
1304#endif
1305 1078
1306#if __mips >= 4
1307 case spec_op: 1079 case spec_op:
1080 if (!cpu_has_mips_4_5_r)
1081 return SIGILL;
1082
1308 if (MIPSInst_FUNC(ir) != movc_op) 1083 if (MIPSInst_FUNC(ir) != movc_op)
1309 return SIGILL; 1084 return SIGILL;
1310 cond = fpucondbit[MIPSInst_RT(ir) >> 2]; 1085 cond = fpucondbit[MIPSInst_RT(ir) >> 2];
@@ -1312,8 +1087,6 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1312 xcp->regs[MIPSInst_RD(ir)] = 1087 xcp->regs[MIPSInst_RD(ir)] =
1313 xcp->regs[MIPSInst_RS(ir)]; 1088 xcp->regs[MIPSInst_RS(ir)];
1314 break; 1089 break;
1315#endif
1316
1317 default: 1090 default:
1318sigill: 1091sigill:
1319 return SIGILL; 1092 return SIGILL;
@@ -1321,7 +1094,7 @@ sigill:
1321 1094
1322 /* we did it !! */ 1095 /* we did it !! */
1323 xcp->cp0_epc = contpc; 1096 xcp->cp0_epc = contpc;
1324 xcp->cp0_cause &= ~CAUSEF_BD; 1097 clear_delay_slot(xcp);
1325 1098
1326 return 0; 1099 return 0;
1327} 1100}
@@ -1342,44 +1115,42 @@ static const unsigned char cmptab[8] = {
1342}; 1115};
1343 1116
1344 1117
1345#if __mips >= 4 && __mips != 32
1346
1347/* 1118/*
1348 * Additional MIPS4 instructions 1119 * Additional MIPS4 instructions
1349 */ 1120 */
1350 1121
1351#define DEF3OP(name, p, f1, f2, f3) \ 1122#define DEF3OP(name, p, f1, f2, f3) \
1352static ieee754##p fpemu_##p##_##name(ieee754##p r, ieee754##p s, \ 1123static union ieee754##p fpemu_##p##_##name(union ieee754##p r, \
1353 ieee754##p t) \ 1124 union ieee754##p s, union ieee754##p t) \
1354{ \ 1125{ \
1355 struct _ieee754_csr ieee754_csr_save; \ 1126 struct _ieee754_csr ieee754_csr_save; \
1356 s = f1(s, t); \ 1127 s = f1(s, t); \
1357 ieee754_csr_save = ieee754_csr; \ 1128 ieee754_csr_save = ieee754_csr; \
1358 s = f2(s, r); \ 1129 s = f2(s, r); \
1359 ieee754_csr_save.cx |= ieee754_csr.cx; \ 1130 ieee754_csr_save.cx |= ieee754_csr.cx; \
1360 ieee754_csr_save.sx |= ieee754_csr.sx; \ 1131 ieee754_csr_save.sx |= ieee754_csr.sx; \
1361 s = f3(s); \ 1132 s = f3(s); \
1362 ieee754_csr.cx |= ieee754_csr_save.cx; \ 1133 ieee754_csr.cx |= ieee754_csr_save.cx; \
1363 ieee754_csr.sx |= ieee754_csr_save.sx; \ 1134 ieee754_csr.sx |= ieee754_csr_save.sx; \
1364 return s; \ 1135 return s; \
1365} 1136}
1366 1137
1367static ieee754dp fpemu_dp_recip(ieee754dp d) 1138static union ieee754dp fpemu_dp_recip(union ieee754dp d)
1368{ 1139{
1369 return ieee754dp_div(ieee754dp_one(0), d); 1140 return ieee754dp_div(ieee754dp_one(0), d);
1370} 1141}
1371 1142
1372static ieee754dp fpemu_dp_rsqrt(ieee754dp d) 1143static union ieee754dp fpemu_dp_rsqrt(union ieee754dp d)
1373{ 1144{
1374 return ieee754dp_div(ieee754dp_one(0), ieee754dp_sqrt(d)); 1145 return ieee754dp_div(ieee754dp_one(0), ieee754dp_sqrt(d));
1375} 1146}
1376 1147
1377static ieee754sp fpemu_sp_recip(ieee754sp s) 1148static union ieee754sp fpemu_sp_recip(union ieee754sp s)
1378{ 1149{
1379 return ieee754sp_div(ieee754sp_one(0), s); 1150 return ieee754sp_div(ieee754sp_one(0), s);
1380} 1151}
1381 1152
1382static ieee754sp fpemu_sp_rsqrt(ieee754sp s) 1153static union ieee754sp fpemu_sp_rsqrt(union ieee754sp s)
1383{ 1154{
1384 return ieee754sp_div(ieee754sp_one(0), ieee754sp_sqrt(s)); 1155 return ieee754sp_div(ieee754sp_one(0), ieee754sp_sqrt(s));
1385} 1156}
@@ -1403,8 +1174,8 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1403 switch (MIPSInst_FMA_FFMT(ir)) { 1174 switch (MIPSInst_FMA_FFMT(ir)) {
1404 case s_fmt:{ /* 0 */ 1175 case s_fmt:{ /* 0 */
1405 1176
1406 ieee754sp(*handler) (ieee754sp, ieee754sp, ieee754sp); 1177 union ieee754sp(*handler) (union ieee754sp, union ieee754sp, union ieee754sp);
1407 ieee754sp fd, fr, fs, ft; 1178 union ieee754sp fd, fr, fs, ft;
1408 u32 __user *va; 1179 u32 __user *va;
1409 u32 val; 1180 u32 val;
1410 1181
@@ -1467,18 +1238,26 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1467 SPTOREG(fd, MIPSInst_FD(ir)); 1238 SPTOREG(fd, MIPSInst_FD(ir));
1468 1239
1469 copcsr: 1240 copcsr:
1470 if (ieee754_cxtest(IEEE754_INEXACT)) 1241 if (ieee754_cxtest(IEEE754_INEXACT)) {
1242 MIPS_FPU_EMU_INC_STATS(ieee754_inexact);
1471 rcsr |= FPU_CSR_INE_X | FPU_CSR_INE_S; 1243 rcsr |= FPU_CSR_INE_X | FPU_CSR_INE_S;
1472 if (ieee754_cxtest(IEEE754_UNDERFLOW)) 1244 }
1245 if (ieee754_cxtest(IEEE754_UNDERFLOW)) {
1246 MIPS_FPU_EMU_INC_STATS(ieee754_underflow);
1473 rcsr |= FPU_CSR_UDF_X | FPU_CSR_UDF_S; 1247 rcsr |= FPU_CSR_UDF_X | FPU_CSR_UDF_S;
1474 if (ieee754_cxtest(IEEE754_OVERFLOW)) 1248 }
1249 if (ieee754_cxtest(IEEE754_OVERFLOW)) {
1250 MIPS_FPU_EMU_INC_STATS(ieee754_overflow);
1475 rcsr |= FPU_CSR_OVF_X | FPU_CSR_OVF_S; 1251 rcsr |= FPU_CSR_OVF_X | FPU_CSR_OVF_S;
1476 if (ieee754_cxtest(IEEE754_INVALID_OPERATION)) 1252 }
1253 if (ieee754_cxtest(IEEE754_INVALID_OPERATION)) {
1254 MIPS_FPU_EMU_INC_STATS(ieee754_invalidop);
1477 rcsr |= FPU_CSR_INV_X | FPU_CSR_INV_S; 1255 rcsr |= FPU_CSR_INV_X | FPU_CSR_INV_S;
1256 }
1478 1257
1479 ctx->fcr31 = (ctx->fcr31 & ~FPU_CSR_ALL_X) | rcsr; 1258 ctx->fcr31 = (ctx->fcr31 & ~FPU_CSR_ALL_X) | rcsr;
1480 if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) { 1259 if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
1481 /*printk ("SIGFPE: fpu csr = %08x\n", 1260 /*printk ("SIGFPE: FPU csr = %08x\n",
1482 ctx->fcr31); */ 1261 ctx->fcr31); */
1483 return SIGFPE; 1262 return SIGFPE;
1484 } 1263 }
@@ -1492,8 +1271,8 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1492 } 1271 }
1493 1272
1494 case d_fmt:{ /* 1 */ 1273 case d_fmt:{ /* 1 */
1495 ieee754dp(*handler) (ieee754dp, ieee754dp, ieee754dp); 1274 union ieee754dp(*handler) (union ieee754dp, union ieee754dp, union ieee754dp);
1496 ieee754dp fd, fr, fs, ft; 1275 union ieee754dp fd, fr, fs, ft;
1497 u64 __user *va; 1276 u64 __user *va;
1498 u64 val; 1277 u64 val;
1499 1278
@@ -1574,7 +1353,6 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1574 1353
1575 return 0; 1354 return 0;
1576} 1355}
1577#endif
1578 1356
1579 1357
1580 1358
@@ -1586,23 +1364,25 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1586{ 1364{
1587 int rfmt; /* resulting format */ 1365 int rfmt; /* resulting format */
1588 unsigned rcsr = 0; /* resulting csr */ 1366 unsigned rcsr = 0; /* resulting csr */
1367 unsigned int oldrm;
1368 unsigned int cbit;
1589 unsigned cond; 1369 unsigned cond;
1590 union { 1370 union {
1591 ieee754dp d; 1371 union ieee754dp d;
1592 ieee754sp s; 1372 union ieee754sp s;
1593 int w; 1373 int w;
1594#ifdef __mips64
1595 s64 l; 1374 s64 l;
1596#endif
1597 } rv; /* resulting value */ 1375 } rv; /* resulting value */
1376 u64 bits;
1598 1377
1599 MIPS_FPU_EMU_INC_STATS(cp1ops); 1378 MIPS_FPU_EMU_INC_STATS(cp1ops);
1600 switch (rfmt = (MIPSInst_FFMT(ir) & 0xf)) { 1379 switch (rfmt = (MIPSInst_FFMT(ir) & 0xf)) {
1601 case s_fmt:{ /* 0 */ 1380 case s_fmt: { /* 0 */
1602 union { 1381 union {
1603 ieee754sp(*b) (ieee754sp, ieee754sp); 1382 union ieee754sp(*b) (union ieee754sp, union ieee754sp);
1604 ieee754sp(*u) (ieee754sp); 1383 union ieee754sp(*u) (union ieee754sp);
1605 } handler; 1384 } handler;
1385 union ieee754sp fs, ft;
1606 1386
1607 switch (MIPSInst_FUNC(ir)) { 1387 switch (MIPSInst_FUNC(ir)) {
1608 /* binary ops */ 1388 /* binary ops */
@@ -1620,148 +1400,167 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1620 goto scopbop; 1400 goto scopbop;
1621 1401
1622 /* unary ops */ 1402 /* unary ops */
1623#if __mips >= 2 || defined(__mips64)
1624 case fsqrt_op: 1403 case fsqrt_op:
1404 if (!cpu_has_mips_4_5_r)
1405 return SIGILL;
1406
1625 handler.u = ieee754sp_sqrt; 1407 handler.u = ieee754sp_sqrt;
1626 goto scopuop; 1408 goto scopuop;
1627#endif 1409
1628#if __mips >= 4 && __mips != 32 1410 /*
1411 * Note that on some MIPS IV implementations such as the
1412 * R5000 and R8000 the FSQRT and FRECIP instructions do not
1413 * achieve full IEEE-754 accuracy - however this emulator does.
1414 */
1629 case frsqrt_op: 1415 case frsqrt_op:
1416 if (!cpu_has_mips_4_5_r2)
1417 return SIGILL;
1418
1630 handler.u = fpemu_sp_rsqrt; 1419 handler.u = fpemu_sp_rsqrt;
1631 goto scopuop; 1420 goto scopuop;
1421
1632 case frecip_op: 1422 case frecip_op:
1423 if (!cpu_has_mips_4_5_r2)
1424 return SIGILL;
1425
1633 handler.u = fpemu_sp_recip; 1426 handler.u = fpemu_sp_recip;
1634 goto scopuop; 1427 goto scopuop;
1635#endif 1428
1636#if __mips >= 4
1637 case fmovc_op: 1429 case fmovc_op:
1430 if (!cpu_has_mips_4_5_r)
1431 return SIGILL;
1432
1638 cond = fpucondbit[MIPSInst_FT(ir) >> 2]; 1433 cond = fpucondbit[MIPSInst_FT(ir) >> 2];
1639 if (((ctx->fcr31 & cond) != 0) != 1434 if (((ctx->fcr31 & cond) != 0) !=
1640 ((MIPSInst_FT(ir) & 1) != 0)) 1435 ((MIPSInst_FT(ir) & 1) != 0))
1641 return 0; 1436 return 0;
1642 SPFROMREG(rv.s, MIPSInst_FS(ir)); 1437 SPFROMREG(rv.s, MIPSInst_FS(ir));
1643 break; 1438 break;
1439
1644 case fmovz_op: 1440 case fmovz_op:
1441 if (!cpu_has_mips_4_5_r)
1442 return SIGILL;
1443
1645 if (xcp->regs[MIPSInst_FT(ir)] != 0) 1444 if (xcp->regs[MIPSInst_FT(ir)] != 0)
1646 return 0; 1445 return 0;
1647 SPFROMREG(rv.s, MIPSInst_FS(ir)); 1446 SPFROMREG(rv.s, MIPSInst_FS(ir));
1648 break; 1447 break;
1448
1649 case fmovn_op: 1449 case fmovn_op:
1450 if (!cpu_has_mips_4_5_r)
1451 return SIGILL;
1452
1650 if (xcp->regs[MIPSInst_FT(ir)] == 0) 1453 if (xcp->regs[MIPSInst_FT(ir)] == 0)
1651 return 0; 1454 return 0;
1652 SPFROMREG(rv.s, MIPSInst_FS(ir)); 1455 SPFROMREG(rv.s, MIPSInst_FS(ir));
1653 break; 1456 break;
1654#endif 1457
1655 case fabs_op: 1458 case fabs_op:
1656 handler.u = ieee754sp_abs; 1459 handler.u = ieee754sp_abs;
1657 goto scopuop; 1460 goto scopuop;
1461
1658 case fneg_op: 1462 case fneg_op:
1659 handler.u = ieee754sp_neg; 1463 handler.u = ieee754sp_neg;
1660 goto scopuop; 1464 goto scopuop;
1465
1661 case fmov_op: 1466 case fmov_op:
1662 /* an easy one */ 1467 /* an easy one */
1663 SPFROMREG(rv.s, MIPSInst_FS(ir)); 1468 SPFROMREG(rv.s, MIPSInst_FS(ir));
1664 goto copcsr; 1469 goto copcsr;
1665 1470
1666 /* binary op on handler */ 1471 /* binary op on handler */
1667 scopbop: 1472scopbop:
1668 { 1473 SPFROMREG(fs, MIPSInst_FS(ir));
1669 ieee754sp fs, ft; 1474 SPFROMREG(ft, MIPSInst_FT(ir));
1670
1671 SPFROMREG(fs, MIPSInst_FS(ir));
1672 SPFROMREG(ft, MIPSInst_FT(ir));
1673
1674 rv.s = (*handler.b) (fs, ft);
1675 goto copcsr;
1676 }
1677 scopuop:
1678 {
1679 ieee754sp fs;
1680 1475
1681 SPFROMREG(fs, MIPSInst_FS(ir)); 1476 rv.s = (*handler.b) (fs, ft);
1682 rv.s = (*handler.u) (fs); 1477 goto copcsr;
1683 goto copcsr; 1478scopuop:
1684 } 1479 SPFROMREG(fs, MIPSInst_FS(ir));
1685 copcsr: 1480 rv.s = (*handler.u) (fs);
1686 if (ieee754_cxtest(IEEE754_INEXACT)) 1481 goto copcsr;
1482copcsr:
1483 if (ieee754_cxtest(IEEE754_INEXACT)) {
1484 MIPS_FPU_EMU_INC_STATS(ieee754_inexact);
1687 rcsr |= FPU_CSR_INE_X | FPU_CSR_INE_S; 1485 rcsr |= FPU_CSR_INE_X | FPU_CSR_INE_S;
1688 if (ieee754_cxtest(IEEE754_UNDERFLOW)) 1486 }
1487 if (ieee754_cxtest(IEEE754_UNDERFLOW)) {
1488 MIPS_FPU_EMU_INC_STATS(ieee754_underflow);
1689 rcsr |= FPU_CSR_UDF_X | FPU_CSR_UDF_S; 1489 rcsr |= FPU_CSR_UDF_X | FPU_CSR_UDF_S;
1690 if (ieee754_cxtest(IEEE754_OVERFLOW)) 1490 }
1491 if (ieee754_cxtest(IEEE754_OVERFLOW)) {
1492 MIPS_FPU_EMU_INC_STATS(ieee754_overflow);
1691 rcsr |= FPU_CSR_OVF_X | FPU_CSR_OVF_S; 1493 rcsr |= FPU_CSR_OVF_X | FPU_CSR_OVF_S;
1692 if (ieee754_cxtest(IEEE754_ZERO_DIVIDE)) 1494 }
1495 if (ieee754_cxtest(IEEE754_ZERO_DIVIDE)) {
1496 MIPS_FPU_EMU_INC_STATS(ieee754_zerodiv);
1693 rcsr |= FPU_CSR_DIV_X | FPU_CSR_DIV_S; 1497 rcsr |= FPU_CSR_DIV_X | FPU_CSR_DIV_S;
1694 if (ieee754_cxtest(IEEE754_INVALID_OPERATION)) 1498 }
1499 if (ieee754_cxtest(IEEE754_INVALID_OPERATION)) {
1500 MIPS_FPU_EMU_INC_STATS(ieee754_invalidop);
1695 rcsr |= FPU_CSR_INV_X | FPU_CSR_INV_S; 1501 rcsr |= FPU_CSR_INV_X | FPU_CSR_INV_S;
1502 }
1696 break; 1503 break;
1697 1504
1698 /* unary conv ops */ 1505 /* unary conv ops */
1699 case fcvts_op: 1506 case fcvts_op:
1700 return SIGILL; /* not defined */ 1507 return SIGILL; /* not defined */
1701 case fcvtd_op:{
1702 ieee754sp fs;
1703 1508
1509 case fcvtd_op:
1704 SPFROMREG(fs, MIPSInst_FS(ir)); 1510 SPFROMREG(fs, MIPSInst_FS(ir));
1705 rv.d = ieee754dp_fsp(fs); 1511 rv.d = ieee754dp_fsp(fs);
1706 rfmt = d_fmt; 1512 rfmt = d_fmt;
1707 goto copcsr; 1513 goto copcsr;
1708 }
1709 case fcvtw_op:{
1710 ieee754sp fs;
1711 1514
1515 case fcvtw_op:
1712 SPFROMREG(fs, MIPSInst_FS(ir)); 1516 SPFROMREG(fs, MIPSInst_FS(ir));
1713 rv.w = ieee754sp_tint(fs); 1517 rv.w = ieee754sp_tint(fs);
1714 rfmt = w_fmt; 1518 rfmt = w_fmt;
1715 goto copcsr; 1519 goto copcsr;
1716 }
1717 1520
1718#if __mips >= 2 || defined(__mips64)
1719 case fround_op: 1521 case fround_op:
1720 case ftrunc_op: 1522 case ftrunc_op:
1721 case fceil_op: 1523 case fceil_op:
1722 case ffloor_op:{ 1524 case ffloor_op:
1723 unsigned int oldrm = ieee754_csr.rm; 1525 if (!cpu_has_mips_2_3_4_5 && !cpu_has_mips64)
1724 ieee754sp fs; 1526 return SIGILL;
1725 1527
1528 oldrm = ieee754_csr.rm;
1726 SPFROMREG(fs, MIPSInst_FS(ir)); 1529 SPFROMREG(fs, MIPSInst_FS(ir));
1727 ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))]; 1530 ieee754_csr.rm = modeindex(MIPSInst_FUNC(ir));
1728 rv.w = ieee754sp_tint(fs); 1531 rv.w = ieee754sp_tint(fs);
1729 ieee754_csr.rm = oldrm; 1532 ieee754_csr.rm = oldrm;
1730 rfmt = w_fmt; 1533 rfmt = w_fmt;
1731 goto copcsr; 1534 goto copcsr;
1732 }
1733#endif /* __mips >= 2 */
1734 1535
1735#if defined(__mips64) 1536 case fcvtl_op:
1736 case fcvtl_op:{ 1537 if (!cpu_has_mips_3_4_5 && !cpu_has_mips64)
1737 ieee754sp fs; 1538 return SIGILL;
1738 1539
1739 SPFROMREG(fs, MIPSInst_FS(ir)); 1540 SPFROMREG(fs, MIPSInst_FS(ir));
1740 rv.l = ieee754sp_tlong(fs); 1541 rv.l = ieee754sp_tlong(fs);
1741 rfmt = l_fmt; 1542 rfmt = l_fmt;
1742 goto copcsr; 1543 goto copcsr;
1743 }
1744 1544
1745 case froundl_op: 1545 case froundl_op:
1746 case ftruncl_op: 1546 case ftruncl_op:
1747 case fceill_op: 1547 case fceill_op:
1748 case ffloorl_op:{ 1548 case ffloorl_op:
1749 unsigned int oldrm = ieee754_csr.rm; 1549 if (!cpu_has_mips_3_4_5 && !cpu_has_mips64)
1750 ieee754sp fs; 1550 return SIGILL;
1751 1551
1552 oldrm = ieee754_csr.rm;
1752 SPFROMREG(fs, MIPSInst_FS(ir)); 1553 SPFROMREG(fs, MIPSInst_FS(ir));
1753 ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))]; 1554 ieee754_csr.rm = modeindex(MIPSInst_FUNC(ir));
1754 rv.l = ieee754sp_tlong(fs); 1555 rv.l = ieee754sp_tlong(fs);
1755 ieee754_csr.rm = oldrm; 1556 ieee754_csr.rm = oldrm;
1756 rfmt = l_fmt; 1557 rfmt = l_fmt;
1757 goto copcsr; 1558 goto copcsr;
1758 }
1759#endif /* defined(__mips64) */
1760 1559
1761 default: 1560 default:
1762 if (MIPSInst_FUNC(ir) >= fcmp_op) { 1561 if (MIPSInst_FUNC(ir) >= fcmp_op) {
1763 unsigned cmpop = MIPSInst_FUNC(ir) - fcmp_op; 1562 unsigned cmpop = MIPSInst_FUNC(ir) - fcmp_op;
1764 ieee754sp fs, ft; 1563 union ieee754sp fs, ft;
1765 1564
1766 SPFROMREG(fs, MIPSInst_FS(ir)); 1565 SPFROMREG(fs, MIPSInst_FS(ir));
1767 SPFROMREG(ft, MIPSInst_FT(ir)); 1566 SPFROMREG(ft, MIPSInst_FT(ir));
@@ -1774,19 +1573,18 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1774 else 1573 else
1775 goto copcsr; 1574 goto copcsr;
1776 1575
1777 } 1576 } else
1778 else {
1779 return SIGILL; 1577 return SIGILL;
1780 }
1781 break; 1578 break;
1782 } 1579 }
1783 break; 1580 break;
1784 } 1581 }
1785 1582
1786 case d_fmt:{ 1583 case d_fmt: {
1584 union ieee754dp fs, ft;
1787 union { 1585 union {
1788 ieee754dp(*b) (ieee754dp, ieee754dp); 1586 union ieee754dp(*b) (union ieee754dp, union ieee754dp);
1789 ieee754dp(*u) (ieee754dp); 1587 union ieee754dp(*u) (union ieee754dp);
1790 } handler; 1588 } handler;
1791 1589
1792 switch (MIPSInst_FUNC(ir)) { 1590 switch (MIPSInst_FUNC(ir)) {
@@ -1805,21 +1603,33 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1805 goto dcopbop; 1603 goto dcopbop;
1806 1604
1807 /* unary ops */ 1605 /* unary ops */
1808#if __mips >= 2 || defined(__mips64)
1809 case fsqrt_op: 1606 case fsqrt_op:
1607 if (!cpu_has_mips_2_3_4_5_r)
1608 return SIGILL;
1609
1810 handler.u = ieee754dp_sqrt; 1610 handler.u = ieee754dp_sqrt;
1811 goto dcopuop; 1611 goto dcopuop;
1812#endif 1612 /*
1813#if __mips >= 4 && __mips != 32 1613 * Note that on some MIPS IV implementations such as the
1614 * R5000 and R8000 the FSQRT and FRECIP instructions do not
1615 * achieve full IEEE-754 accuracy - however this emulator does.
1616 */
1814 case frsqrt_op: 1617 case frsqrt_op:
1618 if (!cpu_has_mips_4_5_r2)
1619 return SIGILL;
1620
1815 handler.u = fpemu_dp_rsqrt; 1621 handler.u = fpemu_dp_rsqrt;
1816 goto dcopuop; 1622 goto dcopuop;
1817 case frecip_op: 1623 case frecip_op:
1624 if (!cpu_has_mips_4_5_r2)
1625 return SIGILL;
1626
1818 handler.u = fpemu_dp_recip; 1627 handler.u = fpemu_dp_recip;
1819 goto dcopuop; 1628 goto dcopuop;
1820#endif
1821#if __mips >= 4
1822 case fmovc_op: 1629 case fmovc_op:
1630 if (!cpu_has_mips_4_5_r)
1631 return SIGILL;
1632
1823 cond = fpucondbit[MIPSInst_FT(ir) >> 2]; 1633 cond = fpucondbit[MIPSInst_FT(ir) >> 2];
1824 if (((ctx->fcr31 & cond) != 0) != 1634 if (((ctx->fcr31 & cond) != 0) !=
1825 ((MIPSInst_FT(ir) & 1) != 0)) 1635 ((MIPSInst_FT(ir) & 1) != 0))
@@ -1827,16 +1637,21 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1827 DPFROMREG(rv.d, MIPSInst_FS(ir)); 1637 DPFROMREG(rv.d, MIPSInst_FS(ir));
1828 break; 1638 break;
1829 case fmovz_op: 1639 case fmovz_op:
1640 if (!cpu_has_mips_4_5_r)
1641 return SIGILL;
1642
1830 if (xcp->regs[MIPSInst_FT(ir)] != 0) 1643 if (xcp->regs[MIPSInst_FT(ir)] != 0)
1831 return 0; 1644 return 0;
1832 DPFROMREG(rv.d, MIPSInst_FS(ir)); 1645 DPFROMREG(rv.d, MIPSInst_FS(ir));
1833 break; 1646 break;
1834 case fmovn_op: 1647 case fmovn_op:
1648 if (!cpu_has_mips_4_5_r)
1649 return SIGILL;
1650
1835 if (xcp->regs[MIPSInst_FT(ir)] == 0) 1651 if (xcp->regs[MIPSInst_FT(ir)] == 0)
1836 return 0; 1652 return 0;
1837 DPFROMREG(rv.d, MIPSInst_FS(ir)); 1653 DPFROMREG(rv.d, MIPSInst_FS(ir));
1838 break; 1654 break;
1839#endif
1840 case fabs_op: 1655 case fabs_op:
1841 handler.u = ieee754dp_abs; 1656 handler.u = ieee754dp_abs;
1842 goto dcopuop; 1657 goto dcopuop;
@@ -1851,91 +1666,78 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1851 goto copcsr; 1666 goto copcsr;
1852 1667
1853 /* binary op on handler */ 1668 /* binary op on handler */
1854 dcopbop:{ 1669dcopbop:
1855 ieee754dp fs, ft; 1670 DPFROMREG(fs, MIPSInst_FS(ir));
1856 1671 DPFROMREG(ft, MIPSInst_FT(ir));
1857 DPFROMREG(fs, MIPSInst_FS(ir));
1858 DPFROMREG(ft, MIPSInst_FT(ir));
1859
1860 rv.d = (*handler.b) (fs, ft);
1861 goto copcsr;
1862 }
1863 dcopuop:{
1864 ieee754dp fs;
1865
1866 DPFROMREG(fs, MIPSInst_FS(ir));
1867 rv.d = (*handler.u) (fs);
1868 goto copcsr;
1869 }
1870 1672
1871 /* unary conv ops */ 1673 rv.d = (*handler.b) (fs, ft);
1872 case fcvts_op:{ 1674 goto copcsr;
1873 ieee754dp fs; 1675dcopuop:
1676 DPFROMREG(fs, MIPSInst_FS(ir));
1677 rv.d = (*handler.u) (fs);
1678 goto copcsr;
1874 1679
1680 /*
1681 * unary conv ops
1682 */
1683 case fcvts_op:
1875 DPFROMREG(fs, MIPSInst_FS(ir)); 1684 DPFROMREG(fs, MIPSInst_FS(ir));
1876 rv.s = ieee754sp_fdp(fs); 1685 rv.s = ieee754sp_fdp(fs);
1877 rfmt = s_fmt; 1686 rfmt = s_fmt;
1878 goto copcsr; 1687 goto copcsr;
1879 } 1688
1880 case fcvtd_op: 1689 case fcvtd_op:
1881 return SIGILL; /* not defined */ 1690 return SIGILL; /* not defined */
1882 1691
1883 case fcvtw_op:{ 1692 case fcvtw_op:
1884 ieee754dp fs;
1885
1886 DPFROMREG(fs, MIPSInst_FS(ir)); 1693 DPFROMREG(fs, MIPSInst_FS(ir));
1887 rv.w = ieee754dp_tint(fs); /* wrong */ 1694 rv.w = ieee754dp_tint(fs); /* wrong */
1888 rfmt = w_fmt; 1695 rfmt = w_fmt;
1889 goto copcsr; 1696 goto copcsr;
1890 }
1891 1697
1892#if __mips >= 2 || defined(__mips64)
1893 case fround_op: 1698 case fround_op:
1894 case ftrunc_op: 1699 case ftrunc_op:
1895 case fceil_op: 1700 case fceil_op:
1896 case ffloor_op:{ 1701 case ffloor_op:
1897 unsigned int oldrm = ieee754_csr.rm; 1702 if (!cpu_has_mips_2_3_4_5_r)
1898 ieee754dp fs; 1703 return SIGILL;
1899 1704
1705 oldrm = ieee754_csr.rm;
1900 DPFROMREG(fs, MIPSInst_FS(ir)); 1706 DPFROMREG(fs, MIPSInst_FS(ir));
1901 ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))]; 1707 ieee754_csr.rm = modeindex(MIPSInst_FUNC(ir));
1902 rv.w = ieee754dp_tint(fs); 1708 rv.w = ieee754dp_tint(fs);
1903 ieee754_csr.rm = oldrm; 1709 ieee754_csr.rm = oldrm;
1904 rfmt = w_fmt; 1710 rfmt = w_fmt;
1905 goto copcsr; 1711 goto copcsr;
1906 }
1907#endif
1908 1712
1909#if defined(__mips64) 1713 case fcvtl_op:
1910 case fcvtl_op:{ 1714 if (!cpu_has_mips_3_4_5 && !cpu_has_mips64)
1911 ieee754dp fs; 1715 return SIGILL;
1912 1716
1913 DPFROMREG(fs, MIPSInst_FS(ir)); 1717 DPFROMREG(fs, MIPSInst_FS(ir));
1914 rv.l = ieee754dp_tlong(fs); 1718 rv.l = ieee754dp_tlong(fs);
1915 rfmt = l_fmt; 1719 rfmt = l_fmt;
1916 goto copcsr; 1720 goto copcsr;
1917 }
1918 1721
1919 case froundl_op: 1722 case froundl_op:
1920 case ftruncl_op: 1723 case ftruncl_op:
1921 case fceill_op: 1724 case fceill_op:
1922 case ffloorl_op:{ 1725 case ffloorl_op:
1923 unsigned int oldrm = ieee754_csr.rm; 1726 if (!cpu_has_mips_3_4_5 && !cpu_has_mips64)
1924 ieee754dp fs; 1727 return SIGILL;
1925 1728
1729 oldrm = ieee754_csr.rm;
1926 DPFROMREG(fs, MIPSInst_FS(ir)); 1730 DPFROMREG(fs, MIPSInst_FS(ir));
1927 ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))]; 1731 ieee754_csr.rm = modeindex(MIPSInst_FUNC(ir));
1928 rv.l = ieee754dp_tlong(fs); 1732 rv.l = ieee754dp_tlong(fs);
1929 ieee754_csr.rm = oldrm; 1733 ieee754_csr.rm = oldrm;
1930 rfmt = l_fmt; 1734 rfmt = l_fmt;
1931 goto copcsr; 1735 goto copcsr;
1932 }
1933#endif /* __mips >= 3 */
1934 1736
1935 default: 1737 default:
1936 if (MIPSInst_FUNC(ir) >= fcmp_op) { 1738 if (MIPSInst_FUNC(ir) >= fcmp_op) {
1937 unsigned cmpop = MIPSInst_FUNC(ir) - fcmp_op; 1739 unsigned cmpop = MIPSInst_FUNC(ir) - fcmp_op;
1938 ieee754dp fs, ft; 1740 union ieee754dp fs, ft;
1939 1741
1940 DPFROMREG(fs, MIPSInst_FS(ir)); 1742 DPFROMREG(fs, MIPSInst_FS(ir));
1941 DPFROMREG(ft, MIPSInst_FT(ir)); 1743 DPFROMREG(ft, MIPSInst_FT(ir));
@@ -1957,11 +1759,8 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1957 break; 1759 break;
1958 } 1760 }
1959 break; 1761 break;
1960 }
1961
1962 case w_fmt:{
1963 ieee754sp fs;
1964 1762
1763 case w_fmt:
1965 switch (MIPSInst_FUNC(ir)) { 1764 switch (MIPSInst_FUNC(ir)) {
1966 case fcvts_op: 1765 case fcvts_op:
1967 /* convert word to single precision real */ 1766 /* convert word to single precision real */
@@ -1981,9 +1780,11 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1981 break; 1780 break;
1982 } 1781 }
1983 1782
1984#if defined(__mips64) 1783 case l_fmt:
1985 case l_fmt:{ 1784
1986 u64 bits; 1785 if (!cpu_has_mips_3_4_5 && !cpu_has_mips64)
1786 return SIGILL;
1787
1987 DIFROMREG(bits, MIPSInst_FS(ir)); 1788 DIFROMREG(bits, MIPSInst_FS(ir));
1988 1789
1989 switch (MIPSInst_FUNC(ir)) { 1790 switch (MIPSInst_FUNC(ir)) {
@@ -2001,8 +1802,6 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
2001 return SIGILL; 1802 return SIGILL;
2002 } 1803 }
2003 break; 1804 break;
2004 }
2005#endif
2006 1805
2007 default: 1806 default:
2008 return SIGILL; 1807 return SIGILL;
@@ -2017,7 +1816,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
2017 */ 1816 */
2018 ctx->fcr31 = (ctx->fcr31 & ~FPU_CSR_ALL_X) | rcsr; 1817 ctx->fcr31 = (ctx->fcr31 & ~FPU_CSR_ALL_X) | rcsr;
2019 if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) { 1818 if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
2020 /*printk ("SIGFPE: fpu csr = %08x\n",ctx->fcr31); */ 1819 /*printk ("SIGFPE: FPU csr = %08x\n",ctx->fcr31); */
2021 return SIGFPE; 1820 return SIGFPE;
2022 } 1821 }
2023 1822
@@ -2025,18 +1824,18 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
2025 * Now we can safely write the result back to the register file. 1824 * Now we can safely write the result back to the register file.
2026 */ 1825 */
2027 switch (rfmt) { 1826 switch (rfmt) {
2028 case -1:{ 1827 case -1:
2029#if __mips >= 4 1828
2030 cond = fpucondbit[MIPSInst_FD(ir) >> 2]; 1829 if (cpu_has_mips_4_5_r)
2031#else 1830 cbit = fpucondbit[MIPSInst_RT(ir) >> 2];
2032 cond = FPU_CSR_COND; 1831 else
2033#endif 1832 cbit = FPU_CSR_COND;
2034 if (rv.w) 1833 if (rv.w)
2035 ctx->fcr31 |= cond; 1834 ctx->fcr31 |= cbit;
2036 else 1835 else
2037 ctx->fcr31 &= ~cond; 1836 ctx->fcr31 &= ~cbit;
2038 break; 1837 break;
2039 } 1838
2040 case d_fmt: 1839 case d_fmt:
2041 DPTOREG(rv.d, MIPSInst_FD(ir)); 1840 DPTOREG(rv.d, MIPSInst_FD(ir));
2042 break; 1841 break;
@@ -2046,11 +1845,12 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
2046 case w_fmt: 1845 case w_fmt:
2047 SITOREG(rv.w, MIPSInst_FD(ir)); 1846 SITOREG(rv.w, MIPSInst_FD(ir));
2048 break; 1847 break;
2049#if defined(__mips64)
2050 case l_fmt: 1848 case l_fmt:
1849 if (!cpu_has_mips_3_4_5 && !cpu_has_mips64)
1850 return SIGILL;
1851
2051 DITOREG(rv.l, MIPSInst_FD(ir)); 1852 DITOREG(rv.l, MIPSInst_FD(ir));
2052 break; 1853 break;
2053#endif
2054 default: 1854 default:
2055 return SIGILL; 1855 return SIGILL;
2056 } 1856 }
@@ -2138,11 +1938,7 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
2138 * ieee754_csr. But ieee754_csr.rm is ieee 1938 * ieee754_csr. But ieee754_csr.rm is ieee
2139 * library modes. (not mips rounding mode) 1939 * library modes. (not mips rounding mode)
2140 */ 1940 */
2141 /* convert to ieee library modes */
2142 ieee754_csr.rm = ieee_rm[ieee754_csr.rm];
2143 sig = cop1Emulate(xcp, ctx, dec_insn, fault_addr); 1941 sig = cop1Emulate(xcp, ctx, dec_insn, fault_addr);
2144 /* revert to mips rounding mode */
2145 ieee754_csr.rm = mips_rm[ieee754_csr.rm];
2146 } 1942 }
2147 1943
2148 if (has_fpu) 1944 if (has_fpu)
@@ -2155,58 +1951,8 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
2155 1951
2156 /* SIGILL indicates a non-fpu instruction */ 1952 /* SIGILL indicates a non-fpu instruction */
2157 if (sig == SIGILL && xcp->cp0_epc != oldepc) 1953 if (sig == SIGILL && xcp->cp0_epc != oldepc)
2158 /* but if epc has advanced, then ignore it */ 1954 /* but if EPC has advanced, then ignore it */
2159 sig = 0; 1955 sig = 0;
2160 1956
2161 return sig; 1957 return sig;
2162} 1958}
2163
2164#ifdef CONFIG_DEBUG_FS
2165
2166static int fpuemu_stat_get(void *data, u64 *val)
2167{
2168 int cpu;
2169 unsigned long sum = 0;
2170 for_each_online_cpu(cpu) {
2171 struct mips_fpu_emulator_stats *ps;
2172 local_t *pv;
2173 ps = &per_cpu(fpuemustats, cpu);
2174 pv = (void *)ps + (unsigned long)data;
2175 sum += local_read(pv);
2176 }
2177 *val = sum;
2178 return 0;
2179}
2180DEFINE_SIMPLE_ATTRIBUTE(fops_fpuemu_stat, fpuemu_stat_get, NULL, "%llu\n");
2181
2182extern struct dentry *mips_debugfs_dir;
2183static int __init debugfs_fpuemu(void)
2184{
2185 struct dentry *d, *dir;
2186
2187 if (!mips_debugfs_dir)
2188 return -ENODEV;
2189 dir = debugfs_create_dir("fpuemustats", mips_debugfs_dir);
2190 if (!dir)
2191 return -ENOMEM;
2192
2193#define FPU_STAT_CREATE(M) \
2194 do { \
2195 d = debugfs_create_file(#M , S_IRUGO, dir, \
2196 (void *)offsetof(struct mips_fpu_emulator_stats, M), \
2197 &fops_fpuemu_stat); \
2198 if (!d) \
2199 return -ENOMEM; \
2200 } while (0)
2201
2202 FPU_STAT_CREATE(emulated);
2203 FPU_STAT_CREATE(loads);
2204 FPU_STAT_CREATE(stores);
2205 FPU_STAT_CREATE(cp1ops);
2206 FPU_STAT_CREATE(cp1xops);
2207 FPU_STAT_CREATE(errors);
2208
2209 return 0;
2210}
2211__initcall(debugfs_fpuemu);
2212#endif
diff --git a/arch/mips/math-emu/dp_add.c b/arch/mips/math-emu/dp_add.c
index c57c8adc42c4..7f64577df984 100644
--- a/arch/mips/math-emu/dp_add.c
+++ b/arch/mips/math-emu/dp_add.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,24 +16,22 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 *
25 */ 20 */
26 21
27
28#include "ieee754dp.h" 22#include "ieee754dp.h"
29 23
30ieee754dp ieee754dp_add(ieee754dp x, ieee754dp y) 24union ieee754dp ieee754dp_add(union ieee754dp x, union ieee754dp y)
31{ 25{
26 int s;
27
32 COMPXDP; 28 COMPXDP;
33 COMPYDP; 29 COMPYDP;
34 30
35 EXPLODEXDP; 31 EXPLODEXDP;
36 EXPLODEYDP; 32 EXPLODEYDP;
37 33
38 CLEARCX; 34 ieee754_clearcx();
39 35
40 FLUSHXDP; 36 FLUSHXDP;
41 FLUSHYDP; 37 FLUSHYDP;
@@ -52,8 +48,8 @@ ieee754dp ieee754dp_add(ieee754dp x, ieee754dp y)
52 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): 48 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
53 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): 49 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
54 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): 50 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
55 SETCX(IEEE754_INVALID_OPERATION); 51 ieee754_setcx(IEEE754_INVALID_OPERATION);
56 return ieee754dp_nanxcpt(ieee754dp_indef(), "add", x, y); 52 return ieee754dp_nanxcpt(ieee754dp_indef());
57 53
58 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): 54 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
59 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): 55 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
@@ -69,14 +65,14 @@ ieee754dp ieee754dp_add(ieee754dp x, ieee754dp y)
69 return x; 65 return x;
70 66
71 67
72 /* Infinity handling 68 /*
73 */ 69 * Infinity handling
74 70 */
75 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): 71 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
76 if (xs == ys) 72 if (xs == ys)
77 return x; 73 return x;
78 SETCX(IEEE754_INVALID_OPERATION); 74 ieee754_setcx(IEEE754_INVALID_OPERATION);
79 return ieee754dp_xcpt(ieee754dp_indef(), "add", x, y); 75 return ieee754dp_indef();
80 76
81 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): 77 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
82 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): 78 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
@@ -88,15 +84,14 @@ ieee754dp ieee754dp_add(ieee754dp x, ieee754dp y)
88 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): 84 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
89 return x; 85 return x;
90 86
91 /* Zero handling 87 /*
92 */ 88 * Zero handling
93 89 */
94 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): 90 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
95 if (xs == ys) 91 if (xs == ys)
96 return x; 92 return x;
97 else 93 else
98 return ieee754dp_zero(ieee754_csr.rm == 94 return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD);
99 IEEE754_RD);
100 95
101 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): 96 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
102 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): 97 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
@@ -125,20 +120,24 @@ ieee754dp ieee754dp_add(ieee754dp x, ieee754dp y)
125 assert(xm & DP_HIDDEN_BIT); 120 assert(xm & DP_HIDDEN_BIT);
126 assert(ym & DP_HIDDEN_BIT); 121 assert(ym & DP_HIDDEN_BIT);
127 122
128 /* provide guard,round and stick bit space */ 123 /*
124 * Provide guard,round and stick bit space.
125 */
129 xm <<= 3; 126 xm <<= 3;
130 ym <<= 3; 127 ym <<= 3;
131 128
132 if (xe > ye) { 129 if (xe > ye) {
133 /* have to shift y fraction right to align 130 /*
131 * Have to shift y fraction right to align.
134 */ 132 */
135 int s = xe - ye; 133 s = xe - ye;
136 ym = XDPSRS(ym, s); 134 ym = XDPSRS(ym, s);
137 ye += s; 135 ye += s;
138 } else if (ye > xe) { 136 } else if (ye > xe) {
139 /* have to shift x fraction right to align 137 /*
138 * Have to shift x fraction right to align.
140 */ 139 */
141 int s = ye - xe; 140 s = ye - xe;
142 xm = XDPSRS(xm, s); 141 xm = XDPSRS(xm, s);
143 xe += s; 142 xe += s;
144 } 143 }
@@ -146,14 +145,15 @@ ieee754dp ieee754dp_add(ieee754dp x, ieee754dp y)
146 assert(xe <= DP_EMAX); 145 assert(xe <= DP_EMAX);
147 146
148 if (xs == ys) { 147 if (xs == ys) {
149 /* generate 28 bit result of adding two 27 bit numbers 148 /*
150 * leaving result in xm,xs,xe 149 * Generate 28 bit result of adding two 27 bit numbers
150 * leaving result in xm, xs and xe.
151 */ 151 */
152 xm = xm + ym; 152 xm = xm + ym;
153 xe = xe; 153 xe = xe;
154 xs = xs; 154 xs = xs;
155 155
156 if (xm >> (DP_MBITS + 1 + 3)) { /* carry out */ 156 if (xm >> (DP_FBITS + 1 + 3)) { /* carry out */
157 xm = XDPSRS1(xm); 157 xm = XDPSRS1(xm);
158 xe++; 158 xe++;
159 } 159 }
@@ -168,15 +168,16 @@ ieee754dp ieee754dp_add(ieee754dp x, ieee754dp y)
168 xs = ys; 168 xs = ys;
169 } 169 }
170 if (xm == 0) 170 if (xm == 0)
171 return ieee754dp_zero(ieee754_csr.rm == 171 return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD);
172 IEEE754_RD);
173 172
174 /* normalize to rounding precision */ 173 /*
175 while ((xm >> (DP_MBITS + 3)) == 0) { 174 * Normalize to rounding precision.
175 */
176 while ((xm >> (DP_FBITS + 3)) == 0) {
176 xm <<= 1; 177 xm <<= 1;
177 xe--; 178 xe--;
178 } 179 }
179
180 } 180 }
181 DPNORMRET2(xs, xe, xm, "add", x, y); 181
182 return ieee754dp_format(xs, xe, xm);
182} 183}
diff --git a/arch/mips/math-emu/dp_cmp.c b/arch/mips/math-emu/dp_cmp.c
index 0f32486b0ed9..30f95f6e9ac4 100644
--- a/arch/mips/math-emu/dp_cmp.c
+++ b/arch/mips/math-emu/dp_cmp.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,16 +16,16 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
26
27#include "ieee754dp.h" 22#include "ieee754dp.h"
28 23
29int ieee754dp_cmp(ieee754dp x, ieee754dp y, int cmp, int sig) 24int ieee754dp_cmp(union ieee754dp x, union ieee754dp y, int cmp, int sig)
30{ 25{
26 s64 vx;
27 s64 vy;
28
31 COMPXDP; 29 COMPXDP;
32 COMPYDP; 30 COMPYDP;
33 31
@@ -35,21 +33,21 @@ int ieee754dp_cmp(ieee754dp x, ieee754dp y, int cmp, int sig)
35 EXPLODEYDP; 33 EXPLODEYDP;
36 FLUSHXDP; 34 FLUSHXDP;
37 FLUSHYDP; 35 FLUSHYDP;
38 CLEARCX; /* Even clear inexact flag here */ 36 ieee754_clearcx(); /* Even clear inexact flag here */
39 37
40 if (ieee754dp_isnan(x) || ieee754dp_isnan(y)) { 38 if (ieee754dp_isnan(x) || ieee754dp_isnan(y)) {
41 if (sig || xc == IEEE754_CLASS_SNAN || yc == IEEE754_CLASS_SNAN) 39 if (sig || xc == IEEE754_CLASS_SNAN || yc == IEEE754_CLASS_SNAN)
42 SETCX(IEEE754_INVALID_OPERATION); 40 ieee754_setcx(IEEE754_INVALID_OPERATION);
43 if (cmp & IEEE754_CUN) 41 if (cmp & IEEE754_CUN)
44 return 1; 42 return 1;
45 if (cmp & (IEEE754_CLT | IEEE754_CGT)) { 43 if (cmp & (IEEE754_CLT | IEEE754_CGT)) {
46 if (sig && SETANDTESTCX(IEEE754_INVALID_OPERATION)) 44 if (sig && ieee754_setandtestcx(IEEE754_INVALID_OPERATION))
47 return ieee754si_xcpt(0, "fcmpf", x); 45 return 0;
48 } 46 }
49 return 0; 47 return 0;
50 } else { 48 } else {
51 s64 vx = x.bits; 49 vx = x.bits;
52 s64 vy = y.bits; 50 vy = y.bits;
53 51
54 if (vx < 0) 52 if (vx < 0)
55 vx = -vx ^ DP_SIGN_BIT; 53 vx = -vx ^ DP_SIGN_BIT;
diff --git a/arch/mips/math-emu/dp_div.c b/arch/mips/math-emu/dp_div.c
index a1bce1b7c09c..bef0e55e5938 100644
--- a/arch/mips/math-emu/dp_div.c
+++ b/arch/mips/math-emu/dp_div.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,23 +16,24 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
26
27#include "ieee754dp.h" 22#include "ieee754dp.h"
28 23
29ieee754dp ieee754dp_div(ieee754dp x, ieee754dp y) 24union ieee754dp ieee754dp_div(union ieee754dp x, union ieee754dp y)
30{ 25{
26 u64 rm;
27 int re;
28 u64 bm;
29
31 COMPXDP; 30 COMPXDP;
32 COMPYDP; 31 COMPYDP;
33 32
34 EXPLODEXDP; 33 EXPLODEXDP;
35 EXPLODEYDP; 34 EXPLODEYDP;
36 35
37 CLEARCX; 36 ieee754_clearcx();
38 37
39 FLUSHXDP; 38 FLUSHXDP;
40 FLUSHYDP; 39 FLUSHYDP;
@@ -51,8 +50,8 @@ ieee754dp ieee754dp_div(ieee754dp x, ieee754dp y)
51 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): 50 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
52 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): 51 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
53 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): 52 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
54 SETCX(IEEE754_INVALID_OPERATION); 53 ieee754_setcx(IEEE754_INVALID_OPERATION);
55 return ieee754dp_nanxcpt(ieee754dp_indef(), "div", x, y); 54 return ieee754dp_nanxcpt(ieee754dp_indef());
56 55
57 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): 56 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
58 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): 57 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
@@ -68,12 +67,12 @@ ieee754dp ieee754dp_div(ieee754dp x, ieee754dp y)
68 return x; 67 return x;
69 68
70 69
71 /* Infinity handling 70 /*
72 */ 71 * Infinity handling
73 72 */
74 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): 73 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
75 SETCX(IEEE754_INVALID_OPERATION); 74 ieee754_setcx(IEEE754_INVALID_OPERATION);
76 return ieee754dp_xcpt(ieee754dp_indef(), "div", x, y); 75 return ieee754dp_indef();
77 76
78 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): 77 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
79 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): 78 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
@@ -85,17 +84,17 @@ ieee754dp ieee754dp_div(ieee754dp x, ieee754dp y)
85 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): 84 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
86 return ieee754dp_inf(xs ^ ys); 85 return ieee754dp_inf(xs ^ ys);
87 86
88 /* Zero handling 87 /*
89 */ 88 * Zero handling
90 89 */
91 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): 90 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
92 SETCX(IEEE754_INVALID_OPERATION); 91 ieee754_setcx(IEEE754_INVALID_OPERATION);
93 return ieee754dp_xcpt(ieee754dp_indef(), "div", x, y); 92 return ieee754dp_indef();
94 93
95 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): 94 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
96 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): 95 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
97 SETCX(IEEE754_ZERO_DIVIDE); 96 ieee754_setcx(IEEE754_ZERO_DIVIDE);
98 return ieee754dp_xcpt(ieee754dp_inf(xs ^ ys), "div", x, y); 97 return ieee754dp_inf(xs ^ ys);
99 98
100 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): 99 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
101 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): 100 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
@@ -122,35 +121,34 @@ ieee754dp ieee754dp_div(ieee754dp x, ieee754dp y)
122 xm <<= 3; 121 xm <<= 3;
123 ym <<= 3; 122 ym <<= 3;
124 123
125 { 124 /* now the dirty work */
126 /* now the dirty work */
127
128 u64 rm = 0;
129 int re = xe - ye;
130 u64 bm;
131
132 for (bm = DP_MBIT(DP_MBITS + 2); bm; bm >>= 1) {
133 if (xm >= ym) {
134 xm -= ym;
135 rm |= bm;
136 if (xm == 0)
137 break;
138 }
139 xm <<= 1;
140 }
141 rm <<= 1;
142 if (xm)
143 rm |= 1; /* have remainder, set sticky */
144 125
145 assert(rm); 126 rm = 0;
127 re = xe - ye;
146 128
147 /* normalise rm to rounding precision ? 129 for (bm = DP_MBIT(DP_FBITS + 2); bm; bm >>= 1) {
148 */ 130 if (xm >= ym) {
149 while ((rm >> (DP_MBITS + 3)) == 0) { 131 xm -= ym;
150 rm <<= 1; 132 rm |= bm;
151 re--; 133 if (xm == 0)
134 break;
152 } 135 }
136 xm <<= 1;
137 }
138
139 rm <<= 1;
140 if (xm)
141 rm |= 1; /* have remainder, set sticky */
153 142
154 DPNORMRET2(xs == ys ? 0 : 1, re, rm, "div", x, y); 143 assert(rm);
144
145 /*
146 * Normalise rm to rounding precision ?
147 */
148 while ((rm >> (DP_FBITS + 3)) == 0) {
149 rm <<= 1;
150 re--;
155 } 151 }
152
153 return ieee754dp_format(xs == ys ? 0 : 1, re, rm);
156} 154}
diff --git a/arch/mips/math-emu/dp_fint.c b/arch/mips/math-emu/dp_fint.c
index 88571288c9e0..10258f0afd69 100644
--- a/arch/mips/math-emu/dp_fint.c
+++ b/arch/mips/math-emu/dp_fint.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,21 +16,18 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
26
27#include "ieee754dp.h" 22#include "ieee754dp.h"
28 23
29ieee754dp ieee754dp_fint(int x) 24union ieee754dp ieee754dp_fint(int x)
30{ 25{
31 u64 xm; 26 u64 xm;
32 int xe; 27 int xe;
33 int xs; 28 int xs;
34 29
35 CLEARCX; 30 ieee754_clearcx();
36 31
37 if (x == 0) 32 if (x == 0)
38 return ieee754dp_zero(0); 33 return ieee754dp_zero(0);
@@ -51,29 +46,11 @@ ieee754dp ieee754dp_fint(int x)
51 xm = x; 46 xm = x;
52 } 47 }
53 48
54#if 1
55 /* normalize - result can never be inexact or overflow */ 49 /* normalize - result can never be inexact or overflow */
56 xe = DP_MBITS; 50 xe = DP_FBITS;
57 while ((xm >> DP_MBITS) == 0) { 51 while ((xm >> DP_FBITS) == 0) {
58 xm <<= 1; 52 xm <<= 1;
59 xe--; 53 xe--;
60 } 54 }
61 return builddp(xs, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT); 55 return builddp(xs, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT);
62#else
63 /* normalize */
64 xe = DP_MBITS + 3;
65 while ((xm >> (DP_MBITS + 3)) == 0) {
66 xm <<= 1;
67 xe--;
68 }
69 DPNORMRET1(xs, xe, xm, "fint", x);
70#endif
71}
72
73ieee754dp ieee754dp_funs(unsigned int u)
74{
75 if ((int) u < 0)
76 return ieee754dp_add(ieee754dp_1e31(),
77 ieee754dp_fint(u & ~(1 << 31)));
78 return ieee754dp_fint(u);
79} 56}
diff --git a/arch/mips/math-emu/dp_flong.c b/arch/mips/math-emu/dp_flong.c
index 14fc01ec742d..a267c2e39d78 100644
--- a/arch/mips/math-emu/dp_flong.c
+++ b/arch/mips/math-emu/dp_flong.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,21 +16,18 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
26
27#include "ieee754dp.h" 22#include "ieee754dp.h"
28 23
29ieee754dp ieee754dp_flong(s64 x) 24union ieee754dp ieee754dp_flong(s64 x)
30{ 25{
31 u64 xm; 26 u64 xm;
32 int xe; 27 int xe;
33 int xs; 28 int xs;
34 29
35 CLEARCX; 30 ieee754_clearcx();
36 31
37 if (x == 0) 32 if (x == 0)
38 return ieee754dp_zero(0); 33 return ieee754dp_zero(0);
@@ -52,26 +47,19 @@ ieee754dp ieee754dp_flong(s64 x)
52 } 47 }
53 48
54 /* normalize */ 49 /* normalize */
55 xe = DP_MBITS + 3; 50 xe = DP_FBITS + 3;
56 if (xm >> (DP_MBITS + 1 + 3)) { 51 if (xm >> (DP_FBITS + 1 + 3)) {
57 /* shunt out overflow bits */ 52 /* shunt out overflow bits */
58 while (xm >> (DP_MBITS + 1 + 3)) { 53 while (xm >> (DP_FBITS + 1 + 3)) {
59 XDPSRSX1(); 54 XDPSRSX1();
60 } 55 }
61 } else { 56 } else {
62 /* normalize in grs extended double precision */ 57 /* normalize in grs extended double precision */
63 while ((xm >> (DP_MBITS + 3)) == 0) { 58 while ((xm >> (DP_FBITS + 3)) == 0) {
64 xm <<= 1; 59 xm <<= 1;
65 xe--; 60 xe--;
66 } 61 }
67 } 62 }
68 DPNORMRET1(xs, xe, xm, "dp_flong", x);
69}
70 63
71ieee754dp ieee754dp_fulong(u64 u) 64 return ieee754dp_format(xs, xe, xm);
72{
73 if ((s64) u < 0)
74 return ieee754dp_add(ieee754dp_1e63(),
75 ieee754dp_flong(u & ~(1ULL << 63)));
76 return ieee754dp_flong(u);
77} 65}
diff --git a/arch/mips/math-emu/dp_frexp.c b/arch/mips/math-emu/dp_frexp.c
deleted file mode 100644
index cb15a5eaecbb..000000000000
--- a/arch/mips/math-emu/dp_frexp.c
+++ /dev/null
@@ -1,52 +0,0 @@
1/* IEEE754 floating point arithmetic
2 * double precision: common utilities
3 */
4/*
5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 * for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
22 *
23 * ########################################################################
24 */
25
26
27#include "ieee754dp.h"
28
29/* close to ieeep754dp_logb
30*/
31ieee754dp ieee754dp_frexp(ieee754dp x, int *eptr)
32{
33 COMPXDP;
34 CLEARCX;
35 EXPLODEXDP;
36
37 switch (xc) {
38 case IEEE754_CLASS_SNAN:
39 case IEEE754_CLASS_QNAN:
40 case IEEE754_CLASS_INF:
41 case IEEE754_CLASS_ZERO:
42 *eptr = 0;
43 return x;
44 case IEEE754_CLASS_DNORM:
45 DPDNORMX;
46 break;
47 case IEEE754_CLASS_NORM:
48 break;
49 }
50 *eptr = xe + 1;
51 return builddp(xs, -1 + DP_EBIAS, xm & ~DP_HIDDEN_BIT);
52}
diff --git a/arch/mips/math-emu/dp_fsp.c b/arch/mips/math-emu/dp_fsp.c
index daed6834dc15..ffb69c5830b0 100644
--- a/arch/mips/math-emu/dp_fsp.c
+++ b/arch/mips/math-emu/dp_fsp.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,56 +16,58 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
26 22#include "ieee754sp.h"
27#include "ieee754dp.h" 23#include "ieee754dp.h"
28 24
29ieee754dp ieee754dp_fsp(ieee754sp x) 25union ieee754dp ieee754dp_fsp(union ieee754sp x)
30{ 26{
31 COMPXSP; 27 COMPXSP;
32 28
33 EXPLODEXSP; 29 EXPLODEXSP;
34 30
35 CLEARCX; 31 ieee754_clearcx();
36 32
37 FLUSHXSP; 33 FLUSHXSP;
38 34
39 switch (xc) { 35 switch (xc) {
40 case IEEE754_CLASS_SNAN: 36 case IEEE754_CLASS_SNAN:
41 SETCX(IEEE754_INVALID_OPERATION); 37 ieee754_setcx(IEEE754_INVALID_OPERATION);
42 return ieee754dp_nanxcpt(ieee754dp_indef(), "fsp"); 38 return ieee754dp_nanxcpt(ieee754dp_indef());
39
43 case IEEE754_CLASS_QNAN: 40 case IEEE754_CLASS_QNAN:
44 return ieee754dp_nanxcpt(builddp(xs, 41 return ieee754dp_nanxcpt(builddp(xs,
45 DP_EMAX + 1 + DP_EBIAS, 42 DP_EMAX + 1 + DP_EBIAS,
46 ((u64) xm 43 ((u64) xm
47 << (DP_MBITS - 44 << (DP_FBITS -
48 SP_MBITS))), "fsp", 45 SP_FBITS))));
49 x);
50 case IEEE754_CLASS_INF: 46 case IEEE754_CLASS_INF:
51 return ieee754dp_inf(xs); 47 return ieee754dp_inf(xs);
48
52 case IEEE754_CLASS_ZERO: 49 case IEEE754_CLASS_ZERO:
53 return ieee754dp_zero(xs); 50 return ieee754dp_zero(xs);
51
54 case IEEE754_CLASS_DNORM: 52 case IEEE754_CLASS_DNORM:
55 /* normalize */ 53 /* normalize */
56 while ((xm >> SP_MBITS) == 0) { 54 while ((xm >> SP_FBITS) == 0) {
57 xm <<= 1; 55 xm <<= 1;
58 xe--; 56 xe--;
59 } 57 }
60 break; 58 break;
59
61 case IEEE754_CLASS_NORM: 60 case IEEE754_CLASS_NORM:
62 break; 61 break;
63 } 62 }
64 63
65 /* CAN'T possibly overflow,underflow, or need rounding 64 /*
65 * Can't possibly overflow,underflow, or need rounding
66 */ 66 */
67 67
68 /* drop the hidden bit */ 68 /* drop the hidden bit */
69 xm &= ~SP_HIDDEN_BIT; 69 xm &= ~SP_HIDDEN_BIT;
70 70
71 return builddp(xs, xe + DP_EBIAS, 71 return builddp(xs, xe + DP_EBIAS,
72 (u64) xm << (DP_MBITS - SP_MBITS)); 72 (u64) xm << (DP_FBITS - SP_FBITS));
73} 73}
diff --git a/arch/mips/math-emu/dp_logb.c b/arch/mips/math-emu/dp_logb.c
deleted file mode 100644
index 151127e59f5c..000000000000
--- a/arch/mips/math-emu/dp_logb.c
+++ /dev/null
@@ -1,53 +0,0 @@
1/* IEEE754 floating point arithmetic
2 * double precision: common utilities
3 */
4/*
5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 * for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
22 *
23 * ########################################################################
24 */
25
26
27#include "ieee754dp.h"
28
29ieee754dp ieee754dp_logb(ieee754dp x)
30{
31 COMPXDP;
32
33 CLEARCX;
34
35 EXPLODEXDP;
36
37 switch (xc) {
38 case IEEE754_CLASS_SNAN:
39 return ieee754dp_nanxcpt(x, "logb", x);
40 case IEEE754_CLASS_QNAN:
41 return x;
42 case IEEE754_CLASS_INF:
43 return ieee754dp_inf(0);
44 case IEEE754_CLASS_ZERO:
45 return ieee754dp_inf(1);
46 case IEEE754_CLASS_DNORM:
47 DPDNORMX;
48 break;
49 case IEEE754_CLASS_NORM:
50 break;
51 }
52 return ieee754dp_fint(xe);
53}
diff --git a/arch/mips/math-emu/dp_modf.c b/arch/mips/math-emu/dp_modf.c
deleted file mode 100644
index b01f9cf6d402..000000000000
--- a/arch/mips/math-emu/dp_modf.c
+++ /dev/null
@@ -1,79 +0,0 @@
1/* IEEE754 floating point arithmetic
2 * double precision: common utilities
3 */
4/*
5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 * for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
22 *
23 * ########################################################################
24 */
25
26
27#include "ieee754dp.h"
28
29/* modf function is always exact for a finite number
30*/
31ieee754dp ieee754dp_modf(ieee754dp x, ieee754dp *ip)
32{
33 COMPXDP;
34
35 CLEARCX;
36
37 EXPLODEXDP;
38
39 switch (xc) {
40 case IEEE754_CLASS_SNAN:
41 case IEEE754_CLASS_QNAN:
42 case IEEE754_CLASS_INF:
43 case IEEE754_CLASS_ZERO:
44 *ip = x;
45 return x;
46 case IEEE754_CLASS_DNORM:
47 /* far to small */
48 *ip = ieee754dp_zero(xs);
49 return x;
50 case IEEE754_CLASS_NORM:
51 break;
52 }
53 if (xe < 0) {
54 *ip = ieee754dp_zero(xs);
55 return x;
56 }
57 if (xe >= DP_MBITS) {
58 *ip = x;
59 return ieee754dp_zero(xs);
60 }
61 /* generate ipart mantissa by clearing bottom bits
62 */
63 *ip = builddp(xs, xe + DP_EBIAS,
64 ((xm >> (DP_MBITS - xe)) << (DP_MBITS - xe)) &
65 ~DP_HIDDEN_BIT);
66
67 /* generate fpart mantissa by clearing top bits
68 * and normalizing (must be able to normalize)
69 */
70 xm = (xm << (64 - (DP_MBITS - xe))) >> (64 - (DP_MBITS - xe));
71 if (xm == 0)
72 return ieee754dp_zero(xs);
73
74 while ((xm >> DP_MBITS) == 0) {
75 xm <<= 1;
76 xe--;
77 }
78 return builddp(xs, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT);
79}
diff --git a/arch/mips/math-emu/dp_mul.c b/arch/mips/math-emu/dp_mul.c
index 09175f461920..d3acdedb5b9d 100644
--- a/arch/mips/math-emu/dp_mul.c
+++ b/arch/mips/math-emu/dp_mul.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,23 +16,32 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
26
27#include "ieee754dp.h" 22#include "ieee754dp.h"
28 23
29ieee754dp ieee754dp_mul(ieee754dp x, ieee754dp y) 24union ieee754dp ieee754dp_mul(union ieee754dp x, union ieee754dp y)
30{ 25{
26 int re;
27 int rs;
28 u64 rm;
29 unsigned lxm;
30 unsigned hxm;
31 unsigned lym;
32 unsigned hym;
33 u64 lrm;
34 u64 hrm;
35 u64 t;
36 u64 at;
37
31 COMPXDP; 38 COMPXDP;
32 COMPYDP; 39 COMPYDP;
33 40
34 EXPLODEXDP; 41 EXPLODEXDP;
35 EXPLODEYDP; 42 EXPLODEYDP;
36 43
37 CLEARCX; 44 ieee754_clearcx();
38 45
39 FLUSHXDP; 46 FLUSHXDP;
40 FLUSHYDP; 47 FLUSHYDP;
@@ -51,8 +58,8 @@ ieee754dp ieee754dp_mul(ieee754dp x, ieee754dp y)
51 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): 58 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
52 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): 59 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
53 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): 60 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
54 SETCX(IEEE754_INVALID_OPERATION); 61 ieee754_setcx(IEEE754_INVALID_OPERATION);
55 return ieee754dp_nanxcpt(ieee754dp_indef(), "mul", x, y); 62 return ieee754dp_nanxcpt(ieee754dp_indef());
56 63
57 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): 64 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
58 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): 65 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
@@ -68,12 +75,13 @@ ieee754dp ieee754dp_mul(ieee754dp x, ieee754dp y)
68 return x; 75 return x;
69 76
70 77
71 /* Infinity handling */ 78 /*
72 79 * Infinity handling
80 */
73 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): 81 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
74 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): 82 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
75 SETCX(IEEE754_INVALID_OPERATION); 83 ieee754_setcx(IEEE754_INVALID_OPERATION);
76 return ieee754dp_xcpt(ieee754dp_indef(), "mul", x, y); 84 return ieee754dp_indef();
77 85
78 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): 86 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
79 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): 87 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
@@ -107,70 +115,59 @@ ieee754dp ieee754dp_mul(ieee754dp x, ieee754dp y)
107 /* rm = xm * ym, re = xe+ye basically */ 115 /* rm = xm * ym, re = xe+ye basically */
108 assert(xm & DP_HIDDEN_BIT); 116 assert(xm & DP_HIDDEN_BIT);
109 assert(ym & DP_HIDDEN_BIT); 117 assert(ym & DP_HIDDEN_BIT);
110 {
111 int re = xe + ye;
112 int rs = xs ^ ys;
113 u64 rm;
114 118
115 /* shunt to top of word */ 119 re = xe + ye;
116 xm <<= 64 - (DP_MBITS + 1); 120 rs = xs ^ ys;
117 ym <<= 64 - (DP_MBITS + 1); 121
122 /* shunt to top of word */
123 xm <<= 64 - (DP_FBITS + 1);
124 ym <<= 64 - (DP_FBITS + 1);
118 125
119 /* multiply 32bits xm,ym to give high 32bits rm with stickness 126 /*
120 */ 127 * Multiply 32 bits xm, ym to give high 32 bits rm with stickness.
128 */
121 129
122 /* 32 * 32 => 64 */ 130 /* 32 * 32 => 64 */
123#define DPXMULT(x, y) ((u64)(x) * (u64)y) 131#define DPXMULT(x, y) ((u64)(x) * (u64)y)
124 132
125 { 133 lxm = xm;
126 unsigned lxm = xm; 134 hxm = xm >> 32;
127 unsigned hxm = xm >> 32; 135 lym = ym;
128 unsigned lym = ym; 136 hym = ym >> 32;
129 unsigned hym = ym >> 32; 137
130 u64 lrm; 138 lrm = DPXMULT(lxm, lym);
131 u64 hrm; 139 hrm = DPXMULT(hxm, hym);
132 140
133 lrm = DPXMULT(lxm, lym); 141 t = DPXMULT(lxm, hym);
134 hrm = DPXMULT(hxm, hym); 142
135 143 at = lrm + (t << 32);
136 { 144 hrm += at < lrm;
137 u64 t = DPXMULT(lxm, hym); 145 lrm = at;
138 { 146
139 u64 at = 147 hrm = hrm + (t >> 32);
140 lrm + (t << 32); 148
141 hrm += at < lrm; 149 t = DPXMULT(hxm, lym);
142 lrm = at; 150
143 } 151 at = lrm + (t << 32);
144 hrm = hrm + (t >> 32); 152 hrm += at < lrm;
145 } 153 lrm = at;
146 154
147 { 155 hrm = hrm + (t >> 32);
148 u64 t = DPXMULT(hxm, lym); 156
149 { 157 rm = hrm | (lrm != 0);
150 u64 at = 158
151 lrm + (t << 32); 159 /*
152 hrm += at < lrm; 160 * Sticky shift down to normal rounding precision.
153 lrm = at; 161 */
154 } 162 if ((s64) rm < 0) {
155 hrm = hrm + (t >> 32); 163 rm = (rm >> (64 - (DP_FBITS + 1 + 3))) |
156 } 164 ((rm << (DP_FBITS + 1 + 3)) != 0);
157 rm = hrm | (lrm != 0);
158 }
159
160 /*
161 * sticky shift down to normal rounding precision
162 */
163 if ((s64) rm < 0) {
164 rm =
165 (rm >> (64 - (DP_MBITS + 1 + 3))) |
166 ((rm << (DP_MBITS + 1 + 3)) != 0);
167 re++; 165 re++;
168 } else { 166 } else {
169 rm = 167 rm = (rm >> (64 - (DP_FBITS + 1 + 3 + 1))) |
170 (rm >> (64 - (DP_MBITS + 1 + 3 + 1))) | 168 ((rm << (DP_FBITS + 1 + 3 + 1)) != 0);
171 ((rm << (DP_MBITS + 1 + 3 + 1)) != 0);
172 }
173 assert(rm & (DP_HIDDEN_BIT << 3));
174 DPNORMRET2(rs, re, rm, "mul", x, y);
175 } 169 }
170 assert(rm & (DP_HIDDEN_BIT << 3));
171
172 return ieee754dp_format(rs, re, rm);
176} 173}
diff --git a/arch/mips/math-emu/dp_scalb.c b/arch/mips/math-emu/dp_scalb.c
deleted file mode 100644
index 6f5df438dda8..000000000000
--- a/arch/mips/math-emu/dp_scalb.c
+++ /dev/null
@@ -1,57 +0,0 @@
1/* IEEE754 floating point arithmetic
2 * double precision: common utilities
3 */
4/*
5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 * for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
22 *
23 * ########################################################################
24 */
25
26
27#include "ieee754dp.h"
28
29ieee754dp ieee754dp_scalb(ieee754dp x, int n)
30{
31 COMPXDP;
32
33 CLEARCX;
34
35 EXPLODEXDP;
36
37 switch (xc) {
38 case IEEE754_CLASS_SNAN:
39 return ieee754dp_nanxcpt(x, "scalb", x, n);
40 case IEEE754_CLASS_QNAN:
41 case IEEE754_CLASS_INF:
42 case IEEE754_CLASS_ZERO:
43 return x;
44 case IEEE754_CLASS_DNORM:
45 DPDNORMX;
46 break;
47 case IEEE754_CLASS_NORM:
48 break;
49 }
50 DPNORMRET2(xs, xe + n, xm << 3, "scalb", x, n);
51}
52
53
54ieee754dp ieee754dp_ldexp(ieee754dp x, int n)
55{
56 return ieee754dp_scalb(x, n);
57}
diff --git a/arch/mips/math-emu/dp_simple.c b/arch/mips/math-emu/dp_simple.c
index 79ce2673a714..bccbe90efceb 100644
--- a/arch/mips/math-emu/dp_simple.c
+++ b/arch/mips/math-emu/dp_simple.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,33 +16,17 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
26
27#include "ieee754dp.h" 22#include "ieee754dp.h"
28 23
29int ieee754dp_finite(ieee754dp x) 24union ieee754dp ieee754dp_neg(union ieee754dp x)
30{
31 return DPBEXP(x) != DP_EMAX + 1 + DP_EBIAS;
32}
33
34ieee754dp ieee754dp_copysign(ieee754dp x, ieee754dp y)
35{
36 CLEARCX;
37 DPSIGN(x) = DPSIGN(y);
38 return x;
39}
40
41
42ieee754dp ieee754dp_neg(ieee754dp x)
43{ 25{
44 COMPXDP; 26 COMPXDP;
45 27
46 EXPLODEXDP; 28 EXPLODEXDP;
47 CLEARCX; 29 ieee754_clearcx();
48 FLUSHXDP; 30 FLUSHXDP;
49 31
50 /* 32 /*
@@ -55,30 +37,29 @@ ieee754dp ieee754dp_neg(ieee754dp x)
55 DPSIGN(x) ^= 1; 37 DPSIGN(x) ^= 1;
56 38
57 if (xc == IEEE754_CLASS_SNAN) { 39 if (xc == IEEE754_CLASS_SNAN) {
58 ieee754dp y = ieee754dp_indef(); 40 union ieee754dp y = ieee754dp_indef();
59 SETCX(IEEE754_INVALID_OPERATION); 41 ieee754_setcx(IEEE754_INVALID_OPERATION);
60 DPSIGN(y) = DPSIGN(x); 42 DPSIGN(y) = DPSIGN(x);
61 return ieee754dp_nanxcpt(y, "neg"); 43 return ieee754dp_nanxcpt(y);
62 } 44 }
63 45
64 return x; 46 return x;
65} 47}
66 48
67 49union ieee754dp ieee754dp_abs(union ieee754dp x)
68ieee754dp ieee754dp_abs(ieee754dp x)
69{ 50{
70 COMPXDP; 51 COMPXDP;
71 52
72 EXPLODEXDP; 53 EXPLODEXDP;
73 CLEARCX; 54 ieee754_clearcx();
74 FLUSHXDP; 55 FLUSHXDP;
75 56
76 /* Clear sign ALWAYS, irrespective of NaN */ 57 /* Clear sign ALWAYS, irrespective of NaN */
77 DPSIGN(x) = 0; 58 DPSIGN(x) = 0;
78 59
79 if (xc == IEEE754_CLASS_SNAN) { 60 if (xc == IEEE754_CLASS_SNAN) {
80 SETCX(IEEE754_INVALID_OPERATION); 61 ieee754_setcx(IEEE754_INVALID_OPERATION);
81 return ieee754dp_nanxcpt(ieee754dp_indef(), "abs"); 62 return ieee754dp_nanxcpt(ieee754dp_indef());
82 } 63 }
83 64
84 return x; 65 return x;
diff --git a/arch/mips/math-emu/dp_sqrt.c b/arch/mips/math-emu/dp_sqrt.c
index b874d60a942b..041bbb6124bb 100644
--- a/arch/mips/math-emu/dp_sqrt.c
+++ b/arch/mips/math-emu/dp_sqrt.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,12 +16,9 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
26
27#include "ieee754dp.h" 22#include "ieee754dp.h"
28 23
29static const unsigned table[] = { 24static const unsigned table[] = {
@@ -34,44 +29,49 @@ static const unsigned table[] = {
34 1742, 661, 130 29 1742, 661, 130
35}; 30};
36 31
37ieee754dp ieee754dp_sqrt(ieee754dp x) 32union ieee754dp ieee754dp_sqrt(union ieee754dp x)
38{ 33{
39 struct _ieee754_csr oldcsr; 34 struct _ieee754_csr oldcsr;
40 ieee754dp y, z, t; 35 union ieee754dp y, z, t;
41 unsigned scalx, yh; 36 unsigned scalx, yh;
42 COMPXDP; 37 COMPXDP;
43 38
44 EXPLODEXDP; 39 EXPLODEXDP;
45 CLEARCX; 40 ieee754_clearcx();
46 FLUSHXDP; 41 FLUSHXDP;
47 42
48 /* x == INF or NAN? */ 43 /* x == INF or NAN? */
49 switch (xc) { 44 switch (xc) {
50 case IEEE754_CLASS_QNAN: 45 case IEEE754_CLASS_QNAN:
51 /* sqrt(Nan) = Nan */ 46 /* sqrt(Nan) = Nan */
52 return ieee754dp_nanxcpt(x, "sqrt"); 47 return ieee754dp_nanxcpt(x);
48
53 case IEEE754_CLASS_SNAN: 49 case IEEE754_CLASS_SNAN:
54 SETCX(IEEE754_INVALID_OPERATION); 50 ieee754_setcx(IEEE754_INVALID_OPERATION);
55 return ieee754dp_nanxcpt(ieee754dp_indef(), "sqrt"); 51 return ieee754dp_nanxcpt(ieee754dp_indef());
52
56 case IEEE754_CLASS_ZERO: 53 case IEEE754_CLASS_ZERO:
57 /* sqrt(0) = 0 */ 54 /* sqrt(0) = 0 */
58 return x; 55 return x;
56
59 case IEEE754_CLASS_INF: 57 case IEEE754_CLASS_INF:
60 if (xs) { 58 if (xs) {
61 /* sqrt(-Inf) = Nan */ 59 /* sqrt(-Inf) = Nan */
62 SETCX(IEEE754_INVALID_OPERATION); 60 ieee754_setcx(IEEE754_INVALID_OPERATION);
63 return ieee754dp_nanxcpt(ieee754dp_indef(), "sqrt"); 61 return ieee754dp_nanxcpt(ieee754dp_indef());
64 } 62 }
65 /* sqrt(+Inf) = Inf */ 63 /* sqrt(+Inf) = Inf */
66 return x; 64 return x;
65
67 case IEEE754_CLASS_DNORM: 66 case IEEE754_CLASS_DNORM:
68 DPDNORMX; 67 DPDNORMX;
69 /* fall through */ 68 /* fall through */
69
70 case IEEE754_CLASS_NORM: 70 case IEEE754_CLASS_NORM:
71 if (xs) { 71 if (xs) {
72 /* sqrt(-x) = Nan */ 72 /* sqrt(-x) = Nan */
73 SETCX(IEEE754_INVALID_OPERATION); 73 ieee754_setcx(IEEE754_INVALID_OPERATION);
74 return ieee754dp_nanxcpt(ieee754dp_indef(), "sqrt"); 74 return ieee754dp_nanxcpt(ieee754dp_indef());
75 } 75 }
76 break; 76 break;
77 } 77 }
@@ -80,7 +80,7 @@ ieee754dp ieee754dp_sqrt(ieee754dp x)
80 oldcsr = ieee754_csr; 80 oldcsr = ieee754_csr;
81 ieee754_csr.mx &= ~IEEE754_INEXACT; 81 ieee754_csr.mx &= ~IEEE754_INEXACT;
82 ieee754_csr.sx &= ~IEEE754_INEXACT; 82 ieee754_csr.sx &= ~IEEE754_INEXACT;
83 ieee754_csr.rm = IEEE754_RN; 83 ieee754_csr.rm = FPU_CSR_RN;
84 84
85 /* adjust exponent to prevent overflow */ 85 /* adjust exponent to prevent overflow */
86 scalx = 0; 86 scalx = 0;
@@ -110,19 +110,19 @@ ieee754dp ieee754dp_sqrt(ieee754dp x)
110 /* triple to almost 56 sig. bits: y ~= sqrt(x) to within 1 ulp */ 110 /* triple to almost 56 sig. bits: y ~= sqrt(x) to within 1 ulp */
111 /* t=y*y; z=t; pt[n0]+=0x00100000; t+=z; z=(x-z)*y; */ 111 /* t=y*y; z=t; pt[n0]+=0x00100000; t+=z; z=(x-z)*y; */
112 z = t = ieee754dp_mul(y, y); 112 z = t = ieee754dp_mul(y, y);
113 t.parts.bexp += 0x001; 113 t.bexp += 0x001;
114 t = ieee754dp_add(t, z); 114 t = ieee754dp_add(t, z);
115 z = ieee754dp_mul(ieee754dp_sub(x, z), y); 115 z = ieee754dp_mul(ieee754dp_sub(x, z), y);
116 116
117 /* t=z/(t+x) ; pt[n0]+=0x00100000; y+=t; */ 117 /* t=z/(t+x) ; pt[n0]+=0x00100000; y+=t; */
118 t = ieee754dp_div(z, ieee754dp_add(t, x)); 118 t = ieee754dp_div(z, ieee754dp_add(t, x));
119 t.parts.bexp += 0x001; 119 t.bexp += 0x001;
120 y = ieee754dp_add(y, t); 120 y = ieee754dp_add(y, t);
121 121
122 /* twiddle last bit to force y correctly rounded */ 122 /* twiddle last bit to force y correctly rounded */
123 123
124 /* set RZ, clear INEX flag */ 124 /* set RZ, clear INEX flag */
125 ieee754_csr.rm = IEEE754_RZ; 125 ieee754_csr.rm = FPU_CSR_RZ;
126 ieee754_csr.sx &= ~IEEE754_INEXACT; 126 ieee754_csr.sx &= ~IEEE754_INEXACT;
127 127
128 /* t=x/y; ...chopped quotient, possibly inexact */ 128 /* t=x/y; ...chopped quotient, possibly inexact */
@@ -139,10 +139,10 @@ ieee754dp ieee754dp_sqrt(ieee754dp x)
139 oldcsr.sx |= IEEE754_INEXACT; 139 oldcsr.sx |= IEEE754_INEXACT;
140 140
141 switch (oldcsr.rm) { 141 switch (oldcsr.rm) {
142 case IEEE754_RP: 142 case FPU_CSR_RU:
143 y.bits += 1; 143 y.bits += 1;
144 /* drop through */ 144 /* drop through */
145 case IEEE754_RN: 145 case FPU_CSR_RN:
146 t.bits += 1; 146 t.bits += 1;
147 break; 147 break;
148 } 148 }
@@ -155,7 +155,7 @@ ieee754dp ieee754dp_sqrt(ieee754dp x)
155 } 155 }
156 156
157 /* py[n0]=py[n0]+scalx; ...scale back y */ 157 /* py[n0]=py[n0]+scalx; ...scale back y */
158 y.parts.bexp += scalx; 158 y.bexp += scalx;
159 159
160 /* restore rounding mode, possibly set inexact */ 160 /* restore rounding mode, possibly set inexact */
161 ieee754_csr = oldcsr; 161 ieee754_csr = oldcsr;
diff --git a/arch/mips/math-emu/dp_sub.c b/arch/mips/math-emu/dp_sub.c
index 91e0a4b5cbc7..7a174029043a 100644
--- a/arch/mips/math-emu/dp_sub.c
+++ b/arch/mips/math-emu/dp_sub.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,23 +16,22 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
26
27#include "ieee754dp.h" 22#include "ieee754dp.h"
28 23
29ieee754dp ieee754dp_sub(ieee754dp x, ieee754dp y) 24union ieee754dp ieee754dp_sub(union ieee754dp x, union ieee754dp y)
30{ 25{
26 int s;
27
31 COMPXDP; 28 COMPXDP;
32 COMPYDP; 29 COMPYDP;
33 30
34 EXPLODEXDP; 31 EXPLODEXDP;
35 EXPLODEYDP; 32 EXPLODEYDP;
36 33
37 CLEARCX; 34 ieee754_clearcx();
38 35
39 FLUSHXDP; 36 FLUSHXDP;
40 FLUSHYDP; 37 FLUSHYDP;
@@ -51,8 +48,8 @@ ieee754dp ieee754dp_sub(ieee754dp x, ieee754dp y)
51 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): 48 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
52 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): 49 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
53 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): 50 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
54 SETCX(IEEE754_INVALID_OPERATION); 51 ieee754_setcx(IEEE754_INVALID_OPERATION);
55 return ieee754dp_nanxcpt(ieee754dp_indef(), "sub", x, y); 52 return ieee754dp_nanxcpt(ieee754dp_indef());
56 53
57 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): 54 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
58 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): 55 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
@@ -68,14 +65,14 @@ ieee754dp ieee754dp_sub(ieee754dp x, ieee754dp y)
68 return x; 65 return x;
69 66
70 67
71 /* Infinity handling 68 /*
72 */ 69 * Infinity handling
73 70 */
74 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): 71 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
75 if (xs != ys) 72 if (xs != ys)
76 return x; 73 return x;
77 SETCX(IEEE754_INVALID_OPERATION); 74 ieee754_setcx(IEEE754_INVALID_OPERATION);
78 return ieee754dp_xcpt(ieee754dp_indef(), "sub", x, y); 75 return ieee754dp_indef();
79 76
80 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): 77 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
81 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): 78 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
@@ -87,15 +84,14 @@ ieee754dp ieee754dp_sub(ieee754dp x, ieee754dp y)
87 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): 84 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
88 return x; 85 return x;
89 86
90 /* Zero handling 87 /*
91 */ 88 * Zero handling
92 89 */
93 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): 90 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
94 if (xs != ys) 91 if (xs != ys)
95 return x; 92 return x;
96 else 93 else
97 return ieee754dp_zero(ieee754_csr.rm == 94 return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD);
98 IEEE754_RD);
99 95
100 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): 96 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
101 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): 97 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
@@ -136,15 +132,17 @@ ieee754dp ieee754dp_sub(ieee754dp x, ieee754dp y)
136 ym <<= 3; 132 ym <<= 3;
137 133
138 if (xe > ye) { 134 if (xe > ye) {
139 /* have to shift y fraction right to align 135 /*
136 * Have to shift y fraction right to align
140 */ 137 */
141 int s = xe - ye; 138 s = xe - ye;
142 ym = XDPSRS(ym, s); 139 ym = XDPSRS(ym, s);
143 ye += s; 140 ye += s;
144 } else if (ye > xe) { 141 } else if (ye > xe) {
145 /* have to shift x fraction right to align 142 /*
143 * Have to shift x fraction right to align
146 */ 144 */
147 int s = ye - xe; 145 s = ye - xe;
148 xm = XDPSRS(xm, s); 146 xm = XDPSRS(xm, s);
149 xe += s; 147 xe += s;
150 } 148 }
@@ -158,7 +156,7 @@ ieee754dp ieee754dp_sub(ieee754dp x, ieee754dp y)
158 xe = xe; 156 xe = xe;
159 xs = xs; 157 xs = xs;
160 158
161 if (xm >> (DP_MBITS + 1 + 3)) { /* carry out */ 159 if (xm >> (DP_FBITS + 1 + 3)) { /* carry out */
162 xm = XDPSRS1(xm); /* shift preserving sticky */ 160 xm = XDPSRS1(xm); /* shift preserving sticky */
163 xe++; 161 xe++;
164 } 162 }
@@ -173,7 +171,7 @@ ieee754dp ieee754dp_sub(ieee754dp x, ieee754dp y)
173 xs = ys; 171 xs = ys;
174 } 172 }
175 if (xm == 0) { 173 if (xm == 0) {
176 if (ieee754_csr.rm == IEEE754_RD) 174 if (ieee754_csr.rm == FPU_CSR_RD)
177 return ieee754dp_zero(1); /* round negative inf. => sign = -1 */ 175 return ieee754dp_zero(1); /* round negative inf. => sign = -1 */
178 else 176 else
179 return ieee754dp_zero(0); /* other round modes => sign = 1 */ 177 return ieee754dp_zero(0); /* other round modes => sign = 1 */
@@ -181,10 +179,11 @@ ieee754dp ieee754dp_sub(ieee754dp x, ieee754dp y)
181 179
182 /* normalize to rounding precision 180 /* normalize to rounding precision
183 */ 181 */
184 while ((xm >> (DP_MBITS + 3)) == 0) { 182 while ((xm >> (DP_FBITS + 3)) == 0) {
185 xm <<= 1; 183 xm <<= 1;
186 xe--; 184 xe--;
187 } 185 }
188 } 186 }
189 DPNORMRET2(xs, xe, xm, "sub", x, y); 187
188 return ieee754dp_format(xs, xe, xm);
190} 189}
diff --git a/arch/mips/math-emu/dp_tint.c b/arch/mips/math-emu/dp_tint.c
index 0ebe8598b94a..6ffc336c530e 100644
--- a/arch/mips/math-emu/dp_tint.c
+++ b/arch/mips/math-emu/dp_tint.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,20 +16,21 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
26
27#include <linux/kernel.h>
28#include "ieee754dp.h" 22#include "ieee754dp.h"
29 23
30int ieee754dp_tint(ieee754dp x) 24int ieee754dp_tint(union ieee754dp x)
31{ 25{
26 u64 residue;
27 int round;
28 int sticky;
29 int odd;
30
32 COMPXDP; 31 COMPXDP;
33 32
34 CLEARCX; 33 ieee754_clearcx();
35 34
36 EXPLODEXDP; 35 EXPLODEXDP;
37 FLUSHXDP; 36 FLUSHXDP;
@@ -40,10 +39,12 @@ int ieee754dp_tint(ieee754dp x)
40 case IEEE754_CLASS_SNAN: 39 case IEEE754_CLASS_SNAN:
41 case IEEE754_CLASS_QNAN: 40 case IEEE754_CLASS_QNAN:
42 case IEEE754_CLASS_INF: 41 case IEEE754_CLASS_INF:
43 SETCX(IEEE754_INVALID_OPERATION); 42 ieee754_setcx(IEEE754_INVALID_OPERATION);
44 return ieee754si_xcpt(ieee754si_indef(), "dp_tint", x); 43 return ieee754si_indef();
44
45 case IEEE754_CLASS_ZERO: 45 case IEEE754_CLASS_ZERO:
46 return 0; 46 return 0;
47
47 case IEEE754_CLASS_DNORM: 48 case IEEE754_CLASS_DNORM:
48 case IEEE754_CLASS_NORM: 49 case IEEE754_CLASS_NORM:
49 break; 50 break;
@@ -51,44 +52,39 @@ int ieee754dp_tint(ieee754dp x)
51 if (xe > 31) { 52 if (xe > 31) {
52 /* Set invalid. We will only use overflow for floating 53 /* Set invalid. We will only use overflow for floating
53 point overflow */ 54 point overflow */
54 SETCX(IEEE754_INVALID_OPERATION); 55 ieee754_setcx(IEEE754_INVALID_OPERATION);
55 return ieee754si_xcpt(ieee754si_indef(), "dp_tint", x); 56 return ieee754si_indef();
56 } 57 }
57 /* oh gawd */ 58 /* oh gawd */
58 if (xe > DP_MBITS) { 59 if (xe > DP_FBITS) {
59 xm <<= xe - DP_MBITS; 60 xm <<= xe - DP_FBITS;
60 } else if (xe < DP_MBITS) { 61 } else if (xe < DP_FBITS) {
61 u64 residue;
62 int round;
63 int sticky;
64 int odd;
65
66 if (xe < -1) { 62 if (xe < -1) {
67 residue = xm; 63 residue = xm;
68 round = 0; 64 round = 0;
69 sticky = residue != 0; 65 sticky = residue != 0;
70 xm = 0; 66 xm = 0;
71 } else { 67 } else {
72 residue = xm << (64 - DP_MBITS + xe); 68 residue = xm << (64 - DP_FBITS + xe);
73 round = (residue >> 63) != 0; 69 round = (residue >> 63) != 0;
74 sticky = (residue << 1) != 0; 70 sticky = (residue << 1) != 0;
75 xm >>= DP_MBITS - xe; 71 xm >>= DP_FBITS - xe;
76 } 72 }
77 /* Note: At this point upper 32 bits of xm are guaranteed 73 /* Note: At this point upper 32 bits of xm are guaranteed
78 to be zero */ 74 to be zero */
79 odd = (xm & 0x1) != 0x0; 75 odd = (xm & 0x1) != 0x0;
80 switch (ieee754_csr.rm) { 76 switch (ieee754_csr.rm) {
81 case IEEE754_RN: 77 case FPU_CSR_RN:
82 if (round && (sticky || odd)) 78 if (round && (sticky || odd))
83 xm++; 79 xm++;
84 break; 80 break;
85 case IEEE754_RZ: 81 case FPU_CSR_RZ:
86 break; 82 break;
87 case IEEE754_RU: /* toward +Infinity */ 83 case FPU_CSR_RU: /* toward +Infinity */
88 if ((round || sticky) && !xs) 84 if ((round || sticky) && !xs)
89 xm++; 85 xm++;
90 break; 86 break;
91 case IEEE754_RD: /* toward -Infinity */ 87 case FPU_CSR_RD: /* toward -Infinity */
92 if ((round || sticky) && xs) 88 if ((round || sticky) && xs)
93 xm++; 89 xm++;
94 break; 90 break;
@@ -96,27 +92,14 @@ int ieee754dp_tint(ieee754dp x)
96 /* look for valid corner case 0x80000000 */ 92 /* look for valid corner case 0x80000000 */
97 if ((xm >> 31) != 0 && (xs == 0 || xm != 0x80000000)) { 93 if ((xm >> 31) != 0 && (xs == 0 || xm != 0x80000000)) {
98 /* This can happen after rounding */ 94 /* This can happen after rounding */
99 SETCX(IEEE754_INVALID_OPERATION); 95 ieee754_setcx(IEEE754_INVALID_OPERATION);
100 return ieee754si_xcpt(ieee754si_indef(), "dp_tint", x); 96 return ieee754si_indef();
101 } 97 }
102 if (round || sticky) 98 if (round || sticky)
103 SETCX(IEEE754_INEXACT); 99 ieee754_setcx(IEEE754_INEXACT);
104 } 100 }
105 if (xs) 101 if (xs)
106 return -xm; 102 return -xm;
107 else 103 else
108 return xm; 104 return xm;
109} 105}
110
111
112unsigned int ieee754dp_tuns(ieee754dp x)
113{
114 ieee754dp hb = ieee754dp_1e31();
115
116 /* what if x < 0 ?? */
117 if (ieee754dp_lt(x, hb))
118 return (unsigned) ieee754dp_tint(x);
119
120 return (unsigned) ieee754dp_tint(ieee754dp_sub(x, hb)) |
121 ((unsigned) 1 << 31);
122}
diff --git a/arch/mips/math-emu/dp_tlong.c b/arch/mips/math-emu/dp_tlong.c
index 133ce2ba0012..9cdc145b75e0 100644
--- a/arch/mips/math-emu/dp_tlong.c
+++ b/arch/mips/math-emu/dp_tlong.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,19 +16,21 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
26
27#include "ieee754dp.h" 22#include "ieee754dp.h"
28 23
29s64 ieee754dp_tlong(ieee754dp x) 24s64 ieee754dp_tlong(union ieee754dp x)
30{ 25{
26 u64 residue;
27 int round;
28 int sticky;
29 int odd;
30
31 COMPXDP; 31 COMPXDP;
32 32
33 CLEARCX; 33 ieee754_clearcx();
34 34
35 EXPLODEXDP; 35 EXPLODEXDP;
36 FLUSHXDP; 36 FLUSHXDP;
@@ -39,10 +39,12 @@ s64 ieee754dp_tlong(ieee754dp x)
39 case IEEE754_CLASS_SNAN: 39 case IEEE754_CLASS_SNAN:
40 case IEEE754_CLASS_QNAN: 40 case IEEE754_CLASS_QNAN:
41 case IEEE754_CLASS_INF: 41 case IEEE754_CLASS_INF:
42 SETCX(IEEE754_INVALID_OPERATION); 42 ieee754_setcx(IEEE754_INVALID_OPERATION);
43 return ieee754di_xcpt(ieee754di_indef(), "dp_tlong", x); 43 return ieee754di_indef();
44
44 case IEEE754_CLASS_ZERO: 45 case IEEE754_CLASS_ZERO:
45 return 0; 46 return 0;
47
46 case IEEE754_CLASS_DNORM: 48 case IEEE754_CLASS_DNORM:
47 case IEEE754_CLASS_NORM: 49 case IEEE754_CLASS_NORM:
48 break; 50 break;
@@ -53,18 +55,13 @@ s64 ieee754dp_tlong(ieee754dp x)
53 return -0x8000000000000000LL; 55 return -0x8000000000000000LL;
54 /* Set invalid. We will only use overflow for floating 56 /* Set invalid. We will only use overflow for floating
55 point overflow */ 57 point overflow */
56 SETCX(IEEE754_INVALID_OPERATION); 58 ieee754_setcx(IEEE754_INVALID_OPERATION);
57 return ieee754di_xcpt(ieee754di_indef(), "dp_tlong", x); 59 return ieee754di_indef();
58 } 60 }
59 /* oh gawd */ 61 /* oh gawd */
60 if (xe > DP_MBITS) { 62 if (xe > DP_FBITS) {
61 xm <<= xe - DP_MBITS; 63 xm <<= xe - DP_FBITS;
62 } else if (xe < DP_MBITS) { 64 } else if (xe < DP_FBITS) {
63 u64 residue;
64 int round;
65 int sticky;
66 int odd;
67
68 if (xe < -1) { 65 if (xe < -1) {
69 residue = xm; 66 residue = xm;
70 round = 0; 67 round = 0;
@@ -75,51 +72,38 @@ s64 ieee754dp_tlong(ieee754dp x)
75 * so we do it in two steps. Be aware that xe 72 * so we do it in two steps. Be aware that xe
76 * may be -1 */ 73 * may be -1 */
77 residue = xm << (xe + 1); 74 residue = xm << (xe + 1);
78 residue <<= 63 - DP_MBITS; 75 residue <<= 63 - DP_FBITS;
79 round = (residue >> 63) != 0; 76 round = (residue >> 63) != 0;
80 sticky = (residue << 1) != 0; 77 sticky = (residue << 1) != 0;
81 xm >>= DP_MBITS - xe; 78 xm >>= DP_FBITS - xe;
82 } 79 }
83 odd = (xm & 0x1) != 0x0; 80 odd = (xm & 0x1) != 0x0;
84 switch (ieee754_csr.rm) { 81 switch (ieee754_csr.rm) {
85 case IEEE754_RN: 82 case FPU_CSR_RN:
86 if (round && (sticky || odd)) 83 if (round && (sticky || odd))
87 xm++; 84 xm++;
88 break; 85 break;
89 case IEEE754_RZ: 86 case FPU_CSR_RZ:
90 break; 87 break;
91 case IEEE754_RU: /* toward +Infinity */ 88 case FPU_CSR_RU: /* toward +Infinity */
92 if ((round || sticky) && !xs) 89 if ((round || sticky) && !xs)
93 xm++; 90 xm++;
94 break; 91 break;
95 case IEEE754_RD: /* toward -Infinity */ 92 case FPU_CSR_RD: /* toward -Infinity */
96 if ((round || sticky) && xs) 93 if ((round || sticky) && xs)
97 xm++; 94 xm++;
98 break; 95 break;
99 } 96 }
100 if ((xm >> 63) != 0) { 97 if ((xm >> 63) != 0) {
101 /* This can happen after rounding */ 98 /* This can happen after rounding */
102 SETCX(IEEE754_INVALID_OPERATION); 99 ieee754_setcx(IEEE754_INVALID_OPERATION);
103 return ieee754di_xcpt(ieee754di_indef(), "dp_tlong", x); 100 return ieee754di_indef();
104 } 101 }
105 if (round || sticky) 102 if (round || sticky)
106 SETCX(IEEE754_INEXACT); 103 ieee754_setcx(IEEE754_INEXACT);
107 } 104 }
108 if (xs) 105 if (xs)
109 return -xm; 106 return -xm;
110 else 107 else
111 return xm; 108 return xm;
112} 109}
113
114
115u64 ieee754dp_tulong(ieee754dp x)
116{
117 ieee754dp hb = ieee754dp_1e63();
118
119 /* what if x < 0 ?? */
120 if (ieee754dp_lt(x, hb))
121 return (u64) ieee754dp_tlong(x);
122
123 return (u64) ieee754dp_tlong(ieee754dp_sub(x, hb)) |
124 (1ULL << 63);
125}
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index 7ea622ab8dad..4f514f3724cb 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -1,30 +1,12 @@
1#include <linux/compiler.h>
2#include <linux/mm.h>
3#include <linux/signal.h>
4#include <linux/smp.h>
5
6#include <asm/asm.h>
7#include <asm/bootinfo.h>
8#include <asm/byteorder.h>
9#include <asm/cpu.h>
10#include <asm/inst.h>
11#include <asm/processor.h>
12#include <asm/uaccess.h>
13#include <asm/branch.h> 1#include <asm/branch.h>
14#include <asm/mipsregs.h>
15#include <asm/cacheflush.h> 2#include <asm/cacheflush.h>
16
17#include <asm/fpu_emulator.h> 3#include <asm/fpu_emulator.h>
4#include <asm/inst.h>
5#include <asm/mipsregs.h>
6#include <asm/uaccess.h>
18 7
19#include "ieee754.h" 8#include "ieee754.h"
20 9
21/* Strap kernel emulator for full MIPS IV emulation */
22
23#ifdef __mips
24#undef __mips
25#endif
26#define __mips 4
27
28/* 10/*
29 * Emulate the arbritrary instruction ir at xcp->cp0_epc. Required when 11 * Emulate the arbritrary instruction ir at xcp->cp0_epc. Required when
30 * we have to emulate the instruction in a COP1 branch delay slot. Do 12 * we have to emulate the instruction in a COP1 branch delay slot. Do
@@ -59,13 +41,11 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
59 (ir == 0)) { 41 (ir == 0)) {
60 /* NOP is easy */ 42 /* NOP is easy */
61 regs->cp0_epc = cpc; 43 regs->cp0_epc = cpc;
62 regs->cp0_cause &= ~CAUSEF_BD; 44 clear_delay_slot(regs);
63 return 0; 45 return 0;
64 } 46 }
65#ifdef DSEMUL_TRACE
66 printk("dsemul %lx %lx\n", regs->cp0_epc, cpc);
67 47
68#endif 48 pr_debug("dsemul %lx %lx\n", regs->cp0_epc, cpc);
69 49
70 /* 50 /*
71 * The strategy is to push the instruction onto the user stack 51 * The strategy is to push the instruction onto the user stack
@@ -167,9 +147,8 @@ int do_dsemulret(struct pt_regs *xcp)
167 * emulating the branch delay instruction. 147 * emulating the branch delay instruction.
168 */ 148 */
169 149
170#ifdef DSEMUL_TRACE 150 pr_debug("dsemulret\n");
171 printk("dsemulret\n"); 151
172#endif
173 if (__get_user(epc, &fr->epc)) { /* Saved EPC */ 152 if (__get_user(epc, &fr->epc)) { /* Saved EPC */
174 /* This is not a good situation to be in */ 153 /* This is not a good situation to be in */
175 force_sig(SIGBUS, current); 154 force_sig(SIGBUS, current);
diff --git a/arch/mips/math-emu/ieee754.c b/arch/mips/math-emu/ieee754.c
index 0015cf1989da..53f1d2287084 100644
--- a/arch/mips/math-emu/ieee754.c
+++ b/arch/mips/math-emu/ieee754.c
@@ -10,8 +10,6 @@
10 * MIPS floating point support 10 * MIPS floating point support
11 * Copyright (C) 1994-2000 Algorithmics Ltd. 11 * Copyright (C) 1994-2000 Algorithmics Ltd.
12 * 12 *
13 * ########################################################################
14 *
15 * This program is free software; you can distribute it and/or modify it 13 * This program is free software; you can distribute it and/or modify it
16 * under the terms of the GNU General Public License (Version 2) as 14 * under the terms of the GNU General Public License (Version 2) as
17 * published by the Free Software Foundation. 15 * published by the Free Software Foundation.
@@ -23,105 +21,69 @@
23 * 21 *
24 * You should have received a copy of the GNU General Public License along 22 * You should have received a copy of the GNU General Public License along
25 * with this program; if not, write to the Free Software Foundation, Inc., 23 * with this program; if not, write to the Free Software Foundation, Inc.,
26 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 24 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
27 *
28 * ########################################################################
29 */ 25 */
30 26
27#include <linux/compiler.h>
31 28
32#include "ieee754int.h" 29#include "ieee754.h"
33#include "ieee754sp.h" 30#include "ieee754sp.h"
34#include "ieee754dp.h" 31#include "ieee754dp.h"
35 32
36#define DP_EBIAS 1023 33/*
37#define DP_EMIN (-1022) 34 * Special constants
38#define DP_EMAX 1023 35 */
39
40#define SP_EBIAS 127
41#define SP_EMIN (-126)
42#define SP_EMAX 127
43
44/* special constants
45*/
46
47
48#if (defined(BYTE_ORDER) && BYTE_ORDER == LITTLE_ENDIAN) || defined(__MIPSEL__)
49#define SPSTR(s, b, m) {m, b, s}
50#define DPSTR(s, b, mh, ml) {ml, mh, b, s}
51#endif
52
53#ifdef __MIPSEB__
54#define SPSTR(s, b, m) {s, b, m}
55#define DPSTR(s, b, mh, ml) {s, b, mh, ml}
56#endif
57 36
58const struct ieee754dp_konst __ieee754dp_spcvals[] = { 37#define DPCNST(s, b, m) \
59 DPSTR(0, DP_EMIN - 1 + DP_EBIAS, 0, 0), /* + zero */ 38{ \
60 DPSTR(1, DP_EMIN - 1 + DP_EBIAS, 0, 0), /* - zero */ 39 .sign = (s), \
61 DPSTR(0, DP_EBIAS, 0, 0), /* + 1.0 */ 40 .bexp = (b) + DP_EBIAS, \
62 DPSTR(1, DP_EBIAS, 0, 0), /* - 1.0 */ 41 .mant = (m) \
63 DPSTR(0, 3 + DP_EBIAS, 0x40000, 0), /* + 10.0 */ 42}
64 DPSTR(1, 3 + DP_EBIAS, 0x40000, 0), /* - 10.0 */
65 DPSTR(0, DP_EMAX + 1 + DP_EBIAS, 0, 0), /* + infinity */
66 DPSTR(1, DP_EMAX + 1 + DP_EBIAS, 0, 0), /* - infinity */
67 DPSTR(0, DP_EMAX+1+DP_EBIAS, 0x7FFFF, 0xFFFFFFFF), /* + indef quiet Nan */
68 DPSTR(0, DP_EMAX + DP_EBIAS, 0xFFFFF, 0xFFFFFFFF), /* + max */
69 DPSTR(1, DP_EMAX + DP_EBIAS, 0xFFFFF, 0xFFFFFFFF), /* - max */
70 DPSTR(0, DP_EMIN + DP_EBIAS, 0, 0), /* + min normal */
71 DPSTR(1, DP_EMIN + DP_EBIAS, 0, 0), /* - min normal */
72 DPSTR(0, DP_EMIN - 1 + DP_EBIAS, 0, 1), /* + min denormal */
73 DPSTR(1, DP_EMIN - 1 + DP_EBIAS, 0, 1), /* - min denormal */
74 DPSTR(0, 31 + DP_EBIAS, 0, 0), /* + 1.0e31 */
75 DPSTR(0, 63 + DP_EBIAS, 0, 0), /* + 1.0e63 */
76};
77 43
78const struct ieee754sp_konst __ieee754sp_spcvals[] = { 44const union ieee754dp __ieee754dp_spcvals[] = {
79 SPSTR(0, SP_EMIN - 1 + SP_EBIAS, 0), /* + zero */ 45 DPCNST(0, DP_EMIN - 1, 0x0000000000000ULL), /* + zero */
80 SPSTR(1, SP_EMIN - 1 + SP_EBIAS, 0), /* - zero */ 46 DPCNST(1, DP_EMIN - 1, 0x0000000000000ULL), /* - zero */
81 SPSTR(0, SP_EBIAS, 0), /* + 1.0 */ 47 DPCNST(0, 0, 0x0000000000000ULL), /* + 1.0 */
82 SPSTR(1, SP_EBIAS, 0), /* - 1.0 */ 48 DPCNST(1, 0, 0x0000000000000ULL), /* - 1.0 */
83 SPSTR(0, 3 + SP_EBIAS, 0x200000), /* + 10.0 */ 49 DPCNST(0, 3, 0x4000000000000ULL), /* + 10.0 */
84 SPSTR(1, 3 + SP_EBIAS, 0x200000), /* - 10.0 */ 50 DPCNST(1, 3, 0x4000000000000ULL), /* - 10.0 */
85 SPSTR(0, SP_EMAX + 1 + SP_EBIAS, 0), /* + infinity */ 51 DPCNST(0, DP_EMAX + 1, 0x0000000000000ULL), /* + infinity */
86 SPSTR(1, SP_EMAX + 1 + SP_EBIAS, 0), /* - infinity */ 52 DPCNST(1, DP_EMAX + 1, 0x0000000000000ULL), /* - infinity */
87 SPSTR(0, SP_EMAX+1+SP_EBIAS, 0x3FFFFF), /* + indef quiet Nan */ 53 DPCNST(0, DP_EMAX + 1, 0x7FFFFFFFFFFFFULL), /* + indef quiet Nan */
88 SPSTR(0, SP_EMAX + SP_EBIAS, 0x7FFFFF), /* + max normal */ 54 DPCNST(0, DP_EMAX, 0xFFFFFFFFFFFFFULL), /* + max */
89 SPSTR(1, SP_EMAX + SP_EBIAS, 0x7FFFFF), /* - max normal */ 55 DPCNST(1, DP_EMAX, 0xFFFFFFFFFFFFFULL), /* - max */
90 SPSTR(0, SP_EMIN + SP_EBIAS, 0), /* + min normal */ 56 DPCNST(0, DP_EMIN, 0x0000000000000ULL), /* + min normal */
91 SPSTR(1, SP_EMIN + SP_EBIAS, 0), /* - min normal */ 57 DPCNST(1, DP_EMIN, 0x0000000000000ULL), /* - min normal */
92 SPSTR(0, SP_EMIN - 1 + SP_EBIAS, 1), /* + min denormal */ 58 DPCNST(0, DP_EMIN - 1, 0x0000000000001ULL), /* + min denormal */
93 SPSTR(1, SP_EMIN - 1 + SP_EBIAS, 1), /* - min denormal */ 59 DPCNST(1, DP_EMIN - 1, 0x0000000000001ULL), /* - min denormal */
94 SPSTR(0, 31 + SP_EBIAS, 0), /* + 1.0e31 */ 60 DPCNST(0, 31, 0x0000000000000ULL), /* + 1.0e31 */
95 SPSTR(0, 63 + SP_EBIAS, 0), /* + 1.0e63 */ 61 DPCNST(0, 63, 0x0000000000000ULL), /* + 1.0e63 */
96}; 62};
97 63
98 64#define SPCNST(s, b, m) \
99int ieee754si_xcpt(int r, const char *op, ...) 65{ \
100{ 66 .sign = (s), \
101 struct ieee754xctx ax; 67 .bexp = (b) + SP_EBIAS, \
102 68 .mant = (m) \
103 if (!TSTX())
104 return r;
105 ax.op = op;
106 ax.rt = IEEE754_RT_SI;
107 ax.rv.si = r;
108 va_start(ax.ap, op);
109 ieee754_xcpt(&ax);
110 va_end(ax.ap);
111 return ax.rv.si;
112} 69}
113 70
114s64 ieee754di_xcpt(s64 r, const char *op, ...) 71const union ieee754sp __ieee754sp_spcvals[] = {
115{ 72 SPCNST(0, SP_EMIN - 1, 0x000000), /* + zero */
116 struct ieee754xctx ax; 73 SPCNST(1, SP_EMIN - 1, 0x000000), /* - zero */
117 74 SPCNST(0, 0, 0x000000), /* + 1.0 */
118 if (!TSTX()) 75 SPCNST(1, 0, 0x000000), /* - 1.0 */
119 return r; 76 SPCNST(0, 3, 0x200000), /* + 10.0 */
120 ax.op = op; 77 SPCNST(1, 3, 0x200000), /* - 10.0 */
121 ax.rt = IEEE754_RT_DI; 78 SPCNST(0, SP_EMAX + 1, 0x000000), /* + infinity */
122 ax.rv.di = r; 79 SPCNST(1, SP_EMAX + 1, 0x000000), /* - infinity */
123 va_start(ax.ap, op); 80 SPCNST(0, SP_EMAX + 1, 0x3FFFFF), /* + indef quiet Nan */
124 ieee754_xcpt(&ax); 81 SPCNST(0, SP_EMAX, 0x7FFFFF), /* + max normal */
125 va_end(ax.ap); 82 SPCNST(1, SP_EMAX, 0x7FFFFF), /* - max normal */
126 return ax.rv.di; 83 SPCNST(0, SP_EMIN, 0x000000), /* + min normal */
127} 84 SPCNST(1, SP_EMIN, 0x000000), /* - min normal */
85 SPCNST(0, SP_EMIN - 1, 0x000001), /* + min denormal */
86 SPCNST(1, SP_EMIN - 1, 0x000001), /* - min denormal */
87 SPCNST(0, 31, 0x000000), /* + 1.0e31 */
88 SPCNST(0, 63, 0x000000), /* + 1.0e63 */
89};
diff --git a/arch/mips/math-emu/ieee754.h b/arch/mips/math-emu/ieee754.h
index 22796e012060..43c4fb522ac2 100644
--- a/arch/mips/math-emu/ieee754.h
+++ b/arch/mips/math-emu/ieee754.h
@@ -13,7 +13,7 @@
13 * 13 *
14 * You should have received a copy of the GNU General Public License along 14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc., 15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 16 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 * 17 *
18 * Nov 7, 2000 18 * Nov 7, 2000
19 * Modification to allow integration with Linux kernel 19 * Modification to allow integration with Linux kernel
@@ -24,186 +24,93 @@
24#ifndef __ARCH_MIPS_MATH_EMU_IEEE754_H 24#ifndef __ARCH_MIPS_MATH_EMU_IEEE754_H
25#define __ARCH_MIPS_MATH_EMU_IEEE754_H 25#define __ARCH_MIPS_MATH_EMU_IEEE754_H
26 26
27#include <linux/compiler.h>
27#include <asm/byteorder.h> 28#include <asm/byteorder.h>
29#include <linux/kernel.h>
28#include <linux/types.h> 30#include <linux/types.h>
29#include <linux/sched.h> 31#include <linux/sched.h>
32#include <asm/bitfield.h>
30 33
31/* 34union ieee754dp {
32 * Not very pretty, but the Linux kernel's normal va_list definition
33 * does not allow it to be used as a structure element, as it is here.
34 */
35#ifndef _STDARG_H
36#include <stdarg.h>
37#endif
38
39#ifdef __LITTLE_ENDIAN
40struct ieee754dp_konst {
41 unsigned mantlo:32;
42 unsigned manthi:20;
43 unsigned bexp:11;
44 unsigned sign:1;
45};
46struct ieee754sp_konst {
47 unsigned mant:23;
48 unsigned bexp:8;
49 unsigned sign:1;
50};
51
52typedef union _ieee754dp {
53 struct ieee754dp_konst oparts;
54 struct { 35 struct {
55 u64 mant:52; 36 __BITFIELD_FIELD(unsigned int sign:1,
56 unsigned int bexp:11; 37 __BITFIELD_FIELD(unsigned int bexp:11,
57 unsigned int sign:1; 38 __BITFIELD_FIELD(u64 mant:52,
58 } parts; 39 ;)))
40 };
59 u64 bits; 41 u64 bits;
60 double d;
61} ieee754dp;
62
63typedef union _ieee754sp {
64 struct ieee754sp_konst parts;
65 float f;
66 u32 bits;
67} ieee754sp;
68#endif
69
70#ifdef __BIG_ENDIAN
71struct ieee754dp_konst {
72 unsigned sign:1;
73 unsigned bexp:11;
74 unsigned manthi:20;
75 unsigned mantlo:32;
76}; 42};
77 43
78typedef union _ieee754dp { 44union ieee754sp {
79 struct ieee754dp_konst oparts;
80 struct { 45 struct {
81 unsigned int sign:1; 46 __BITFIELD_FIELD(unsigned sign:1,
82 unsigned int bexp:11; 47 __BITFIELD_FIELD(unsigned bexp:8,
83 u64 mant:52; 48 __BITFIELD_FIELD(unsigned mant:23,
84 } parts; 49 ;)))
85 double d; 50 };
86 u64 bits;
87} ieee754dp;
88
89struct ieee754sp_konst {
90 unsigned sign:1;
91 unsigned bexp:8;
92 unsigned mant:23;
93};
94
95typedef union _ieee754sp {
96 struct ieee754sp_konst parts;
97 float f;
98 u32 bits; 51 u32 bits;
99} ieee754sp; 52};
100#endif
101 53
102/* 54/*
103 * single precision (often aka float) 55 * single precision (often aka float)
104*/ 56*/
105int ieee754sp_finite(ieee754sp x); 57int ieee754sp_class(union ieee754sp x);
106int ieee754sp_class(ieee754sp x);
107
108ieee754sp ieee754sp_abs(ieee754sp x);
109ieee754sp ieee754sp_neg(ieee754sp x);
110ieee754sp ieee754sp_scalb(ieee754sp x, int);
111ieee754sp ieee754sp_logb(ieee754sp x);
112
113/* x with sign of y */
114ieee754sp ieee754sp_copysign(ieee754sp x, ieee754sp y);
115
116ieee754sp ieee754sp_add(ieee754sp x, ieee754sp y);
117ieee754sp ieee754sp_sub(ieee754sp x, ieee754sp y);
118ieee754sp ieee754sp_mul(ieee754sp x, ieee754sp y);
119ieee754sp ieee754sp_div(ieee754sp x, ieee754sp y);
120
121ieee754sp ieee754sp_fint(int x);
122ieee754sp ieee754sp_funs(unsigned x);
123ieee754sp ieee754sp_flong(s64 x);
124ieee754sp ieee754sp_fulong(u64 x);
125ieee754sp ieee754sp_fdp(ieee754dp x);
126
127int ieee754sp_tint(ieee754sp x);
128unsigned int ieee754sp_tuns(ieee754sp x);
129s64 ieee754sp_tlong(ieee754sp x);
130u64 ieee754sp_tulong(ieee754sp x);
131
132int ieee754sp_cmp(ieee754sp x, ieee754sp y, int cop, int sig);
133/*
134 * basic sp math
135 */
136ieee754sp ieee754sp_modf(ieee754sp x, ieee754sp * ip);
137ieee754sp ieee754sp_frexp(ieee754sp x, int *exp);
138ieee754sp ieee754sp_ldexp(ieee754sp x, int exp);
139 58
140ieee754sp ieee754sp_ceil(ieee754sp x); 59union ieee754sp ieee754sp_abs(union ieee754sp x);
141ieee754sp ieee754sp_floor(ieee754sp x); 60union ieee754sp ieee754sp_neg(union ieee754sp x);
142ieee754sp ieee754sp_trunc(ieee754sp x);
143 61
144ieee754sp ieee754sp_sqrt(ieee754sp x); 62union ieee754sp ieee754sp_add(union ieee754sp x, union ieee754sp y);
63union ieee754sp ieee754sp_sub(union ieee754sp x, union ieee754sp y);
64union ieee754sp ieee754sp_mul(union ieee754sp x, union ieee754sp y);
65union ieee754sp ieee754sp_div(union ieee754sp x, union ieee754sp y);
145 66
146/* 67union ieee754sp ieee754sp_fint(int x);
147 * double precision (often aka double) 68union ieee754sp ieee754sp_flong(s64 x);
148*/ 69union ieee754sp ieee754sp_fdp(union ieee754dp x);
149int ieee754dp_finite(ieee754dp x);
150int ieee754dp_class(ieee754dp x);
151 70
152/* x with sign of y */ 71int ieee754sp_tint(union ieee754sp x);
153ieee754dp ieee754dp_copysign(ieee754dp x, ieee754dp y); 72s64 ieee754sp_tlong(union ieee754sp x);
154 73
155ieee754dp ieee754dp_add(ieee754dp x, ieee754dp y); 74int ieee754sp_cmp(union ieee754sp x, union ieee754sp y, int cop, int sig);
156ieee754dp ieee754dp_sub(ieee754dp x, ieee754dp y);
157ieee754dp ieee754dp_mul(ieee754dp x, ieee754dp y);
158ieee754dp ieee754dp_div(ieee754dp x, ieee754dp y);
159 75
160ieee754dp ieee754dp_abs(ieee754dp x); 76union ieee754sp ieee754sp_sqrt(union ieee754sp x);
161ieee754dp ieee754dp_neg(ieee754dp x);
162ieee754dp ieee754dp_scalb(ieee754dp x, int);
163 77
164/* return exponent as integer in floating point format 78/*
165 */ 79 * double precision (often aka double)
166ieee754dp ieee754dp_logb(ieee754dp x); 80*/
81int ieee754dp_class(union ieee754dp x);
167 82
168ieee754dp ieee754dp_fint(int x); 83union ieee754dp ieee754dp_add(union ieee754dp x, union ieee754dp y);
169ieee754dp ieee754dp_funs(unsigned x); 84union ieee754dp ieee754dp_sub(union ieee754dp x, union ieee754dp y);
170ieee754dp ieee754dp_flong(s64 x); 85union ieee754dp ieee754dp_mul(union ieee754dp x, union ieee754dp y);
171ieee754dp ieee754dp_fulong(u64 x); 86union ieee754dp ieee754dp_div(union ieee754dp x, union ieee754dp y);
172ieee754dp ieee754dp_fsp(ieee754sp x);
173 87
174ieee754dp ieee754dp_ceil(ieee754dp x); 88union ieee754dp ieee754dp_abs(union ieee754dp x);
175ieee754dp ieee754dp_floor(ieee754dp x); 89union ieee754dp ieee754dp_neg(union ieee754dp x);
176ieee754dp ieee754dp_trunc(ieee754dp x);
177 90
178int ieee754dp_tint(ieee754dp x); 91union ieee754dp ieee754dp_fint(int x);
179unsigned int ieee754dp_tuns(ieee754dp x); 92union ieee754dp ieee754dp_flong(s64 x);
180s64 ieee754dp_tlong(ieee754dp x); 93union ieee754dp ieee754dp_fsp(union ieee754sp x);
181u64 ieee754dp_tulong(ieee754dp x);
182 94
183int ieee754dp_cmp(ieee754dp x, ieee754dp y, int cop, int sig); 95int ieee754dp_tint(union ieee754dp x);
184/* 96s64 ieee754dp_tlong(union ieee754dp x);
185 * basic sp math
186 */
187ieee754dp ieee754dp_modf(ieee754dp x, ieee754dp * ip);
188ieee754dp ieee754dp_frexp(ieee754dp x, int *exp);
189ieee754dp ieee754dp_ldexp(ieee754dp x, int exp);
190 97
191ieee754dp ieee754dp_ceil(ieee754dp x); 98int ieee754dp_cmp(union ieee754dp x, union ieee754dp y, int cop, int sig);
192ieee754dp ieee754dp_floor(ieee754dp x);
193ieee754dp ieee754dp_trunc(ieee754dp x);
194 99
195ieee754dp ieee754dp_sqrt(ieee754dp x); 100union ieee754dp ieee754dp_sqrt(union ieee754dp x);
196 101
197 102
198 103
199/* 5 types of floating point number 104/* 5 types of floating point number
200*/ 105*/
201#define IEEE754_CLASS_NORM 0x00 106enum {
202#define IEEE754_CLASS_ZERO 0x01 107 IEEE754_CLASS_NORM = 0x00,
203#define IEEE754_CLASS_DNORM 0x02 108 IEEE754_CLASS_ZERO = 0x01,
204#define IEEE754_CLASS_INF 0x03 109 IEEE754_CLASS_DNORM = 0x02,
205#define IEEE754_CLASS_SNAN 0x04 110 IEEE754_CLASS_INF = 0x03,
206#define IEEE754_CLASS_QNAN 0x05 111 IEEE754_CLASS_SNAN = 0x04,
112 IEEE754_CLASS_QNAN = 0x05,
113};
207 114
208/* exception numbers */ 115/* exception numbers */
209#define IEEE754_INEXACT 0x01 116#define IEEE754_INEXACT 0x01
@@ -219,114 +126,84 @@ ieee754dp ieee754dp_sqrt(ieee754dp x);
219#define IEEE754_CGT 0x04 126#define IEEE754_CGT 0x04
220#define IEEE754_CUN 0x08 127#define IEEE754_CUN 0x08
221 128
222/* rounding mode
223*/
224#define IEEE754_RN 0 /* round to nearest */
225#define IEEE754_RZ 1 /* round toward zero */
226#define IEEE754_RD 2 /* round toward -Infinity */
227#define IEEE754_RU 3 /* round toward +Infinity */
228
229/* other naming */
230#define IEEE754_RM IEEE754_RD
231#define IEEE754_RP IEEE754_RU
232
233/* "normal" comparisons 129/* "normal" comparisons
234*/ 130*/
235static inline int ieee754sp_eq(ieee754sp x, ieee754sp y) 131static inline int ieee754sp_eq(union ieee754sp x, union ieee754sp y)
236{ 132{
237 return ieee754sp_cmp(x, y, IEEE754_CEQ, 0); 133 return ieee754sp_cmp(x, y, IEEE754_CEQ, 0);
238} 134}
239 135
240static inline int ieee754sp_ne(ieee754sp x, ieee754sp y) 136static inline int ieee754sp_ne(union ieee754sp x, union ieee754sp y)
241{ 137{
242 return ieee754sp_cmp(x, y, 138 return ieee754sp_cmp(x, y,
243 IEEE754_CLT | IEEE754_CGT | IEEE754_CUN, 0); 139 IEEE754_CLT | IEEE754_CGT | IEEE754_CUN, 0);
244} 140}
245 141
246static inline int ieee754sp_lt(ieee754sp x, ieee754sp y) 142static inline int ieee754sp_lt(union ieee754sp x, union ieee754sp y)
247{ 143{
248 return ieee754sp_cmp(x, y, IEEE754_CLT, 0); 144 return ieee754sp_cmp(x, y, IEEE754_CLT, 0);
249} 145}
250 146
251static inline int ieee754sp_le(ieee754sp x, ieee754sp y) 147static inline int ieee754sp_le(union ieee754sp x, union ieee754sp y)
252{ 148{
253 return ieee754sp_cmp(x, y, IEEE754_CLT | IEEE754_CEQ, 0); 149 return ieee754sp_cmp(x, y, IEEE754_CLT | IEEE754_CEQ, 0);
254} 150}
255 151
256static inline int ieee754sp_gt(ieee754sp x, ieee754sp y) 152static inline int ieee754sp_gt(union ieee754sp x, union ieee754sp y)
257{ 153{
258 return ieee754sp_cmp(x, y, IEEE754_CGT, 0); 154 return ieee754sp_cmp(x, y, IEEE754_CGT, 0);
259} 155}
260 156
261 157
262static inline int ieee754sp_ge(ieee754sp x, ieee754sp y) 158static inline int ieee754sp_ge(union ieee754sp x, union ieee754sp y)
263{ 159{
264 return ieee754sp_cmp(x, y, IEEE754_CGT | IEEE754_CEQ, 0); 160 return ieee754sp_cmp(x, y, IEEE754_CGT | IEEE754_CEQ, 0);
265} 161}
266 162
267static inline int ieee754dp_eq(ieee754dp x, ieee754dp y) 163static inline int ieee754dp_eq(union ieee754dp x, union ieee754dp y)
268{ 164{
269 return ieee754dp_cmp(x, y, IEEE754_CEQ, 0); 165 return ieee754dp_cmp(x, y, IEEE754_CEQ, 0);
270} 166}
271 167
272static inline int ieee754dp_ne(ieee754dp x, ieee754dp y) 168static inline int ieee754dp_ne(union ieee754dp x, union ieee754dp y)
273{ 169{
274 return ieee754dp_cmp(x, y, 170 return ieee754dp_cmp(x, y,
275 IEEE754_CLT | IEEE754_CGT | IEEE754_CUN, 0); 171 IEEE754_CLT | IEEE754_CGT | IEEE754_CUN, 0);
276} 172}
277 173
278static inline int ieee754dp_lt(ieee754dp x, ieee754dp y) 174static inline int ieee754dp_lt(union ieee754dp x, union ieee754dp y)
279{ 175{
280 return ieee754dp_cmp(x, y, IEEE754_CLT, 0); 176 return ieee754dp_cmp(x, y, IEEE754_CLT, 0);
281} 177}
282 178
283static inline int ieee754dp_le(ieee754dp x, ieee754dp y) 179static inline int ieee754dp_le(union ieee754dp x, union ieee754dp y)
284{ 180{
285 return ieee754dp_cmp(x, y, IEEE754_CLT | IEEE754_CEQ, 0); 181 return ieee754dp_cmp(x, y, IEEE754_CLT | IEEE754_CEQ, 0);
286} 182}
287 183
288static inline int ieee754dp_gt(ieee754dp x, ieee754dp y) 184static inline int ieee754dp_gt(union ieee754dp x, union ieee754dp y)
289{ 185{
290 return ieee754dp_cmp(x, y, IEEE754_CGT, 0); 186 return ieee754dp_cmp(x, y, IEEE754_CGT, 0);
291} 187}
292 188
293static inline int ieee754dp_ge(ieee754dp x, ieee754dp y) 189static inline int ieee754dp_ge(union ieee754dp x, union ieee754dp y)
294{ 190{
295 return ieee754dp_cmp(x, y, IEEE754_CGT | IEEE754_CEQ, 0); 191 return ieee754dp_cmp(x, y, IEEE754_CGT | IEEE754_CEQ, 0);
296} 192}
297 193
298
299/*
300 * Like strtod
301 */
302ieee754dp ieee754dp_fstr(const char *s, char **endp);
303char *ieee754dp_tstr(ieee754dp x, int prec, int fmt, int af);
304
305
306/* 194/*
307 * The control status register 195 * The control status register
308 */ 196 */
309struct _ieee754_csr { 197struct _ieee754_csr {
310#ifdef __BIG_ENDIAN 198 __BITFIELD_FIELD(unsigned pad0:7,
311 unsigned pad0:7; 199 __BITFIELD_FIELD(unsigned nod:1, /* set 1 for no denormalised numbers */
312 unsigned nod:1; /* set 1 for no denormalised numbers */ 200 __BITFIELD_FIELD(unsigned c:1, /* condition */
313 unsigned c:1; /* condition */ 201 __BITFIELD_FIELD(unsigned pad1:5,
314 unsigned pad1:5; 202 __BITFIELD_FIELD(unsigned cx:6, /* exceptions this operation */
315 unsigned cx:6; /* exceptions this operation */ 203 __BITFIELD_FIELD(unsigned mx:5, /* exception enable mask */
316 unsigned mx:5; /* exception enable mask */ 204 __BITFIELD_FIELD(unsigned sx:5, /* exceptions total */
317 unsigned sx:5; /* exceptions total */ 205 __BITFIELD_FIELD(unsigned rm:2, /* current rounding mode */
318 unsigned rm:2; /* current rounding mode */ 206 ;))))))))
319#endif
320#ifdef __LITTLE_ENDIAN
321 unsigned rm:2; /* current rounding mode */
322 unsigned sx:5; /* exceptions total */
323 unsigned mx:5; /* exception enable mask */
324 unsigned cx:6; /* exceptions this operation */
325 unsigned pad1:5;
326 unsigned c:1; /* condition */
327 unsigned nod:1; /* set 1 for no denormalised numbers */
328 unsigned pad0:7;
329#endif
330}; 207};
331#define ieee754_csr (*(struct _ieee754_csr *)(&current->thread.fpu.fcr31)) 208#define ieee754_csr (*(struct _ieee754_csr *)(&current->thread.fpu.fcr31))
332 209
@@ -377,8 +254,8 @@ static inline int ieee754_sxtest(unsigned n)
377} 254}
378 255
379/* debugging */ 256/* debugging */
380ieee754sp ieee754sp_dump(char *s, ieee754sp x); 257union ieee754sp ieee754sp_dump(char *s, union ieee754sp x);
381ieee754dp ieee754dp_dump(char *s, ieee754dp x); 258union ieee754dp ieee754dp_dump(char *s, union ieee754dp x);
382 259
383#define IEEE754_SPCVAL_PZERO 0 260#define IEEE754_SPCVAL_PZERO 0
384#define IEEE754_SPCVAL_NZERO 1 261#define IEEE754_SPCVAL_NZERO 1
@@ -398,10 +275,10 @@ ieee754dp ieee754dp_dump(char *s, ieee754dp x);
398#define IEEE754_SPCVAL_P1E31 15 /* + 1.0e31 */ 275#define IEEE754_SPCVAL_P1E31 15 /* + 1.0e31 */
399#define IEEE754_SPCVAL_P1E63 16 /* + 1.0e63 */ 276#define IEEE754_SPCVAL_P1E63 16 /* + 1.0e63 */
400 277
401extern const struct ieee754dp_konst __ieee754dp_spcvals[]; 278extern const union ieee754dp __ieee754dp_spcvals[];
402extern const struct ieee754sp_konst __ieee754sp_spcvals[]; 279extern const union ieee754sp __ieee754sp_spcvals[];
403#define ieee754dp_spcvals ((const ieee754dp *)__ieee754dp_spcvals) 280#define ieee754dp_spcvals ((const union ieee754dp *)__ieee754dp_spcvals)
404#define ieee754sp_spcvals ((const ieee754sp *)__ieee754sp_spcvals) 281#define ieee754sp_spcvals ((const union ieee754sp *)__ieee754sp_spcvals)
405 282
406/* 283/*
407 * Return infinity with given sign 284 * Return infinity with given sign
@@ -431,28 +308,15 @@ extern const struct ieee754sp_konst __ieee754sp_spcvals[];
431/* 308/*
432 * Indefinite integer value 309 * Indefinite integer value
433 */ 310 */
434#define ieee754si_indef() INT_MAX 311static inline int ieee754si_indef(void)
435#ifdef LONG_LONG_MAX 312{
436#define ieee754di_indef() LONG_LONG_MAX 313 return INT_MAX;
437#else 314}
438#define ieee754di_indef() ((s64)(~0ULL>>1)) 315
439#endif 316static inline s64 ieee754di_indef(void)
440 317{
441/* IEEE exception context, passed to handler */ 318 return S64_MAX;
442struct ieee754xctx { 319}
443 const char *op; /* operation name */
444 int rt; /* result type */
445 union {
446 ieee754sp sp; /* single precision */
447 ieee754dp dp; /* double precision */
448#ifdef IEEE854_XP
449 ieee754xp xp; /* extended precision */
450#endif
451 int si; /* standard signed integer (32bits) */
452 s64 di; /* extended signed integer (64bits) */
453 } rv; /* default result format implied by op */
454 va_list ap;
455};
456 320
457/* result types for xctx.rt */ 321/* result types for xctx.rt */
458#define IEEE754_RT_SP 0 322#define IEEE754_RT_SP 0
@@ -461,8 +325,6 @@ struct ieee754xctx {
461#define IEEE754_RT_SI 3 325#define IEEE754_RT_SI 3
462#define IEEE754_RT_DI 4 326#define IEEE754_RT_DI 4
463 327
464extern void ieee754_xcpt(struct ieee754xctx *xcp);
465
466/* compat */ 328/* compat */
467#define ieee754dp_fix(x) ieee754dp_tint(x) 329#define ieee754dp_fix(x) ieee754dp_tint(x)
468#define ieee754sp_fix(x) ieee754sp_tint(x) 330#define ieee754sp_fix(x) ieee754sp_tint(x)
diff --git a/arch/mips/math-emu/ieee754d.c b/arch/mips/math-emu/ieee754d.c
index 9599bdd32585..a04e8a7e5ac3 100644
--- a/arch/mips/math-emu/ieee754d.c
+++ b/arch/mips/math-emu/ieee754d.c
@@ -16,7 +16,7 @@
16 * 16 *
17 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 * 20 *
21 * Nov 7, 2000 21 * Nov 7, 2000
22 * Modified to build and operate in Linux kernel environment. 22 * Modified to build and operate in Linux kernel environment.
@@ -25,38 +25,13 @@
25 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. 25 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
26 */ 26 */
27 27
28#include <linux/kernel.h> 28#include <linux/types.h>
29#include <linux/printk.h>
29#include "ieee754.h" 30#include "ieee754.h"
31#include "ieee754sp.h"
32#include "ieee754dp.h"
30 33
31#define DP_EBIAS 1023 34union ieee754dp ieee754dp_dump(char *m, union ieee754dp x)
32#define DP_EMIN (-1022)
33#define DP_EMAX 1023
34#define DP_FBITS 52
35
36#define SP_EBIAS 127
37#define SP_EMIN (-126)
38#define SP_EMAX 127
39#define SP_FBITS 23
40
41#define DP_MBIT(x) ((u64)1 << (x))
42#define DP_HIDDEN_BIT DP_MBIT(DP_FBITS)
43#define DP_SIGN_BIT DP_MBIT(63)
44
45
46#define SP_MBIT(x) ((u32)1 << (x))
47#define SP_HIDDEN_BIT SP_MBIT(SP_FBITS)
48#define SP_SIGN_BIT SP_MBIT(31)
49
50
51#define SPSIGN(sp) (sp.parts.sign)
52#define SPBEXP(sp) (sp.parts.bexp)
53#define SPMANT(sp) (sp.parts.mant)
54
55#define DPSIGN(dp) (dp.parts.sign)
56#define DPBEXP(dp) (dp.parts.bexp)
57#define DPMANT(dp) (dp.parts.mant)
58
59ieee754dp ieee754dp_dump(char *m, ieee754dp x)
60{ 35{
61 int i; 36 int i;
62 37
@@ -96,7 +71,7 @@ ieee754dp ieee754dp_dump(char *m, ieee754dp x)
96 return x; 71 return x;
97} 72}
98 73
99ieee754sp ieee754sp_dump(char *m, ieee754sp x) 74union ieee754sp ieee754sp_dump(char *m, union ieee754sp x)
100{ 75{
101 int i; 76 int i;
102 77
diff --git a/arch/mips/math-emu/ieee754dp.c b/arch/mips/math-emu/ieee754dp.c
index 068e56be8de9..fd134675fc2e 100644
--- a/arch/mips/math-emu/ieee754dp.c
+++ b/arch/mips/math-emu/ieee754dp.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,104 +16,68 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
22#include <linux/compiler.h>
26 23
27#include "ieee754dp.h" 24#include "ieee754dp.h"
28 25
29int ieee754dp_class(ieee754dp x) 26int ieee754dp_class(union ieee754dp x)
30{ 27{
31 COMPXDP; 28 COMPXDP;
32 EXPLODEXDP; 29 EXPLODEXDP;
33 return xc; 30 return xc;
34} 31}
35 32
36int ieee754dp_isnan(ieee754dp x) 33int ieee754dp_isnan(union ieee754dp x)
37{ 34{
38 return ieee754dp_class(x) >= IEEE754_CLASS_SNAN; 35 return ieee754dp_class(x) >= IEEE754_CLASS_SNAN;
39} 36}
40 37
41int ieee754dp_issnan(ieee754dp x) 38static inline int ieee754dp_issnan(union ieee754dp x)
42{ 39{
43 assert(ieee754dp_isnan(x)); 40 assert(ieee754dp_isnan(x));
44 return ((DPMANT(x) & DP_MBIT(DP_MBITS-1)) == DP_MBIT(DP_MBITS-1)); 41 return ((DPMANT(x) & DP_MBIT(DP_FBITS-1)) == DP_MBIT(DP_FBITS-1));
45} 42}
46 43
47 44
48ieee754dp ieee754dp_xcpt(ieee754dp r, const char *op, ...) 45union ieee754dp __cold ieee754dp_nanxcpt(union ieee754dp r)
49{
50 struct ieee754xctx ax;
51 if (!TSTX())
52 return r;
53
54 ax.op = op;
55 ax.rt = IEEE754_RT_DP;
56 ax.rv.dp = r;
57 va_start(ax.ap, op);
58 ieee754_xcpt(&ax);
59 va_end(ax.ap);
60 return ax.rv.dp;
61}
62
63ieee754dp ieee754dp_nanxcpt(ieee754dp r, const char *op, ...)
64{ 46{
65 struct ieee754xctx ax;
66
67 assert(ieee754dp_isnan(r)); 47 assert(ieee754dp_isnan(r));
68 48
69 if (!ieee754dp_issnan(r)) /* QNAN does not cause invalid op !! */ 49 if (!ieee754dp_issnan(r)) /* QNAN does not cause invalid op !! */
70 return r; 50 return r;
71 51
72 if (!SETANDTESTCX(IEEE754_INVALID_OPERATION)) { 52 if (!ieee754_setandtestcx(IEEE754_INVALID_OPERATION)) {
73 /* not enabled convert to a quiet NaN */ 53 /* not enabled convert to a quiet NaN */
74 DPMANT(r) &= (~DP_MBIT(DP_MBITS-1)); 54 DPMANT(r) &= (~DP_MBIT(DP_FBITS-1));
75 if (ieee754dp_isnan(r)) 55 if (ieee754dp_isnan(r))
76 return r; 56 return r;
77 else 57 else
78 return ieee754dp_indef(); 58 return ieee754dp_indef();
79 } 59 }
80 60
81 ax.op = op; 61 return r;
82 ax.rt = 0;
83 ax.rv.dp = r;
84 va_start(ax.ap, op);
85 ieee754_xcpt(&ax);
86 va_end(ax.ap);
87 return ax.rv.dp;
88} 62}
89 63
90ieee754dp ieee754dp_bestnan(ieee754dp x, ieee754dp y) 64static u64 ieee754dp_get_rounding(int sn, u64 xm)
91{
92 assert(ieee754dp_isnan(x));
93 assert(ieee754dp_isnan(y));
94
95 if (DPMANT(x) > DPMANT(y))
96 return x;
97 else
98 return y;
99}
100
101
102static u64 get_rounding(int sn, u64 xm)
103{ 65{
104 /* inexact must round of 3 bits 66 /* inexact must round of 3 bits
105 */ 67 */
106 if (xm & (DP_MBIT(3) - 1)) { 68 if (xm & (DP_MBIT(3) - 1)) {
107 switch (ieee754_csr.rm) { 69 switch (ieee754_csr.rm) {
108 case IEEE754_RZ: 70 case FPU_CSR_RZ:
109 break; 71 break;
110 case IEEE754_RN: 72 case FPU_CSR_RN:
111 xm += 0x3 + ((xm >> 3) & 1); 73 xm += 0x3 + ((xm >> 3) & 1);
112 /* xm += (xm&0x8)?0x4:0x3 */ 74 /* xm += (xm&0x8)?0x4:0x3 */
113 break; 75 break;
114 case IEEE754_RU: /* toward +Infinity */ 76 case FPU_CSR_RU: /* toward +Infinity */
115 if (!sn) /* ?? */ 77 if (!sn) /* ?? */
116 xm += 0x8; 78 xm += 0x8;
117 break; 79 break;
118 case IEEE754_RD: /* toward -Infinity */ 80 case FPU_CSR_RD: /* toward -Infinity */
119 if (sn) /* ?? */ 81 if (sn) /* ?? */
120 xm += 0x8; 82 xm += 0x8;
121 break; 83 break;
@@ -130,11 +92,11 @@ static u64 get_rounding(int sn, u64 xm)
130 * xe is an unbiased exponent 92 * xe is an unbiased exponent
131 * xm is 3bit extended precision value. 93 * xm is 3bit extended precision value.
132 */ 94 */
133ieee754dp ieee754dp_format(int sn, int xe, u64 xm) 95union ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
134{ 96{
135 assert(xm); /* we don't gen exact zeros (probably should) */ 97 assert(xm); /* we don't gen exact zeros (probably should) */
136 98
137 assert((xm >> (DP_MBITS + 1 + 3)) == 0); /* no execess */ 99 assert((xm >> (DP_FBITS + 1 + 3)) == 0); /* no execess */
138 assert(xm & (DP_HIDDEN_BIT << 3)); 100 assert(xm & (DP_HIDDEN_BIT << 3));
139 101
140 if (xe < DP_EMIN) { 102 if (xe < DP_EMIN) {
@@ -142,32 +104,32 @@ ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
142 int es = DP_EMIN - xe; 104 int es = DP_EMIN - xe;
143 105
144 if (ieee754_csr.nod) { 106 if (ieee754_csr.nod) {
145 SETCX(IEEE754_UNDERFLOW); 107 ieee754_setcx(IEEE754_UNDERFLOW);
146 SETCX(IEEE754_INEXACT); 108 ieee754_setcx(IEEE754_INEXACT);
147 109
148 switch(ieee754_csr.rm) { 110 switch(ieee754_csr.rm) {
149 case IEEE754_RN: 111 case FPU_CSR_RN:
150 case IEEE754_RZ: 112 case FPU_CSR_RZ:
151 return ieee754dp_zero(sn); 113 return ieee754dp_zero(sn);
152 case IEEE754_RU: /* toward +Infinity */ 114 case FPU_CSR_RU: /* toward +Infinity */
153 if(sn == 0) 115 if (sn == 0)
154 return ieee754dp_min(0); 116 return ieee754dp_min(0);
155 else 117 else
156 return ieee754dp_zero(1); 118 return ieee754dp_zero(1);
157 case IEEE754_RD: /* toward -Infinity */ 119 case FPU_CSR_RD: /* toward -Infinity */
158 if(sn == 0) 120 if (sn == 0)
159 return ieee754dp_zero(0); 121 return ieee754dp_zero(0);
160 else 122 else
161 return ieee754dp_min(1); 123 return ieee754dp_min(1);
162 } 124 }
163 } 125 }
164 126
165 if (xe == DP_EMIN - 1 127 if (xe == DP_EMIN - 1 &&
166 && get_rounding(sn, xm) >> (DP_MBITS + 1 + 3)) 128 ieee754dp_get_rounding(sn, xm) >> (DP_FBITS + 1 + 3))
167 { 129 {
168 /* Not tiny after rounding */ 130 /* Not tiny after rounding */
169 SETCX(IEEE754_INEXACT); 131 ieee754_setcx(IEEE754_INEXACT);
170 xm = get_rounding(sn, xm); 132 xm = ieee754dp_get_rounding(sn, xm);
171 xm >>= 1; 133 xm >>= 1;
172 /* Clear grs bits */ 134 /* Clear grs bits */
173 xm &= ~(DP_MBIT(3) - 1); 135 xm &= ~(DP_MBIT(3) - 1);
@@ -183,17 +145,17 @@ ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
183 } 145 }
184 } 146 }
185 if (xm & (DP_MBIT(3) - 1)) { 147 if (xm & (DP_MBIT(3) - 1)) {
186 SETCX(IEEE754_INEXACT); 148 ieee754_setcx(IEEE754_INEXACT);
187 if ((xm & (DP_HIDDEN_BIT << 3)) == 0) { 149 if ((xm & (DP_HIDDEN_BIT << 3)) == 0) {
188 SETCX(IEEE754_UNDERFLOW); 150 ieee754_setcx(IEEE754_UNDERFLOW);
189 } 151 }
190 152
191 /* inexact must round of 3 bits 153 /* inexact must round of 3 bits
192 */ 154 */
193 xm = get_rounding(sn, xm); 155 xm = ieee754dp_get_rounding(sn, xm);
194 /* adjust exponent for rounding add overflowing 156 /* adjust exponent for rounding add overflowing
195 */ 157 */
196 if (xm >> (DP_MBITS + 3 + 1)) { 158 if (xm >> (DP_FBITS + 3 + 1)) {
197 /* add causes mantissa overflow */ 159 /* add causes mantissa overflow */
198 xm >>= 1; 160 xm >>= 1;
199 xe++; 161 xe++;
@@ -202,24 +164,24 @@ ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
202 /* strip grs bits */ 164 /* strip grs bits */
203 xm >>= 3; 165 xm >>= 3;
204 166
205 assert((xm >> (DP_MBITS + 1)) == 0); /* no execess */ 167 assert((xm >> (DP_FBITS + 1)) == 0); /* no execess */
206 assert(xe >= DP_EMIN); 168 assert(xe >= DP_EMIN);
207 169
208 if (xe > DP_EMAX) { 170 if (xe > DP_EMAX) {
209 SETCX(IEEE754_OVERFLOW); 171 ieee754_setcx(IEEE754_OVERFLOW);
210 SETCX(IEEE754_INEXACT); 172 ieee754_setcx(IEEE754_INEXACT);
211 /* -O can be table indexed by (rm,sn) */ 173 /* -O can be table indexed by (rm,sn) */
212 switch (ieee754_csr.rm) { 174 switch (ieee754_csr.rm) {
213 case IEEE754_RN: 175 case FPU_CSR_RN:
214 return ieee754dp_inf(sn); 176 return ieee754dp_inf(sn);
215 case IEEE754_RZ: 177 case FPU_CSR_RZ:
216 return ieee754dp_max(sn); 178 return ieee754dp_max(sn);
217 case IEEE754_RU: /* toward +Infinity */ 179 case FPU_CSR_RU: /* toward +Infinity */
218 if (sn == 0) 180 if (sn == 0)
219 return ieee754dp_inf(0); 181 return ieee754dp_inf(0);
220 else 182 else
221 return ieee754dp_max(1); 183 return ieee754dp_max(1);
222 case IEEE754_RD: /* toward -Infinity */ 184 case FPU_CSR_RD: /* toward -Infinity */
223 if (sn == 0) 185 if (sn == 0)
224 return ieee754dp_max(0); 186 return ieee754dp_max(0);
225 else 187 else
@@ -232,10 +194,10 @@ ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
232 /* we underflow (tiny/zero) */ 194 /* we underflow (tiny/zero) */
233 assert(xe == DP_EMIN); 195 assert(xe == DP_EMIN);
234 if (ieee754_csr.mx & IEEE754_UNDERFLOW) 196 if (ieee754_csr.mx & IEEE754_UNDERFLOW)
235 SETCX(IEEE754_UNDERFLOW); 197 ieee754_setcx(IEEE754_UNDERFLOW);
236 return builddp(sn, DP_EMIN - 1 + DP_EBIAS, xm); 198 return builddp(sn, DP_EMIN - 1 + DP_EBIAS, xm);
237 } else { 199 } else {
238 assert((xm >> (DP_MBITS + 1)) == 0); /* no execess */ 200 assert((xm >> (DP_FBITS + 1)) == 0); /* no execess */
239 assert(xm & DP_HIDDEN_BIT); 201 assert(xm & DP_HIDDEN_BIT);
240 202
241 return builddp(sn, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT); 203 return builddp(sn, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT);
diff --git a/arch/mips/math-emu/ieee754dp.h b/arch/mips/math-emu/ieee754dp.h
index f139c724c59a..61fd6fd31350 100644
--- a/arch/mips/math-emu/ieee754dp.h
+++ b/arch/mips/math-emu/ieee754dp.h
@@ -6,8 +6,6 @@
6 * MIPS floating point support 6 * MIPS floating point support
7 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 * Copyright (C) 1994-2000 Algorithmics Ltd.
8 * 8 *
9 * ########################################################################
10 *
11 * This program is free software; you can distribute it and/or modify it 9 * This program is free software; you can distribute it and/or modify it
12 * under the terms of the GNU General Public License (Version 2) as 10 * under the terms of the GNU General Public License (Version 2) as
13 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
@@ -19,64 +17,66 @@
19 * 17 *
20 * You should have received a copy of the GNU General Public License along 18 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc., 19 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
23 *
24 * ########################################################################
25 */ 21 */
26 22
23#include <linux/compiler.h>
27 24
28#include "ieee754int.h" 25#include "ieee754int.h"
29 26
30#define assert(expr) ((void)0) 27#define assert(expr) ((void)0)
31 28
29#define DP_EBIAS 1023
30#define DP_EMIN (-1022)
31#define DP_EMAX 1023
32#define DP_FBITS 52
33#define DP_MBITS 52
34
35#define DP_MBIT(x) ((u64)1 << (x))
36#define DP_HIDDEN_BIT DP_MBIT(DP_FBITS)
37#define DP_SIGN_BIT DP_MBIT(63)
38
39#define DPSIGN(dp) (dp.sign)
40#define DPBEXP(dp) (dp.bexp)
41#define DPMANT(dp) (dp.mant)
42
43static inline int ieee754dp_finite(union ieee754dp x)
44{
45 return DPBEXP(x) != DP_EMAX + 1 + DP_EBIAS;
46}
47
32/* 3bit extended double precision sticky right shift */ 48/* 3bit extended double precision sticky right shift */
33#define XDPSRS(v,rs) \ 49#define XDPSRS(v,rs) \
34 ((rs > (DP_MBITS+3))?1:((v) >> (rs)) | ((v) << (64-(rs)) != 0)) 50 ((rs > (DP_FBITS+3))?1:((v) >> (rs)) | ((v) << (64-(rs)) != 0))
35 51
36#define XDPSRSX1() \ 52#define XDPSRSX1() \
37 (xe++, (xm = (xm >> 1) | (xm & 1))) 53 (xe++, (xm = (xm >> 1) | (xm & 1)))
38 54
39#define XDPSRS1(v) \ 55#define XDPSRS1(v) \
40 (((v) >> 1) | ((v) & 1)) 56 (((v) >> 1) | ((v) & 1))
41 57
42/* convert denormal to normalized with extended exponent */ 58/* convert denormal to normalized with extended exponent */
43#define DPDNORMx(m,e) \ 59#define DPDNORMx(m,e) \
44 while( (m >> DP_MBITS) == 0) { m <<= 1; e--; } 60 while ((m >> DP_FBITS) == 0) { m <<= 1; e--; }
45#define DPDNORMX DPDNORMx(xm, xe) 61#define DPDNORMX DPDNORMx(xm, xe)
46#define DPDNORMY DPDNORMx(ym, ye) 62#define DPDNORMY DPDNORMx(ym, ye)
47 63
48static inline ieee754dp builddp(int s, int bx, u64 m) 64static inline union ieee754dp builddp(int s, int bx, u64 m)
49{ 65{
50 ieee754dp r; 66 union ieee754dp r;
51 67
52 assert((s) == 0 || (s) == 1); 68 assert((s) == 0 || (s) == 1);
53 assert((bx) >= DP_EMIN - 1 + DP_EBIAS 69 assert((bx) >= DP_EMIN - 1 + DP_EBIAS
54 && (bx) <= DP_EMAX + 1 + DP_EBIAS); 70 && (bx) <= DP_EMAX + 1 + DP_EBIAS);
55 assert(((m) >> DP_MBITS) == 0); 71 assert(((m) >> DP_FBITS) == 0);
56 72
57 r.parts.sign = s; 73 r.sign = s;
58 r.parts.bexp = bx; 74 r.bexp = bx;
59 r.parts.mant = m; 75 r.mant = m;
60 return r;
61}
62 76
63extern int ieee754dp_isnan(ieee754dp); 77 return r;
64extern int ieee754dp_issnan(ieee754dp);
65extern int ieee754si_xcpt(int, const char *, ...);
66extern s64 ieee754di_xcpt(s64, const char *, ...);
67extern ieee754dp ieee754dp_xcpt(ieee754dp, const char *, ...);
68extern ieee754dp ieee754dp_nanxcpt(ieee754dp, const char *, ...);
69extern ieee754dp ieee754dp_bestnan(ieee754dp, ieee754dp);
70extern ieee754dp ieee754dp_format(int, int, u64);
71
72
73#define DPNORMRET2(s, e, m, name, a0, a1) \
74{ \
75 ieee754dp V = ieee754dp_format(s, e, m); \
76 if(TSTX()) \
77 return ieee754dp_xcpt(V, name, a0, a1); \
78 else \
79 return V; \
80} 78}
81 79
82#define DPNORMRET1(s, e, m, name, a0) DPNORMRET2(s, e, m, name, a0, a0) 80extern int ieee754dp_isnan(union ieee754dp);
81extern union ieee754dp __cold ieee754dp_nanxcpt(union ieee754dp);
82extern union ieee754dp ieee754dp_format(int, int, u64);
diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h
index 4b6c6fb35304..f0365bb86747 100644
--- a/arch/mips/math-emu/ieee754int.h
+++ b/arch/mips/math-emu/ieee754int.h
@@ -6,8 +6,6 @@
6 * MIPS floating point support 6 * MIPS floating point support
7 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 * Copyright (C) 1994-2000 Algorithmics Ltd.
8 * 8 *
9 * ########################################################################
10 *
11 * This program is free software; you can distribute it and/or modify it 9 * This program is free software; you can distribute it and/or modify it
12 * under the terms of the GNU General Public License (Version 2) as 10 * under the terms of the GNU General Public License (Version 2) as
13 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
@@ -19,146 +17,125 @@
19 * 17 *
20 * You should have received a copy of the GNU General Public License along 18 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc., 19 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
23 *
24 * ########################################################################
25 */ 21 */
26 22#ifndef __IEEE754INT_H
23#define __IEEE754INT_H
27 24
28#include "ieee754.h" 25#include "ieee754.h"
29 26
30#define DP_EBIAS 1023
31#define DP_EMIN (-1022)
32#define DP_EMAX 1023
33#define DP_MBITS 52
34
35#define SP_EBIAS 127
36#define SP_EMIN (-126)
37#define SP_EMAX 127
38#define SP_MBITS 23
39
40#define DP_MBIT(x) ((u64)1 << (x))
41#define DP_HIDDEN_BIT DP_MBIT(DP_MBITS)
42#define DP_SIGN_BIT DP_MBIT(63)
43
44#define SP_MBIT(x) ((u32)1 << (x))
45#define SP_HIDDEN_BIT SP_MBIT(SP_MBITS)
46#define SP_SIGN_BIT SP_MBIT(31)
47
48
49#define SPSIGN(sp) (sp.parts.sign)
50#define SPBEXP(sp) (sp.parts.bexp)
51#define SPMANT(sp) (sp.parts.mant)
52
53#define DPSIGN(dp) (dp.parts.sign)
54#define DPBEXP(dp) (dp.parts.bexp)
55#define DPMANT(dp) (dp.parts.mant)
56
57#define CLPAIR(x, y) ((x)*6+(y)) 27#define CLPAIR(x, y) ((x)*6+(y))
58 28
59#define CLEARCX \ 29static inline void ieee754_clearcx(void)
60 (ieee754_csr.cx = 0) 30{
61 31 ieee754_csr.cx = 0;
62#define SETCX(x) \ 32}
63 (ieee754_csr.cx |= (x), ieee754_csr.sx |= (x))
64 33
65#define SETANDTESTCX(x) \ 34static inline void ieee754_setcx(const unsigned int flags)
66 (SETCX(x), ieee754_csr.mx & (x)) 35{
36 ieee754_csr.cx |= flags;
37 ieee754_csr.sx |= flags;
38}
67 39
68#define TSTX() \ 40static inline int ieee754_setandtestcx(const unsigned int x)
69 (ieee754_csr.cx & ieee754_csr.mx) 41{
42 ieee754_setcx(x);
70 43
44 return ieee754_csr.mx & x;
45}
71 46
72#define COMPXSP \ 47#define COMPXSP \
73 unsigned xm; int xe; int xs __maybe_unused; int xc 48 unsigned xm; int xe; int xs __maybe_unused; int xc
74 49
75#define COMPYSP \ 50#define COMPYSP \
76 unsigned ym; int ye; int ys; int yc 51 unsigned ym; int ye; int ys; int yc
77 52
78#define EXPLODESP(v, vc, vs, ve, vm) \ 53#define EXPLODESP(v, vc, vs, ve, vm) \
79{\ 54{ \
80 vs = SPSIGN(v);\ 55 vs = SPSIGN(v); \
81 ve = SPBEXP(v);\ 56 ve = SPBEXP(v); \
82 vm = SPMANT(v);\ 57 vm = SPMANT(v); \
83 if(ve == SP_EMAX+1+SP_EBIAS){\ 58 if (ve == SP_EMAX+1+SP_EBIAS) { \
84 if(vm == 0)\ 59 if (vm == 0) \
85 vc = IEEE754_CLASS_INF;\ 60 vc = IEEE754_CLASS_INF; \
86 else if(vm & SP_MBIT(SP_MBITS-1)) \ 61 else if (vm & SP_MBIT(SP_FBITS-1)) \
87 vc = IEEE754_CLASS_SNAN;\ 62 vc = IEEE754_CLASS_SNAN; \
88 else \ 63 else \
89 vc = IEEE754_CLASS_QNAN;\ 64 vc = IEEE754_CLASS_QNAN; \
90 } else if(ve == SP_EMIN-1+SP_EBIAS) {\ 65 } else if (ve == SP_EMIN-1+SP_EBIAS) { \
91 if(vm) {\ 66 if (vm) { \
92 ve = SP_EMIN;\ 67 ve = SP_EMIN; \
93 vc = IEEE754_CLASS_DNORM;\ 68 vc = IEEE754_CLASS_DNORM; \
94 } else\ 69 } else \
95 vc = IEEE754_CLASS_ZERO;\ 70 vc = IEEE754_CLASS_ZERO; \
96 } else {\ 71 } else { \
97 ve -= SP_EBIAS;\ 72 ve -= SP_EBIAS; \
98 vm |= SP_HIDDEN_BIT;\ 73 vm |= SP_HIDDEN_BIT; \
99 vc = IEEE754_CLASS_NORM;\ 74 vc = IEEE754_CLASS_NORM; \
100 }\ 75 } \
101} 76}
102#define EXPLODEXSP EXPLODESP(x, xc, xs, xe, xm) 77#define EXPLODEXSP EXPLODESP(x, xc, xs, xe, xm)
103#define EXPLODEYSP EXPLODESP(y, yc, ys, ye, ym) 78#define EXPLODEYSP EXPLODESP(y, yc, ys, ye, ym)
104 79
105 80
106#define COMPXDP \ 81#define COMPXDP \
107u64 xm; int xe; int xs __maybe_unused; int xc 82 u64 xm; int xe; int xs __maybe_unused; int xc
108 83
109#define COMPYDP \ 84#define COMPYDP \
110u64 ym; int ye; int ys; int yc 85 u64 ym; int ye; int ys; int yc
111 86
112#define EXPLODEDP(v, vc, vs, ve, vm) \ 87#define EXPLODEDP(v, vc, vs, ve, vm) \
113{\ 88{ \
114 vm = DPMANT(v);\ 89 vm = DPMANT(v); \
115 vs = DPSIGN(v);\ 90 vs = DPSIGN(v); \
116 ve = DPBEXP(v);\ 91 ve = DPBEXP(v); \
117 if(ve == DP_EMAX+1+DP_EBIAS){\ 92 if (ve == DP_EMAX+1+DP_EBIAS) { \
118 if(vm == 0)\ 93 if (vm == 0) \
119 vc = IEEE754_CLASS_INF;\ 94 vc = IEEE754_CLASS_INF; \
120 else if(vm & DP_MBIT(DP_MBITS-1)) \ 95 else if (vm & DP_MBIT(DP_FBITS-1)) \
121 vc = IEEE754_CLASS_SNAN;\ 96 vc = IEEE754_CLASS_SNAN; \
122 else \ 97 else \
123 vc = IEEE754_CLASS_QNAN;\ 98 vc = IEEE754_CLASS_QNAN; \
124 } else if(ve == DP_EMIN-1+DP_EBIAS) {\ 99 } else if (ve == DP_EMIN-1+DP_EBIAS) { \
125 if(vm) {\ 100 if (vm) { \
126 ve = DP_EMIN;\ 101 ve = DP_EMIN; \
127 vc = IEEE754_CLASS_DNORM;\ 102 vc = IEEE754_CLASS_DNORM; \
128 } else\ 103 } else \
129 vc = IEEE754_CLASS_ZERO;\ 104 vc = IEEE754_CLASS_ZERO; \
130 } else {\ 105 } else { \
131 ve -= DP_EBIAS;\ 106 ve -= DP_EBIAS; \
132 vm |= DP_HIDDEN_BIT;\ 107 vm |= DP_HIDDEN_BIT; \
133 vc = IEEE754_CLASS_NORM;\ 108 vc = IEEE754_CLASS_NORM; \
134 }\ 109 } \
135} 110}
136#define EXPLODEXDP EXPLODEDP(x, xc, xs, xe, xm) 111#define EXPLODEXDP EXPLODEDP(x, xc, xs, xe, xm)
137#define EXPLODEYDP EXPLODEDP(y, yc, ys, ye, ym) 112#define EXPLODEYDP EXPLODEDP(y, yc, ys, ye, ym)
138 113
139#define FLUSHDP(v, vc, vs, ve, vm) \ 114#define FLUSHDP(v, vc, vs, ve, vm) \
140 if(vc==IEEE754_CLASS_DNORM) {\ 115 if (vc==IEEE754_CLASS_DNORM) { \
141 if(ieee754_csr.nod) {\ 116 if (ieee754_csr.nod) { \
142 SETCX(IEEE754_INEXACT);\ 117 ieee754_setcx(IEEE754_INEXACT); \
143 vc = IEEE754_CLASS_ZERO;\ 118 vc = IEEE754_CLASS_ZERO; \
144 ve = DP_EMIN-1+DP_EBIAS;\ 119 ve = DP_EMIN-1+DP_EBIAS; \
145 vm = 0;\ 120 vm = 0; \
146 v = ieee754dp_zero(vs);\ 121 v = ieee754dp_zero(vs); \
147 }\ 122 } \
148 } 123 }
149 124
150#define FLUSHSP(v, vc, vs, ve, vm) \ 125#define FLUSHSP(v, vc, vs, ve, vm) \
151 if(vc==IEEE754_CLASS_DNORM) {\ 126 if (vc==IEEE754_CLASS_DNORM) { \
152 if(ieee754_csr.nod) {\ 127 if (ieee754_csr.nod) { \
153 SETCX(IEEE754_INEXACT);\ 128 ieee754_setcx(IEEE754_INEXACT); \
154 vc = IEEE754_CLASS_ZERO;\ 129 vc = IEEE754_CLASS_ZERO; \
155 ve = SP_EMIN-1+SP_EBIAS;\ 130 ve = SP_EMIN-1+SP_EBIAS; \
156 vm = 0;\ 131 vm = 0; \
157 v = ieee754sp_zero(vs);\ 132 v = ieee754sp_zero(vs); \
158 }\ 133 } \
159 } 134 }
160 135
161#define FLUSHXDP FLUSHDP(x, xc, xs, xe, xm) 136#define FLUSHXDP FLUSHDP(x, xc, xs, xe, xm)
162#define FLUSHYDP FLUSHDP(y, yc, ys, ye, ym) 137#define FLUSHYDP FLUSHDP(y, yc, ys, ye, ym)
163#define FLUSHXSP FLUSHSP(x, xc, xs, xe, xm) 138#define FLUSHXSP FLUSHSP(x, xc, xs, xe, xm)
164#define FLUSHYSP FLUSHSP(y, yc, ys, ye, ym) 139#define FLUSHYSP FLUSHSP(y, yc, ys, ye, ym)
140
141#endif /* __IEEE754INT_H */
diff --git a/arch/mips/math-emu/ieee754m.c b/arch/mips/math-emu/ieee754m.c
deleted file mode 100644
index 24190f3c9dd6..000000000000
--- a/arch/mips/math-emu/ieee754m.c
+++ /dev/null
@@ -1,55 +0,0 @@
1/*
2 * floor, trunc, ceil
3 */
4/*
5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 * for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
22 *
23 * ########################################################################
24 */
25
26
27#include "ieee754.h"
28
29ieee754dp ieee754dp_floor(ieee754dp x)
30{
31 ieee754dp i;
32
33 if (ieee754dp_lt(ieee754dp_modf(x, &i), ieee754dp_zero(0)))
34 return ieee754dp_sub(i, ieee754dp_one(0));
35 else
36 return i;
37}
38
39ieee754dp ieee754dp_ceil(ieee754dp x)
40{
41 ieee754dp i;
42
43 if (ieee754dp_gt(ieee754dp_modf(x, &i), ieee754dp_zero(0)))
44 return ieee754dp_add(i, ieee754dp_one(0));
45 else
46 return i;
47}
48
49ieee754dp ieee754dp_trunc(ieee754dp x)
50{
51 ieee754dp i;
52
53 (void) ieee754dp_modf(x, &i);
54 return i;
55}
diff --git a/arch/mips/math-emu/ieee754sp.c b/arch/mips/math-emu/ieee754sp.c
index 15d1e36cfe64..d348efe91445 100644
--- a/arch/mips/math-emu/ieee754sp.c
+++ b/arch/mips/math-emu/ieee754sp.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,105 +16,68 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
22#include <linux/compiler.h>
26 23
27#include "ieee754sp.h" 24#include "ieee754sp.h"
28 25
29int ieee754sp_class(ieee754sp x) 26int ieee754sp_class(union ieee754sp x)
30{ 27{
31 COMPXSP; 28 COMPXSP;
32 EXPLODEXSP; 29 EXPLODEXSP;
33 return xc; 30 return xc;
34} 31}
35 32
36int ieee754sp_isnan(ieee754sp x) 33int ieee754sp_isnan(union ieee754sp x)
37{ 34{
38 return ieee754sp_class(x) >= IEEE754_CLASS_SNAN; 35 return ieee754sp_class(x) >= IEEE754_CLASS_SNAN;
39} 36}
40 37
41int ieee754sp_issnan(ieee754sp x) 38static inline int ieee754sp_issnan(union ieee754sp x)
42{ 39{
43 assert(ieee754sp_isnan(x)); 40 assert(ieee754sp_isnan(x));
44 return (SPMANT(x) & SP_MBIT(SP_MBITS-1)); 41 return (SPMANT(x) & SP_MBIT(SP_FBITS-1));
45} 42}
46 43
47 44
48ieee754sp ieee754sp_xcpt(ieee754sp r, const char *op, ...) 45union ieee754sp __cold ieee754sp_nanxcpt(union ieee754sp r)
49{
50 struct ieee754xctx ax;
51
52 if (!TSTX())
53 return r;
54
55 ax.op = op;
56 ax.rt = IEEE754_RT_SP;
57 ax.rv.sp = r;
58 va_start(ax.ap, op);
59 ieee754_xcpt(&ax);
60 va_end(ax.ap);
61 return ax.rv.sp;
62}
63
64ieee754sp ieee754sp_nanxcpt(ieee754sp r, const char *op, ...)
65{ 46{
66 struct ieee754xctx ax;
67
68 assert(ieee754sp_isnan(r)); 47 assert(ieee754sp_isnan(r));
69 48
70 if (!ieee754sp_issnan(r)) /* QNAN does not cause invalid op !! */ 49 if (!ieee754sp_issnan(r)) /* QNAN does not cause invalid op !! */
71 return r; 50 return r;
72 51
73 if (!SETANDTESTCX(IEEE754_INVALID_OPERATION)) { 52 if (!ieee754_setandtestcx(IEEE754_INVALID_OPERATION)) {
74 /* not enabled convert to a quiet NaN */ 53 /* not enabled convert to a quiet NaN */
75 SPMANT(r) &= (~SP_MBIT(SP_MBITS-1)); 54 SPMANT(r) &= (~SP_MBIT(SP_FBITS-1));
76 if (ieee754sp_isnan(r)) 55 if (ieee754sp_isnan(r))
77 return r; 56 return r;
78 else 57 else
79 return ieee754sp_indef(); 58 return ieee754sp_indef();
80 } 59 }
81 60
82 ax.op = op; 61 return r;
83 ax.rt = 0;
84 ax.rv.sp = r;
85 va_start(ax.ap, op);
86 ieee754_xcpt(&ax);
87 va_end(ax.ap);
88 return ax.rv.sp;
89}
90
91ieee754sp ieee754sp_bestnan(ieee754sp x, ieee754sp y)
92{
93 assert(ieee754sp_isnan(x));
94 assert(ieee754sp_isnan(y));
95
96 if (SPMANT(x) > SPMANT(y))
97 return x;
98 else
99 return y;
100} 62}
101 63
102 64static unsigned ieee754sp_get_rounding(int sn, unsigned xm)
103static unsigned get_rounding(int sn, unsigned xm)
104{ 65{
105 /* inexact must round of 3 bits 66 /* inexact must round of 3 bits
106 */ 67 */
107 if (xm & (SP_MBIT(3) - 1)) { 68 if (xm & (SP_MBIT(3) - 1)) {
108 switch (ieee754_csr.rm) { 69 switch (ieee754_csr.rm) {
109 case IEEE754_RZ: 70 case FPU_CSR_RZ:
110 break; 71 break;
111 case IEEE754_RN: 72 case FPU_CSR_RN:
112 xm += 0x3 + ((xm >> 3) & 1); 73 xm += 0x3 + ((xm >> 3) & 1);
113 /* xm += (xm&0x8)?0x4:0x3 */ 74 /* xm += (xm&0x8)?0x4:0x3 */
114 break; 75 break;
115 case IEEE754_RU: /* toward +Infinity */ 76 case FPU_CSR_RU: /* toward +Infinity */
116 if (!sn) /* ?? */ 77 if (!sn) /* ?? */
117 xm += 0x8; 78 xm += 0x8;
118 break; 79 break;
119 case IEEE754_RD: /* toward -Infinity */ 80 case FPU_CSR_RD: /* toward -Infinity */
120 if (sn) /* ?? */ 81 if (sn) /* ?? */
121 xm += 0x8; 82 xm += 0x8;
122 break; 83 break;
@@ -131,11 +92,11 @@ static unsigned get_rounding(int sn, unsigned xm)
131 * xe is an unbiased exponent 92 * xe is an unbiased exponent
132 * xm is 3bit extended precision value. 93 * xm is 3bit extended precision value.
133 */ 94 */
134ieee754sp ieee754sp_format(int sn, int xe, unsigned xm) 95union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
135{ 96{
136 assert(xm); /* we don't gen exact zeros (probably should) */ 97 assert(xm); /* we don't gen exact zeros (probably should) */
137 98
138 assert((xm >> (SP_MBITS + 1 + 3)) == 0); /* no execess */ 99 assert((xm >> (SP_FBITS + 1 + 3)) == 0); /* no execess */
139 assert(xm & (SP_HIDDEN_BIT << 3)); 100 assert(xm & (SP_HIDDEN_BIT << 3));
140 101
141 if (xe < SP_EMIN) { 102 if (xe < SP_EMIN) {
@@ -143,38 +104,37 @@ ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
143 int es = SP_EMIN - xe; 104 int es = SP_EMIN - xe;
144 105
145 if (ieee754_csr.nod) { 106 if (ieee754_csr.nod) {
146 SETCX(IEEE754_UNDERFLOW); 107 ieee754_setcx(IEEE754_UNDERFLOW);
147 SETCX(IEEE754_INEXACT); 108 ieee754_setcx(IEEE754_INEXACT);
148 109
149 switch(ieee754_csr.rm) { 110 switch(ieee754_csr.rm) {
150 case IEEE754_RN: 111 case FPU_CSR_RN:
151 case IEEE754_RZ: 112 case FPU_CSR_RZ:
152 return ieee754sp_zero(sn); 113 return ieee754sp_zero(sn);
153 case IEEE754_RU: /* toward +Infinity */ 114 case FPU_CSR_RU: /* toward +Infinity */
154 if(sn == 0) 115 if (sn == 0)
155 return ieee754sp_min(0); 116 return ieee754sp_min(0);
156 else 117 else
157 return ieee754sp_zero(1); 118 return ieee754sp_zero(1);
158 case IEEE754_RD: /* toward -Infinity */ 119 case FPU_CSR_RD: /* toward -Infinity */
159 if(sn == 0) 120 if (sn == 0)
160 return ieee754sp_zero(0); 121 return ieee754sp_zero(0);
161 else 122 else
162 return ieee754sp_min(1); 123 return ieee754sp_min(1);
163 } 124 }
164 } 125 }
165 126
166 if (xe == SP_EMIN - 1 127 if (xe == SP_EMIN - 1 &&
167 && get_rounding(sn, xm) >> (SP_MBITS + 1 + 3)) 128 ieee754sp_get_rounding(sn, xm) >> (SP_FBITS + 1 + 3))
168 { 129 {
169 /* Not tiny after rounding */ 130 /* Not tiny after rounding */
170 SETCX(IEEE754_INEXACT); 131 ieee754_setcx(IEEE754_INEXACT);
171 xm = get_rounding(sn, xm); 132 xm = ieee754sp_get_rounding(sn, xm);
172 xm >>= 1; 133 xm >>= 1;
173 /* Clear grs bits */ 134 /* Clear grs bits */
174 xm &= ~(SP_MBIT(3) - 1); 135 xm &= ~(SP_MBIT(3) - 1);
175 xe++; 136 xe++;
176 } 137 } else {
177 else {
178 /* sticky right shift es bits 138 /* sticky right shift es bits
179 */ 139 */
180 SPXSRSXn(es); 140 SPXSRSXn(es);
@@ -183,17 +143,17 @@ ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
183 } 143 }
184 } 144 }
185 if (xm & (SP_MBIT(3) - 1)) { 145 if (xm & (SP_MBIT(3) - 1)) {
186 SETCX(IEEE754_INEXACT); 146 ieee754_setcx(IEEE754_INEXACT);
187 if ((xm & (SP_HIDDEN_BIT << 3)) == 0) { 147 if ((xm & (SP_HIDDEN_BIT << 3)) == 0) {
188 SETCX(IEEE754_UNDERFLOW); 148 ieee754_setcx(IEEE754_UNDERFLOW);
189 } 149 }
190 150
191 /* inexact must round of 3 bits 151 /* inexact must round of 3 bits
192 */ 152 */
193 xm = get_rounding(sn, xm); 153 xm = ieee754sp_get_rounding(sn, xm);
194 /* adjust exponent for rounding add overflowing 154 /* adjust exponent for rounding add overflowing
195 */ 155 */
196 if (xm >> (SP_MBITS + 1 + 3)) { 156 if (xm >> (SP_FBITS + 1 + 3)) {
197 /* add causes mantissa overflow */ 157 /* add causes mantissa overflow */
198 xm >>= 1; 158 xm >>= 1;
199 xe++; 159 xe++;
@@ -202,24 +162,24 @@ ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
202 /* strip grs bits */ 162 /* strip grs bits */
203 xm >>= 3; 163 xm >>= 3;
204 164
205 assert((xm >> (SP_MBITS + 1)) == 0); /* no execess */ 165 assert((xm >> (SP_FBITS + 1)) == 0); /* no execess */
206 assert(xe >= SP_EMIN); 166 assert(xe >= SP_EMIN);
207 167
208 if (xe > SP_EMAX) { 168 if (xe > SP_EMAX) {
209 SETCX(IEEE754_OVERFLOW); 169 ieee754_setcx(IEEE754_OVERFLOW);
210 SETCX(IEEE754_INEXACT); 170 ieee754_setcx(IEEE754_INEXACT);
211 /* -O can be table indexed by (rm,sn) */ 171 /* -O can be table indexed by (rm,sn) */
212 switch (ieee754_csr.rm) { 172 switch (ieee754_csr.rm) {
213 case IEEE754_RN: 173 case FPU_CSR_RN:
214 return ieee754sp_inf(sn); 174 return ieee754sp_inf(sn);
215 case IEEE754_RZ: 175 case FPU_CSR_RZ:
216 return ieee754sp_max(sn); 176 return ieee754sp_max(sn);
217 case IEEE754_RU: /* toward +Infinity */ 177 case FPU_CSR_RU: /* toward +Infinity */
218 if (sn == 0) 178 if (sn == 0)
219 return ieee754sp_inf(0); 179 return ieee754sp_inf(0);
220 else 180 else
221 return ieee754sp_max(1); 181 return ieee754sp_max(1);
222 case IEEE754_RD: /* toward -Infinity */ 182 case FPU_CSR_RD: /* toward -Infinity */
223 if (sn == 0) 183 if (sn == 0)
224 return ieee754sp_max(0); 184 return ieee754sp_max(0);
225 else 185 else
@@ -232,10 +192,10 @@ ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
232 /* we underflow (tiny/zero) */ 192 /* we underflow (tiny/zero) */
233 assert(xe == SP_EMIN); 193 assert(xe == SP_EMIN);
234 if (ieee754_csr.mx & IEEE754_UNDERFLOW) 194 if (ieee754_csr.mx & IEEE754_UNDERFLOW)
235 SETCX(IEEE754_UNDERFLOW); 195 ieee754_setcx(IEEE754_UNDERFLOW);
236 return buildsp(sn, SP_EMIN - 1 + SP_EBIAS, xm); 196 return buildsp(sn, SP_EMIN - 1 + SP_EBIAS, xm);
237 } else { 197 } else {
238 assert((xm >> (SP_MBITS + 1)) == 0); /* no execess */ 198 assert((xm >> (SP_FBITS + 1)) == 0); /* no execess */
239 assert(xm & SP_HIDDEN_BIT); 199 assert(xm & SP_HIDDEN_BIT);
240 200
241 return buildsp(sn, xe + SP_EBIAS, xm & ~SP_HIDDEN_BIT); 201 return buildsp(sn, xe + SP_EBIAS, xm & ~SP_HIDDEN_BIT);
diff --git a/arch/mips/math-emu/ieee754sp.h b/arch/mips/math-emu/ieee754sp.h
index 754fd54649b5..ad268e332318 100644
--- a/arch/mips/math-emu/ieee754sp.h
+++ b/arch/mips/math-emu/ieee754sp.h
@@ -6,8 +6,6 @@
6 * MIPS floating point support 6 * MIPS floating point support
7 * Copyright (C) 1994-2000 Algorithmics Ltd. 7 * Copyright (C) 1994-2000 Algorithmics Ltd.
8 * 8 *
9 * ########################################################################
10 *
11 * This program is free software; you can distribute it and/or modify it 9 * This program is free software; you can distribute it and/or modify it
12 * under the terms of the GNU General Public License (Version 2) as 10 * under the terms of the GNU General Public License (Version 2) as
13 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
@@ -19,70 +17,71 @@
19 * 17 *
20 * You should have received a copy of the GNU General Public License along 18 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc., 19 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 20 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
23 *
24 * ########################################################################
25 */ 21 */
26 22
23#include <linux/compiler.h>
27 24
28#include "ieee754int.h" 25#include "ieee754int.h"
29 26
30#define assert(expr) ((void)0) 27#define assert(expr) ((void)0)
31 28
29#define SP_EBIAS 127
30#define SP_EMIN (-126)
31#define SP_EMAX 127
32#define SP_FBITS 23
33#define SP_MBITS 23
34
35#define SP_MBIT(x) ((u32)1 << (x))
36#define SP_HIDDEN_BIT SP_MBIT(SP_FBITS)
37#define SP_SIGN_BIT SP_MBIT(31)
38
39#define SPSIGN(sp) (sp.sign)
40#define SPBEXP(sp) (sp.bexp)
41#define SPMANT(sp) (sp.mant)
42
43static inline int ieee754sp_finite(union ieee754sp x)
44{
45 return SPBEXP(x) != SP_EMAX + 1 + SP_EBIAS;
46}
47
32/* 3bit extended single precision sticky right shift */ 48/* 3bit extended single precision sticky right shift */
33#define SPXSRSXn(rs) \ 49#define SPXSRSXn(rs) \
34 (xe += rs, \ 50 (xe += rs, \
35 xm = (rs > (SP_MBITS+3))?1:((xm) >> (rs)) | ((xm) << (32-(rs)) != 0)) 51 xm = (rs > (SP_FBITS+3))?1:((xm) >> (rs)) | ((xm) << (32-(rs)) != 0))
36 52
37#define SPXSRSX1() \ 53#define SPXSRSX1() \
38 (xe++, (xm = (xm >> 1) | (xm & 1))) 54 (xe++, (xm = (xm >> 1) | (xm & 1)))
39 55
40#define SPXSRSYn(rs) \ 56#define SPXSRSYn(rs) \
41 (ye+=rs, \ 57 (ye+=rs, \
42 ym = (rs > (SP_MBITS+3))?1:((ym) >> (rs)) | ((ym) << (32-(rs)) != 0)) 58 ym = (rs > (SP_FBITS+3))?1:((ym) >> (rs)) | ((ym) << (32-(rs)) != 0))
43 59
44#define SPXSRSY1() \ 60#define SPXSRSY1() \
45 (ye++, (ym = (ym >> 1) | (ym & 1))) 61 (ye++, (ym = (ym >> 1) | (ym & 1)))
46 62
47/* convert denormal to normalized with extended exponent */ 63/* convert denormal to normalized with extended exponent */
48#define SPDNORMx(m,e) \ 64#define SPDNORMx(m,e) \
49 while( (m >> SP_MBITS) == 0) { m <<= 1; e--; } 65 while ((m >> SP_FBITS) == 0) { m <<= 1; e--; }
50#define SPDNORMX SPDNORMx(xm, xe) 66#define SPDNORMX SPDNORMx(xm, xe)
51#define SPDNORMY SPDNORMx(ym, ye) 67#define SPDNORMY SPDNORMx(ym, ye)
52 68
53static inline ieee754sp buildsp(int s, int bx, unsigned m) 69static inline union ieee754sp buildsp(int s, int bx, unsigned m)
54{ 70{
55 ieee754sp r; 71 union ieee754sp r;
56 72
57 assert((s) == 0 || (s) == 1); 73 assert((s) == 0 || (s) == 1);
58 assert((bx) >= SP_EMIN - 1 + SP_EBIAS 74 assert((bx) >= SP_EMIN - 1 + SP_EBIAS
59 && (bx) <= SP_EMAX + 1 + SP_EBIAS); 75 && (bx) <= SP_EMAX + 1 + SP_EBIAS);
60 assert(((m) >> SP_MBITS) == 0); 76 assert(((m) >> SP_FBITS) == 0);
61 77
62 r.parts.sign = s; 78 r.sign = s;
63 r.parts.bexp = bx; 79 r.bexp = bx;
64 r.parts.mant = m; 80 r.mant = m;
65 81
66 return r; 82 return r;
67} 83}
68 84
69extern int ieee754sp_isnan(ieee754sp); 85extern int ieee754sp_isnan(union ieee754sp);
70extern int ieee754sp_issnan(ieee754sp); 86extern union ieee754sp __cold ieee754sp_nanxcpt(union ieee754sp);
71extern int ieee754si_xcpt(int, const char *, ...); 87extern union ieee754sp ieee754sp_format(int, int, unsigned);
72extern s64 ieee754di_xcpt(s64, const char *, ...);
73extern ieee754sp ieee754sp_xcpt(ieee754sp, const char *, ...);
74extern ieee754sp ieee754sp_nanxcpt(ieee754sp, const char *, ...);
75extern ieee754sp ieee754sp_bestnan(ieee754sp, ieee754sp);
76extern ieee754sp ieee754sp_format(int, int, unsigned);
77
78
79#define SPNORMRET2(s, e, m, name, a0, a1) \
80{ \
81 ieee754sp V = ieee754sp_format(s, e, m); \
82 if(TSTX()) \
83 return ieee754sp_xcpt(V, name, a0, a1); \
84 else \
85 return V; \
86}
87
88#define SPNORMRET1(s, e, m, name, a0) SPNORMRET2(s, e, m, name, a0, a0)
diff --git a/arch/mips/math-emu/ieee754xcpt.c b/arch/mips/math-emu/ieee754xcpt.c
deleted file mode 100644
index 967167116ae8..000000000000
--- a/arch/mips/math-emu/ieee754xcpt.c
+++ /dev/null
@@ -1,47 +0,0 @@
1/*
2 * MIPS floating point support
3 * Copyright (C) 1994-2000 Algorithmics Ltd.
4 *
5 * ########################################################################
6 *
7 * This program is free software; you can distribute it and/or modify it
8 * under the terms of the GNU General Public License (Version 2) as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
19 *
20 * ########################################################################
21 */
22
23/**************************************************************************
24 * Nov 7, 2000
25 * Added preprocessor hacks to map to Linux kernel diagnostics.
26 *
27 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
28 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
29 *************************************************************************/
30
31#include <linux/kernel.h>
32#include "ieee754.h"
33
34/*
35 * Very naff exception handler (you can plug in your own and
36 * override this).
37 */
38
39static const char *const rtnames[] = {
40 "sp", "dp", "xp", "si", "di"
41};
42
43void ieee754_xcpt(struct ieee754xctx *xcp)
44{
45 printk(KERN_DEBUG "floating point exception in \"%s\", type=%s\n",
46 xcp->op, rtnames[xcp->rt]);
47}
diff --git a/arch/mips/math-emu/kernel_linkage.c b/arch/mips/math-emu/kernel_linkage.c
deleted file mode 100644
index eb58a85b3157..000000000000
--- a/arch/mips/math-emu/kernel_linkage.c
+++ /dev/null
@@ -1,45 +0,0 @@
1/*
2 * Kevin D. Kissell, kevink@mips and Carsten Langgaard, carstenl@mips.com
3 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
4 *
5 * This program is free software; you can distribute it and/or modify it
6 * under the terms of the GNU General Public License (Version 2) as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
17 *
18 * Routines corresponding to Linux kernel FP context
19 * manipulation primitives for the Algorithmics MIPS
20 * FPU Emulator
21 */
22#include <linux/sched.h>
23#include <asm/processor.h>
24#include <asm/signal.h>
25#include <asm/uaccess.h>
26
27#include <asm/fpu.h>
28#include <asm/fpu_emulator.h>
29
30#define SIGNALLING_NAN 0x7ff800007ff80000LL
31
32void fpu_emulator_init_fpu(void)
33{
34 static int first = 1;
35 int i;
36
37 if (first) {
38 first = 0;
39 printk("Algorithmics/MIPS FPU Emulator v1.5\n");
40 }
41
42 current->thread.fpu.fcr31 = 0;
43 for (i = 0; i < 32; i++)
44 set_fpr64(&current->thread.fpu.fpr[i], 0, SIGNALLING_NAN);
45}
diff --git a/arch/mips/math-emu/me-debugfs.c b/arch/mips/math-emu/me-debugfs.c
new file mode 100644
index 000000000000..becdd63e14a9
--- /dev/null
+++ b/arch/mips/math-emu/me-debugfs.c
@@ -0,0 +1,67 @@
1#include <linux/cpumask.h>
2#include <linux/debugfs.h>
3#include <linux/fs.h>
4#include <linux/init.h>
5#include <linux/percpu.h>
6#include <linux/types.h>
7#include <asm/fpu_emulator.h>
8#include <asm/local.h>
9
10DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
11
12static int fpuemu_stat_get(void *data, u64 *val)
13{
14 int cpu;
15 unsigned long sum = 0;
16
17 for_each_online_cpu(cpu) {
18 struct mips_fpu_emulator_stats *ps;
19 local_t *pv;
20
21 ps = &per_cpu(fpuemustats, cpu);
22 pv = (void *)ps + (unsigned long)data;
23 sum += local_read(pv);
24 }
25 *val = sum;
26 return 0;
27}
28DEFINE_SIMPLE_ATTRIBUTE(fops_fpuemu_stat, fpuemu_stat_get, NULL, "%llu\n");
29
30extern struct dentry *mips_debugfs_dir;
31static int __init debugfs_fpuemu(void)
32{
33 struct dentry *d, *dir;
34
35 if (!mips_debugfs_dir)
36 return -ENODEV;
37 dir = debugfs_create_dir("fpuemustats", mips_debugfs_dir);
38 if (!dir)
39 return -ENOMEM;
40
41#define FPU_EMU_STAT_OFFSET(m) \
42 offsetof(struct mips_fpu_emulator_stats, m)
43
44#define FPU_STAT_CREATE(m) \
45do { \
46 d = debugfs_create_file(#m , S_IRUGO, dir, \
47 (void *)FPU_EMU_STAT_OFFSET(m), \
48 &fops_fpuemu_stat); \
49 if (!d) \
50 return -ENOMEM; \
51} while (0)
52
53 FPU_STAT_CREATE(emulated);
54 FPU_STAT_CREATE(loads);
55 FPU_STAT_CREATE(stores);
56 FPU_STAT_CREATE(cp1ops);
57 FPU_STAT_CREATE(cp1xops);
58 FPU_STAT_CREATE(errors);
59 FPU_STAT_CREATE(ieee754_inexact);
60 FPU_STAT_CREATE(ieee754_underflow);
61 FPU_STAT_CREATE(ieee754_overflow);
62 FPU_STAT_CREATE(ieee754_zerodiv);
63 FPU_STAT_CREATE(ieee754_invalidop);
64
65 return 0;
66}
67__initcall(debugfs_fpuemu);
diff --git a/arch/mips/math-emu/sp_add.c b/arch/mips/math-emu/sp_add.c
index c446e64637e2..2d84d460cb67 100644
--- a/arch/mips/math-emu/sp_add.c
+++ b/arch/mips/math-emu/sp_add.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,23 +16,22 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
26
27#include "ieee754sp.h" 22#include "ieee754sp.h"
28 23
29ieee754sp ieee754sp_add(ieee754sp x, ieee754sp y) 24union ieee754sp ieee754sp_add(union ieee754sp x, union ieee754sp y)
30{ 25{
26 int s;
27
31 COMPXSP; 28 COMPXSP;
32 COMPYSP; 29 COMPYSP;
33 30
34 EXPLODEXSP; 31 EXPLODEXSP;
35 EXPLODEYSP; 32 EXPLODEYSP;
36 33
37 CLEARCX; 34 ieee754_clearcx();
38 35
39 FLUSHXSP; 36 FLUSHXSP;
40 FLUSHYSP; 37 FLUSHYSP;
@@ -51,8 +48,8 @@ ieee754sp ieee754sp_add(ieee754sp x, ieee754sp y)
51 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): 48 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
52 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): 49 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
53 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): 50 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
54 SETCX(IEEE754_INVALID_OPERATION); 51 ieee754_setcx(IEEE754_INVALID_OPERATION);
55 return ieee754sp_nanxcpt(ieee754sp_indef(), "add", x, y); 52 return ieee754sp_nanxcpt(ieee754sp_indef());
56 53
57 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): 54 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
58 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): 55 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
@@ -68,14 +65,14 @@ ieee754sp ieee754sp_add(ieee754sp x, ieee754sp y)
68 return x; 65 return x;
69 66
70 67
71 /* Infinity handling 68 /*
72 */ 69 * Infinity handling
73 70 */
74 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): 71 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
75 if (xs == ys) 72 if (xs == ys)
76 return x; 73 return x;
77 SETCX(IEEE754_INVALID_OPERATION); 74 ieee754_setcx(IEEE754_INVALID_OPERATION);
78 return ieee754sp_xcpt(ieee754sp_indef(), "add", x, y); 75 return ieee754sp_indef();
79 76
80 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): 77 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
81 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): 78 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
@@ -87,15 +84,14 @@ ieee754sp ieee754sp_add(ieee754sp x, ieee754sp y)
87 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): 84 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
88 return x; 85 return x;
89 86
90 /* Zero handling 87 /*
91 */ 88 * Zero handling
92 89 */
93 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): 90 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
94 if (xs == ys) 91 if (xs == ys)
95 return x; 92 return x;
96 else 93 else
97 return ieee754sp_zero(ieee754_csr.rm == 94 return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);
98 IEEE754_RD);
99 95
100 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): 96 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
101 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): 97 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
@@ -108,6 +104,8 @@ ieee754sp ieee754sp_add(ieee754sp x, ieee754sp y)
108 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): 104 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
109 SPDNORMX; 105 SPDNORMX;
110 106
107 /* FALL THROUGH */
108
111 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM): 109 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
112 SPDNORMY; 110 SPDNORMY;
113 break; 111 break;
@@ -122,33 +120,38 @@ ieee754sp ieee754sp_add(ieee754sp x, ieee754sp y)
122 assert(xm & SP_HIDDEN_BIT); 120 assert(xm & SP_HIDDEN_BIT);
123 assert(ym & SP_HIDDEN_BIT); 121 assert(ym & SP_HIDDEN_BIT);
124 122
125 /* provide guard,round and stick bit space */ 123 /*
124 * Provide guard, round and stick bit space.
125 */
126 xm <<= 3; 126 xm <<= 3;
127 ym <<= 3; 127 ym <<= 3;
128 128
129 if (xe > ye) { 129 if (xe > ye) {
130 /* have to shift y fraction right to align 130 /*
131 * Have to shift y fraction right to align.
131 */ 132 */
132 int s = xe - ye; 133 s = xe - ye;
133 SPXSRSYn(s); 134 SPXSRSYn(s);
134 } else if (ye > xe) { 135 } else if (ye > xe) {
135 /* have to shift x fraction right to align 136 /*
137 * Have to shift x fraction right to align.
136 */ 138 */
137 int s = ye - xe; 139 s = ye - xe;
138 SPXSRSXn(s); 140 SPXSRSXn(s);
139 } 141 }
140 assert(xe == ye); 142 assert(xe == ye);
141 assert(xe <= SP_EMAX); 143 assert(xe <= SP_EMAX);
142 144
143 if (xs == ys) { 145 if (xs == ys) {
144 /* generate 28 bit result of adding two 27 bit numbers 146 /*
145 * leaving result in xm,xs,xe 147 * Generate 28 bit result of adding two 27 bit numbers
148 * leaving result in xm, xs and xe.
146 */ 149 */
147 xm = xm + ym; 150 xm = xm + ym;
148 xe = xe; 151 xe = xe;
149 xs = xs; 152 xs = xs;
150 153
151 if (xm >> (SP_MBITS + 1 + 3)) { /* carry out */ 154 if (xm >> (SP_FBITS + 1 + 3)) { /* carry out */
152 SPXSRSX1(); 155 SPXSRSX1();
153 } 156 }
154 } else { 157 } else {
@@ -162,15 +165,16 @@ ieee754sp ieee754sp_add(ieee754sp x, ieee754sp y)
162 xs = ys; 165 xs = ys;
163 } 166 }
164 if (xm == 0) 167 if (xm == 0)
165 return ieee754sp_zero(ieee754_csr.rm == 168 return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);
166 IEEE754_RD);
167 169
168 /* normalize in extended single precision */ 170 /*
169 while ((xm >> (SP_MBITS + 3)) == 0) { 171 * Normalize in extended single precision
172 */
173 while ((xm >> (SP_FBITS + 3)) == 0) {
170 xm <<= 1; 174 xm <<= 1;
171 xe--; 175 xe--;
172 } 176 }
173
174 } 177 }
175 SPNORMRET2(xs, xe, xm, "add", x, y); 178
179 return ieee754sp_format(xs, xe, xm);
176} 180}
diff --git a/arch/mips/math-emu/sp_cmp.c b/arch/mips/math-emu/sp_cmp.c
index 716cf37e2465..addbccb2f556 100644
--- a/arch/mips/math-emu/sp_cmp.c
+++ b/arch/mips/math-emu/sp_cmp.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,16 +16,16 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
26
27#include "ieee754sp.h" 22#include "ieee754sp.h"
28 23
29int ieee754sp_cmp(ieee754sp x, ieee754sp y, int cmp, int sig) 24int ieee754sp_cmp(union ieee754sp x, union ieee754sp y, int cmp, int sig)
30{ 25{
26 int vx;
27 int vy;
28
31 COMPXSP; 29 COMPXSP;
32 COMPYSP; 30 COMPYSP;
33 31
@@ -35,21 +33,21 @@ int ieee754sp_cmp(ieee754sp x, ieee754sp y, int cmp, int sig)
35 EXPLODEYSP; 33 EXPLODEYSP;
36 FLUSHXSP; 34 FLUSHXSP;
37 FLUSHYSP; 35 FLUSHYSP;
38 CLEARCX; /* Even clear inexact flag here */ 36 ieee754_clearcx(); /* Even clear inexact flag here */
39 37
40 if (ieee754sp_isnan(x) || ieee754sp_isnan(y)) { 38 if (ieee754sp_isnan(x) || ieee754sp_isnan(y)) {
41 if (sig || xc == IEEE754_CLASS_SNAN || yc == IEEE754_CLASS_SNAN) 39 if (sig || xc == IEEE754_CLASS_SNAN || yc == IEEE754_CLASS_SNAN)
42 SETCX(IEEE754_INVALID_OPERATION); 40 ieee754_setcx(IEEE754_INVALID_OPERATION);
43 if (cmp & IEEE754_CUN) 41 if (cmp & IEEE754_CUN)
44 return 1; 42 return 1;
45 if (cmp & (IEEE754_CLT | IEEE754_CGT)) { 43 if (cmp & (IEEE754_CLT | IEEE754_CGT)) {
46 if (sig && SETANDTESTCX(IEEE754_INVALID_OPERATION)) 44 if (sig && ieee754_setandtestcx(IEEE754_INVALID_OPERATION))
47 return ieee754si_xcpt(0, "fcmpf", x); 45 return 0;
48 } 46 }
49 return 0; 47 return 0;
50 } else { 48 } else {
51 int vx = x.bits; 49 vx = x.bits;
52 int vy = y.bits; 50 vy = y.bits;
53 51
54 if (vx < 0) 52 if (vx < 0)
55 vx = -vx ^ SP_SIGN_BIT; 53 vx = -vx ^ SP_SIGN_BIT;
diff --git a/arch/mips/math-emu/sp_div.c b/arch/mips/math-emu/sp_div.c
index d7747928c954..721f317aa877 100644
--- a/arch/mips/math-emu/sp_div.c
+++ b/arch/mips/math-emu/sp_div.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,23 +16,24 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
26
27#include "ieee754sp.h" 22#include "ieee754sp.h"
28 23
29ieee754sp ieee754sp_div(ieee754sp x, ieee754sp y) 24union ieee754sp ieee754sp_div(union ieee754sp x, union ieee754sp y)
30{ 25{
26 unsigned rm;
27 int re;
28 unsigned bm;
29
31 COMPXSP; 30 COMPXSP;
32 COMPYSP; 31 COMPYSP;
33 32
34 EXPLODEXSP; 33 EXPLODEXSP;
35 EXPLODEYSP; 34 EXPLODEYSP;
36 35
37 CLEARCX; 36 ieee754_clearcx();
38 37
39 FLUSHXSP; 38 FLUSHXSP;
40 FLUSHYSP; 39 FLUSHYSP;
@@ -51,8 +50,8 @@ ieee754sp ieee754sp_div(ieee754sp x, ieee754sp y)
51 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): 50 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
52 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): 51 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
53 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): 52 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
54 SETCX(IEEE754_INVALID_OPERATION); 53 ieee754_setcx(IEEE754_INVALID_OPERATION);
55 return ieee754sp_nanxcpt(ieee754sp_indef(), "div", x, y); 54 return ieee754sp_nanxcpt(ieee754sp_indef());
56 55
57 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): 56 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
58 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): 57 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
@@ -68,12 +67,12 @@ ieee754sp ieee754sp_div(ieee754sp x, ieee754sp y)
68 return x; 67 return x;
69 68
70 69
71 /* Infinity handling 70 /*
72 */ 71 * Infinity handling
73 72 */
74 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): 73 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
75 SETCX(IEEE754_INVALID_OPERATION); 74 ieee754_setcx(IEEE754_INVALID_OPERATION);
76 return ieee754sp_xcpt(ieee754sp_indef(), "div", x, y); 75 return ieee754sp_indef();
77 76
78 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): 77 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
79 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): 78 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
@@ -85,17 +84,17 @@ ieee754sp ieee754sp_div(ieee754sp x, ieee754sp y)
85 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): 84 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
86 return ieee754sp_inf(xs ^ ys); 85 return ieee754sp_inf(xs ^ ys);
87 86
88 /* Zero handling 87 /*
89 */ 88 * Zero handling
90 89 */
91 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): 90 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
92 SETCX(IEEE754_INVALID_OPERATION); 91 ieee754_setcx(IEEE754_INVALID_OPERATION);
93 return ieee754sp_xcpt(ieee754sp_indef(), "div", x, y); 92 return ieee754sp_indef();
94 93
95 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): 94 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
96 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): 95 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
97 SETCX(IEEE754_ZERO_DIVIDE); 96 ieee754_setcx(IEEE754_ZERO_DIVIDE);
98 return ieee754sp_xcpt(ieee754sp_inf(xs ^ ys), "div", x, y); 97 return ieee754sp_inf(xs ^ ys);
99 98
100 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): 99 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
101 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): 100 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
@@ -122,35 +121,33 @@ ieee754sp ieee754sp_div(ieee754sp x, ieee754sp y)
122 xm <<= 3; 121 xm <<= 3;
123 ym <<= 3; 122 ym <<= 3;
124 123
125 { 124 /* now the dirty work */
126 /* now the dirty work */
127
128 unsigned rm = 0;
129 int re = xe - ye;
130 unsigned bm;
131
132 for (bm = SP_MBIT(SP_MBITS + 2); bm; bm >>= 1) {
133 if (xm >= ym) {
134 xm -= ym;
135 rm |= bm;
136 if (xm == 0)
137 break;
138 }
139 xm <<= 1;
140 }
141 rm <<= 1;
142 if (xm)
143 rm |= 1; /* have remainder, set sticky */
144 125
145 assert(rm); 126 rm = 0;
127 re = xe - ye;
146 128
147 /* normalise rm to rounding precision ? 129 for (bm = SP_MBIT(SP_FBITS + 2); bm; bm >>= 1) {
148 */ 130 if (xm >= ym) {
149 while ((rm >> (SP_MBITS + 3)) == 0) { 131 xm -= ym;
150 rm <<= 1; 132 rm |= bm;
151 re--; 133 if (xm == 0)
134 break;
152 } 135 }
136 xm <<= 1;
137 }
138
139 rm <<= 1;
140 if (xm)
141 rm |= 1; /* have remainder, set sticky */
153 142
154 SPNORMRET2(xs == ys ? 0 : 1, re, rm, "div", x, y); 143 assert(rm);
144
145 /* normalise rm to rounding precision ?
146 */
147 while ((rm >> (SP_FBITS + 3)) == 0) {
148 rm <<= 1;
149 re--;
155 } 150 }
151
152 return ieee754sp_format(xs == ys ? 0 : 1, re, rm);
156} 153}
diff --git a/arch/mips/math-emu/sp_fdp.c b/arch/mips/math-emu/sp_fdp.c
index e1515aae0166..1b266fb16973 100644
--- a/arch/mips/math-emu/sp_fdp.c
+++ b/arch/mips/math-emu/sp_fdp.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,59 +16,61 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
26
27#include "ieee754sp.h" 22#include "ieee754sp.h"
23#include "ieee754dp.h"
28 24
29ieee754sp ieee754sp_fdp(ieee754dp x) 25union ieee754sp ieee754sp_fdp(union ieee754dp x)
30{ 26{
27 u32 rm;
28
31 COMPXDP; 29 COMPXDP;
32 ieee754sp nan; 30 union ieee754sp nan;
33 31
34 EXPLODEXDP; 32 EXPLODEXDP;
35 33
36 CLEARCX; 34 ieee754_clearcx();
37 35
38 FLUSHXDP; 36 FLUSHXDP;
39 37
40 switch (xc) { 38 switch (xc) {
41 case IEEE754_CLASS_SNAN: 39 case IEEE754_CLASS_SNAN:
42 SETCX(IEEE754_INVALID_OPERATION); 40 ieee754_setcx(IEEE754_INVALID_OPERATION);
43 return ieee754sp_nanxcpt(ieee754sp_indef(), "fdp"); 41 return ieee754sp_nanxcpt(ieee754sp_indef());
42
44 case IEEE754_CLASS_QNAN: 43 case IEEE754_CLASS_QNAN:
45 nan = buildsp(xs, SP_EMAX + 1 + SP_EBIAS, (u32) 44 nan = buildsp(xs, SP_EMAX + 1 + SP_EBIAS, (u32)
46 (xm >> (DP_MBITS - SP_MBITS))); 45 (xm >> (DP_FBITS - SP_FBITS)));
47 if (!ieee754sp_isnan(nan)) 46 if (!ieee754sp_isnan(nan))
48 nan = ieee754sp_indef(); 47 nan = ieee754sp_indef();
49 return ieee754sp_nanxcpt(nan, "fdp", x); 48 return ieee754sp_nanxcpt(nan);
49
50 case IEEE754_CLASS_INF: 50 case IEEE754_CLASS_INF:
51 return ieee754sp_inf(xs); 51 return ieee754sp_inf(xs);
52
52 case IEEE754_CLASS_ZERO: 53 case IEEE754_CLASS_ZERO:
53 return ieee754sp_zero(xs); 54 return ieee754sp_zero(xs);
55
54 case IEEE754_CLASS_DNORM: 56 case IEEE754_CLASS_DNORM:
55 /* can't possibly be sp representable */ 57 /* can't possibly be sp representable */
56 SETCX(IEEE754_UNDERFLOW); 58 ieee754_setcx(IEEE754_UNDERFLOW);
57 SETCX(IEEE754_INEXACT); 59 ieee754_setcx(IEEE754_INEXACT);
58 if ((ieee754_csr.rm == IEEE754_RU && !xs) || 60 if ((ieee754_csr.rm == FPU_CSR_RU && !xs) ||
59 (ieee754_csr.rm == IEEE754_RD && xs)) 61 (ieee754_csr.rm == FPU_CSR_RD && xs))
60 return ieee754sp_xcpt(ieee754sp_mind(xs), "fdp", x); 62 return ieee754sp_mind(xs);
61 return ieee754sp_xcpt(ieee754sp_zero(xs), "fdp", x); 63 return ieee754sp_zero(xs);
64
62 case IEEE754_CLASS_NORM: 65 case IEEE754_CLASS_NORM:
63 break; 66 break;
64 } 67 }
65 68
66 { 69 /*
67 u32 rm; 70 * Convert from DP_FBITS to SP_FBITS+3 with sticky right shift.
68 71 */
69 /* convert from DP_MBITS to SP_MBITS+3 with sticky right shift 72 rm = (xm >> (DP_FBITS - (SP_FBITS + 3))) |
70 */ 73 ((xm << (64 - (DP_FBITS - (SP_FBITS + 3)))) != 0);
71 rm = (xm >> (DP_MBITS - (SP_MBITS + 3))) |
72 ((xm << (64 - (DP_MBITS - (SP_MBITS + 3)))) != 0);
73 74
74 SPNORMRET1(xs, xe, rm, "fdp", x); 75 return ieee754sp_format(xs, xe, rm);
75 }
76} 76}
diff --git a/arch/mips/math-emu/sp_fint.c b/arch/mips/math-emu/sp_fint.c
index 9694d6c016cb..d5d8495b2cc4 100644
--- a/arch/mips/math-emu/sp_fint.c
+++ b/arch/mips/math-emu/sp_fint.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,21 +16,18 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
26
27#include "ieee754sp.h" 22#include "ieee754sp.h"
28 23
29ieee754sp ieee754sp_fint(int x) 24union ieee754sp ieee754sp_fint(int x)
30{ 25{
31 unsigned xm; 26 unsigned xm;
32 int xe; 27 int xe;
33 int xs; 28 int xs;
34 29
35 CLEARCX; 30 ieee754_clearcx();
36 31
37 if (x == 0) 32 if (x == 0)
38 return ieee754sp_zero(0); 33 return ieee754sp_zero(0);
@@ -50,30 +45,21 @@ ieee754sp ieee754sp_fint(int x)
50 } else { 45 } else {
51 xm = x; 46 xm = x;
52 } 47 }
53 xe = SP_MBITS + 3; 48 xe = SP_FBITS + 3;
54 49
55 if (xm >> (SP_MBITS + 1 + 3)) { 50 if (xm >> (SP_FBITS + 1 + 3)) {
56 /* shunt out overflow bits 51 /* shunt out overflow bits
57 */ 52 */
58 while (xm >> (SP_MBITS + 1 + 3)) { 53 while (xm >> (SP_FBITS + 1 + 3)) {
59 SPXSRSX1(); 54 SPXSRSX1();
60 } 55 }
61 } else { 56 } else {
62 /* normalize in grs extended single precision 57 /* normalize in grs extended single precision
63 */ 58 */
64 while ((xm >> (SP_MBITS + 3)) == 0) { 59 while ((xm >> (SP_FBITS + 3)) == 0) {
65 xm <<= 1; 60 xm <<= 1;
66 xe--; 61 xe--;
67 } 62 }
68 } 63 }
69 SPNORMRET1(xs, xe, xm, "fint", x); 64 return ieee754sp_format(xs, xe, xm);
70}
71
72
73ieee754sp ieee754sp_funs(unsigned int u)
74{
75 if ((int) u < 0)
76 return ieee754sp_add(ieee754sp_1e31(),
77 ieee754sp_fint(u & ~(1 << 31)));
78 return ieee754sp_fint(u);
79} 65}
diff --git a/arch/mips/math-emu/sp_flong.c b/arch/mips/math-emu/sp_flong.c
index 16a651f29865..012e30ce7589 100644
--- a/arch/mips/math-emu/sp_flong.c
+++ b/arch/mips/math-emu/sp_flong.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,21 +16,18 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
26
27#include "ieee754sp.h" 22#include "ieee754sp.h"
28 23
29ieee754sp ieee754sp_flong(s64 x) 24union ieee754sp ieee754sp_flong(s64 x)
30{ 25{
31 u64 xm; /* <--- need 64-bit mantissa temp */ 26 u64 xm; /* <--- need 64-bit mantissa temp */
32 int xe; 27 int xe;
33 int xs; 28 int xs;
34 29
35 CLEARCX; 30 ieee754_clearcx();
36 31
37 if (x == 0) 32 if (x == 0)
38 return ieee754sp_zero(0); 33 return ieee754sp_zero(0);
@@ -50,29 +45,20 @@ ieee754sp ieee754sp_flong(s64 x)
50 } else { 45 } else {
51 xm = x; 46 xm = x;
52 } 47 }
53 xe = SP_MBITS + 3; 48 xe = SP_FBITS + 3;
54 49
55 if (xm >> (SP_MBITS + 1 + 3)) { 50 if (xm >> (SP_FBITS + 1 + 3)) {
56 /* shunt out overflow bits 51 /* shunt out overflow bits
57 */ 52 */
58 while (xm >> (SP_MBITS + 1 + 3)) { 53 while (xm >> (SP_FBITS + 1 + 3)) {
59 SPXSRSX1(); 54 SPXSRSX1();
60 } 55 }
61 } else { 56 } else {
62 /* normalize in grs extended single precision */ 57 /* normalize in grs extended single precision */
63 while ((xm >> (SP_MBITS + 3)) == 0) { 58 while ((xm >> (SP_FBITS + 3)) == 0) {
64 xm <<= 1; 59 xm <<= 1;
65 xe--; 60 xe--;
66 } 61 }
67 } 62 }
68 SPNORMRET1(xs, xe, xm, "sp_flong", x); 63 return ieee754sp_format(xs, xe, xm);
69}
70
71
72ieee754sp ieee754sp_fulong(u64 u)
73{
74 if ((s64) u < 0)
75 return ieee754sp_add(ieee754sp_1e63(),
76 ieee754sp_flong(u & ~(1ULL << 63)));
77 return ieee754sp_flong(u);
78} 64}
diff --git a/arch/mips/math-emu/sp_frexp.c b/arch/mips/math-emu/sp_frexp.c
deleted file mode 100644
index 5bc993c30044..000000000000
--- a/arch/mips/math-emu/sp_frexp.c
+++ /dev/null
@@ -1,52 +0,0 @@
1/* IEEE754 floating point arithmetic
2 * single precision
3 */
4/*
5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 * for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
22 *
23 * ########################################################################
24 */
25
26
27#include "ieee754sp.h"
28
29/* close to ieeep754sp_logb
30*/
31ieee754sp ieee754sp_frexp(ieee754sp x, int *eptr)
32{
33 COMPXSP;
34 CLEARCX;
35 EXPLODEXSP;
36
37 switch (xc) {
38 case IEEE754_CLASS_SNAN:
39 case IEEE754_CLASS_QNAN:
40 case IEEE754_CLASS_INF:
41 case IEEE754_CLASS_ZERO:
42 *eptr = 0;
43 return x;
44 case IEEE754_CLASS_DNORM:
45 SPDNORMX;
46 break;
47 case IEEE754_CLASS_NORM:
48 break;
49 }
50 *eptr = xe + 1;
51 return buildsp(xs, -1 + SP_EBIAS, xm & ~SP_HIDDEN_BIT);
52}
diff --git a/arch/mips/math-emu/sp_logb.c b/arch/mips/math-emu/sp_logb.c
deleted file mode 100644
index 9c14e0c75bd2..000000000000
--- a/arch/mips/math-emu/sp_logb.c
+++ /dev/null
@@ -1,53 +0,0 @@
1/* IEEE754 floating point arithmetic
2 * single precision
3 */
4/*
5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 * for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
22 *
23 * ########################################################################
24 */
25
26
27#include "ieee754sp.h"
28
29ieee754sp ieee754sp_logb(ieee754sp x)
30{
31 COMPXSP;
32
33 CLEARCX;
34
35 EXPLODEXSP;
36
37 switch (xc) {
38 case IEEE754_CLASS_SNAN:
39 return ieee754sp_nanxcpt(x, "logb", x);
40 case IEEE754_CLASS_QNAN:
41 return x;
42 case IEEE754_CLASS_INF:
43 return ieee754sp_inf(0);
44 case IEEE754_CLASS_ZERO:
45 return ieee754sp_inf(1);
46 case IEEE754_CLASS_DNORM:
47 SPDNORMX;
48 break;
49 case IEEE754_CLASS_NORM:
50 break;
51 }
52 return ieee754sp_fint(xe);
53}
diff --git a/arch/mips/math-emu/sp_modf.c b/arch/mips/math-emu/sp_modf.c
deleted file mode 100644
index 25a0fbaa0556..000000000000
--- a/arch/mips/math-emu/sp_modf.c
+++ /dev/null
@@ -1,79 +0,0 @@
1/* IEEE754 floating point arithmetic
2 * single precision
3 */
4/*
5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 * for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
22 *
23 * ########################################################################
24 */
25
26
27#include "ieee754sp.h"
28
29/* modf function is always exact for a finite number
30*/
31ieee754sp ieee754sp_modf(ieee754sp x, ieee754sp *ip)
32{
33 COMPXSP;
34
35 CLEARCX;
36
37 EXPLODEXSP;
38
39 switch (xc) {
40 case IEEE754_CLASS_SNAN:
41 case IEEE754_CLASS_QNAN:
42 case IEEE754_CLASS_INF:
43 case IEEE754_CLASS_ZERO:
44 *ip = x;
45 return x;
46 case IEEE754_CLASS_DNORM:
47 /* far to small */
48 *ip = ieee754sp_zero(xs);
49 return x;
50 case IEEE754_CLASS_NORM:
51 break;
52 }
53 if (xe < 0) {
54 *ip = ieee754sp_zero(xs);
55 return x;
56 }
57 if (xe >= SP_MBITS) {
58 *ip = x;
59 return ieee754sp_zero(xs);
60 }
61 /* generate ipart mantissa by clearing bottom bits
62 */
63 *ip = buildsp(xs, xe + SP_EBIAS,
64 ((xm >> (SP_MBITS - xe)) << (SP_MBITS - xe)) &
65 ~SP_HIDDEN_BIT);
66
67 /* generate fpart mantissa by clearing top bits
68 * and normalizing (must be able to normalize)
69 */
70 xm = (xm << (32 - (SP_MBITS - xe))) >> (32 - (SP_MBITS - xe));
71 if (xm == 0)
72 return ieee754sp_zero(xs);
73
74 while ((xm >> SP_MBITS) == 0) {
75 xm <<= 1;
76 xe--;
77 }
78 return buildsp(xs, xe + SP_EBIAS, xm & ~SP_HIDDEN_BIT);
79}
diff --git a/arch/mips/math-emu/sp_mul.c b/arch/mips/math-emu/sp_mul.c
index fa4675cf2aad..890c13a2965e 100644
--- a/arch/mips/math-emu/sp_mul.c
+++ b/arch/mips/math-emu/sp_mul.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,23 +16,32 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
26
27#include "ieee754sp.h" 22#include "ieee754sp.h"
28 23
29ieee754sp ieee754sp_mul(ieee754sp x, ieee754sp y) 24union ieee754sp ieee754sp_mul(union ieee754sp x, union ieee754sp y)
30{ 25{
26 int re;
27 int rs;
28 unsigned rm;
29 unsigned short lxm;
30 unsigned short hxm;
31 unsigned short lym;
32 unsigned short hym;
33 unsigned lrm;
34 unsigned hrm;
35 unsigned t;
36 unsigned at;
37
31 COMPXSP; 38 COMPXSP;
32 COMPYSP; 39 COMPYSP;
33 40
34 EXPLODEXSP; 41 EXPLODEXSP;
35 EXPLODEYSP; 42 EXPLODEYSP;
36 43
37 CLEARCX; 44 ieee754_clearcx();
38 45
39 FLUSHXSP; 46 FLUSHXSP;
40 FLUSHYSP; 47 FLUSHYSP;
@@ -51,8 +58,8 @@ ieee754sp ieee754sp_mul(ieee754sp x, ieee754sp y)
51 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): 58 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
52 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): 59 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
53 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): 60 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
54 SETCX(IEEE754_INVALID_OPERATION); 61 ieee754_setcx(IEEE754_INVALID_OPERATION);
55 return ieee754sp_nanxcpt(ieee754sp_indef(), "mul", x, y); 62 return ieee754sp_nanxcpt(ieee754sp_indef());
56 63
57 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): 64 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
58 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): 65 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
@@ -68,12 +75,13 @@ ieee754sp ieee754sp_mul(ieee754sp x, ieee754sp y)
68 return x; 75 return x;
69 76
70 77
71 /* Infinity handling */ 78 /*
72 79 * Infinity handling
80 */
73 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): 81 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
74 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): 82 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
75 SETCX(IEEE754_INVALID_OPERATION); 83 ieee754_setcx(IEEE754_INVALID_OPERATION);
76 return ieee754sp_xcpt(ieee754sp_indef(), "mul", x, y); 84 return ieee754sp_indef();
77 85
78 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): 86 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
79 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): 87 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
@@ -108,63 +116,50 @@ ieee754sp ieee754sp_mul(ieee754sp x, ieee754sp y)
108 assert(xm & SP_HIDDEN_BIT); 116 assert(xm & SP_HIDDEN_BIT);
109 assert(ym & SP_HIDDEN_BIT); 117 assert(ym & SP_HIDDEN_BIT);
110 118
111 { 119 re = xe + ye;
112 int re = xe + ye; 120 rs = xs ^ ys;
113 int rs = xs ^ ys; 121
114 unsigned rm; 122 /* shunt to top of word */
115 123 xm <<= 32 - (SP_FBITS + 1);
116 /* shunt to top of word */ 124 ym <<= 32 - (SP_FBITS + 1);
117 xm <<= 32 - (SP_MBITS + 1); 125
118 ym <<= 32 - (SP_MBITS + 1); 126 /*
119 127 * Multiply 32 bits xm, ym to give high 32 bits rm with stickness.
120 /* multiply 32bits xm,ym to give high 32bits rm with stickness 128 */
121 */ 129 lxm = xm & 0xffff;
122 { 130 hxm = xm >> 16;
123 unsigned short lxm = xm & 0xffff; 131 lym = ym & 0xffff;
124 unsigned short hxm = xm >> 16; 132 hym = ym >> 16;
125 unsigned short lym = ym & 0xffff; 133
126 unsigned short hym = ym >> 16; 134 lrm = lxm * lym; /* 16 * 16 => 32 */
127 unsigned lrm; 135 hrm = hxm * hym; /* 16 * 16 => 32 */
128 unsigned hrm; 136
129 137 t = lxm * hym; /* 16 * 16 => 32 */
130 lrm = lxm * lym; /* 16 * 16 => 32 */ 138 at = lrm + (t << 16);
131 hrm = hxm * hym; /* 16 * 16 => 32 */ 139 hrm += at < lrm;
132 140 lrm = at;
133 { 141 hrm = hrm + (t >> 16);
134 unsigned t = lxm * hym; /* 16 * 16 => 32 */ 142
135 { 143 t = hxm * lym; /* 16 * 16 => 32 */
136 unsigned at = lrm + (t << 16); 144 at = lrm + (t << 16);
137 hrm += at < lrm; 145 hrm += at < lrm;
138 lrm = at; 146 lrm = at;
139 } 147 hrm = hrm + (t >> 16);
140 hrm = hrm + (t >> 16); 148
141 } 149 rm = hrm | (lrm != 0);
142 150
143 { 151 /*
144 unsigned t = hxm * lym; /* 16 * 16 => 32 */ 152 * Sticky shift down to normal rounding precision.
145 { 153 */
146 unsigned at = lrm + (t << 16); 154 if ((int) rm < 0) {
147 hrm += at < lrm; 155 rm = (rm >> (32 - (SP_FBITS + 1 + 3))) |
148 lrm = at; 156 ((rm << (SP_FBITS + 1 + 3)) != 0);
149 } 157 re++;
150 hrm = hrm + (t >> 16); 158 } else {
151 } 159 rm = (rm >> (32 - (SP_FBITS + 1 + 3 + 1))) |
152 rm = hrm | (lrm != 0); 160 ((rm << (SP_FBITS + 1 + 3 + 1)) != 0);
153 }
154
155 /*
156 * sticky shift down to normal rounding precision
157 */
158 if ((int) rm < 0) {
159 rm = (rm >> (32 - (SP_MBITS + 1 + 3))) |
160 ((rm << (SP_MBITS + 1 + 3)) != 0);
161 re++;
162 } else {
163 rm = (rm >> (32 - (SP_MBITS + 1 + 3 + 1))) |
164 ((rm << (SP_MBITS + 1 + 3 + 1)) != 0);
165 }
166 assert(rm & (SP_HIDDEN_BIT << 3));
167
168 SPNORMRET2(rs, re, rm, "mul", x, y);
169 } 161 }
162 assert(rm & (SP_HIDDEN_BIT << 3));
163
164 return ieee754sp_format(rs, re, rm);
170} 165}
diff --git a/arch/mips/math-emu/sp_scalb.c b/arch/mips/math-emu/sp_scalb.c
deleted file mode 100644
index dd76196984c8..000000000000
--- a/arch/mips/math-emu/sp_scalb.c
+++ /dev/null
@@ -1,57 +0,0 @@
1/* IEEE754 floating point arithmetic
2 * single precision
3 */
4/*
5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 * for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
22 *
23 * ########################################################################
24 */
25
26
27#include "ieee754sp.h"
28
29ieee754sp ieee754sp_scalb(ieee754sp x, int n)
30{
31 COMPXSP;
32
33 CLEARCX;
34
35 EXPLODEXSP;
36
37 switch (xc) {
38 case IEEE754_CLASS_SNAN:
39 return ieee754sp_nanxcpt(x, "scalb", x, n);
40 case IEEE754_CLASS_QNAN:
41 case IEEE754_CLASS_INF:
42 case IEEE754_CLASS_ZERO:
43 return x;
44 case IEEE754_CLASS_DNORM:
45 SPDNORMX;
46 break;
47 case IEEE754_CLASS_NORM:
48 break;
49 }
50 SPNORMRET2(xs, xe + n, xm << 3, "scalb", x, n);
51}
52
53
54ieee754sp ieee754sp_ldexp(ieee754sp x, int n)
55{
56 return ieee754sp_scalb(x, n);
57}
diff --git a/arch/mips/math-emu/sp_simple.c b/arch/mips/math-emu/sp_simple.c
index ae4fcfafd853..f1ffaa9a17e0 100644
--- a/arch/mips/math-emu/sp_simple.c
+++ b/arch/mips/math-emu/sp_simple.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,33 +16,17 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
26
27#include "ieee754sp.h" 22#include "ieee754sp.h"
28 23
29int ieee754sp_finite(ieee754sp x) 24union ieee754sp ieee754sp_neg(union ieee754sp x)
30{
31 return SPBEXP(x) != SP_EMAX + 1 + SP_EBIAS;
32}
33
34ieee754sp ieee754sp_copysign(ieee754sp x, ieee754sp y)
35{
36 CLEARCX;
37 SPSIGN(x) = SPSIGN(y);
38 return x;
39}
40
41
42ieee754sp ieee754sp_neg(ieee754sp x)
43{ 25{
44 COMPXSP; 26 COMPXSP;
45 27
46 EXPLODEXSP; 28 EXPLODEXSP;
47 CLEARCX; 29 ieee754_clearcx();
48 FLUSHXSP; 30 FLUSHXSP;
49 31
50 /* 32 /*
@@ -55,30 +37,29 @@ ieee754sp ieee754sp_neg(ieee754sp x)
55 SPSIGN(x) ^= 1; 37 SPSIGN(x) ^= 1;
56 38
57 if (xc == IEEE754_CLASS_SNAN) { 39 if (xc == IEEE754_CLASS_SNAN) {
58 ieee754sp y = ieee754sp_indef(); 40 union ieee754sp y = ieee754sp_indef();
59 SETCX(IEEE754_INVALID_OPERATION); 41 ieee754_setcx(IEEE754_INVALID_OPERATION);
60 SPSIGN(y) = SPSIGN(x); 42 SPSIGN(y) = SPSIGN(x);
61 return ieee754sp_nanxcpt(y, "neg"); 43 return ieee754sp_nanxcpt(y);
62 } 44 }
63 45
64 return x; 46 return x;
65} 47}
66 48
67 49union ieee754sp ieee754sp_abs(union ieee754sp x)
68ieee754sp ieee754sp_abs(ieee754sp x)
69{ 50{
70 COMPXSP; 51 COMPXSP;
71 52
72 EXPLODEXSP; 53 EXPLODEXSP;
73 CLEARCX; 54 ieee754_clearcx();
74 FLUSHXSP; 55 FLUSHXSP;
75 56
76 /* Clear sign ALWAYS, irrespective of NaN */ 57 /* Clear sign ALWAYS, irrespective of NaN */
77 SPSIGN(x) = 0; 58 SPSIGN(x) = 0;
78 59
79 if (xc == IEEE754_CLASS_SNAN) { 60 if (xc == IEEE754_CLASS_SNAN) {
80 SETCX(IEEE754_INVALID_OPERATION); 61 ieee754_setcx(IEEE754_INVALID_OPERATION);
81 return ieee754sp_nanxcpt(ieee754sp_indef(), "abs"); 62 return ieee754sp_nanxcpt(ieee754sp_indef());
82 } 63 }
83 64
84 return x; 65 return x;
diff --git a/arch/mips/math-emu/sp_sqrt.c b/arch/mips/math-emu/sp_sqrt.c
index fed20175f5fb..b7c098a86f95 100644
--- a/arch/mips/math-emu/sp_sqrt.c
+++ b/arch/mips/math-emu/sp_sqrt.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,15 +16,12 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
26
27#include "ieee754sp.h" 22#include "ieee754sp.h"
28 23
29ieee754sp ieee754sp_sqrt(ieee754sp x) 24union ieee754sp ieee754sp_sqrt(union ieee754sp x)
30{ 25{
31 int ix, s, q, m, t, i; 26 int ix, s, q, m, t, i;
32 unsigned int r; 27 unsigned int r;
@@ -35,34 +30,38 @@ ieee754sp ieee754sp_sqrt(ieee754sp x)
35 /* take care of Inf and NaN */ 30 /* take care of Inf and NaN */
36 31
37 EXPLODEXSP; 32 EXPLODEXSP;
38 CLEARCX; 33 ieee754_clearcx();
39 FLUSHXSP; 34 FLUSHXSP;
40 35
41 /* x == INF or NAN? */ 36 /* x == INF or NAN? */
42 switch (xc) { 37 switch (xc) {
43 case IEEE754_CLASS_QNAN: 38 case IEEE754_CLASS_QNAN:
44 /* sqrt(Nan) = Nan */ 39 /* sqrt(Nan) = Nan */
45 return ieee754sp_nanxcpt(x, "sqrt"); 40 return ieee754sp_nanxcpt(x);
41
46 case IEEE754_CLASS_SNAN: 42 case IEEE754_CLASS_SNAN:
47 SETCX(IEEE754_INVALID_OPERATION); 43 ieee754_setcx(IEEE754_INVALID_OPERATION);
48 return ieee754sp_nanxcpt(ieee754sp_indef(), "sqrt"); 44 return ieee754sp_nanxcpt(ieee754sp_indef());
45
49 case IEEE754_CLASS_ZERO: 46 case IEEE754_CLASS_ZERO:
50 /* sqrt(0) = 0 */ 47 /* sqrt(0) = 0 */
51 return x; 48 return x;
49
52 case IEEE754_CLASS_INF: 50 case IEEE754_CLASS_INF:
53 if (xs) { 51 if (xs) {
54 /* sqrt(-Inf) = Nan */ 52 /* sqrt(-Inf) = Nan */
55 SETCX(IEEE754_INVALID_OPERATION); 53 ieee754_setcx(IEEE754_INVALID_OPERATION);
56 return ieee754sp_nanxcpt(ieee754sp_indef(), "sqrt"); 54 return ieee754sp_nanxcpt(ieee754sp_indef());
57 } 55 }
58 /* sqrt(+Inf) = Inf */ 56 /* sqrt(+Inf) = Inf */
59 return x; 57 return x;
58
60 case IEEE754_CLASS_DNORM: 59 case IEEE754_CLASS_DNORM:
61 case IEEE754_CLASS_NORM: 60 case IEEE754_CLASS_NORM:
62 if (xs) { 61 if (xs) {
63 /* sqrt(-x) = Nan */ 62 /* sqrt(-x) = Nan */
64 SETCX(IEEE754_INVALID_OPERATION); 63 ieee754_setcx(IEEE754_INVALID_OPERATION);
65 return ieee754sp_nanxcpt(ieee754sp_indef(), "sqrt"); 64 return ieee754sp_nanxcpt(ieee754sp_indef());
66 } 65 }
67 break; 66 break;
68 } 67 }
@@ -99,12 +98,12 @@ ieee754sp ieee754sp_sqrt(ieee754sp x)
99 } 98 }
100 99
101 if (ix != 0) { 100 if (ix != 0) {
102 SETCX(IEEE754_INEXACT); 101 ieee754_setcx(IEEE754_INEXACT);
103 switch (ieee754_csr.rm) { 102 switch (ieee754_csr.rm) {
104 case IEEE754_RP: 103 case FPU_CSR_RU:
105 q += 2; 104 q += 2;
106 break; 105 break;
107 case IEEE754_RN: 106 case FPU_CSR_RN:
108 q += (q & 1); 107 q += (q & 1);
109 break; 108 break;
110 } 109 }
diff --git a/arch/mips/math-emu/sp_sub.c b/arch/mips/math-emu/sp_sub.c
index e595c6f3d0bb..8592e49032b8 100644
--- a/arch/mips/math-emu/sp_sub.c
+++ b/arch/mips/math-emu/sp_sub.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,23 +16,22 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
26
27#include "ieee754sp.h" 22#include "ieee754sp.h"
28 23
29ieee754sp ieee754sp_sub(ieee754sp x, ieee754sp y) 24union ieee754sp ieee754sp_sub(union ieee754sp x, union ieee754sp y)
30{ 25{
26 int s;
27
31 COMPXSP; 28 COMPXSP;
32 COMPYSP; 29 COMPYSP;
33 30
34 EXPLODEXSP; 31 EXPLODEXSP;
35 EXPLODEYSP; 32 EXPLODEYSP;
36 33
37 CLEARCX; 34 ieee754_clearcx();
38 35
39 FLUSHXSP; 36 FLUSHXSP;
40 FLUSHYSP; 37 FLUSHYSP;
@@ -51,8 +48,8 @@ ieee754sp ieee754sp_sub(ieee754sp x, ieee754sp y)
51 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): 48 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
52 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): 49 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
53 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): 50 case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
54 SETCX(IEEE754_INVALID_OPERATION); 51 ieee754_setcx(IEEE754_INVALID_OPERATION);
55 return ieee754sp_nanxcpt(ieee754sp_indef(), "sub", x, y); 52 return ieee754sp_nanxcpt(ieee754sp_indef());
56 53
57 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): 54 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
58 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): 55 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
@@ -68,14 +65,14 @@ ieee754sp ieee754sp_sub(ieee754sp x, ieee754sp y)
68 return x; 65 return x;
69 66
70 67
71 /* Infinity handling 68 /*
72 */ 69 * Infinity handling
73 70 */
74 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): 71 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
75 if (xs != ys) 72 if (xs != ys)
76 return x; 73 return x;
77 SETCX(IEEE754_INVALID_OPERATION); 74 ieee754_setcx(IEEE754_INVALID_OPERATION);
78 return ieee754sp_xcpt(ieee754sp_indef(), "sub", x, y); 75 return ieee754sp_indef();
79 76
80 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): 77 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
81 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): 78 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
@@ -87,15 +84,14 @@ ieee754sp ieee754sp_sub(ieee754sp x, ieee754sp y)
87 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): 84 case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
88 return x; 85 return x;
89 86
90 /* Zero handling 87 /*
91 */ 88 * Zero handling
92 89 */
93 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): 90 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
94 if (xs != ys) 91 if (xs != ys)
95 return x; 92 return x;
96 else 93 else
97 return ieee754sp_zero(ieee754_csr.rm == 94 return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);
98 IEEE754_RD);
99 95
100 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): 96 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
101 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): 97 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
@@ -104,7 +100,7 @@ ieee754sp ieee754sp_sub(ieee754sp x, ieee754sp y)
104 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): 100 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
105 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): 101 case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
106 /* quick fix up */ 102 /* quick fix up */
107 DPSIGN(y) ^= 1; 103 SPSIGN(y) ^= 1;
108 return y; 104 return y;
109 105
110 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): 106 case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
@@ -133,14 +129,16 @@ ieee754sp ieee754sp_sub(ieee754sp x, ieee754sp y)
133 ym <<= 3; 129 ym <<= 3;
134 130
135 if (xe > ye) { 131 if (xe > ye) {
136 /* have to shift y fraction right to align 132 /*
133 * have to shift y fraction right to align
137 */ 134 */
138 int s = xe - ye; 135 s = xe - ye;
139 SPXSRSYn(s); 136 SPXSRSYn(s);
140 } else if (ye > xe) { 137 } else if (ye > xe) {
141 /* have to shift x fraction right to align 138 /*
139 * have to shift x fraction right to align
142 */ 140 */
143 int s = ye - xe; 141 s = ye - xe;
144 SPXSRSXn(s); 142 SPXSRSXn(s);
145 } 143 }
146 assert(xe == ye); 144 assert(xe == ye);
@@ -153,7 +151,7 @@ ieee754sp ieee754sp_sub(ieee754sp x, ieee754sp y)
153 xe = xe; 151 xe = xe;
154 xs = xs; 152 xs = xs;
155 153
156 if (xm >> (SP_MBITS + 1 + 3)) { /* carry out */ 154 if (xm >> (SP_FBITS + 1 + 3)) { /* carry out */
157 SPXSRSX1(); /* shift preserving sticky */ 155 SPXSRSX1(); /* shift preserving sticky */
158 } 156 }
159 } else { 157 } else {
@@ -167,17 +165,18 @@ ieee754sp ieee754sp_sub(ieee754sp x, ieee754sp y)
167 xs = ys; 165 xs = ys;
168 } 166 }
169 if (xm == 0) { 167 if (xm == 0) {
170 if (ieee754_csr.rm == IEEE754_RD) 168 if (ieee754_csr.rm == FPU_CSR_RD)
171 return ieee754sp_zero(1); /* round negative inf. => sign = -1 */ 169 return ieee754sp_zero(1); /* round negative inf. => sign = -1 */
172 else 170 else
173 return ieee754sp_zero(0); /* other round modes => sign = 1 */ 171 return ieee754sp_zero(0); /* other round modes => sign = 1 */
174 } 172 }
175 /* normalize to rounding precision 173 /* normalize to rounding precision
176 */ 174 */
177 while ((xm >> (SP_MBITS + 3)) == 0) { 175 while ((xm >> (SP_FBITS + 3)) == 0) {
178 xm <<= 1; 176 xm <<= 1;
179 xe--; 177 xe--;
180 } 178 }
181 } 179 }
182 SPNORMRET2(xs, xe, xm, "sub", x, y); 180
181 return ieee754sp_format(xs, xe, xm);
183} 182}
diff --git a/arch/mips/math-emu/sp_tint.c b/arch/mips/math-emu/sp_tint.c
index 0fe9acc7716e..091299a31798 100644
--- a/arch/mips/math-emu/sp_tint.c
+++ b/arch/mips/math-emu/sp_tint.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,20 +16,21 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
26
27#include <linux/kernel.h>
28#include "ieee754sp.h" 22#include "ieee754sp.h"
29 23
30int ieee754sp_tint(ieee754sp x) 24int ieee754sp_tint(union ieee754sp x)
31{ 25{
26 u32 residue;
27 int round;
28 int sticky;
29 int odd;
30
32 COMPXSP; 31 COMPXSP;
33 32
34 CLEARCX; 33 ieee754_clearcx();
35 34
36 EXPLODEXSP; 35 EXPLODEXSP;
37 FLUSHXSP; 36 FLUSHXSP;
@@ -40,10 +39,12 @@ int ieee754sp_tint(ieee754sp x)
40 case IEEE754_CLASS_SNAN: 39 case IEEE754_CLASS_SNAN:
41 case IEEE754_CLASS_QNAN: 40 case IEEE754_CLASS_QNAN:
42 case IEEE754_CLASS_INF: 41 case IEEE754_CLASS_INF:
43 SETCX(IEEE754_INVALID_OPERATION); 42 ieee754_setcx(IEEE754_INVALID_OPERATION);
44 return ieee754si_xcpt(ieee754si_indef(), "sp_tint", x); 43 return ieee754si_indef();
44
45 case IEEE754_CLASS_ZERO: 45 case IEEE754_CLASS_ZERO:
46 return 0; 46 return 0;
47
47 case IEEE754_CLASS_DNORM: 48 case IEEE754_CLASS_DNORM:
48 case IEEE754_CLASS_NORM: 49 case IEEE754_CLASS_NORM:
49 break; 50 break;
@@ -54,18 +55,13 @@ int ieee754sp_tint(ieee754sp x)
54 return -0x80000000; 55 return -0x80000000;
55 /* Set invalid. We will only use overflow for floating 56 /* Set invalid. We will only use overflow for floating
56 point overflow */ 57 point overflow */
57 SETCX(IEEE754_INVALID_OPERATION); 58 ieee754_setcx(IEEE754_INVALID_OPERATION);
58 return ieee754si_xcpt(ieee754si_indef(), "sp_tint", x); 59 return ieee754si_indef();
59 } 60 }
60 /* oh gawd */ 61 /* oh gawd */
61 if (xe > SP_MBITS) { 62 if (xe > SP_FBITS) {
62 xm <<= xe - SP_MBITS; 63 xm <<= xe - SP_FBITS;
63 } else { 64 } else {
64 u32 residue;
65 int round;
66 int sticky;
67 int odd;
68
69 if (xe < -1) { 65 if (xe < -1) {
70 residue = xm; 66 residue = xm;
71 round = 0; 67 round = 0;
@@ -76,51 +72,38 @@ int ieee754sp_tint(ieee754sp x)
76 * so we do it in two steps. Be aware that xe 72 * so we do it in two steps. Be aware that xe
77 * may be -1 */ 73 * may be -1 */
78 residue = xm << (xe + 1); 74 residue = xm << (xe + 1);
79 residue <<= 31 - SP_MBITS; 75 residue <<= 31 - SP_FBITS;
80 round = (residue >> 31) != 0; 76 round = (residue >> 31) != 0;
81 sticky = (residue << 1) != 0; 77 sticky = (residue << 1) != 0;
82 xm >>= SP_MBITS - xe; 78 xm >>= SP_FBITS - xe;
83 } 79 }
84 odd = (xm & 0x1) != 0x0; 80 odd = (xm & 0x1) != 0x0;
85 switch (ieee754_csr.rm) { 81 switch (ieee754_csr.rm) {
86 case IEEE754_RN: 82 case FPU_CSR_RN:
87 if (round && (sticky || odd)) 83 if (round && (sticky || odd))
88 xm++; 84 xm++;
89 break; 85 break;
90 case IEEE754_RZ: 86 case FPU_CSR_RZ:
91 break; 87 break;
92 case IEEE754_RU: /* toward +Infinity */ 88 case FPU_CSR_RU: /* toward +Infinity */
93 if ((round || sticky) && !xs) 89 if ((round || sticky) && !xs)
94 xm++; 90 xm++;
95 break; 91 break;
96 case IEEE754_RD: /* toward -Infinity */ 92 case FPU_CSR_RD: /* toward -Infinity */
97 if ((round || sticky) && xs) 93 if ((round || sticky) && xs)
98 xm++; 94 xm++;
99 break; 95 break;
100 } 96 }
101 if ((xm >> 31) != 0) { 97 if ((xm >> 31) != 0) {
102 /* This can happen after rounding */ 98 /* This can happen after rounding */
103 SETCX(IEEE754_INVALID_OPERATION); 99 ieee754_setcx(IEEE754_INVALID_OPERATION);
104 return ieee754si_xcpt(ieee754si_indef(), "sp_tint", x); 100 return ieee754si_indef();
105 } 101 }
106 if (round || sticky) 102 if (round || sticky)
107 SETCX(IEEE754_INEXACT); 103 ieee754_setcx(IEEE754_INEXACT);
108 } 104 }
109 if (xs) 105 if (xs)
110 return -xm; 106 return -xm;
111 else 107 else
112 return xm; 108 return xm;
113} 109}
114
115
116unsigned int ieee754sp_tuns(ieee754sp x)
117{
118 ieee754sp hb = ieee754sp_1e31();
119
120 /* what if x < 0 ?? */
121 if (ieee754sp_lt(x, hb))
122 return (unsigned) ieee754sp_tint(x);
123
124 return (unsigned) ieee754sp_tint(ieee754sp_sub(x, hb)) |
125 ((unsigned) 1 << 31);
126}
diff --git a/arch/mips/math-emu/sp_tlong.c b/arch/mips/math-emu/sp_tlong.c
index d0ca6e22be29..9f3c742c1cea 100644
--- a/arch/mips/math-emu/sp_tlong.c
+++ b/arch/mips/math-emu/sp_tlong.c
@@ -5,8 +5,6 @@
5 * MIPS floating point support 5 * MIPS floating point support
6 * Copyright (C) 1994-2000 Algorithmics Ltd. 6 * Copyright (C) 1994-2000 Algorithmics Ltd.
7 * 7 *
8 * ########################################################################
9 *
10 * This program is free software; you can distribute it and/or modify it 8 * This program is free software; you can distribute it and/or modify it
11 * under the terms of the GNU General Public License (Version 2) as 9 * under the terms of the GNU General Public License (Version 2) as
12 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
@@ -18,19 +16,22 @@
18 * 16 *
19 * You should have received a copy of the GNU General Public License along 17 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
22 *
23 * ########################################################################
24 */ 20 */
25 21
26
27#include "ieee754sp.h" 22#include "ieee754sp.h"
23#include "ieee754dp.h"
28 24
29s64 ieee754sp_tlong(ieee754sp x) 25s64 ieee754sp_tlong(union ieee754sp x)
30{ 26{
27 u32 residue;
28 int round;
29 int sticky;
30 int odd;
31
31 COMPXDP; /* <-- need 64-bit mantissa tmp */ 32 COMPXDP; /* <-- need 64-bit mantissa tmp */
32 33
33 CLEARCX; 34 ieee754_clearcx();
34 35
35 EXPLODEXSP; 36 EXPLODEXSP;
36 FLUSHXSP; 37 FLUSHXSP;
@@ -39,10 +40,12 @@ s64 ieee754sp_tlong(ieee754sp x)
39 case IEEE754_CLASS_SNAN: 40 case IEEE754_CLASS_SNAN:
40 case IEEE754_CLASS_QNAN: 41 case IEEE754_CLASS_QNAN:
41 case IEEE754_CLASS_INF: 42 case IEEE754_CLASS_INF:
42 SETCX(IEEE754_INVALID_OPERATION); 43 ieee754_setcx(IEEE754_INVALID_OPERATION);
43 return ieee754di_xcpt(ieee754di_indef(), "sp_tlong", x); 44 return ieee754di_indef();
45
44 case IEEE754_CLASS_ZERO: 46 case IEEE754_CLASS_ZERO:
45 return 0; 47 return 0;
48
46 case IEEE754_CLASS_DNORM: 49 case IEEE754_CLASS_DNORM:
47 case IEEE754_CLASS_NORM: 50 case IEEE754_CLASS_NORM:
48 break; 51 break;
@@ -53,69 +56,51 @@ s64 ieee754sp_tlong(ieee754sp x)
53 return -0x8000000000000000LL; 56 return -0x8000000000000000LL;
54 /* Set invalid. We will only use overflow for floating 57 /* Set invalid. We will only use overflow for floating
55 point overflow */ 58 point overflow */
56 SETCX(IEEE754_INVALID_OPERATION); 59 ieee754_setcx(IEEE754_INVALID_OPERATION);
57 return ieee754di_xcpt(ieee754di_indef(), "sp_tlong", x); 60 return ieee754di_indef();
58 } 61 }
59 /* oh gawd */ 62 /* oh gawd */
60 if (xe > SP_MBITS) { 63 if (xe > SP_FBITS) {
61 xm <<= xe - SP_MBITS; 64 xm <<= xe - SP_FBITS;
62 } else if (xe < SP_MBITS) { 65 } else if (xe < SP_FBITS) {
63 u32 residue;
64 int round;
65 int sticky;
66 int odd;
67
68 if (xe < -1) { 66 if (xe < -1) {
69 residue = xm; 67 residue = xm;
70 round = 0; 68 round = 0;
71 sticky = residue != 0; 69 sticky = residue != 0;
72 xm = 0; 70 xm = 0;
73 } else { 71 } else {
74 residue = xm << (32 - SP_MBITS + xe); 72 residue = xm << (32 - SP_FBITS + xe);
75 round = (residue >> 31) != 0; 73 round = (residue >> 31) != 0;
76 sticky = (residue << 1) != 0; 74 sticky = (residue << 1) != 0;
77 xm >>= SP_MBITS - xe; 75 xm >>= SP_FBITS - xe;
78 } 76 }
79 odd = (xm & 0x1) != 0x0; 77 odd = (xm & 0x1) != 0x0;
80 switch (ieee754_csr.rm) { 78 switch (ieee754_csr.rm) {
81 case IEEE754_RN: 79 case FPU_CSR_RN:
82 if (round && (sticky || odd)) 80 if (round && (sticky || odd))
83 xm++; 81 xm++;
84 break; 82 break;
85 case IEEE754_RZ: 83 case FPU_CSR_RZ:
86 break; 84 break;
87 case IEEE754_RU: /* toward +Infinity */ 85 case FPU_CSR_RU: /* toward +Infinity */
88 if ((round || sticky) && !xs) 86 if ((round || sticky) && !xs)
89 xm++; 87 xm++;
90 break; 88 break;
91 case IEEE754_RD: /* toward -Infinity */ 89 case FPU_CSR_RD: /* toward -Infinity */
92 if ((round || sticky) && xs) 90 if ((round || sticky) && xs)
93 xm++; 91 xm++;
94 break; 92 break;
95 } 93 }
96 if ((xm >> 63) != 0) { 94 if ((xm >> 63) != 0) {
97 /* This can happen after rounding */ 95 /* This can happen after rounding */
98 SETCX(IEEE754_INVALID_OPERATION); 96 ieee754_setcx(IEEE754_INVALID_OPERATION);
99 return ieee754di_xcpt(ieee754di_indef(), "sp_tlong", x); 97 return ieee754di_indef();
100 } 98 }
101 if (round || sticky) 99 if (round || sticky)
102 SETCX(IEEE754_INEXACT); 100 ieee754_setcx(IEEE754_INEXACT);
103 } 101 }
104 if (xs) 102 if (xs)
105 return -xm; 103 return -xm;
106 else 104 else
107 return xm; 105 return xm;
108} 106}
109
110
111u64 ieee754sp_tulong(ieee754sp x)
112{
113 ieee754sp hb = ieee754sp_1e63();
114
115 /* what if x < 0 ?? */
116 if (ieee754sp_lt(x, hb))
117 return (u64) ieee754sp_tlong(x);
118
119 return (u64) ieee754sp_tlong(ieee754sp_sub(x, hb)) |
120 (1ULL << 63);
121}
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 1c74a6ad072a..f2e8302fa70f 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -7,6 +7,7 @@
7 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org) 7 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
9 */ 9 */
10#include <linux/cpu_pm.h>
10#include <linux/hardirq.h> 11#include <linux/hardirq.h>
11#include <linux/init.h> 12#include <linux/init.h>
12#include <linux/highmem.h> 13#include <linux/highmem.h>
@@ -50,7 +51,7 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
50{ 51{
51 preempt_disable(); 52 preempt_disable();
52 53
53#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) 54#ifndef CONFIG_MIPS_MT_SMP
54 smp_call_function(func, info, 1); 55 smp_call_function(func, info, 1);
55#endif 56#endif
56 func(info); 57 func(info);
@@ -105,22 +106,37 @@ static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
105 106
106static inline void r4k_blast_dcache_page_dc64(unsigned long addr) 107static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
107{ 108{
108 R4600_HIT_CACHEOP_WAR_IMPL;
109 blast_dcache64_page(addr); 109 blast_dcache64_page(addr);
110} 110}
111 111
112static inline void r4k_blast_dcache_page_dc128(unsigned long addr)
113{
114 blast_dcache128_page(addr);
115}
116
112static void r4k_blast_dcache_page_setup(void) 117static void r4k_blast_dcache_page_setup(void)
113{ 118{
114 unsigned long dc_lsize = cpu_dcache_line_size(); 119 unsigned long dc_lsize = cpu_dcache_line_size();
115 120
116 if (dc_lsize == 0) 121 switch (dc_lsize) {
122 case 0:
117 r4k_blast_dcache_page = (void *)cache_noop; 123 r4k_blast_dcache_page = (void *)cache_noop;
118 else if (dc_lsize == 16) 124 break;
125 case 16:
119 r4k_blast_dcache_page = blast_dcache16_page; 126 r4k_blast_dcache_page = blast_dcache16_page;
120 else if (dc_lsize == 32) 127 break;
128 case 32:
121 r4k_blast_dcache_page = r4k_blast_dcache_page_dc32; 129 r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
122 else if (dc_lsize == 64) 130 break;
131 case 64:
123 r4k_blast_dcache_page = r4k_blast_dcache_page_dc64; 132 r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
133 break;
134 case 128:
135 r4k_blast_dcache_page = r4k_blast_dcache_page_dc128;
136 break;
137 default:
138 break;
139 }
124} 140}
125 141
126#ifndef CONFIG_EVA 142#ifndef CONFIG_EVA
@@ -159,6 +175,8 @@ static void r4k_blast_dcache_page_indexed_setup(void)
159 r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed; 175 r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
160 else if (dc_lsize == 64) 176 else if (dc_lsize == 64)
161 r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed; 177 r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
178 else if (dc_lsize == 128)
179 r4k_blast_dcache_page_indexed = blast_dcache128_page_indexed;
162} 180}
163 181
164void (* r4k_blast_dcache)(void); 182void (* r4k_blast_dcache)(void);
@@ -176,6 +194,8 @@ static void r4k_blast_dcache_setup(void)
176 r4k_blast_dcache = blast_dcache32; 194 r4k_blast_dcache = blast_dcache32;
177 else if (dc_lsize == 64) 195 else if (dc_lsize == 64)
178 r4k_blast_dcache = blast_dcache64; 196 r4k_blast_dcache = blast_dcache64;
197 else if (dc_lsize == 128)
198 r4k_blast_dcache = blast_dcache128;
179} 199}
180 200
181/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */ 201/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
@@ -265,6 +285,8 @@ static void r4k_blast_icache_page_setup(void)
265 r4k_blast_icache_page = blast_icache32_page; 285 r4k_blast_icache_page = blast_icache32_page;
266 else if (ic_lsize == 64) 286 else if (ic_lsize == 64)
267 r4k_blast_icache_page = blast_icache64_page; 287 r4k_blast_icache_page = blast_icache64_page;
288 else if (ic_lsize == 128)
289 r4k_blast_icache_page = blast_icache128_page;
268} 290}
269 291
270#ifndef CONFIG_EVA 292#ifndef CONFIG_EVA
@@ -338,6 +360,8 @@ static void r4k_blast_icache_setup(void)
338 r4k_blast_icache = blast_icache32; 360 r4k_blast_icache = blast_icache32;
339 } else if (ic_lsize == 64) 361 } else if (ic_lsize == 64)
340 r4k_blast_icache = blast_icache64; 362 r4k_blast_icache = blast_icache64;
363 else if (ic_lsize == 128)
364 r4k_blast_icache = blast_icache128;
341} 365}
342 366
343static void (* r4k_blast_scache_page)(unsigned long addr); 367static void (* r4k_blast_scache_page)(unsigned long addr);
@@ -428,7 +452,7 @@ static void r4k___flush_cache_all(void)
428 452
429static inline int has_valid_asid(const struct mm_struct *mm) 453static inline int has_valid_asid(const struct mm_struct *mm)
430{ 454{
431#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) 455#ifdef CONFIG_MIPS_MT_SMP
432 int i; 456 int i;
433 457
434 for_each_online_cpu(i) 458 for_each_online_cpu(i)
@@ -1094,6 +1118,21 @@ static void probe_pcache(void)
1094 c->dcache.waybit = 0; 1118 c->dcache.waybit = 0;
1095 break; 1119 break;
1096 1120
1121 case CPU_CAVIUM_OCTEON3:
1122 /* For now lie about the number of ways. */
1123 c->icache.linesz = 128;
1124 c->icache.sets = 16;
1125 c->icache.ways = 8;
1126 c->icache.flags |= MIPS_CACHE_VTAG;
1127 icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;
1128
1129 c->dcache.linesz = 128;
1130 c->dcache.ways = 8;
1131 c->dcache.sets = 8;
1132 dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
1133 c->options |= MIPS_CPU_PREFETCH;
1134 break;
1135
1097 default: 1136 default:
1098 if (!(config & MIPS_CONF_M)) 1137 if (!(config & MIPS_CONF_M))
1099 panic("Don't know how to probe P-caches on this cpu."); 1138 panic("Don't know how to probe P-caches on this cpu.");
@@ -1414,6 +1453,7 @@ static void setup_scache(void)
1414 loongson3_sc_init(); 1453 loongson3_sc_init();
1415 return; 1454 return;
1416 1455
1456 case CPU_CAVIUM_OCTEON3:
1417 case CPU_XLP: 1457 case CPU_XLP:
1418 /* don't need to worry about L2, fully coherent */ 1458 /* don't need to worry about L2, fully coherent */
1419 return; 1459 return;
@@ -1644,3 +1684,26 @@ void r4k_cache_init(void)
1644 coherency_setup(); 1684 coherency_setup();
1645 board_cache_error_setup = r4k_cache_error_setup; 1685 board_cache_error_setup = r4k_cache_error_setup;
1646} 1686}
1687
1688static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd,
1689 void *v)
1690{
1691 switch (cmd) {
1692 case CPU_PM_ENTER_FAILED:
1693 case CPU_PM_EXIT:
1694 coherency_setup();
1695 break;
1696 }
1697
1698 return NOTIFY_OK;
1699}
1700
1701static struct notifier_block r4k_cache_pm_notifier_block = {
1702 .notifier_call = r4k_cache_pm_notifier,
1703};
1704
1705int __init r4k_cache_init_pm(void)
1706{
1707 return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block);
1708}
1709arch_initcall(r4k_cache_init_pm);
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 4fc74c78265a..6e4413330e36 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -44,27 +44,6 @@
44#include <asm/tlb.h> 44#include <asm/tlb.h>
45#include <asm/fixmap.h> 45#include <asm/fixmap.h>
46 46
47/* Atomicity and interruptability */
48#ifdef CONFIG_MIPS_MT_SMTC
49
50#include <asm/mipsmtregs.h>
51
52#define ENTER_CRITICAL(flags) \
53 { \
54 unsigned int mvpflags; \
55 local_irq_save(flags);\
56 mvpflags = dvpe()
57#define EXIT_CRITICAL(flags) \
58 evpe(mvpflags); \
59 local_irq_restore(flags); \
60 }
61#else
62
63#define ENTER_CRITICAL(flags) local_irq_save(flags)
64#define EXIT_CRITICAL(flags) local_irq_restore(flags)
65
66#endif /* CONFIG_MIPS_MT_SMTC */
67
68/* 47/*
69 * We have up to 8 empty zeroed pages so we can map one of the right colour 48 * We have up to 8 empty zeroed pages so we can map one of the right colour
70 * when needed. This is necessary only on R4000 / R4400 SC and MC versions 49 * when needed. This is necessary only on R4000 / R4400 SC and MC versions
@@ -100,21 +79,7 @@ void setup_zero_pages(void)
100 zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK; 79 zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
101} 80}
102 81
103#ifdef CONFIG_MIPS_MT_SMTC 82static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
104static pte_t *kmap_coherent_pte;
105static void __init kmap_coherent_init(void)
106{
107 unsigned long vaddr;
108
109 /* cache the first coherent kmap pte */
110 vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
111 kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
112}
113#else
114static inline void kmap_coherent_init(void) {}
115#endif
116
117void *kmap_coherent(struct page *page, unsigned long addr)
118{ 83{
119 enum fixed_addresses idx; 84 enum fixed_addresses idx;
120 unsigned long vaddr, flags, entrylo; 85 unsigned long vaddr, flags, entrylo;
@@ -126,58 +91,48 @@ void *kmap_coherent(struct page *page, unsigned long addr)
126 91
127 pagefault_disable(); 92 pagefault_disable();
128 idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1); 93 idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
129#ifdef CONFIG_MIPS_MT_SMTC
130 idx += FIX_N_COLOURS * smp_processor_id() +
131 (in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0);
132#else
133 idx += in_interrupt() ? FIX_N_COLOURS : 0; 94 idx += in_interrupt() ? FIX_N_COLOURS : 0;
134#endif
135 vaddr = __fix_to_virt(FIX_CMAP_END - idx); 95 vaddr = __fix_to_virt(FIX_CMAP_END - idx);
136 pte = mk_pte(page, PAGE_KERNEL); 96 pte = mk_pte(page, prot);
137#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) 97#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
138 entrylo = pte.pte_high; 98 entrylo = pte.pte_high;
139#else 99#else
140 entrylo = pte_to_entrylo(pte_val(pte)); 100 entrylo = pte_to_entrylo(pte_val(pte));
141#endif 101#endif
142 102
143 ENTER_CRITICAL(flags); 103 local_irq_save(flags);
144 old_ctx = read_c0_entryhi(); 104 old_ctx = read_c0_entryhi();
145 write_c0_entryhi(vaddr & (PAGE_MASK << 1)); 105 write_c0_entryhi(vaddr & (PAGE_MASK << 1));
146 write_c0_entrylo0(entrylo); 106 write_c0_entrylo0(entrylo);
147 write_c0_entrylo1(entrylo); 107 write_c0_entrylo1(entrylo);
148#ifdef CONFIG_MIPS_MT_SMTC
149 set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
150 /* preload TLB instead of local_flush_tlb_one() */
151 mtc0_tlbw_hazard();
152 tlb_probe();
153 tlb_probe_hazard();
154 tlbidx = read_c0_index();
155 mtc0_tlbw_hazard();
156 if (tlbidx < 0)
157 tlb_write_random();
158 else
159 tlb_write_indexed();
160#else
161 tlbidx = read_c0_wired(); 108 tlbidx = read_c0_wired();
162 write_c0_wired(tlbidx + 1); 109 write_c0_wired(tlbidx + 1);
163 write_c0_index(tlbidx); 110 write_c0_index(tlbidx);
164 mtc0_tlbw_hazard(); 111 mtc0_tlbw_hazard();
165 tlb_write_indexed(); 112 tlb_write_indexed();
166#endif
167 tlbw_use_hazard(); 113 tlbw_use_hazard();
168 write_c0_entryhi(old_ctx); 114 write_c0_entryhi(old_ctx);
169 EXIT_CRITICAL(flags); 115 local_irq_restore(flags);
170 116
171 return (void*) vaddr; 117 return (void*) vaddr;
172} 118}
173 119
120void *kmap_coherent(struct page *page, unsigned long addr)
121{
122 return __kmap_pgprot(page, addr, PAGE_KERNEL);
123}
124
125void *kmap_noncoherent(struct page *page, unsigned long addr)
126{
127 return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
128}
129
174void kunmap_coherent(void) 130void kunmap_coherent(void)
175{ 131{
176#ifndef CONFIG_MIPS_MT_SMTC
177 unsigned int wired; 132 unsigned int wired;
178 unsigned long flags, old_ctx; 133 unsigned long flags, old_ctx;
179 134
180 ENTER_CRITICAL(flags); 135 local_irq_save(flags);
181 old_ctx = read_c0_entryhi(); 136 old_ctx = read_c0_entryhi();
182 wired = read_c0_wired() - 1; 137 wired = read_c0_wired() - 1;
183 write_c0_wired(wired); 138 write_c0_wired(wired);
@@ -189,8 +144,7 @@ void kunmap_coherent(void)
189 tlb_write_indexed(); 144 tlb_write_indexed();
190 tlbw_use_hazard(); 145 tlbw_use_hazard();
191 write_c0_entryhi(old_ctx); 146 write_c0_entryhi(old_ctx);
192 EXIT_CRITICAL(flags); 147 local_irq_restore(flags);
193#endif
194 pagefault_enable(); 148 pagefault_enable();
195} 149}
196 150
@@ -256,7 +210,7 @@ EXPORT_SYMBOL_GPL(copy_from_user_page);
256void __init fixrange_init(unsigned long start, unsigned long end, 210void __init fixrange_init(unsigned long start, unsigned long end,
257 pgd_t *pgd_base) 211 pgd_t *pgd_base)
258{ 212{
259#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC) 213#ifdef CONFIG_HIGHMEM
260 pgd_t *pgd; 214 pgd_t *pgd;
261 pud_t *pud; 215 pud_t *pud;
262 pmd_t *pmd; 216 pmd_t *pmd;
@@ -327,8 +281,6 @@ void __init paging_init(void)
327#ifdef CONFIG_HIGHMEM 281#ifdef CONFIG_HIGHMEM
328 kmap_init(); 282 kmap_init();
329#endif 283#endif
330 kmap_coherent_init();
331
332#ifdef CONFIG_ZONE_DMA 284#ifdef CONFIG_ZONE_DMA
333 max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; 285 max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
334#endif 286#endif
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index eeaf50f5df2b..3914e27456f2 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -8,6 +8,7 @@
8 * Carsten Langgaard, carstenl@mips.com 8 * Carsten Langgaard, carstenl@mips.com
9 * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved. 9 * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved.
10 */ 10 */
11#include <linux/cpu_pm.h>
11#include <linux/init.h> 12#include <linux/init.h>
12#include <linux/sched.h> 13#include <linux/sched.h>
13#include <linux/smp.h> 14#include <linux/smp.h>
@@ -25,28 +26,6 @@
25 26
26extern void build_tlb_refill_handler(void); 27extern void build_tlb_refill_handler(void);
27 28
28/* Atomicity and interruptability */
29#ifdef CONFIG_MIPS_MT_SMTC
30
31#include <asm/smtc.h>
32#include <asm/mipsmtregs.h>
33
34#define ENTER_CRITICAL(flags) \
35 { \
36 unsigned int mvpflags; \
37 local_irq_save(flags);\
38 mvpflags = dvpe()
39#define EXIT_CRITICAL(flags) \
40 evpe(mvpflags); \
41 local_irq_restore(flags); \
42 }
43#else
44
45#define ENTER_CRITICAL(flags) local_irq_save(flags)
46#define EXIT_CRITICAL(flags) local_irq_restore(flags)
47
48#endif /* CONFIG_MIPS_MT_SMTC */
49
50/* 29/*
51 * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb, 30 * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb,
52 * unfortunately, itlb is not totally transparent to software. 31 * unfortunately, itlb is not totally transparent to software.
@@ -75,7 +54,7 @@ void local_flush_tlb_all(void)
75 unsigned long old_ctx; 54 unsigned long old_ctx;
76 int entry, ftlbhighset; 55 int entry, ftlbhighset;
77 56
78 ENTER_CRITICAL(flags); 57 local_irq_save(flags);
79 /* Save old context and create impossible VPN2 value */ 58 /* Save old context and create impossible VPN2 value */
80 old_ctx = read_c0_entryhi(); 59 old_ctx = read_c0_entryhi();
81 write_c0_entrylo0(0); 60 write_c0_entrylo0(0);
@@ -112,7 +91,7 @@ void local_flush_tlb_all(void)
112 tlbw_use_hazard(); 91 tlbw_use_hazard();
113 write_c0_entryhi(old_ctx); 92 write_c0_entryhi(old_ctx);
114 flush_itlb(); 93 flush_itlb();
115 EXIT_CRITICAL(flags); 94 local_irq_restore(flags);
116} 95}
117EXPORT_SYMBOL(local_flush_tlb_all); 96EXPORT_SYMBOL(local_flush_tlb_all);
118 97
@@ -142,7 +121,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
142 if (cpu_context(cpu, mm) != 0) { 121 if (cpu_context(cpu, mm) != 0) {
143 unsigned long size, flags; 122 unsigned long size, flags;
144 123
145 ENTER_CRITICAL(flags); 124 local_irq_save(flags);
146 start = round_down(start, PAGE_SIZE << 1); 125 start = round_down(start, PAGE_SIZE << 1);
147 end = round_up(end, PAGE_SIZE << 1); 126 end = round_up(end, PAGE_SIZE << 1);
148 size = (end - start) >> (PAGE_SHIFT + 1); 127 size = (end - start) >> (PAGE_SHIFT + 1);
@@ -176,7 +155,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
176 drop_mmu_context(mm, cpu); 155 drop_mmu_context(mm, cpu);
177 } 156 }
178 flush_itlb(); 157 flush_itlb();
179 EXIT_CRITICAL(flags); 158 local_irq_restore(flags);
180 } 159 }
181} 160}
182 161
@@ -184,7 +163,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
184{ 163{
185 unsigned long size, flags; 164 unsigned long size, flags;
186 165
187 ENTER_CRITICAL(flags); 166 local_irq_save(flags);
188 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 167 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
189 size = (size + 1) >> 1; 168 size = (size + 1) >> 1;
190 if (size <= (current_cpu_data.tlbsizeftlbsets ? 169 if (size <= (current_cpu_data.tlbsizeftlbsets ?
@@ -220,7 +199,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
220 local_flush_tlb_all(); 199 local_flush_tlb_all();
221 } 200 }
222 flush_itlb(); 201 flush_itlb();
223 EXIT_CRITICAL(flags); 202 local_irq_restore(flags);
224} 203}
225 204
226void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) 205void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
@@ -233,7 +212,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
233 212
234 newpid = cpu_asid(cpu, vma->vm_mm); 213 newpid = cpu_asid(cpu, vma->vm_mm);
235 page &= (PAGE_MASK << 1); 214 page &= (PAGE_MASK << 1);
236 ENTER_CRITICAL(flags); 215 local_irq_save(flags);
237 oldpid = read_c0_entryhi(); 216 oldpid = read_c0_entryhi();
238 write_c0_entryhi(page | newpid); 217 write_c0_entryhi(page | newpid);
239 mtc0_tlbw_hazard(); 218 mtc0_tlbw_hazard();
@@ -253,7 +232,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
253 finish: 232 finish:
254 write_c0_entryhi(oldpid); 233 write_c0_entryhi(oldpid);
255 flush_itlb_vm(vma); 234 flush_itlb_vm(vma);
256 EXIT_CRITICAL(flags); 235 local_irq_restore(flags);
257 } 236 }
258} 237}
259 238
@@ -266,7 +245,7 @@ void local_flush_tlb_one(unsigned long page)
266 unsigned long flags; 245 unsigned long flags;
267 int oldpid, idx; 246 int oldpid, idx;
268 247
269 ENTER_CRITICAL(flags); 248 local_irq_save(flags);
270 oldpid = read_c0_entryhi(); 249 oldpid = read_c0_entryhi();
271 page &= (PAGE_MASK << 1); 250 page &= (PAGE_MASK << 1);
272 write_c0_entryhi(page); 251 write_c0_entryhi(page);
@@ -285,7 +264,7 @@ void local_flush_tlb_one(unsigned long page)
285 } 264 }
286 write_c0_entryhi(oldpid); 265 write_c0_entryhi(oldpid);
287 flush_itlb(); 266 flush_itlb();
288 EXIT_CRITICAL(flags); 267 local_irq_restore(flags);
289} 268}
290 269
291/* 270/*
@@ -308,7 +287,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
308 if (current->active_mm != vma->vm_mm) 287 if (current->active_mm != vma->vm_mm)
309 return; 288 return;
310 289
311 ENTER_CRITICAL(flags); 290 local_irq_save(flags);
312 291
313 pid = read_c0_entryhi() & ASID_MASK; 292 pid = read_c0_entryhi() & ASID_MASK;
314 address &= (PAGE_MASK << 1); 293 address &= (PAGE_MASK << 1);
@@ -358,7 +337,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
358 } 337 }
359 tlbw_use_hazard(); 338 tlbw_use_hazard();
360 flush_itlb_vm(vma); 339 flush_itlb_vm(vma);
361 EXIT_CRITICAL(flags); 340 local_irq_restore(flags);
362} 341}
363 342
364void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, 343void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
@@ -369,7 +348,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
369 unsigned long old_pagemask; 348 unsigned long old_pagemask;
370 unsigned long old_ctx; 349 unsigned long old_ctx;
371 350
372 ENTER_CRITICAL(flags); 351 local_irq_save(flags);
373 /* Save old context and create impossible VPN2 value */ 352 /* Save old context and create impossible VPN2 value */
374 old_ctx = read_c0_entryhi(); 353 old_ctx = read_c0_entryhi();
375 old_pagemask = read_c0_pagemask(); 354 old_pagemask = read_c0_pagemask();
@@ -389,7 +368,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
389 tlbw_use_hazard(); /* What is the hazard here? */ 368 tlbw_use_hazard(); /* What is the hazard here? */
390 write_c0_pagemask(old_pagemask); 369 write_c0_pagemask(old_pagemask);
391 local_flush_tlb_all(); 370 local_flush_tlb_all();
392 EXIT_CRITICAL(flags); 371 local_irq_restore(flags);
393} 372}
394 373
395#ifdef CONFIG_TRANSPARENT_HUGEPAGE 374#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -399,13 +378,13 @@ int __init has_transparent_hugepage(void)
399 unsigned int mask; 378 unsigned int mask;
400 unsigned long flags; 379 unsigned long flags;
401 380
402 ENTER_CRITICAL(flags); 381 local_irq_save(flags);
403 write_c0_pagemask(PM_HUGE_MASK); 382 write_c0_pagemask(PM_HUGE_MASK);
404 back_to_back_c0_hazard(); 383 back_to_back_c0_hazard();
405 mask = read_c0_pagemask(); 384 mask = read_c0_pagemask();
406 write_c0_pagemask(PM_DEFAULT_MASK); 385 write_c0_pagemask(PM_DEFAULT_MASK);
407 386
408 EXIT_CRITICAL(flags); 387 local_irq_restore(flags);
409 388
410 return mask == PM_HUGE_MASK; 389 return mask == PM_HUGE_MASK;
411} 390}
@@ -421,7 +400,10 @@ static int __init set_ntlb(char *str)
421 400
422__setup("ntlb=", set_ntlb); 401__setup("ntlb=", set_ntlb);
423 402
424void tlb_init(void) 403/*
404 * Configure TLB (for init or after a CPU has been powered off).
405 */
406static void r4k_tlb_configure(void)
425{ 407{
426 /* 408 /*
427 * You should never change this register: 409 * You should never change this register:
@@ -453,6 +435,11 @@ void tlb_init(void)
453 local_flush_tlb_all(); 435 local_flush_tlb_all();
454 436
455 /* Did I tell you that ARC SUCKS? */ 437 /* Did I tell you that ARC SUCKS? */
438}
439
440void tlb_init(void)
441{
442 r4k_tlb_configure();
456 443
457 if (ntlb) { 444 if (ntlb) {
458 if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) { 445 if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
@@ -466,3 +453,26 @@ void tlb_init(void)
466 453
467 build_tlb_refill_handler(); 454 build_tlb_refill_handler();
468} 455}
456
457static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
458 void *v)
459{
460 switch (cmd) {
461 case CPU_PM_ENTER_FAILED:
462 case CPU_PM_EXIT:
463 r4k_tlb_configure();
464 break;
465 }
466
467 return NOTIFY_OK;
468}
469
470static struct notifier_block r4k_tlb_pm_notifier_block = {
471 .notifier_call = r4k_tlb_pm_notifier,
472};
473
474static int __init r4k_tlb_init_pm(void)
475{
476 return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
477}
478arch_initcall(r4k_tlb_init_pm);
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index f99ec587b151..e80e10bafc83 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1256,7 +1256,7 @@ static void build_r4000_tlb_refill_handler(void)
1256 memset(relocs, 0, sizeof(relocs)); 1256 memset(relocs, 0, sizeof(relocs));
1257 memset(final_handler, 0, sizeof(final_handler)); 1257 memset(final_handler, 0, sizeof(final_handler));
1258 1258
1259 if ((scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) { 1259 if (IS_ENABLED(CONFIG_64BIT) && (scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) {
1260 htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1, 1260 htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
1261 scratch_reg); 1261 scratch_reg);
1262 vmalloc_mode = refill_scratch; 1262 vmalloc_mode = refill_scratch;
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index b8d580ca02e5..775c2800cba2 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -63,6 +63,7 @@ static struct insn insn_table_MM[] = {
63 { insn_cache, M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM }, 63 { insn_cache, M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM },
64 { insn_daddu, 0, 0 }, 64 { insn_daddu, 0, 0 },
65 { insn_daddiu, 0, 0 }, 65 { insn_daddiu, 0, 0 },
66 { insn_divu, M(mm_pool32a_op, 0, 0, 0, mm_divu_op, mm_pool32axf_op), RT | RS },
66 { insn_dmfc0, 0, 0 }, 67 { insn_dmfc0, 0, 0 },
67 { insn_dmtc0, 0, 0 }, 68 { insn_dmtc0, 0, 0 },
68 { insn_dsll, 0, 0 }, 69 { insn_dsll, 0, 0 },
@@ -78,14 +79,20 @@ static struct insn insn_table_MM[] = {
78 { insn_ext, M(mm_pool32a_op, 0, 0, 0, 0, mm_ext_op), RT | RS | RD | RE }, 79 { insn_ext, M(mm_pool32a_op, 0, 0, 0, 0, mm_ext_op), RT | RS | RD | RE },
79 { insn_j, M(mm_j32_op, 0, 0, 0, 0, 0), JIMM }, 80 { insn_j, M(mm_j32_op, 0, 0, 0, 0, 0), JIMM },
80 { insn_jal, M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM }, 81 { insn_jal, M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM },
82 { insn_jalr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RT | RS },
81 { insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS }, 83 { insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS },
84 { insn_lb, M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
82 { insn_ld, 0, 0 }, 85 { insn_ld, 0, 0 },
86 { insn_lh, M(mm_lh32_op, 0, 0, 0, 0, 0), RS | RS | SIMM },
83 { insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM }, 87 { insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM },
84 { insn_lld, 0, 0 }, 88 { insn_lld, 0, 0 },
85 { insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM }, 89 { insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM },
86 { insn_lw, M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, 90 { insn_lw, M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
87 { insn_mfc0, M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD }, 91 { insn_mfc0, M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD },
92 { insn_mfhi, M(mm_pool32a_op, 0, 0, 0, mm_mfhi32_op, mm_pool32axf_op), RS },
93 { insn_mflo, M(mm_pool32a_op, 0, 0, 0, mm_mflo32_op, mm_pool32axf_op), RS },
88 { insn_mtc0, M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD }, 94 { insn_mtc0, M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD },
95 { insn_mul, M(mm_pool32a_op, 0, 0, 0, 0, mm_mul_op), RT | RS | RD },
89 { insn_or, M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD }, 96 { insn_or, M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD },
90 { insn_ori, M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM }, 97 { insn_ori, M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
91 { insn_pref, M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM }, 98 { insn_pref, M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM },
@@ -94,15 +101,22 @@ static struct insn insn_table_MM[] = {
94 { insn_scd, 0, 0 }, 101 { insn_scd, 0, 0 },
95 { insn_sd, 0, 0 }, 102 { insn_sd, 0, 0 },
96 { insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD }, 103 { insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD },
104 { insn_sllv, M(mm_pool32a_op, 0, 0, 0, 0, mm_sllv32_op), RT | RS | RD },
105 { insn_sltiu, M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
106 { insn_sltu, M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD },
97 { insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD }, 107 { insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD },
98 { insn_srl, M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD }, 108 { insn_srl, M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD },
109 { insn_srlv, M(mm_pool32a_op, 0, 0, 0, 0, mm_srlv32_op), RT | RS | RD },
99 { insn_rotr, M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD }, 110 { insn_rotr, M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD },
100 { insn_subu, M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD }, 111 { insn_subu, M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD },
101 { insn_sw, M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, 112 { insn_sw, M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
113 { insn_sync, M(mm_pool32a_op, 0, 0, 0, mm_sync_op, mm_pool32axf_op), RS },
102 { insn_tlbp, M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0 }, 114 { insn_tlbp, M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0 },
103 { insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 }, 115 { insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 },
104 { insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 }, 116 { insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 },
105 { insn_tlbwr, M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0 }, 117 { insn_tlbwr, M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0 },
118 { insn_wait, M(mm_pool32a_op, 0, 0, 0, mm_wait_op, mm_pool32axf_op), SCIMM },
119 { insn_wsbh, M(mm_pool32a_op, 0, 0, 0, mm_wsbh_op, mm_pool32axf_op), RT | RS },
106 { insn_xor, M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD }, 120 { insn_xor, M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD },
107 { insn_xori, M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM }, 121 { insn_xori, M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
108 { insn_dins, 0, 0 }, 122 { insn_dins, 0, 0 },
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 3abd609518c9..38792c2364f5 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -67,6 +67,7 @@ static struct insn insn_table[] = {
67 { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD }, 67 { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
68 { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE }, 68 { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
69 { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE }, 69 { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
70 { insn_divu, M(spec_op, 0, 0, 0, 0, divu_op), RS | RT },
70 { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET}, 71 { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
71 { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET}, 72 { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
72 { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE }, 73 { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
@@ -82,17 +83,23 @@ static struct insn insn_table[] = {
82 { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE }, 83 { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
83 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, 84 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
84 { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM }, 85 { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
86 { insn_jalr, M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD },
85 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, 87 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
86 { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS }, 88 { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
89 { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
87 { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 90 { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
88 { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD }, 91 { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
92 { insn_lh, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
89 { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 93 { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
90 { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 94 { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
91 { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM }, 95 { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
92 { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 96 { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
93 { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD }, 97 { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
94 { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET}, 98 { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
99 { insn_mfhi, M(spec_op, 0, 0, 0, 0, mfhi_op), RD },
100 { insn_mflo, M(spec_op, 0, 0, 0, 0, mflo_op), RD },
95 { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET}, 101 { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
102 { insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD},
96 { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, 103 { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
97 { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD }, 104 { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
98 { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 105 { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
@@ -102,17 +109,25 @@ static struct insn insn_table[] = {
102 { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 109 { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
103 { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 110 { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
104 { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, 111 { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
112 { insn_sllv, M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD },
113 { insn_sltiu, M(sltiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
114 { insn_sltu, M(spec_op, 0, 0, 0, 0, sltu_op), RS | RT | RD },
105 { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE }, 115 { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
106 { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE }, 116 { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
117 { insn_srlv, M(spec_op, 0, 0, 0, 0, srlv_op), RS | RT | RD },
107 { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD }, 118 { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
108 { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 119 { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
120 { insn_sync, M(spec_op, 0, 0, 0, 0, sync_op), RE },
109 { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM}, 121 { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
110 { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 }, 122 { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
111 { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 }, 123 { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 },
112 { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 }, 124 { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
113 { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 }, 125 { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
126 { insn_wait, M(cop0_op, cop_op, 0, 0, 0, wait_op), SCIMM },
127 { insn_wsbh, M(spec3_op, 0, 0, 0, wsbh_op, bshfl_op), RT | RD },
114 { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, 128 { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
115 { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD }, 129 { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
130 { insn_yield, M(spec3_op, 0, 0, 0, 0, yield_op), RS | RD },
116 { insn_invalid, 0, 0 } 131 { insn_invalid, 0, 0 }
117}; 132};
118 133
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index b9d14b6c7f58..00515805fe41 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -47,14 +47,16 @@ enum opcode {
47 insn_addiu, insn_addu, insn_and, insn_andi, insn_bbit0, insn_bbit1, 47 insn_addiu, insn_addu, insn_and, insn_andi, insn_bbit0, insn_bbit1,
48 insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl, 48 insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
49 insn_bne, insn_cache, insn_daddiu, insn_daddu, insn_dins, insn_dinsm, 49 insn_bne, insn_cache, insn_daddiu, insn_daddu, insn_dins, insn_dinsm,
50 insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll, 50 insn_divu, insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll,
51 insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret, 51 insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret,
52 insn_ext, insn_ins, insn_j, insn_jal, insn_jr, insn_ld, insn_ldx, 52 insn_ext, insn_ins, insn_j, insn_jal, insn_jalr, insn_jr, insn_lb,
53 insn_ll, insn_lld, insn_lui, insn_lw, insn_lwx, insn_mfc0, insn_mtc0, 53 insn_ld, insn_ldx, insn_lh, insn_ll, insn_lld, insn_lui, insn_lw,
54 insn_lwx, insn_mfc0, insn_mfhi, insn_mflo, insn_mtc0, insn_mul,
54 insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd, 55 insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd,
55 insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw, 56 insn_sd, insn_sll, insn_sllv, insn_sltiu, insn_sltu, insn_sra,
56 insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, 57 insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall,
57 insn_xori, 58 insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh,
59 insn_xor, insn_xori, insn_yield,
58}; 60};
59 61
60struct insn { 62struct insn {
@@ -144,6 +146,13 @@ Ip_u2u1u3(op) \
144} \ 146} \
145UASM_EXPORT_SYMBOL(uasm_i##op); 147UASM_EXPORT_SYMBOL(uasm_i##op);
146 148
149#define I_u3u2u1(op) \
150Ip_u3u2u1(op) \
151{ \
152 build_insn(buf, insn##op, c, b, a); \
153} \
154UASM_EXPORT_SYMBOL(uasm_i##op);
155
147#define I_u3u1u2(op) \ 156#define I_u3u1u2(op) \
148Ip_u3u1u2(op) \ 157Ip_u3u1u2(op) \
149{ \ 158{ \
@@ -200,6 +209,13 @@ Ip_u1u2(op) \
200} \ 209} \
201UASM_EXPORT_SYMBOL(uasm_i##op); 210UASM_EXPORT_SYMBOL(uasm_i##op);
202 211
212#define I_u2u1(op) \
213Ip_u1u2(op) \
214{ \
215 build_insn(buf, insn##op, b, a); \
216} \
217UASM_EXPORT_SYMBOL(uasm_i##op);
218
203#define I_u1s2(op) \ 219#define I_u1s2(op) \
204Ip_u1s2(op) \ 220Ip_u1s2(op) \
205{ \ 221{ \
@@ -237,6 +253,7 @@ I_u1u2u3(_dmfc0)
237I_u1u2u3(_dmtc0) 253I_u1u2u3(_dmtc0)
238I_u2u1s3(_daddiu) 254I_u2u1s3(_daddiu)
239I_u3u1u2(_daddu) 255I_u3u1u2(_daddu)
256I_u1u2(_divu)
240I_u2u1u3(_dsll) 257I_u2u1u3(_dsll)
241I_u2u1u3(_dsll32) 258I_u2u1u3(_dsll32)
242I_u2u1u3(_dsra) 259I_u2u1u3(_dsra)
@@ -250,14 +267,20 @@ I_u2u1msbdu3(_ext)
250I_u2u1msbu3(_ins) 267I_u2u1msbu3(_ins)
251I_u1(_j) 268I_u1(_j)
252I_u1(_jal) 269I_u1(_jal)
270I_u2u1(_jalr)
253I_u1(_jr) 271I_u1(_jr)
272I_u2s3u1(_lb)
254I_u2s3u1(_ld) 273I_u2s3u1(_ld)
274I_u2s3u1(_lh)
255I_u2s3u1(_ll) 275I_u2s3u1(_ll)
256I_u2s3u1(_lld) 276I_u2s3u1(_lld)
257I_u1s2(_lui) 277I_u1s2(_lui)
258I_u2s3u1(_lw) 278I_u2s3u1(_lw)
259I_u1u2u3(_mfc0) 279I_u1u2u3(_mfc0)
280I_u1(_mfhi)
281I_u1(_mflo)
260I_u1u2u3(_mtc0) 282I_u1u2u3(_mtc0)
283I_u3u1u2(_mul)
261I_u2u1u3(_ori) 284I_u2u1u3(_ori)
262I_u3u1u2(_or) 285I_u3u1u2(_or)
263I_0(_rfe) 286I_0(_rfe)
@@ -265,17 +288,25 @@ I_u2s3u1(_sc)
265I_u2s3u1(_scd) 288I_u2s3u1(_scd)
266I_u2s3u1(_sd) 289I_u2s3u1(_sd)
267I_u2u1u3(_sll) 290I_u2u1u3(_sll)
291I_u3u2u1(_sllv)
292I_u2u1s3(_sltiu)
293I_u3u1u2(_sltu)
268I_u2u1u3(_sra) 294I_u2u1u3(_sra)
269I_u2u1u3(_srl) 295I_u2u1u3(_srl)
296I_u3u2u1(_srlv)
270I_u2u1u3(_rotr) 297I_u2u1u3(_rotr)
271I_u3u1u2(_subu) 298I_u3u1u2(_subu)
272I_u2s3u1(_sw) 299I_u2s3u1(_sw)
300I_u1(_sync)
273I_0(_tlbp) 301I_0(_tlbp)
274I_0(_tlbr) 302I_0(_tlbr)
275I_0(_tlbwi) 303I_0(_tlbwi)
276I_0(_tlbwr) 304I_0(_tlbwr)
305I_u1(_wait);
306I_u2u1(_wsbh)
277I_u3u1u2(_xor) 307I_u3u1u2(_xor)
278I_u2u1u3(_xori) 308I_u2u1u3(_xori)
309I_u2u1(_yield)
279I_u2u1msbu3(_dins); 310I_u2u1msbu3(_dins);
280I_u2u1msb32u3(_dinsm); 311I_u2u1msb32u3(_dinsm);
281I_u1(_syscall); 312I_u1(_syscall);
@@ -469,6 +500,14 @@ void ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
469} 500}
470UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b)); 501UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b));
471 502
503void ISAFUNC(uasm_il_beq)(u32 **p, struct uasm_reloc **r, unsigned int r1,
504 unsigned int r2, int lid)
505{
506 uasm_r_mips_pc16(r, *p, lid);
507 ISAFUNC(uasm_i_beq)(p, r1, r2, 0);
508}
509UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beq));
510
472void ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, 511void ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg,
473 int lid) 512 int lid)
474{ 513{
diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile
index eae0ba3876d9..b9510ea8db56 100644
--- a/arch/mips/mti-malta/Makefile
+++ b/arch/mips/mti-malta/Makefile
@@ -9,5 +9,4 @@ obj-y := malta-amon.o malta-display.o malta-init.o \
9 malta-int.o malta-memory.o malta-platform.o \ 9 malta-int.o malta-memory.o malta-platform.o \
10 malta-reset.o malta-setup.o malta-time.o 10 malta-reset.o malta-setup.o malta-time.o
11 11
12# FIXME FIXME FIXME 12obj-$(CONFIG_MIPS_MALTA_PM) += malta-pm.o
13obj-$(CONFIG_MIPS_MT_SMTC) += malta-smtc.o
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index 4f9e44d358b7..0f60256d3784 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -116,8 +116,6 @@ phys_t mips_cpc_default_phys_base(void)
116 return CPC_BASE_ADDR; 116 return CPC_BASE_ADDR;
117} 117}
118 118
119extern struct plat_smp_ops msmtc_smp_ops;
120
121void __init prom_init(void) 119void __init prom_init(void)
122{ 120{
123 mips_display_message("LINUX"); 121 mips_display_message("LINUX");
@@ -304,8 +302,4 @@ mips_pci_controller:
304 return; 302 return;
305 if (!register_vsmp_smp_ops()) 303 if (!register_vsmp_smp_ops())
306 return; 304 return;
307
308#ifdef CONFIG_MIPS_MT_SMTC
309 register_smp_ops(&msmtc_smp_ops);
310#endif
311} 305}
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index b71ee809191a..ecc2785f7858 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -504,28 +504,9 @@ void __init arch_init_irq(void)
504 } else if (cpu_has_vint) { 504 } else if (cpu_has_vint) {
505 set_vi_handler(MIPSCPU_INT_I8259A, malta_hw0_irqdispatch); 505 set_vi_handler(MIPSCPU_INT_I8259A, malta_hw0_irqdispatch);
506 set_vi_handler(MIPSCPU_INT_COREHI, corehi_irqdispatch); 506 set_vi_handler(MIPSCPU_INT_COREHI, corehi_irqdispatch);
507#ifdef CONFIG_MIPS_MT_SMTC
508 setup_irq_smtc(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq,
509 (0x100 << MIPSCPU_INT_I8259A));
510 setup_irq_smtc(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI,
511 &corehi_irqaction, (0x100 << MIPSCPU_INT_COREHI));
512 /*
513 * Temporary hack to ensure that the subsidiary device
514 * interrupts coing in via the i8259A, but associated
515 * with low IRQ numbers, will restore the Status.IM
516 * value associated with the i8259A.
517 */
518 {
519 int i;
520
521 for (i = 0; i < 16; i++)
522 irq_hwmask[i] = (0x100 << MIPSCPU_INT_I8259A);
523 }
524#else /* Not SMTC */
525 setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq); 507 setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq);
526 setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI, 508 setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI,
527 &corehi_irqaction); 509 &corehi_irqaction);
528#endif /* CONFIG_MIPS_MT_SMTC */
529 } else { 510 } else {
530 setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq); 511 setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq);
531 setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI, 512 setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI,
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index f2364e419682..6d9773096750 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -26,8 +26,8 @@ unsigned long physical_memsize = 0L;
26 26
27fw_memblock_t * __init fw_getmdesc(int eva) 27fw_memblock_t * __init fw_getmdesc(int eva)
28{ 28{
29 char *memsize_str, *ememsize_str __maybe_unused = NULL, *ptr; 29 char *memsize_str, *ememsize_str = NULL, *ptr;
30 unsigned long memsize = 0, ememsize __maybe_unused = 0; 30 unsigned long memsize = 0, ememsize = 0;
31 static char cmdline[COMMAND_LINE_SIZE] __initdata; 31 static char cmdline[COMMAND_LINE_SIZE] __initdata;
32 int tmp; 32 int tmp;
33 33
diff --git a/arch/mips/mti-malta/malta-pm.c b/arch/mips/mti-malta/malta-pm.c
new file mode 100644
index 000000000000..c1e456c01a44
--- /dev/null
+++ b/arch/mips/mti-malta/malta-pm.c
@@ -0,0 +1,96 @@
1/*
2 * Copyright (C) 2014 Imagination Technologies
3 * Author: Paul Burton <paul.burton@imgtec.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 */
10
11#include <linux/delay.h>
12#include <linux/init.h>
13#include <linux/io.h>
14#include <linux/pci.h>
15
16#include <asm/mach-malta/malta-pm.h>
17
18static struct pci_bus *pm_pci_bus;
19static resource_size_t pm_io_offset;
20
21int mips_pm_suspend(unsigned state)
22{
23 int spec_devid;
24 u16 sts;
25
26 if (!pm_pci_bus || !pm_io_offset)
27 return -ENODEV;
28
29 /* Ensure the power button status is clear */
30 while (1) {
31 sts = inw(pm_io_offset + PIIX4_FUNC3IO_PMSTS);
32 if (!(sts & PIIX4_FUNC3IO_PMSTS_PWRBTN_STS))
33 break;
34 outw(sts, pm_io_offset + PIIX4_FUNC3IO_PMSTS);
35 }
36
37 /* Enable entry to suspend */
38 outw(state | PIIX4_FUNC3IO_PMCNTRL_SUS_EN,
39 pm_io_offset + PIIX4_FUNC3IO_PMCNTRL);
40
41 /* If the special cycle occurs too soon this doesn't work... */
42 mdelay(10);
43
44 /*
45 * The PIIX4 will enter the suspend state only after seeing a special
46 * cycle with the correct magic data on the PCI bus. Generate that
47 * cycle now.
48 */
49 spec_devid = PCI_DEVID(0, PCI_DEVFN(0x1f, 0x7));
50 pci_bus_write_config_dword(pm_pci_bus, spec_devid, 0,
51 PIIX4_SUSPEND_MAGIC);
52
53 /* Give the system some time to power down */
54 mdelay(1000);
55
56 return 0;
57}
58
59static int __init malta_pm_setup(void)
60{
61 struct pci_dev *dev;
62 int res, io_region = PCI_BRIDGE_RESOURCES;
63
64 /* Find a reference to the PCI bus */
65 pm_pci_bus = pci_find_next_bus(NULL);
66 if (!pm_pci_bus) {
67 pr_warn("malta-pm: failed to find reference to PCI bus\n");
68 return -ENODEV;
69 }
70
71 /* Find the PIIX4 PM device */
72 dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
73 PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
74 PCI_ANY_ID, NULL);
75 if (!dev) {
76 pr_warn("malta-pm: failed to find PIIX4 PM\n");
77 return -ENODEV;
78 }
79
80 /* Request access to the PIIX4 PM IO registers */
81 res = pci_request_region(dev, io_region, "PIIX4 PM IO registers");
82 if (res) {
83 pr_warn("malta-pm: failed to request PM IO registers (%d)\n",
84 res);
85 pci_dev_put(dev);
86 return -ENODEV;
87 }
88
89 /* Find the offset to the PIIX4 PM IO registers */
90 pm_io_offset = pci_resource_start(dev, io_region);
91
92 pci_dev_put(dev);
93 return 0;
94}
95
96late_initcall(malta_pm_setup);
diff --git a/arch/mips/mti-malta/malta-reset.c b/arch/mips/mti-malta/malta-reset.c
index d627d4b2b47f..2fd2cc2c5034 100644
--- a/arch/mips/mti-malta/malta-reset.c
+++ b/arch/mips/mti-malta/malta-reset.c
@@ -10,6 +10,7 @@
10#include <linux/pm.h> 10#include <linux/pm.h>
11 11
12#include <asm/reboot.h> 12#include <asm/reboot.h>
13#include <asm/mach-malta/malta-pm.h>
13 14
14#define SOFTRES_REG 0x1f000500 15#define SOFTRES_REG 0x1f000500
15#define GORESET 0x42 16#define GORESET 0x42
@@ -24,17 +25,22 @@ static void mips_machine_restart(char *command)
24 25
25static void mips_machine_halt(void) 26static void mips_machine_halt(void)
26{ 27{
27 unsigned int __iomem *softres_reg = 28 while (true);
28 ioremap(SOFTRES_REG, sizeof(unsigned int)); 29}
29 30
30 __raw_writel(GORESET, softres_reg); 31static void mips_machine_power_off(void)
32{
33 mips_pm_suspend(PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF);
34
35 pr_info("Failed to power down, resetting\n");
36 mips_machine_restart(NULL);
31} 37}
32 38
33static int __init mips_reboot_setup(void) 39static int __init mips_reboot_setup(void)
34{ 40{
35 _machine_restart = mips_machine_restart; 41 _machine_restart = mips_machine_restart;
36 _machine_halt = mips_machine_halt; 42 _machine_halt = mips_machine_halt;
37 pm_power_off = mips_machine_halt; 43 pm_power_off = mips_machine_power_off;
38 44
39 return 0; 45 return 0;
40} 46}
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index bf621516afff..db7c9e5826a6 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -77,11 +77,7 @@ const char *get_system_type(void)
77 return "MIPS Malta"; 77 return "MIPS Malta";
78} 78}
79 79
80#if defined(CONFIG_MIPS_MT_SMTC)
81const char display_string[] = " SMTC LINUX ON MALTA ";
82#else
83const char display_string[] = " LINUX ON MALTA "; 80const char display_string[] = " LINUX ON MALTA ";
84#endif /* CONFIG_MIPS_MT_SMTC */
85 81
86#ifdef CONFIG_BLK_DEV_FD 82#ifdef CONFIG_BLK_DEV_FD
87static void __init fd_activate(void) 83static void __init fd_activate(void)
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
deleted file mode 100644
index c4849904f013..000000000000
--- a/arch/mips/mti-malta/malta-smtc.c
+++ /dev/null
@@ -1,162 +0,0 @@
1/*
2 * Malta Platform-specific hooks for SMP operation
3 */
4#include <linux/irq.h>
5#include <linux/init.h>
6
7#include <asm/mipsregs.h>
8#include <asm/mipsmtregs.h>
9#include <asm/smtc.h>
10#include <asm/smtc_ipi.h>
11
12/* VPE/SMP Prototype implements platform interfaces directly */
13
14/*
15 * Cause the specified action to be performed on a targeted "CPU"
16 */
17
18static void msmtc_send_ipi_single(int cpu, unsigned int action)
19{
20 /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
21 smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
22}
23
24static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action)
25{
26 unsigned int i;
27
28 for_each_cpu(i, mask)
29 msmtc_send_ipi_single(i, action);
30}
31
32/*
33 * Post-config but pre-boot cleanup entry point
34 */
35static void msmtc_init_secondary(void)
36{
37 int myvpe;
38
39 /* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */
40 myvpe = read_c0_tcbind() & TCBIND_CURVPE;
41 if (myvpe != 0) {
42 /* Ideally, this should be done only once per VPE, but... */
43 clear_c0_status(ST0_IM);
44 set_c0_status((0x100 << cp0_compare_irq)
45 | (0x100 << MIPS_CPU_IPI_IRQ));
46 if (cp0_perfcount_irq >= 0)
47 set_c0_status(0x100 << cp0_perfcount_irq);
48 }
49
50 smtc_init_secondary();
51}
52
53/*
54 * Platform "CPU" startup hook
55 */
56static void msmtc_boot_secondary(int cpu, struct task_struct *idle)
57{
58 smtc_boot_secondary(cpu, idle);
59}
60
61/*
62 * SMP initialization finalization entry point
63 */
64static void msmtc_smp_finish(void)
65{
66 smtc_smp_finish();
67}
68
69/*
70 * Hook for after all CPUs are online
71 */
72
73static void msmtc_cpus_done(void)
74{
75}
76
77/*
78 * Platform SMP pre-initialization
79 *
80 * As noted above, we can assume a single CPU for now
81 * but it may be multithreaded.
82 */
83
84static void __init msmtc_smp_setup(void)
85{
86 /*
87 * we won't get the definitive value until
88 * we've run smtc_prepare_cpus later, but
89 * we would appear to need an upper bound now.
90 */
91 smp_num_siblings = smtc_build_cpu_map(0);
92}
93
94static void __init msmtc_prepare_cpus(unsigned int max_cpus)
95{
96 smtc_prepare_cpus(max_cpus);
97}
98
99struct plat_smp_ops msmtc_smp_ops = {
100 .send_ipi_single = msmtc_send_ipi_single,
101 .send_ipi_mask = msmtc_send_ipi_mask,
102 .init_secondary = msmtc_init_secondary,
103 .smp_finish = msmtc_smp_finish,
104 .cpus_done = msmtc_cpus_done,
105 .boot_secondary = msmtc_boot_secondary,
106 .smp_setup = msmtc_smp_setup,
107 .prepare_cpus = msmtc_prepare_cpus,
108};
109
110#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
111/*
112 * IRQ affinity hook
113 */
114
115
116int plat_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
117 bool force)
118{
119 cpumask_t tmask;
120 int cpu = 0;
121 void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
122
123 /*
124 * On the legacy Malta development board, all I/O interrupts
125 * are routed through the 8259 and combined in a single signal
126 * to the CPU daughterboard, and on the CoreFPGA2/3 34K models,
127 * that signal is brought to IP2 of both VPEs. To avoid racing
128 * concurrent interrupt service events, IP2 is enabled only on
129 * one VPE, by convention VPE0. So long as no bits are ever
130 * cleared in the affinity mask, there will never be any
131 * interrupt forwarding. But as soon as a program or operator
132 * sets affinity for one of the related IRQs, we need to make
133 * sure that we don't ever try to forward across the VPE boundary,
134 * at least not until we engineer a system where the interrupt
135 * _ack() or _end() function can somehow know that it corresponds
136 * to an interrupt taken on another VPE, and perform the appropriate
137 * restoration of Status.IM state using MFTR/MTTR instead of the
138 * normal local behavior. We also ensure that no attempt will
139 * be made to forward to an offline "CPU".
140 */
141
142 cpumask_copy(&tmask, affinity);
143 for_each_cpu(cpu, affinity) {
144 if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
145 cpu_clear(cpu, tmask);
146 }
147 cpumask_copy(d->affinity, &tmask);
148
149 if (cpus_empty(tmask))
150 /*
151 * We could restore a default mask here, but the
152 * runtime code can anyway deal with the null set
153 */
154 printk(KERN_WARNING
155 "IRQ affinity leaves no legal CPU for IRQ %d\n", d->irq);
156
157 /* Do any generic SMTC IRQ affinity setup */
158 smtc_set_irq_affinity(d->irq, tmask);
159
160 return IRQ_SET_MASK_OK_NOCOPY;
161}
162#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
diff --git a/arch/mips/mti-sead3/sead3-pic32-i2c-drv.c b/arch/mips/mti-sead3/sead3-pic32-i2c-drv.c
index b921e5ec507c..80fe194cfa53 100644
--- a/arch/mips/mti-sead3/sead3-pic32-i2c-drv.c
+++ b/arch/mips/mti-sead3/sead3-pic32-i2c-drv.c
@@ -312,16 +312,13 @@ static int i2c_platform_probe(struct platform_device *pdev)
312 312
313 pr_debug("i2c_platform_probe\n"); 313 pr_debug("i2c_platform_probe\n");
314 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 314 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
315 if (!r) { 315 if (!r)
316 ret = -ENODEV; 316 return -ENODEV;
317 goto out;
318 }
319 317
320 priv = kzalloc(sizeof(struct i2c_platform_data), GFP_KERNEL); 318 priv = devm_kzalloc(&pdev->dev, sizeof(struct i2c_platform_data),
321 if (!priv) { 319 GFP_KERNEL);
322 ret = -ENOMEM; 320 if (!priv)
323 goto out; 321 return -ENOMEM;
324 }
325 322
326 /* FIXME: need to allocate resource in PIC32 space */ 323 /* FIXME: need to allocate resource in PIC32 space */
327#if 0 324#if 0
@@ -330,10 +327,8 @@ static int i2c_platform_probe(struct platform_device *pdev)
330#else 327#else
331 priv->base = r->start; 328 priv->base = r->start;
332#endif 329#endif
333 if (!priv->base) { 330 if (!priv->base)
334 ret = -EBUSY; 331 return -EBUSY;
335 goto out_mem;
336 }
337 332
338 priv->xfer_timeout = 200; 333 priv->xfer_timeout = 200;
339 priv->ack_timeout = 200; 334 priv->ack_timeout = 200;
@@ -348,17 +343,13 @@ static int i2c_platform_probe(struct platform_device *pdev)
348 i2c_platform_setup(priv); 343 i2c_platform_setup(priv);
349 344
350 ret = i2c_add_numbered_adapter(&priv->adap); 345 ret = i2c_add_numbered_adapter(&priv->adap);
351 if (ret == 0) { 346 if (ret) {
352 platform_set_drvdata(pdev, priv); 347 i2c_platform_disable(priv);
353 return 0; 348 return ret;
354 } 349 }
355 350
356 i2c_platform_disable(priv); 351 platform_set_drvdata(pdev, priv);
357 352 return 0;
358out_mem:
359 kfree(priv);
360out:
361 return ret;
362} 353}
363 354
364static int i2c_platform_remove(struct platform_device *pdev) 355static int i2c_platform_remove(struct platform_device *pdev)
@@ -369,7 +360,6 @@ static int i2c_platform_remove(struct platform_device *pdev)
369 platform_set_drvdata(pdev, NULL); 360 platform_set_drvdata(pdev, NULL);
370 i2c_del_adapter(&priv->adap); 361 i2c_del_adapter(&priv->adap);
371 i2c_platform_disable(priv); 362 i2c_platform_disable(priv);
372 kfree(priv);
373 return 0; 363 return 0;
374} 364}
375 365
diff --git a/arch/mips/net/Makefile b/arch/mips/net/Makefile
new file mode 100644
index 000000000000..ae74b3a91f5c
--- /dev/null
+++ b/arch/mips/net/Makefile
@@ -0,0 +1,3 @@
1# MIPS networking code
2
3obj-$(CONFIG_BPF_JIT) += bpf_jit.o
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
new file mode 100644
index 000000000000..a67b9753330b
--- /dev/null
+++ b/arch/mips/net/bpf_jit.c
@@ -0,0 +1,1399 @@
1/*
2 * Just-In-Time compiler for BPF filters on MIPS
3 *
4 * Copyright (c) 2014 Imagination Technologies Ltd.
5 * Author: Markos Chandras <markos.chandras@imgtec.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; version 2 of the License.
10 */
11
12#include <linux/bitops.h>
13#include <linux/compiler.h>
14#include <linux/errno.h>
15#include <linux/filter.h>
16#include <linux/if_vlan.h>
17#include <linux/kconfig.h>
18#include <linux/moduleloader.h>
19#include <linux/netdevice.h>
20#include <linux/string.h>
21#include <linux/slab.h>
22#include <linux/types.h>
23#include <asm/bitops.h>
24#include <asm/cacheflush.h>
25#include <asm/cpu-features.h>
26#include <asm/uasm.h>
27
28#include "bpf_jit.h"
29
30/* ABI
31 *
32 * s0 1st scratch register
33 * s1 2nd scratch register
34 * s2 offset register
35 * s3 BPF register A
36 * s4 BPF register X
37 * s5 *skb
38 * s6 *scratch memory
39 *
40 * On entry (*bpf_func)(*skb, *filter)
41 * a0 = MIPS_R_A0 = skb;
42 * a1 = MIPS_R_A1 = filter;
43 *
44 * Stack
45 * ...
46 * M[15]
47 * M[14]
48 * M[13]
49 * ...
50 * M[0] <-- r_M
51 * saved reg k-1
52 * saved reg k-2
53 * ...
54 * saved reg 0 <-- r_sp
55 * <no argument area>
56 *
57 * Packet layout
58 *
59 * <--------------------- len ------------------------>
60 * <--skb-len(r_skb_hl)-->< ----- skb->data_len ------>
61 * ----------------------------------------------------
62 * | skb->data |
63 * ----------------------------------------------------
64 */
65
66#define RSIZE (sizeof(unsigned long))
67#define ptr typeof(unsigned long)
68
69/* ABI specific return values */
70#ifdef CONFIG_32BIT /* O32 */
71#ifdef CONFIG_CPU_LITTLE_ENDIAN
72#define r_err MIPS_R_V1
73#define r_val MIPS_R_V0
74#else /* CONFIG_CPU_LITTLE_ENDIAN */
75#define r_err MIPS_R_V0
76#define r_val MIPS_R_V1
77#endif
78#else /* N64 */
79#define r_err MIPS_R_V0
80#define r_val MIPS_R_V0
81#endif
82
83#define r_ret MIPS_R_V0
84
85/*
86 * Use 2 scratch registers to avoid pipeline interlocks.
87 * There is no overhead during epilogue and prologue since
88 * any of the $s0-$s6 registers will only be preserved if
89 * they are going to actually be used.
90 */
91#define r_s0 MIPS_R_S0 /* scratch reg 1 */
92#define r_s1 MIPS_R_S1 /* scratch reg 2 */
93#define r_off MIPS_R_S2
94#define r_A MIPS_R_S3
95#define r_X MIPS_R_S4
96#define r_skb MIPS_R_S5
97#define r_M MIPS_R_S6
98#define r_tmp_imm MIPS_R_T6 /* No need to preserve this */
99#define r_tmp MIPS_R_T7 /* No need to preserve this */
100#define r_zero MIPS_R_ZERO
101#define r_sp MIPS_R_SP
102#define r_ra MIPS_R_RA
103
104#define SCRATCH_OFF(k) (4 * (k))
105
106/* JIT flags */
107#define SEEN_CALL (1 << BPF_MEMWORDS)
108#define SEEN_SREG_SFT (BPF_MEMWORDS + 1)
109#define SEEN_SREG_BASE (1 << SEEN_SREG_SFT)
110#define SEEN_SREG(x) (SEEN_SREG_BASE << (x))
111#define SEEN_S0 SEEN_SREG(0)
112#define SEEN_S1 SEEN_SREG(1)
113#define SEEN_OFF SEEN_SREG(2)
114#define SEEN_A SEEN_SREG(3)
115#define SEEN_X SEEN_SREG(4)
116#define SEEN_SKB SEEN_SREG(5)
117#define SEEN_MEM SEEN_SREG(6)
118
119/* Arguments used by JIT */
120#define ARGS_USED_BY_JIT 2 /* only applicable to 64-bit */
121
122#define FLAG_NEED_X_RESET (1 << 0)
123
124#define SBIT(x) (1 << (x)) /* Signed version of BIT() */
125
126/**
127 * struct jit_ctx - JIT context
128 * @skf: The sk_filter
129 * @prologue_bytes: Number of bytes for prologue
130 * @idx: Instruction index
131 * @flags: JIT flags
132 * @offsets: Instruction offsets
133 * @target: Memory location for the compiled filter
134 */
135struct jit_ctx {
136 const struct sk_filter *skf;
137 unsigned int prologue_bytes;
138 u32 idx;
139 u32 flags;
140 u32 *offsets;
141 u32 *target;
142};
143
144
145static inline int optimize_div(u32 *k)
146{
147 /* power of 2 divides can be implemented with right shift */
148 if (!(*k & (*k-1))) {
149 *k = ilog2(*k);
150 return 1;
151 }
152
153 return 0;
154}
155
156/* Simply emit the instruction if the JIT memory space has been allocated */
157#define emit_instr(ctx, func, ...) \
158do { \
159 if ((ctx)->target != NULL) { \
160 u32 *p = &(ctx)->target[ctx->idx]; \
161 uasm_i_##func(&p, ##__VA_ARGS__); \
162 } \
163 (ctx)->idx++; \
164} while (0)
165
166/* Determine if immediate is within the 16-bit signed range */
167static inline bool is_range16(s32 imm)
168{
169 if (imm >= SBIT(15) || imm < -SBIT(15))
170 return true;
171 return false;
172}
173
174static inline void emit_addu(unsigned int dst, unsigned int src1,
175 unsigned int src2, struct jit_ctx *ctx)
176{
177 emit_instr(ctx, addu, dst, src1, src2);
178}
179
180static inline void emit_nop(struct jit_ctx *ctx)
181{
182 emit_instr(ctx, nop);
183}
184
185/* Load a u32 immediate to a register */
186static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
187{
188 if (ctx->target != NULL) {
189 /* addiu can only handle s16 */
190 if (is_range16(imm)) {
191 u32 *p = &ctx->target[ctx->idx];
192 uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16);
193 p = &ctx->target[ctx->idx + 1];
194 uasm_i_ori(&p, dst, r_tmp_imm, imm & 0xffff);
195 } else {
196 u32 *p = &ctx->target[ctx->idx];
197 uasm_i_addiu(&p, dst, r_zero, imm);
198 }
199 }
200 ctx->idx++;
201
202 if (is_range16(imm))
203 ctx->idx++;
204}
205
206static inline void emit_or(unsigned int dst, unsigned int src1,
207 unsigned int src2, struct jit_ctx *ctx)
208{
209 emit_instr(ctx, or, dst, src1, src2);
210}
211
212static inline void emit_ori(unsigned int dst, unsigned src, u32 imm,
213 struct jit_ctx *ctx)
214{
215 if (imm >= BIT(16)) {
216 emit_load_imm(r_tmp, imm, ctx);
217 emit_or(dst, src, r_tmp, ctx);
218 } else {
219 emit_instr(ctx, ori, dst, src, imm);
220 }
221}
222
223
224static inline void emit_daddu(unsigned int dst, unsigned int src1,
225 unsigned int src2, struct jit_ctx *ctx)
226{
227 emit_instr(ctx, daddu, dst, src1, src2);
228}
229
230static inline void emit_daddiu(unsigned int dst, unsigned int src,
231 int imm, struct jit_ctx *ctx)
232{
233 /*
234 * Only used for stack, so the imm is relatively small
235 * and it fits in 15-bits
236 */
237 emit_instr(ctx, daddiu, dst, src, imm);
238}
239
240static inline void emit_addiu(unsigned int dst, unsigned int src,
241 u32 imm, struct jit_ctx *ctx)
242{
243 if (is_range16(imm)) {
244 emit_load_imm(r_tmp, imm, ctx);
245 emit_addu(dst, r_tmp, src, ctx);
246 } else {
247 emit_instr(ctx, addiu, dst, src, imm);
248 }
249}
250
251static inline void emit_and(unsigned int dst, unsigned int src1,
252 unsigned int src2, struct jit_ctx *ctx)
253{
254 emit_instr(ctx, and, dst, src1, src2);
255}
256
257static inline void emit_andi(unsigned int dst, unsigned int src,
258 u32 imm, struct jit_ctx *ctx)
259{
260 /* If imm does not fit in u16 then load it to register */
261 if (imm >= BIT(16)) {
262 emit_load_imm(r_tmp, imm, ctx);
263 emit_and(dst, src, r_tmp, ctx);
264 } else {
265 emit_instr(ctx, andi, dst, src, imm);
266 }
267}
268
269static inline void emit_xor(unsigned int dst, unsigned int src1,
270 unsigned int src2, struct jit_ctx *ctx)
271{
272 emit_instr(ctx, xor, dst, src1, src2);
273}
274
275static inline void emit_xori(ptr dst, ptr src, u32 imm, struct jit_ctx *ctx)
276{
277 /* If imm does not fit in u16 then load it to register */
278 if (imm >= BIT(16)) {
279 emit_load_imm(r_tmp, imm, ctx);
280 emit_xor(dst, src, r_tmp, ctx);
281 } else {
282 emit_instr(ctx, xori, dst, src, imm);
283 }
284}
285
286static inline void emit_stack_offset(int offset, struct jit_ctx *ctx)
287{
288 if (config_enabled(CONFIG_64BIT))
289 emit_instr(ctx, daddiu, r_sp, r_sp, offset);
290 else
291 emit_instr(ctx, addiu, r_sp, r_sp, offset);
292
293}
294
295static inline void emit_subu(unsigned int dst, unsigned int src1,
296 unsigned int src2, struct jit_ctx *ctx)
297{
298 emit_instr(ctx, subu, dst, src1, src2);
299}
300
301static inline void emit_neg(unsigned int reg, struct jit_ctx *ctx)
302{
303 emit_subu(reg, r_zero, reg, ctx);
304}
305
306static inline void emit_sllv(unsigned int dst, unsigned int src,
307 unsigned int sa, struct jit_ctx *ctx)
308{
309 emit_instr(ctx, sllv, dst, src, sa);
310}
311
312static inline void emit_sll(unsigned int dst, unsigned int src,
313 unsigned int sa, struct jit_ctx *ctx)
314{
315 /* sa is 5-bits long */
316 BUG_ON(sa >= BIT(5));
317 emit_instr(ctx, sll, dst, src, sa);
318}
319
320static inline void emit_srlv(unsigned int dst, unsigned int src,
321 unsigned int sa, struct jit_ctx *ctx)
322{
323 emit_instr(ctx, srlv, dst, src, sa);
324}
325
326static inline void emit_srl(unsigned int dst, unsigned int src,
327 unsigned int sa, struct jit_ctx *ctx)
328{
329 /* sa is 5-bits long */
330 BUG_ON(sa >= BIT(5));
331 emit_instr(ctx, srl, dst, src, sa);
332}
333
334static inline void emit_sltu(unsigned int dst, unsigned int src1,
335 unsigned int src2, struct jit_ctx *ctx)
336{
337 emit_instr(ctx, sltu, dst, src1, src2);
338}
339
340static inline void emit_sltiu(unsigned dst, unsigned int src,
341 unsigned int imm, struct jit_ctx *ctx)
342{
343 /* 16 bit immediate */
344 if (is_range16((s32)imm)) {
345 emit_load_imm(r_tmp, imm, ctx);
346 emit_sltu(dst, src, r_tmp, ctx);
347 } else {
348 emit_instr(ctx, sltiu, dst, src, imm);
349 }
350
351}
352
353/* Store register on the stack */
354static inline void emit_store_stack_reg(ptr reg, ptr base,
355 unsigned int offset,
356 struct jit_ctx *ctx)
357{
358 if (config_enabled(CONFIG_64BIT))
359 emit_instr(ctx, sd, reg, offset, base);
360 else
361 emit_instr(ctx, sw, reg, offset, base);
362}
363
364static inline void emit_store(ptr reg, ptr base, unsigned int offset,
365 struct jit_ctx *ctx)
366{
367 emit_instr(ctx, sw, reg, offset, base);
368}
369
370static inline void emit_load_stack_reg(ptr reg, ptr base,
371 unsigned int offset,
372 struct jit_ctx *ctx)
373{
374 if (config_enabled(CONFIG_64BIT))
375 emit_instr(ctx, ld, reg, offset, base);
376 else
377 emit_instr(ctx, lw, reg, offset, base);
378}
379
380static inline void emit_load(unsigned int reg, unsigned int base,
381 unsigned int offset, struct jit_ctx *ctx)
382{
383 emit_instr(ctx, lw, reg, offset, base);
384}
385
386static inline void emit_load_byte(unsigned int reg, unsigned int base,
387 unsigned int offset, struct jit_ctx *ctx)
388{
389 emit_instr(ctx, lb, reg, offset, base);
390}
391
392static inline void emit_half_load(unsigned int reg, unsigned int base,
393 unsigned int offset, struct jit_ctx *ctx)
394{
395 emit_instr(ctx, lh, reg, offset, base);
396}
397
398static inline void emit_mul(unsigned int dst, unsigned int src1,
399 unsigned int src2, struct jit_ctx *ctx)
400{
401 emit_instr(ctx, mul, dst, src1, src2);
402}
403
404static inline void emit_div(unsigned int dst, unsigned int src,
405 struct jit_ctx *ctx)
406{
407 if (ctx->target != NULL) {
408 u32 *p = &ctx->target[ctx->idx];
409 uasm_i_divu(&p, dst, src);
410 p = &ctx->target[ctx->idx + 1];
411 uasm_i_mfhi(&p, dst);
412 }
413 ctx->idx += 2; /* 2 insts */
414}
415
416static inline void emit_mod(unsigned int dst, unsigned int src,
417 struct jit_ctx *ctx)
418{
419 if (ctx->target != NULL) {
420 u32 *p = &ctx->target[ctx->idx];
421 uasm_i_divu(&p, dst, src);
422 p = &ctx->target[ctx->idx + 1];
423 uasm_i_mflo(&p, dst);
424 }
425 ctx->idx += 2; /* 2 insts */
426}
427
428static inline void emit_dsll(unsigned int dst, unsigned int src,
429 unsigned int sa, struct jit_ctx *ctx)
430{
431 emit_instr(ctx, dsll, dst, src, sa);
432}
433
434static inline void emit_dsrl32(unsigned int dst, unsigned int src,
435 unsigned int sa, struct jit_ctx *ctx)
436{
437 emit_instr(ctx, dsrl32, dst, src, sa);
438}
439
440static inline void emit_wsbh(unsigned int dst, unsigned int src,
441 struct jit_ctx *ctx)
442{
443 emit_instr(ctx, wsbh, dst, src);
444}
445
446/* load a function pointer to register */
447static inline void emit_load_func(unsigned int reg, ptr imm,
448 struct jit_ctx *ctx)
449{
450 if (config_enabled(CONFIG_64BIT)) {
451 /* At this point imm is always 64-bit */
452 emit_load_imm(r_tmp, (u64)imm >> 32, ctx);
453 emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
454 emit_ori(r_tmp, r_tmp_imm, (imm >> 16) & 0xffff, ctx);
455 emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
456 emit_ori(reg, r_tmp_imm, imm & 0xffff, ctx);
457 } else {
458 emit_load_imm(reg, imm, ctx);
459 }
460}
461
462/* Move to real MIPS register */
463static inline void emit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
464{
465 if (config_enabled(CONFIG_64BIT))
466 emit_daddu(dst, src, r_zero, ctx);
467 else
468 emit_addu(dst, src, r_zero, ctx);
469}
470
471/* Move to JIT (32-bit) register */
472static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
473{
474 emit_addu(dst, src, r_zero, ctx);
475}
476
/* Compute the immediate value for PC-relative branches. */
static inline u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
{
	/* First pass: offsets not known yet, emit a placeholder */
	if (ctx->target == NULL)
		return 0;

	/*
	 * We want a pc-relative branch. We only do forward branches
	 * so tgt is always after pc. tgt is the instruction offset
	 * we want to jump to.
	 *
	 * Branch on MIPS:
	 * I: target_offset <- sign_extend(offset)
	 * I+1: PC += target_offset (delay slot)
	 *
	 * ctx->idx currently points to the branch instruction
	 * but the offset is added to the delay slot so we need
	 * to subtract 4.  ctx->offsets[] is body-relative, hence
	 * the prologue_bytes correction of the current position.
	 */
	return ctx->offsets[tgt] -
		(ctx->idx * 4 - ctx->prologue_bytes) - 4;
}
499
500static inline void emit_bcond(int cond, unsigned int reg1, unsigned int reg2,
501 unsigned int imm, struct jit_ctx *ctx)
502{
503 if (ctx->target != NULL) {
504 u32 *p = &ctx->target[ctx->idx];
505
506 switch (cond) {
507 case MIPS_COND_EQ:
508 uasm_i_beq(&p, reg1, reg2, imm);
509 break;
510 case MIPS_COND_NE:
511 uasm_i_bne(&p, reg1, reg2, imm);
512 break;
513 case MIPS_COND_ALL:
514 uasm_i_b(&p, imm);
515 break;
516 default:
517 pr_warn("%s: Unhandled branch conditional: %d\n",
518 __func__, cond);
519 }
520 }
521 ctx->idx++;
522}
523
/* Unconditional PC-relative branch (delay slot filled by the caller) */
static inline void emit_b(unsigned int imm, struct jit_ctx *ctx)
{
	emit_bcond(MIPS_COND_ALL, r_zero, r_zero, imm, ctx);
}
528
/* Jump-and-link through register 'reg', return address saved in 'link' */
static inline void emit_jalr(unsigned int link, unsigned int reg,
			     struct jit_ctx *ctx)
{
	emit_instr(ctx, jalr, link, reg);
}
534
/* Jump through register 'reg' (used for the function return via $ra) */
static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx)
{
	emit_instr(ctx, jr, reg);
}
539
540static inline u16 align_sp(unsigned int num)
541{
542 /* Double word alignment for 32-bit, quadword for 64-bit */
543 unsigned int align = config_enabled(CONFIG_64BIT) ? 16 : 8;
544 num = (num + (align - 1)) & -align;
545 return num;
546}
547
/*
 * Record a read of the X register.  If X has never been seen before,
 * also request that the prologue zero it so no stale value is read.
 */
static inline void update_on_xread(struct jit_ctx *ctx)
{
	if (!(ctx->flags & SEEN_X))
		ctx->flags |= FLAG_NEED_X_RESET;

	ctx->flags |= SEEN_X;
}
555
/*
 * Return true if the opcode writes the A register before reading it,
 * in which case the prologue need not zero A (see build_prologue()).
 */
static bool is_load_to_a(u16 inst)
{
	switch (inst) {
	case BPF_S_LD_W_LEN:
	case BPF_S_LD_W_ABS:
	case BPF_S_LD_H_ABS:
	case BPF_S_LD_B_ABS:
	case BPF_S_ANC_CPU:
	case BPF_S_ANC_IFINDEX:
	case BPF_S_ANC_MARK:
	case BPF_S_ANC_PROTOCOL:
	case BPF_S_ANC_RXHASH:
	case BPF_S_ANC_VLAN_TAG:
	case BPF_S_ANC_VLAN_TAG_PRESENT:
	case BPF_S_ANC_QUEUE:
		return true;
	default:
		return false;
	}
}
576
/*
 * Emit the register-save part of the prologue: grow the stack by
 * 'offset' (aligned), spill caller arguments a0/a1 if we will make
 * calls, spill the callee-saved s-registers we use, save $ra, and
 * point r_M at the scratch memory area.  The store order must match
 * restore_bpf_jit_regs() exactly.
 */
static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
{
	int i = 0, real_off = 0;
	u32 sflags, tmp_flags;

	/* Adjust the stack pointer */
	emit_stack_offset(-align_sp(offset), ctx);

	if (ctx->flags & SEEN_CALL) {
		/* Argument save area */
		if (config_enabled(CONFIG_64BIT))
			/* Bottom of current frame */
			real_off = align_sp(offset) - RSIZE;
		else
			/* Top of previous frame */
			real_off = align_sp(offset) + RSIZE;
		emit_store_stack_reg(MIPS_R_A0, r_sp, real_off, ctx);
		emit_store_stack_reg(MIPS_R_A1, r_sp, real_off + RSIZE, ctx);

		real_off = 0;
	}

	tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
	/* sflags is essentially a bitmap: bit i set => s(i) is used */
	while (tmp_flags) {
		if ((sflags >> i) & 0x1) {
			emit_store_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
					     ctx);
			real_off += RSIZE;
		}
		i++;
		tmp_flags >>= 1;
	}

	/* save return address */
	if (ctx->flags & SEEN_CALL) {
		emit_store_stack_reg(r_ra, r_sp, real_off, ctx);
		real_off += RSIZE;
	}

	/* Setup r_M leaving the alignment gap if necessary */
	if (ctx->flags & SEEN_MEM) {
		if (real_off % (RSIZE * 2))
			real_off += RSIZE;
		emit_addiu(r_M, r_sp, real_off, ctx);
	}
}
624
/*
 * Emit the epilogue register restores: the exact mirror of
 * save_bpf_jit_regs() — reload a0/a1 if calls were made, reload the
 * used s-registers and $ra, then shrink the stack back by the same
 * aligned 'offset'.  Keep the load order in sync with the saves.
 */
static void restore_bpf_jit_regs(struct jit_ctx *ctx,
				 unsigned int offset)
{
	int i, real_off = 0;
	u32 sflags, tmp_flags;

	if (ctx->flags & SEEN_CALL) {
		if (config_enabled(CONFIG_64BIT))
			/* Bottom of current frame */
			real_off = align_sp(offset) - RSIZE;
		else
			/* Top of previous frame */
			real_off = align_sp(offset) + RSIZE;
		emit_load_stack_reg(MIPS_R_A0, r_sp, real_off, ctx);
		emit_load_stack_reg(MIPS_R_A1, r_sp, real_off + RSIZE, ctx);

		real_off = 0;
	}

	tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
	/* sflags is a bitmap: bit i set => s(i) was saved */
	i = 0;
	while (tmp_flags) {
		if ((sflags >> i) & 0x1) {
			emit_load_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
					    ctx);
			real_off += RSIZE;
		}
		i++;
		tmp_flags >>= 1;
	}

	/* restore return address */
	if (ctx->flags & SEEN_CALL)
		emit_load_stack_reg(r_ra, r_sp, real_off, ctx);

	/* Restore the sp and discard the scratch memory */
	emit_stack_offset(align_sp(offset), ctx);
}
664
/*
 * Compute how many bytes of stack this program needs: space for the
 * callee-saved s-registers in use, the BPF scratch memory words, and
 * (when calls are made) the argument area and $ra.
 */
static unsigned int get_stack_depth(struct jit_ctx *ctx)
{
	int sp_off = 0;


	/* How many s* regs do we need to preserve? */
	sp_off += hweight32(ctx->flags >> SEEN_SREG_SFT) * RSIZE;

	if (ctx->flags & SEEN_MEM)
		sp_off += 4 * BPF_MEMWORDS; /* BPF_MEMWORDS are 32-bit */

	if (ctx->flags & SEEN_CALL)
		/*
		 * The JIT code make calls to external functions using 2
		 * arguments. Therefore, for o32 we don't need to allocate
		 * space because we don't care if the arguments are lost
		 * across calls. We do need however to preserve incoming
		 * arguments but the space is already allocated for us by
		 * the caller. On the other hand, for n64, we need to allocate
		 * this space ourselves. We need to preserve $ra as well.
		 */
		sp_off += config_enabled(CONFIG_64BIT) ?
			(ARGS_USED_BY_JIT + 1) * RSIZE : RSIZE;

	/*
	 * Subtract the bytes for the last registers since we only care about
	 * the location on the stack pointer.
	 */
	return sp_off - RSIZE;
}
695
/*
 * Emit the function prologue: save registers, latch the skb argument
 * (a0) into r_skb, and zero r_X / r_A as needed so no kernel stack
 * garbage can be observed by the filter.
 */
static void build_prologue(struct jit_ctx *ctx)
{
	u16 first_inst = ctx->skf->insns[0].code;
	int sp_off;

	/* Calculate the total offset for the stack pointer */
	sp_off = get_stack_depth(ctx);
	save_bpf_jit_regs(ctx, sp_off);

	if (ctx->flags & SEEN_SKB)
		emit_reg_move(r_skb, MIPS_R_A0, ctx);

	/* Zero X only if it is read before ever being written */
	if (ctx->flags & FLAG_NEED_X_RESET)
		emit_jit_reg_move(r_X, r_zero, ctx);

	/* Do not leak kernel data to userspace */
	if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst)))
		emit_jit_reg_move(r_A, r_zero, ctx);
}
715
/*
 * Emit the function epilogue: restore saved registers, pop the frame
 * and return through $ra (with a nop in the branch delay slot).
 */
static void build_epilogue(struct jit_ctx *ctx)
{
	unsigned int sp_off;

	/* Calculate the total offset for the stack pointer */

	sp_off = get_stack_depth(ctx);
	restore_bpf_jit_regs(ctx, sp_off);

	/* Return */
	emit_jr(r_ra, ctx);
	emit_nop(ctx);
}
729
730static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
731{
732 u8 ret;
733 int err;
734
735 err = skb_copy_bits(skb, offset, &ret, 1);
736
737 return (u64)err << 32 | ret;
738}
739
740static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
741{
742 u16 ret;
743 int err;
744
745 err = skb_copy_bits(skb, offset, &ret, 2);
746
747 return (u64)err << 32 | ntohs(ret);
748}
749
750static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
751{
752 u32 ret;
753 int err;
754
755 err = skb_copy_bits(skb, offset, &ret, 4);
756
757 return (u64)err << 32 | ntohl(ret);
758}
759
#define PKT_TYPE_MAX 7
/*
 * Find the byte offset of sk_buff::pkt_type (a bitfield, so offsetof
 * cannot be used): build a probe skb with pkt_type = ~0 (7 in its
 * 3-bit field) and scan for the byte equal to PKT_TYPE_MAX.
 * NOTE(review): assumes the other bitfields sharing that byte are
 * zero-initialized so the byte compares equal to 7 — matches the
 * zeroed struct here, but verify if sk_buff's layout changes.
 */
static int pkt_type_offset(void)
{
	struct sk_buff skb_probe = {
		.pkt_type = ~0,
	};
	char *ct = (char *)&skb_probe;
	unsigned int off;

	for (off = 0; off < sizeof(struct sk_buff); off++) {
		if (ct[off] == PKT_TYPE_MAX)
			return off;
	}
	pr_err_once("Please fix pkt_type_offset(), as pkt_type couldn't be found\n");
	return -1;
}
776
777static int build_body(struct jit_ctx *ctx)
778{
779 void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
780 const struct sk_filter *prog = ctx->skf;
781 const struct sock_filter *inst;
782 unsigned int i, off, load_order, condt;
783 u32 k, b_off __maybe_unused;
784
785 for (i = 0; i < prog->len; i++) {
786 inst = &(prog->insns[i]);
787 pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n",
788 __func__, inst->code, inst->jt, inst->jf, inst->k);
789 k = inst->k;
790
791 if (ctx->target == NULL)
792 ctx->offsets[i] = ctx->idx * 4;
793
794 switch (inst->code) {
795 case BPF_S_LD_IMM:
796 /* A <- k ==> li r_A, k */
797 ctx->flags |= SEEN_A;
798 emit_load_imm(r_A, k, ctx);
799 break;
800 case BPF_S_LD_W_LEN:
801 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
802 /* A <- len ==> lw r_A, offset(skb) */
803 ctx->flags |= SEEN_SKB | SEEN_A;
804 off = offsetof(struct sk_buff, len);
805 emit_load(r_A, r_skb, off, ctx);
806 break;
807 case BPF_S_LD_MEM:
808 /* A <- M[k] ==> lw r_A, offset(M) */
809 ctx->flags |= SEEN_MEM | SEEN_A;
810 emit_load(r_A, r_M, SCRATCH_OFF(k), ctx);
811 break;
812 case BPF_S_LD_W_ABS:
813 /* A <- P[k:4] */
814 load_order = 2;
815 goto load;
816 case BPF_S_LD_H_ABS:
817 /* A <- P[k:2] */
818 load_order = 1;
819 goto load;
820 case BPF_S_LD_B_ABS:
821 /* A <- P[k:1] */
822 load_order = 0;
823load:
824 emit_load_imm(r_off, k, ctx);
825load_common:
826 ctx->flags |= SEEN_CALL | SEEN_OFF | SEEN_S0 |
827 SEEN_SKB | SEEN_A;
828
829 emit_load_func(r_s0, (ptr)load_func[load_order],
830 ctx);
831 emit_reg_move(MIPS_R_A0, r_skb, ctx);
832 emit_jalr(MIPS_R_RA, r_s0, ctx);
833 /* Load second argument to delay slot */
834 emit_reg_move(MIPS_R_A1, r_off, ctx);
835 /* Check the error value */
836 if (config_enabled(CONFIG_64BIT)) {
837 /* Get error code from the top 32-bits */
838 emit_dsrl32(r_s0, r_val, 0, ctx);
839 /* Branch to 3 instructions ahead */
840 emit_bcond(MIPS_COND_NE, r_s0, r_zero, 3 << 2,
841 ctx);
842 } else {
843 /* Branch to 3 instructions ahead */
844 emit_bcond(MIPS_COND_NE, r_err, r_zero, 3 << 2,
845 ctx);
846 }
847 emit_nop(ctx);
848 /* We are good */
849 emit_b(b_imm(i + 1, ctx), ctx);
850 emit_jit_reg_move(r_A, r_val, ctx);
851 /* Return with error */
852 emit_b(b_imm(prog->len, ctx), ctx);
853 emit_reg_move(r_ret, r_zero, ctx);
854 break;
855 case BPF_S_LD_W_IND:
856 /* A <- P[X + k:4] */
857 load_order = 2;
858 goto load_ind;
859 case BPF_S_LD_H_IND:
860 /* A <- P[X + k:2] */
861 load_order = 1;
862 goto load_ind;
863 case BPF_S_LD_B_IND:
864 /* A <- P[X + k:1] */
865 load_order = 0;
866load_ind:
867 update_on_xread(ctx);
868 ctx->flags |= SEEN_OFF | SEEN_X;
869 emit_addiu(r_off, r_X, k, ctx);
870 goto load_common;
871 case BPF_S_LDX_IMM:
872 /* X <- k */
873 ctx->flags |= SEEN_X;
874 emit_load_imm(r_X, k, ctx);
875 break;
876 case BPF_S_LDX_MEM:
877 /* X <- M[k] */
878 ctx->flags |= SEEN_X | SEEN_MEM;
879 emit_load(r_X, r_M, SCRATCH_OFF(k), ctx);
880 break;
881 case BPF_S_LDX_W_LEN:
882 /* X <- len */
883 ctx->flags |= SEEN_X | SEEN_SKB;
884 off = offsetof(struct sk_buff, len);
885 emit_load(r_X, r_skb, off, ctx);
886 break;
887 case BPF_S_LDX_B_MSH:
888 /* X <- 4 * (P[k:1] & 0xf) */
889 ctx->flags |= SEEN_X | SEEN_CALL | SEEN_S0 | SEEN_SKB;
890 /* Load offset to a1 */
891 emit_load_func(r_s0, (ptr)jit_get_skb_b, ctx);
892 /*
893 * This may emit two instructions so it may not fit
894 * in the delay slot. So use a0 in the delay slot.
895 */
896 emit_load_imm(MIPS_R_A1, k, ctx);
897 emit_jalr(MIPS_R_RA, r_s0, ctx);
898 emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
899 /* Check the error value */
900 if (config_enabled(CONFIG_64BIT)) {
901 /* Top 32-bits of $v0 on 64-bit */
902 emit_dsrl32(r_s0, r_val, 0, ctx);
903 emit_bcond(MIPS_COND_NE, r_s0, r_zero,
904 3 << 2, ctx);
905 } else {
906 emit_bcond(MIPS_COND_NE, r_err, r_zero,
907 3 << 2, ctx);
908 }
909 /* No need for delay slot */
910 /* We are good */
911 /* X <- P[1:K] & 0xf */
912 emit_andi(r_X, r_val, 0xf, ctx);
913 /* X << 2 */
914 emit_b(b_imm(i + 1, ctx), ctx);
915 emit_sll(r_X, r_X, 2, ctx); /* delay slot */
916 /* Return with error */
917 emit_b(b_imm(prog->len, ctx), ctx);
918 emit_load_imm(r_ret, 0, ctx); /* delay slot */
919 break;
920 case BPF_S_ST:
921 /* M[k] <- A */
922 ctx->flags |= SEEN_MEM | SEEN_A;
923 emit_store(r_A, r_M, SCRATCH_OFF(k), ctx);
924 break;
925 case BPF_S_STX:
926 /* M[k] <- X */
927 ctx->flags |= SEEN_MEM | SEEN_X;
928 emit_store(r_X, r_M, SCRATCH_OFF(k), ctx);
929 break;
930 case BPF_S_ALU_ADD_K:
931 /* A += K */
932 ctx->flags |= SEEN_A;
933 emit_addiu(r_A, r_A, k, ctx);
934 break;
935 case BPF_S_ALU_ADD_X:
936 /* A += X */
937 ctx->flags |= SEEN_A | SEEN_X;
938 emit_addu(r_A, r_A, r_X, ctx);
939 break;
940 case BPF_S_ALU_SUB_K:
941 /* A -= K */
942 ctx->flags |= SEEN_A;
943 emit_addiu(r_A, r_A, -k, ctx);
944 break;
945 case BPF_S_ALU_SUB_X:
946 /* A -= X */
947 ctx->flags |= SEEN_A | SEEN_X;
948 emit_subu(r_A, r_A, r_X, ctx);
949 break;
950 case BPF_S_ALU_MUL_K:
951 /* A *= K */
952 /* Load K to scratch register before MUL */
953 ctx->flags |= SEEN_A | SEEN_S0;
954 emit_load_imm(r_s0, k, ctx);
955 emit_mul(r_A, r_A, r_s0, ctx);
956 break;
957 case BPF_S_ALU_MUL_X:
958 /* A *= X */
959 update_on_xread(ctx);
960 ctx->flags |= SEEN_A | SEEN_X;
961 emit_mul(r_A, r_A, r_X, ctx);
962 break;
963 case BPF_S_ALU_DIV_K:
964 /* A /= k */
965 if (k == 1)
966 break;
967 if (optimize_div(&k)) {
968 ctx->flags |= SEEN_A;
969 emit_srl(r_A, r_A, k, ctx);
970 break;
971 }
972 ctx->flags |= SEEN_A | SEEN_S0;
973 emit_load_imm(r_s0, k, ctx);
974 emit_div(r_A, r_s0, ctx);
975 break;
976 case BPF_S_ALU_MOD_K:
977 /* A %= k */
978 if (k == 1 || optimize_div(&k)) {
979 ctx->flags |= SEEN_A;
980 emit_jit_reg_move(r_A, r_zero, ctx);
981 } else {
982 ctx->flags |= SEEN_A | SEEN_S0;
983 emit_load_imm(r_s0, k, ctx);
984 emit_mod(r_A, r_s0, ctx);
985 }
986 break;
987 case BPF_S_ALU_DIV_X:
988 /* A /= X */
989 update_on_xread(ctx);
990 ctx->flags |= SEEN_X | SEEN_A;
991 /* Check if r_X is zero */
992 emit_bcond(MIPS_COND_EQ, r_X, r_zero,
993 b_imm(prog->len, ctx), ctx);
994 emit_load_imm(r_val, 0, ctx); /* delay slot */
995 emit_div(r_A, r_X, ctx);
996 break;
997 case BPF_S_ALU_MOD_X:
998 /* A %= X */
999 update_on_xread(ctx);
1000 ctx->flags |= SEEN_X | SEEN_A;
1001 /* Check if r_X is zero */
1002 emit_bcond(MIPS_COND_EQ, r_X, r_zero,
1003 b_imm(prog->len, ctx), ctx);
1004 emit_load_imm(r_val, 0, ctx); /* delay slot */
1005 emit_mod(r_A, r_X, ctx);
1006 break;
1007 case BPF_S_ALU_OR_K:
1008 /* A |= K */
1009 ctx->flags |= SEEN_A;
1010 emit_ori(r_A, r_A, k, ctx);
1011 break;
1012 case BPF_S_ALU_OR_X:
1013 /* A |= X */
1014 update_on_xread(ctx);
1015 ctx->flags |= SEEN_A;
1016 emit_ori(r_A, r_A, r_X, ctx);
1017 break;
1018 case BPF_S_ALU_XOR_K:
1019 /* A ^= k */
1020 ctx->flags |= SEEN_A;
1021 emit_xori(r_A, r_A, k, ctx);
1022 break;
1023 case BPF_S_ANC_ALU_XOR_X:
1024 case BPF_S_ALU_XOR_X:
1025 /* A ^= X */
1026 update_on_xread(ctx);
1027 ctx->flags |= SEEN_A;
1028 emit_xor(r_A, r_A, r_X, ctx);
1029 break;
1030 case BPF_S_ALU_AND_K:
1031 /* A &= K */
1032 ctx->flags |= SEEN_A;
1033 emit_andi(r_A, r_A, k, ctx);
1034 break;
1035 case BPF_S_ALU_AND_X:
1036 /* A &= X */
1037 update_on_xread(ctx);
1038 ctx->flags |= SEEN_A | SEEN_X;
1039 emit_and(r_A, r_A, r_X, ctx);
1040 break;
1041 case BPF_S_ALU_LSH_K:
1042 /* A <<= K */
1043 ctx->flags |= SEEN_A;
1044 emit_sll(r_A, r_A, k, ctx);
1045 break;
1046 case BPF_S_ALU_LSH_X:
1047 /* A <<= X */
1048 ctx->flags |= SEEN_A | SEEN_X;
1049 update_on_xread(ctx);
1050 emit_sllv(r_A, r_A, r_X, ctx);
1051 break;
1052 case BPF_S_ALU_RSH_K:
1053 /* A >>= K */
1054 ctx->flags |= SEEN_A;
1055 emit_srl(r_A, r_A, k, ctx);
1056 break;
1057 case BPF_S_ALU_RSH_X:
1058 ctx->flags |= SEEN_A | SEEN_X;
1059 update_on_xread(ctx);
1060 emit_srlv(r_A, r_A, r_X, ctx);
1061 break;
1062 case BPF_S_ALU_NEG:
1063 /* A = -A */
1064 ctx->flags |= SEEN_A;
1065 emit_neg(r_A, ctx);
1066 break;
1067 case BPF_S_JMP_JA:
1068 /* pc += K */
1069 emit_b(b_imm(i + k + 1, ctx), ctx);
1070 emit_nop(ctx);
1071 break;
1072 case BPF_S_JMP_JEQ_K:
1073 /* pc += ( A == K ) ? pc->jt : pc->jf */
1074 condt = MIPS_COND_EQ | MIPS_COND_K;
1075 goto jmp_cmp;
1076 case BPF_S_JMP_JEQ_X:
1077 ctx->flags |= SEEN_X;
1078 /* pc += ( A == X ) ? pc->jt : pc->jf */
1079 condt = MIPS_COND_EQ | MIPS_COND_X;
1080 goto jmp_cmp;
1081 case BPF_S_JMP_JGE_K:
1082 /* pc += ( A >= K ) ? pc->jt : pc->jf */
1083 condt = MIPS_COND_GE | MIPS_COND_K;
1084 goto jmp_cmp;
1085 case BPF_S_JMP_JGE_X:
1086 ctx->flags |= SEEN_X;
1087 /* pc += ( A >= X ) ? pc->jt : pc->jf */
1088 condt = MIPS_COND_GE | MIPS_COND_X;
1089 goto jmp_cmp;
1090 case BPF_S_JMP_JGT_K:
1091 /* pc += ( A > K ) ? pc->jt : pc->jf */
1092 condt = MIPS_COND_GT | MIPS_COND_K;
1093 goto jmp_cmp;
1094 case BPF_S_JMP_JGT_X:
1095 ctx->flags |= SEEN_X;
1096 /* pc += ( A > X ) ? pc->jt : pc->jf */
1097 condt = MIPS_COND_GT | MIPS_COND_X;
1098jmp_cmp:
1099 /* Greater or Equal */
1100 if ((condt & MIPS_COND_GE) ||
1101 (condt & MIPS_COND_GT)) {
1102 if (condt & MIPS_COND_K) { /* K */
1103 ctx->flags |= SEEN_S0 | SEEN_A;
1104 emit_sltiu(r_s0, r_A, k, ctx);
1105 } else { /* X */
1106 ctx->flags |= SEEN_S0 | SEEN_A |
1107 SEEN_X;
1108 emit_sltu(r_s0, r_A, r_X, ctx);
1109 }
1110 /* A < (K|X) ? r_scrach = 1 */
1111 b_off = b_imm(i + inst->jf + 1, ctx);
1112 emit_bcond(MIPS_COND_GT, r_s0, r_zero, b_off,
1113 ctx);
1114 emit_nop(ctx);
1115 /* A > (K|X) ? scratch = 0 */
1116 if (condt & MIPS_COND_GT) {
1117 /* Checking for equality */
1118 ctx->flags |= SEEN_S0 | SEEN_A | SEEN_X;
1119 if (condt & MIPS_COND_K)
1120 emit_load_imm(r_s0, k, ctx);
1121 else
1122 emit_jit_reg_move(r_s0, r_X,
1123 ctx);
1124 b_off = b_imm(i + inst->jf + 1, ctx);
1125 emit_bcond(MIPS_COND_EQ, r_A, r_s0,
1126 b_off, ctx);
1127 emit_nop(ctx);
1128 /* Finally, A > K|X */
1129 b_off = b_imm(i + inst->jt + 1, ctx);
1130 emit_b(b_off, ctx);
1131 emit_nop(ctx);
1132 } else {
1133 /* A >= (K|X) so jump */
1134 b_off = b_imm(i + inst->jt + 1, ctx);
1135 emit_b(b_off, ctx);
1136 emit_nop(ctx);
1137 }
1138 } else {
1139 /* A == K|X */
1140 if (condt & MIPS_COND_K) { /* K */
1141 ctx->flags |= SEEN_S0 | SEEN_A;
1142 emit_load_imm(r_s0, k, ctx);
1143 /* jump true */
1144 b_off = b_imm(i + inst->jt + 1, ctx);
1145 emit_bcond(MIPS_COND_EQ, r_A, r_s0,
1146 b_off, ctx);
1147 emit_nop(ctx);
1148 /* jump false */
1149 b_off = b_imm(i + inst->jf + 1,
1150 ctx);
1151 emit_bcond(MIPS_COND_NE, r_A, r_s0,
1152 b_off, ctx);
1153 emit_nop(ctx);
1154 } else { /* X */
1155 /* jump true */
1156 ctx->flags |= SEEN_A | SEEN_X;
1157 b_off = b_imm(i + inst->jt + 1,
1158 ctx);
1159 emit_bcond(MIPS_COND_EQ, r_A, r_X,
1160 b_off, ctx);
1161 emit_nop(ctx);
1162 /* jump false */
1163 b_off = b_imm(i + inst->jf + 1, ctx);
1164 emit_bcond(MIPS_COND_NE, r_A, r_X,
1165 b_off, ctx);
1166 emit_nop(ctx);
1167 }
1168 }
1169 break;
1170 case BPF_S_JMP_JSET_K:
1171 ctx->flags |= SEEN_S0 | SEEN_S1 | SEEN_A;
1172 /* pc += (A & K) ? pc -> jt : pc -> jf */
1173 emit_load_imm(r_s1, k, ctx);
1174 emit_and(r_s0, r_A, r_s1, ctx);
1175 /* jump true */
1176 b_off = b_imm(i + inst->jt + 1, ctx);
1177 emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
1178 emit_nop(ctx);
1179 /* jump false */
1180 b_off = b_imm(i + inst->jf + 1, ctx);
1181 emit_b(b_off, ctx);
1182 emit_nop(ctx);
1183 break;
1184 case BPF_S_JMP_JSET_X:
1185 ctx->flags |= SEEN_S0 | SEEN_X | SEEN_A;
1186 /* pc += (A & X) ? pc -> jt : pc -> jf */
1187 emit_and(r_s0, r_A, r_X, ctx);
1188 /* jump true */
1189 b_off = b_imm(i + inst->jt + 1, ctx);
1190 emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
1191 emit_nop(ctx);
1192 /* jump false */
1193 b_off = b_imm(i + inst->jf + 1, ctx);
1194 emit_b(b_off, ctx);
1195 emit_nop(ctx);
1196 break;
1197 case BPF_S_RET_A:
1198 ctx->flags |= SEEN_A;
1199 if (i != prog->len - 1)
1200 /*
1201 * If this is not the last instruction
1202 * then jump to the epilogue
1203 */
1204 emit_b(b_imm(prog->len, ctx), ctx);
1205 emit_reg_move(r_ret, r_A, ctx); /* delay slot */
1206 break;
1207 case BPF_S_RET_K:
1208 /*
1209 * It can emit two instructions so it does not fit on
1210 * the delay slot.
1211 */
1212 emit_load_imm(r_ret, k, ctx);
1213 if (i != prog->len - 1) {
1214 /*
1215 * If this is not the last instruction
1216 * then jump to the epilogue
1217 */
1218 emit_b(b_imm(prog->len, ctx), ctx);
1219 emit_nop(ctx);
1220 }
1221 break;
1222 case BPF_S_MISC_TAX:
1223 /* X = A */
1224 ctx->flags |= SEEN_X | SEEN_A;
1225 emit_jit_reg_move(r_X, r_A, ctx);
1226 break;
1227 case BPF_S_MISC_TXA:
1228 /* A = X */
1229 ctx->flags |= SEEN_A | SEEN_X;
1230 update_on_xread(ctx);
1231 emit_jit_reg_move(r_A, r_X, ctx);
1232 break;
1233 /* AUX */
1234 case BPF_S_ANC_PROTOCOL:
1235 /* A = ntohs(skb->protocol */
1236 ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A;
1237 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
1238 protocol) != 2);
1239 off = offsetof(struct sk_buff, protocol);
1240 emit_half_load(r_A, r_skb, off, ctx);
1241#ifdef CONFIG_CPU_LITTLE_ENDIAN
1242 /* This needs little endian fixup */
1243 if (cpu_has_mips_r2) {
1244 /* R2 and later have the wsbh instruction */
1245 emit_wsbh(r_A, r_A, ctx);
1246 } else {
1247 /* Get first byte */
1248 emit_andi(r_tmp_imm, r_A, 0xff, ctx);
1249 /* Shift it */
1250 emit_sll(r_tmp, r_tmp_imm, 8, ctx);
1251 /* Get second byte */
1252 emit_srl(r_tmp_imm, r_A, 8, ctx);
1253 emit_andi(r_tmp_imm, r_tmp_imm, 0xff, ctx);
1254 /* Put everyting together in r_A */
1255 emit_or(r_A, r_tmp, r_tmp_imm, ctx);
1256 }
1257#endif
1258 break;
1259 case BPF_S_ANC_CPU:
1260 ctx->flags |= SEEN_A | SEEN_OFF;
1261 /* A = current_thread_info()->cpu */
1262 BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info,
1263 cpu) != 4);
1264 off = offsetof(struct thread_info, cpu);
1265 /* $28/gp points to the thread_info struct */
1266 emit_load(r_A, 28, off, ctx);
1267 break;
1268 case BPF_S_ANC_IFINDEX:
1269 /* A = skb->dev->ifindex */
1270 ctx->flags |= SEEN_SKB | SEEN_A | SEEN_S0;
1271 off = offsetof(struct sk_buff, dev);
1272 emit_load(r_s0, r_skb, off, ctx);
1273 /* error (0) in the delay slot */
1274 emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
1275 b_imm(prog->len, ctx), ctx);
1276 emit_reg_move(r_ret, r_zero, ctx);
1277 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
1278 ifindex) != 4);
1279 off = offsetof(struct net_device, ifindex);
1280 emit_load(r_A, r_s0, off, ctx);
1281 break;
1282 case BPF_S_ANC_MARK:
1283 ctx->flags |= SEEN_SKB | SEEN_A;
1284 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
1285 off = offsetof(struct sk_buff, mark);
1286 emit_load(r_A, r_skb, off, ctx);
1287 break;
1288 case BPF_S_ANC_RXHASH:
1289 ctx->flags |= SEEN_SKB | SEEN_A;
1290 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
1291 off = offsetof(struct sk_buff, hash);
1292 emit_load(r_A, r_skb, off, ctx);
1293 break;
1294 case BPF_S_ANC_VLAN_TAG:
1295 case BPF_S_ANC_VLAN_TAG_PRESENT:
1296 ctx->flags |= SEEN_SKB | SEEN_S0 | SEEN_A;
1297 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
1298 vlan_tci) != 2);
1299 off = offsetof(struct sk_buff, vlan_tci);
1300 emit_half_load(r_s0, r_skb, off, ctx);
1301 if (inst->code == BPF_S_ANC_VLAN_TAG)
1302 emit_and(r_A, r_s0, VLAN_VID_MASK, ctx);
1303 else
1304 emit_and(r_A, r_s0, VLAN_TAG_PRESENT, ctx);
1305 break;
1306 case BPF_S_ANC_PKTTYPE:
1307 off = pkt_type_offset();
1308
1309 if (off < 0)
1310 return -1;
1311 emit_load_byte(r_tmp, r_skb, off, ctx);
1312 /* Keep only the last 3 bits */
1313 emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx);
1314 break;
1315 case BPF_S_ANC_QUEUE:
1316 ctx->flags |= SEEN_SKB | SEEN_A;
1317 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
1318 queue_mapping) != 2);
1319 BUILD_BUG_ON(offsetof(struct sk_buff,
1320 queue_mapping) > 0xff);
1321 off = offsetof(struct sk_buff, queue_mapping);
1322 emit_half_load(r_A, r_skb, off, ctx);
1323 break;
1324 default:
1325 pr_warn("%s: Unhandled opcode: 0x%02x\n", __FILE__,
1326 inst->code);
1327 return -1;
1328 }
1329 }
1330
1331 /* compute offsets only during the first pass */
1332 if (ctx->target == NULL)
1333 ctx->offsets[i] = ctx->idx * 4;
1334
1335 return 0;
1336}
1337
1338int bpf_jit_enable __read_mostly;
1339
1340void bpf_jit_compile(struct sk_filter *fp)
1341{
1342 struct jit_ctx ctx;
1343 unsigned int alloc_size, tmp_idx;
1344
1345 if (!bpf_jit_enable)
1346 return;
1347
1348 memset(&ctx, 0, sizeof(ctx));
1349
1350 ctx.offsets = kcalloc(fp->len, sizeof(*ctx.offsets), GFP_KERNEL);
1351 if (ctx.offsets == NULL)
1352 return;
1353
1354 ctx.skf = fp;
1355
1356 if (build_body(&ctx))
1357 goto out;
1358
1359 tmp_idx = ctx.idx;
1360 build_prologue(&ctx);
1361 ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
1362 /* just to complete the ctx.idx count */
1363 build_epilogue(&ctx);
1364
1365 alloc_size = 4 * ctx.idx;
1366 ctx.target = module_alloc(alloc_size);
1367 if (ctx.target == NULL)
1368 goto out;
1369
1370 /* Clean it */
1371 memset(ctx.target, 0, alloc_size);
1372
1373 ctx.idx = 0;
1374
1375 /* Generate the actual JIT code */
1376 build_prologue(&ctx);
1377 build_body(&ctx);
1378 build_epilogue(&ctx);
1379
1380 /* Update the icache */
1381 flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx));
1382
1383 if (bpf_jit_enable > 1)
1384 /* Dump JIT code */
1385 bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
1386
1387 fp->bpf_func = (void *)ctx.target;
1388 fp->jited = 1;
1389
1390out:
1391 kfree(ctx.offsets);
1392}
1393
/*
 * Release a filter: free the JITed code (if any) allocated by
 * module_alloc() in bpf_jit_compile(), then the filter itself.
 */
void bpf_jit_free(struct sk_filter *fp)
{
	if (fp->jited)
		module_free(NULL, fp->bpf_func);
	kfree(fp);
}
diff --git a/arch/mips/net/bpf_jit.h b/arch/mips/net/bpf_jit.h
new file mode 100644
index 000000000000..3a5751b4335a
--- /dev/null
+++ b/arch/mips/net/bpf_jit.h
@@ -0,0 +1,44 @@
1/*
2 * Just-In-Time compiler for BPF filters on MIPS
3 *
4 * Copyright (c) 2014 Imagination Technologies Ltd.
5 * Author: Markos Chandras <markos.chandras@imgtec.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; version 2 of the License.
10 */
11
12#ifndef BPF_JIT_MIPS_OP_H
13#define BPF_JIT_MIPS_OP_H
14
15/* Registers used by JIT */
16#define MIPS_R_ZERO 0
17#define MIPS_R_V0 2
18#define MIPS_R_V1 3
19#define MIPS_R_A0 4
20#define MIPS_R_A1 5
21#define MIPS_R_T6 14
22#define MIPS_R_T7 15
23#define MIPS_R_S0 16
24#define MIPS_R_S1 17
25#define MIPS_R_S2 18
26#define MIPS_R_S3 19
27#define MIPS_R_S4 20
28#define MIPS_R_S5 21
29#define MIPS_R_S6 22
30#define MIPS_R_S7 23
31#define MIPS_R_SP 29
32#define MIPS_R_RA 31
33
34/* Conditional codes */
35#define MIPS_COND_EQ 0x1
36#define MIPS_COND_GE (0x1 << 1)
37#define MIPS_COND_GT (0x1 << 2)
38#define MIPS_COND_NE (0x1 << 3)
39#define MIPS_COND_ALL (0x1 << 4)
40/* Conditionals on X register or K immediate */
41#define MIPS_COND_X (0x1 << 5)
42#define MIPS_COND_K (0x1 << 6)
43
44#endif /* BPF_JIT_MIPS_OP_H */
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
index 5afc4b7fce0f..c100b9afa0ab 100644
--- a/arch/mips/netlogic/common/irq.c
+++ b/arch/mips/netlogic/common/irq.c
@@ -203,6 +203,8 @@ void nlm_set_pic_extra_ack(int node, int irq, void (*xack)(struct irq_data *))
203 203
204 xirq = nlm_irq_to_xirq(node, irq); 204 xirq = nlm_irq_to_xirq(node, irq);
205 pic_data = irq_get_handler_data(xirq); 205 pic_data = irq_get_handler_data(xirq);
206 if (WARN_ON(!pic_data))
207 return;
206 pic_data->extra_ack = xack; 208 pic_data->extra_ack = xack;
207} 209}
208 210
diff --git a/arch/mips/netlogic/common/reset.S b/arch/mips/netlogic/common/reset.S
index b231fe1e7a09..701c4bcb9e47 100644
--- a/arch/mips/netlogic/common/reset.S
+++ b/arch/mips/netlogic/common/reset.S
@@ -35,6 +35,7 @@
35 35
36#include <asm/asm.h> 36#include <asm/asm.h>
37#include <asm/asm-offsets.h> 37#include <asm/asm-offsets.h>
38#include <asm/cpu.h>
38#include <asm/cacheops.h> 39#include <asm/cacheops.h>
39#include <asm/regdef.h> 40#include <asm/regdef.h>
40#include <asm/mipsregs.h> 41#include <asm/mipsregs.h>
@@ -74,13 +75,25 @@
74.endm 75.endm
75 76
76/* 77/*
78 * Allow access to physical mem >64G by enabling ELPA in PAGEGRAIN
79 * register. This is needed before going to C code since the SP can
80 * in this region. Called from all HW threads.
81 */
82.macro xlp_early_mmu_init
83 mfc0 t0, CP0_PAGEMASK, 1
84 li t1, (1 << 29) /* ELPA bit */
85 or t0, t1
86 mtc0 t0, CP0_PAGEMASK, 1
87.endm
88
89/*
77 * L1D cache has to be flushed before enabling threads in XLP. 90 * L1D cache has to be flushed before enabling threads in XLP.
78 * On XLP8xx/XLP3xx, we do a low level flush using processor control 91 * On XLP8xx/XLP3xx, we do a low level flush using processor control
79 * registers. On XLPII CPUs, usual cache instructions work. 92 * registers. On XLPII CPUs, usual cache instructions work.
80 */ 93 */
81.macro xlp_flush_l1_dcache 94.macro xlp_flush_l1_dcache
82 mfc0 t0, CP0_EBASE, 0 95 mfc0 t0, CP0_EBASE, 0
83 andi t0, t0, 0xff00 96 andi t0, t0, PRID_IMP_MASK
84 slt t1, t0, 0x1200 97 slt t1, t0, 0x1200
85 beqz t1, 15f 98 beqz t1, 15f
86 nop 99 nop
@@ -159,11 +172,15 @@ FEXPORT(nlm_reset_entry)
159 172
1601: /* Entry point on core wakeup */ 1731: /* Entry point on core wakeup */
161 mfc0 t0, CP0_EBASE, 0 /* processor ID */ 174 mfc0 t0, CP0_EBASE, 0 /* processor ID */
162 andi t0, 0xff00 175 andi t0, PRID_IMP_MASK
163 li t1, 0x1500 /* XLP 9xx */ 176 li t1, 0x1500 /* XLP 9xx */
164 beq t0, t1, 2f /* does not need to set coherent */ 177 beq t0, t1, 2f /* does not need to set coherent */
165 nop 178 nop
166 179
180 li t1, 0x1300 /* XLP 5xx */
181 beq t0, t1, 2f /* does not need to set coherent */
182 nop
183
167 /* set bit in SYS coherent register for the core */ 184 /* set bit in SYS coherent register for the core */
168 mfc0 t0, CP0_EBASE, 1 185 mfc0 t0, CP0_EBASE, 1
169 mfc0 t1, CP0_EBASE, 1 186 mfc0 t1, CP0_EBASE, 1
@@ -197,6 +214,9 @@ FEXPORT(nlm_reset_entry)
197EXPORT(nlm_boot_siblings) 214EXPORT(nlm_boot_siblings)
198 /* core L1D flush before enable threads */ 215 /* core L1D flush before enable threads */
199 xlp_flush_l1_dcache 216 xlp_flush_l1_dcache
217 /* save ra and sp, will be used later (only for boot cpu) */
218 dmtc0 ra, $22, 6
219 dmtc0 sp, $22, 7
200 /* Enable hw threads by writing to MAP_THREADMODE of the core */ 220 /* Enable hw threads by writing to MAP_THREADMODE of the core */
201 li t0, CKSEG1ADDR(RESET_DATA_PHYS) 221 li t0, CKSEG1ADDR(RESET_DATA_PHYS)
202 lw t1, BOOT_THREAD_MODE(t0) /* t1 <- thread mode */ 222 lw t1, BOOT_THREAD_MODE(t0) /* t1 <- thread mode */
@@ -225,6 +245,8 @@ EXPORT(nlm_boot_siblings)
225#endif 245#endif
226 mtc0 t1, CP0_STATUS 246 mtc0 t1, CP0_STATUS
227 247
248 xlp_early_mmu_init
249
228 /* mark CPU ready */ 250 /* mark CPU ready */
229 li t3, CKSEG1ADDR(RESET_DATA_PHYS) 251 li t3, CKSEG1ADDR(RESET_DATA_PHYS)
230 ADDIU t1, t3, BOOT_CPU_READY 252 ADDIU t1, t3, BOOT_CPU_READY
@@ -238,14 +260,12 @@ EXPORT(nlm_boot_siblings)
238 nop 260 nop
239 261
240 /* 262 /*
241 * For the boot CPU, we have to restore registers and 263 * For the boot CPU, we have to restore ra and sp and return, rest
242 * return 264 * of the registers will be restored by the caller
243 */ 265 */
2444: dmfc0 t0, $4, 2 /* restore SP from UserLocal */ 2664:
245 li t1, 0xfadebeef 267 dmfc0 ra, $22, 6
246 dmtc0 t1, $4, 2 /* restore SP from UserLocal */ 268 dmfc0 sp, $22, 7
247 PTR_SUBU sp, t0, PT_SIZE
248 RESTORE_ALL
249 jr ra 269 jr ra
250 nop 270 nop
251EXPORT(nlm_reset_entry_end) 271EXPORT(nlm_reset_entry_end)
@@ -253,6 +273,7 @@ EXPORT(nlm_reset_entry_end)
253LEAF(nlm_init_boot_cpu) 273LEAF(nlm_init_boot_cpu)
254#ifdef CONFIG_CPU_XLP 274#ifdef CONFIG_CPU_XLP
255 xlp_config_lsu 275 xlp_config_lsu
276 xlp_early_mmu_init
256#endif 277#endif
257 jr ra 278 jr ra
258 nop 279 nop
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index 6baae15cc7b1..4fde7ac76cc9 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -135,10 +135,6 @@ void nlm_smp_finish(void)
135 local_irq_enable(); 135 local_irq_enable();
136} 136}
137 137
138void nlm_cpus_done(void)
139{
140}
141
142/* 138/*
143 * Boot all other cpus in the system, initialize them, and bring them into 139 * Boot all other cpus in the system, initialize them, and bring them into
144 * the boot function 140 * the boot function
@@ -198,7 +194,7 @@ void __init nlm_smp_setup(void)
198 cpumask_scnprintf(buf, ARRAY_SIZE(buf), cpu_possible_mask); 194 cpumask_scnprintf(buf, ARRAY_SIZE(buf), cpu_possible_mask);
199 pr_info("Possible CPU mask: %s\n", buf); 195 pr_info("Possible CPU mask: %s\n", buf);
200 196
201 /* check with the cores we have worken up */ 197 /* check with the cores we have woken up */
202 for (ncore = 0, i = 0; i < NLM_NR_NODES; i++) 198 for (ncore = 0, i = 0; i < NLM_NR_NODES; i++)
203 ncore += hweight32(nlm_get_node(i)->coremask); 199 ncore += hweight32(nlm_get_node(i)->coremask);
204 200
@@ -213,6 +209,7 @@ static int nlm_parse_cpumask(cpumask_t *wakeup_mask)
213{ 209{
214 uint32_t core0_thr_mask, core_thr_mask; 210 uint32_t core0_thr_mask, core_thr_mask;
215 int threadmode, i, j; 211 int threadmode, i, j;
212 char buf[64];
216 213
217 core0_thr_mask = 0; 214 core0_thr_mask = 0;
218 for (i = 0; i < NLM_THREADS_PER_CORE; i++) 215 for (i = 0; i < NLM_THREADS_PER_CORE; i++)
@@ -247,8 +244,8 @@ static int nlm_parse_cpumask(cpumask_t *wakeup_mask)
247 return threadmode; 244 return threadmode;
248 245
249unsupp: 246unsupp:
250 panic("Unsupported CPU mask %lx", 247 cpumask_scnprintf(buf, ARRAY_SIZE(buf), wakeup_mask);
251 (unsigned long)cpumask_bits(wakeup_mask)[0]); 248 panic("Unsupported CPU mask %s", buf);
252 return 0; 249 return 0;
253} 250}
254 251
@@ -277,7 +274,6 @@ struct plat_smp_ops nlm_smp_ops = {
277 .send_ipi_mask = nlm_send_ipi_mask, 274 .send_ipi_mask = nlm_send_ipi_mask,
278 .init_secondary = nlm_init_secondary, 275 .init_secondary = nlm_init_secondary,
279 .smp_finish = nlm_smp_finish, 276 .smp_finish = nlm_smp_finish,
280 .cpus_done = nlm_cpus_done,
281 .boot_secondary = nlm_boot_secondary, 277 .boot_secondary = nlm_boot_secondary,
282 .smp_setup = nlm_smp_setup, 278 .smp_setup = nlm_smp_setup,
283 .prepare_cpus = nlm_prepare_cpus, 279 .prepare_cpus = nlm_prepare_cpus,
diff --git a/arch/mips/netlogic/common/smpboot.S b/arch/mips/netlogic/common/smpboot.S
index 8597657c27fc..805355b0bd05 100644
--- a/arch/mips/netlogic/common/smpboot.S
+++ b/arch/mips/netlogic/common/smpboot.S
@@ -54,8 +54,9 @@
54 .set noat 54 .set noat
55 .set arch=xlr /* for mfcr/mtcr, XLR is sufficient */ 55 .set arch=xlr /* for mfcr/mtcr, XLR is sufficient */
56 56
57FEXPORT(xlp_boot_core0_siblings) /* "Master" cpu starts from here */ 57/* Called by the boot cpu to wake up its sibling threads */
58 dmtc0 sp, $4, 2 /* SP saved in UserLocal */ 58NESTED(xlp_boot_core0_siblings, PT_SIZE, sp)
59 /* CPU register contents lost when enabling threads, save them first */
59 SAVE_ALL 60 SAVE_ALL
60 sync 61 sync
61 /* find the location to which nlm_boot_siblings was relocated */ 62 /* find the location to which nlm_boot_siblings was relocated */
@@ -65,9 +66,12 @@ FEXPORT(xlp_boot_core0_siblings) /* "Master" cpu starts from here */
65 dsubu t2, t1 66 dsubu t2, t1
66 daddu t2, t0 67 daddu t2, t0
67 /* call it */ 68 /* call it */
68 jr t2 69 jalr t2
69 nop 70 nop
70 /* not reached */ 71 RESTORE_ALL
72 jr ra
73 nop
74END(xlp_boot_core0_siblings)
71 75
72NESTED(nlm_boot_secondary_cpus, 16, sp) 76NESTED(nlm_boot_secondary_cpus, 16, sp)
73 /* Initialize CP0 Status */ 77 /* Initialize CP0 Status */
diff --git a/arch/mips/netlogic/common/time.c b/arch/mips/netlogic/common/time.c
index 13391b8a6031..0c0a1a606f73 100644
--- a/arch/mips/netlogic/common/time.c
+++ b/arch/mips/netlogic/common/time.c
@@ -82,6 +82,7 @@ static struct clocksource csrc_pic = {
82static void nlm_init_pic_timer(void) 82static void nlm_init_pic_timer(void)
83{ 83{
84 uint64_t picbase = nlm_get_node(0)->picbase; 84 uint64_t picbase = nlm_get_node(0)->picbase;
85 u32 picfreq;
85 86
86 nlm_pic_set_timer(picbase, PIC_CLOCK_TIMER, ~0ULL, 0, 0); 87 nlm_pic_set_timer(picbase, PIC_CLOCK_TIMER, ~0ULL, 0, 0);
87 if (current_cpu_data.cputype == CPU_XLR) { 88 if (current_cpu_data.cputype == CPU_XLR) {
@@ -92,7 +93,9 @@ static void nlm_init_pic_timer(void)
92 csrc_pic.read = nlm_get_pic_timer; 93 csrc_pic.read = nlm_get_pic_timer;
93 } 94 }
94 csrc_pic.rating = 1000; 95 csrc_pic.rating = 1000;
95 clocksource_register_hz(&csrc_pic, pic_timer_freq()); 96 picfreq = pic_timer_freq();
97 clocksource_register_hz(&csrc_pic, picfreq);
98 pr_info("PIC clock source added, frequency %d\n", picfreq);
96} 99}
97 100
98void __init plat_time_init(void) 101void __init plat_time_init(void)
diff --git a/arch/mips/netlogic/dts/xlp_gvp.dts b/arch/mips/netlogic/dts/xlp_gvp.dts
index 047d27f54487..bb4ecd1d47fc 100644
--- a/arch/mips/netlogic/dts/xlp_gvp.dts
+++ b/arch/mips/netlogic/dts/xlp_gvp.dts
@@ -26,11 +26,12 @@
26 interrupt-parent = <&pic>; 26 interrupt-parent = <&pic>;
27 interrupts = <17>; 27 interrupts = <17>;
28 }; 28 };
29 pic: pic@4000 { 29 pic: pic@110000 {
30 interrupt-controller; 30 compatible = "netlogic,xlp-pic";
31 #address-cells = <0>; 31 #address-cells = <0>;
32 #interrupt-cells = <1>; 32 #interrupt-cells = <1>;
33 reg = <0 0x110000 0x200>; 33 reg = <0 0x110000 0x200>;
34 interrupt-controller;
34 }; 35 };
35 36
36 nor_flash@1,0 { 37 nor_flash@1,0 {
diff --git a/arch/mips/netlogic/xlp/Makefile b/arch/mips/netlogic/xlp/Makefile
index ed9a93c04650..be358a8050c5 100644
--- a/arch/mips/netlogic/xlp/Makefile
+++ b/arch/mips/netlogic/xlp/Makefile
@@ -2,3 +2,5 @@ obj-y += setup.o nlm_hal.o cop2-ex.o dt.o
2obj-$(CONFIG_SMP) += wakeup.o 2obj-$(CONFIG_SMP) += wakeup.o
3obj-$(CONFIG_USB) += usb-init.o 3obj-$(CONFIG_USB) += usb-init.o
4obj-$(CONFIG_USB) += usb-init-xlp2.o 4obj-$(CONFIG_USB) += usb-init-xlp2.o
5obj-$(CONFIG_SATA_AHCI) += ahci-init.o
6obj-$(CONFIG_SATA_AHCI) += ahci-init-xlp2.o
diff --git a/arch/mips/netlogic/xlp/ahci-init-xlp2.c b/arch/mips/netlogic/xlp/ahci-init-xlp2.c
new file mode 100644
index 000000000000..c83dbf3689e2
--- /dev/null
+++ b/arch/mips/netlogic/xlp/ahci-init-xlp2.c
@@ -0,0 +1,377 @@
1/*
2 * Copyright (c) 2003-2014 Broadcom Corporation
3 * All Rights Reserved
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the Broadcom
9 * license below:
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
29 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
31 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
32 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <linux/dma-mapping.h>
36#include <linux/kernel.h>
37#include <linux/delay.h>
38#include <linux/init.h>
39#include <linux/pci.h>
40#include <linux/irq.h>
41#include <linux/bitops.h>
42#include <linux/pci_ids.h>
43#include <linux/nodemask.h>
44
45#include <asm/cpu.h>
46#include <asm/mipsregs.h>
47
48#include <asm/netlogic/common.h>
49#include <asm/netlogic/haldefs.h>
50#include <asm/netlogic/mips-extns.h>
51#include <asm/netlogic/xlp-hal/xlp.h>
52#include <asm/netlogic/xlp-hal/iomap.h>
53
54#define SATA_CTL 0x0
55#define SATA_STATUS 0x1 /* Status Reg */
56#define SATA_INT 0x2 /* Interrupt Reg */
57#define SATA_INT_MASK 0x3 /* Interrupt Mask Reg */
58#define SATA_BIU_TIMEOUT 0x4
59#define AXIWRSPERRLOG 0x5
60#define AXIRDSPERRLOG 0x6
61#define BiuTimeoutLow 0x7
62#define BiuTimeoutHi 0x8
63#define BiuSlvErLow 0x9
64#define BiuSlvErHi 0xa
65#define IO_CONFIG_SWAP_DIS 0xb
66#define CR_REG_TIMER 0xc
67#define CORE_ID 0xd
68#define AXI_SLAVE_OPT1 0xe
69#define PHY_MEM_ACCESS 0xf
70#define PHY0_CNTRL 0x10
71#define PHY0_STAT 0x11
72#define PHY0_RX_ALIGN 0x12
73#define PHY0_RX_EQ_LO 0x13
74#define PHY0_RX_EQ_HI 0x14
75#define PHY0_BIST_LOOP 0x15
76#define PHY1_CNTRL 0x16
77#define PHY1_STAT 0x17
78#define PHY1_RX_ALIGN 0x18
79#define PHY1_RX_EQ_LO 0x19
80#define PHY1_RX_EQ_HI 0x1a
81#define PHY1_BIST_LOOP 0x1b
82#define RdExBase 0x1c
83#define RdExLimit 0x1d
84#define CacheAllocBase 0x1e
85#define CacheAllocLimit 0x1f
86#define BiuSlaveCmdGstNum 0x20
87
88/*SATA_CTL Bits */
89#define SATA_RST_N BIT(0) /* Active low reset sata_core phy */
90#define SataCtlReserve0 BIT(1)
91#define M_CSYSREQ BIT(2) /* AXI master low power, not used */
92#define S_CSYSREQ BIT(3) /* AXI slave low power, not used */
93#define P0_CP_DET BIT(8) /* Reserved, bring in from pad */
94#define P0_MP_SW BIT(9) /* Mech Switch */
95#define P0_DISABLE BIT(10) /* disable p0 */
96#define P0_ACT_LED_EN BIT(11) /* Active LED enable */
97#define P0_IRST_HARD_SYNTH BIT(12) /* PHY hard synth reset */
98#define P0_IRST_HARD_TXRX BIT(13) /* PHY lane hard reset */
99#define P0_IRST_POR BIT(14) /* PHY power on reset*/
100#define P0_IPDTXL BIT(15) /* PHY Tx lane dis/power down */
101#define P0_IPDRXL BIT(16) /* PHY Rx lane dis/power down */
102#define P0_IPDIPDMSYNTH BIT(17) /* PHY synthesizer dis/power down */
103#define P0_CP_POD_EN BIT(18) /* CP_POD enable */
104#define P0_AT_BYPASS BIT(19) /* P0 address translation by pass */
105#define P1_CP_DET BIT(20) /* Reserved,Cold Detect */
106#define P1_MP_SW BIT(21) /* Mech Switch */
107#define P1_DISABLE BIT(22) /* disable p1 */
108#define P1_ACT_LED_EN BIT(23) /* Active LED enable */
109#define P1_IRST_HARD_SYNTH BIT(24) /* PHY hard synth reset */
110#define P1_IRST_HARD_TXRX BIT(25) /* PHY lane hard reset */
111#define P1_IRST_POR BIT(26) /* PHY power on reset*/
112#define P1_IPDTXL BIT(27) /* PHY Tx lane dis/power down */
113#define P1_IPDRXL BIT(28) /* PHY Rx lane dis/power down */
114#define P1_IPDIPDMSYNTH BIT(29) /* PHY synthesizer dis/power down */
115#define P1_CP_POD_EN BIT(30)
116#define P1_AT_BYPASS BIT(31) /* P1 address translation by pass */
117
118/* Status register */
119#define M_CACTIVE BIT(0) /* m_cactive, not used */
120#define S_CACTIVE BIT(1) /* s_cactive, not used */
121#define P0_PHY_READY BIT(8) /* phy is ready */
122#define P0_CP_POD BIT(9) /* Cold PowerOn */
123#define P0_SLUMBER BIT(10) /* power mode slumber */
124#define P0_PATIAL BIT(11) /* power mode partial */
125#define P0_PHY_SIG_DET BIT(12) /* phy signal detect */
126#define P0_PHY_CALI BIT(13) /* phy calibration done */
127#define P1_PHY_READY BIT(16) /* phy is ready */
128#define P1_CP_POD BIT(17) /* Cold PowerOn */
129#define P1_SLUMBER BIT(18) /* power mode slumber */
130#define P1_PATIAL BIT(19) /* power mode partial */
131#define P1_PHY_SIG_DET BIT(20) /* phy signal detect */
132#define P1_PHY_CALI BIT(21) /* phy calibration done */
133
134/* SATA CR_REG_TIMER bits */
135#define CR_TIME_SCALE (0x1000 << 0)
136
137/* SATA PHY specific registers start and end address */
138#define RXCDRCALFOSC0 0x0065
139#define CALDUTY 0x006e
140#define RXDPIF 0x8065
141#define PPMDRIFTMAX_HI 0x80A4
142
143#define nlm_read_sata_reg(b, r) nlm_read_reg(b, r)
144#define nlm_write_sata_reg(b, r, v) nlm_write_reg(b, r, v)
145#define nlm_get_sata_pcibase(node) \
146 nlm_pcicfg_base(XLP9XX_IO_SATA_OFFSET(node))
147#define nlm_get_sata_regbase(node) \
148 (nlm_get_sata_pcibase(node) + 0x100)
149
150/* SATA PHY config for register block 1 0x0065 .. 0x006e */
151static const u8 sata_phy_config1[] = {
152 0xC9, 0xC9, 0x07, 0x07, 0x18, 0x18, 0x01, 0x01, 0x22, 0x00
153};
154
155/* SATA PHY config for register block 2 0x8065 .. 0x80A4 */
156static const u8 sata_phy_config2[] = {
157 0xAA, 0x00, 0x4C, 0xC9, 0xC9, 0x07, 0x07, 0x18,
158 0x18, 0x05, 0x0C, 0x10, 0x00, 0x10, 0x00, 0xFF,
159 0xCF, 0xF7, 0xE1, 0xF5, 0xFD, 0xFD, 0xFF, 0xFF,
160 0xFF, 0xFF, 0xE3, 0xE7, 0xDB, 0xF5, 0xFD, 0xFD,
161 0xF5, 0xF5, 0xFF, 0xFF, 0xE3, 0xE7, 0xDB, 0xF5,
162 0xFD, 0xFD, 0xF5, 0xF5, 0xFF, 0xFF, 0xFF, 0xF5,
163 0x3F, 0x00, 0x32, 0x00, 0x03, 0x01, 0x05, 0x05,
164 0x04, 0x00, 0x00, 0x08, 0x04, 0x00, 0x00, 0x04,
165};
166
167const int sata_phy_debug = 0; /* set to verify PHY writes */
168
169static void sata_clear_glue_reg(u64 regbase, u32 off, u32 bit)
170{
171 u32 reg_val;
172
173 reg_val = nlm_read_sata_reg(regbase, off);
174 nlm_write_sata_reg(regbase, off, (reg_val & ~bit));
175}
176
177static void sata_set_glue_reg(u64 regbase, u32 off, u32 bit)
178{
179 u32 reg_val;
180
181 reg_val = nlm_read_sata_reg(regbase, off);
182 nlm_write_sata_reg(regbase, off, (reg_val | bit));
183}
184
185static void write_phy_reg(u64 regbase, u32 addr, u32 physel, u8 data)
186{
187 nlm_write_sata_reg(regbase, PHY_MEM_ACCESS,
188 (1u << 31) | (physel << 24) | (data << 16) | addr);
189 udelay(850);
190}
191
192static u8 read_phy_reg(u64 regbase, u32 addr, u32 physel)
193{
194 u32 val;
195
196 nlm_write_sata_reg(regbase, PHY_MEM_ACCESS,
197 (0 << 31) | (physel << 24) | (0 << 16) | addr);
198 udelay(850);
199 val = nlm_read_sata_reg(regbase, PHY_MEM_ACCESS);
200 return (val >> 16) & 0xff;
201}
202
203static void config_sata_phy(u64 regbase)
204{
205 u32 port, i, reg;
206
207 for (port = 0; port < 2; port++) {
208 for (i = 0, reg = RXCDRCALFOSC0; reg <= CALDUTY; reg++, i++)
209 write_phy_reg(regbase, reg, port, sata_phy_config1[i]);
210
211 for (i = 0, reg = RXDPIF; reg <= PPMDRIFTMAX_HI; reg++, i++)
212 write_phy_reg(regbase, reg, port, sata_phy_config2[i]);
213 }
214}
215
216static void check_phy_register(u64 regbase, u32 addr, u32 physel, u8 xdata)
217{
218 u8 data;
219
220 data = read_phy_reg(regbase, addr, physel);
221 pr_info("PHY read addr = 0x%x physel = %d data = 0x%x %s\n",
222 addr, physel, data, data == xdata ? "TRUE" : "FALSE");
223}
224
225static void verify_sata_phy_config(u64 regbase)
226{
227 u32 port, i, reg;
228
229 for (port = 0; port < 2; port++) {
230 for (i = 0, reg = RXCDRCALFOSC0; reg <= CALDUTY; reg++, i++)
231 check_phy_register(regbase, reg, port,
232 sata_phy_config1[i]);
233
234 for (i = 0, reg = RXDPIF; reg <= PPMDRIFTMAX_HI; reg++, i++)
235 check_phy_register(regbase, reg, port,
236 sata_phy_config2[i]);
237 }
238}
239
240static void nlm_sata_firmware_init(int node)
241{
242 u32 reg_val;
243 u64 regbase;
244 int n;
245
246 pr_info("Initializing XLP9XX On-chip AHCI...\n");
247 regbase = nlm_get_sata_regbase(node);
248
249 /* Reset port0 */
250 sata_clear_glue_reg(regbase, SATA_CTL, P0_IRST_POR);
251 sata_clear_glue_reg(regbase, SATA_CTL, P0_IRST_HARD_TXRX);
252 sata_clear_glue_reg(regbase, SATA_CTL, P0_IRST_HARD_SYNTH);
253 sata_clear_glue_reg(regbase, SATA_CTL, P0_IPDTXL);
254 sata_clear_glue_reg(regbase, SATA_CTL, P0_IPDRXL);
255 sata_clear_glue_reg(regbase, SATA_CTL, P0_IPDIPDMSYNTH);
256
257 /* port1 */
258 sata_clear_glue_reg(regbase, SATA_CTL, P1_IRST_POR);
259 sata_clear_glue_reg(regbase, SATA_CTL, P1_IRST_HARD_TXRX);
260 sata_clear_glue_reg(regbase, SATA_CTL, P1_IRST_HARD_SYNTH);
261 sata_clear_glue_reg(regbase, SATA_CTL, P1_IPDTXL);
262 sata_clear_glue_reg(regbase, SATA_CTL, P1_IPDRXL);
263 sata_clear_glue_reg(regbase, SATA_CTL, P1_IPDIPDMSYNTH);
264 udelay(300);
265
266 /* Set PHY */
267 sata_set_glue_reg(regbase, SATA_CTL, P0_IPDTXL);
268 sata_set_glue_reg(regbase, SATA_CTL, P0_IPDRXL);
269 sata_set_glue_reg(regbase, SATA_CTL, P0_IPDIPDMSYNTH);
270 sata_set_glue_reg(regbase, SATA_CTL, P1_IPDTXL);
271 sata_set_glue_reg(regbase, SATA_CTL, P1_IPDRXL);
272 sata_set_glue_reg(regbase, SATA_CTL, P1_IPDIPDMSYNTH);
273
274 udelay(1000);
275 sata_set_glue_reg(regbase, SATA_CTL, P0_IRST_POR);
276 udelay(1000);
277 sata_set_glue_reg(regbase, SATA_CTL, P1_IRST_POR);
278 udelay(1000);
279
280 /* setup PHY */
281 config_sata_phy(regbase);
282 if (sata_phy_debug)
283 verify_sata_phy_config(regbase);
284
285 udelay(1000);
286 sata_set_glue_reg(regbase, SATA_CTL, P0_IRST_HARD_TXRX);
287 sata_set_glue_reg(regbase, SATA_CTL, P0_IRST_HARD_SYNTH);
288 sata_set_glue_reg(regbase, SATA_CTL, P1_IRST_HARD_TXRX);
289 sata_set_glue_reg(regbase, SATA_CTL, P1_IRST_HARD_SYNTH);
290 udelay(300);
291
292 /* Override reset in serial PHY mode */
293 sata_set_glue_reg(regbase, CR_REG_TIMER, CR_TIME_SCALE);
294 /* Set reset SATA */
295 sata_set_glue_reg(regbase, SATA_CTL, SATA_RST_N);
296 sata_set_glue_reg(regbase, SATA_CTL, M_CSYSREQ);
297 sata_set_glue_reg(regbase, SATA_CTL, S_CSYSREQ);
298
299 pr_debug("Waiting for PHYs to come up.\n");
300 n = 10000;
301 do {
302 reg_val = nlm_read_sata_reg(regbase, SATA_STATUS);
303 if ((reg_val & P1_PHY_READY) && (reg_val & P0_PHY_READY))
304 break;
305 udelay(10);
306 } while (--n > 0);
307
308 if (reg_val & P0_PHY_READY)
309 pr_info("PHY0 is up.\n");
310 else
311 pr_info("PHY0 is down.\n");
312 if (reg_val & P1_PHY_READY)
313 pr_info("PHY1 is up.\n");
314 else
315 pr_info("PHY1 is down.\n");
316
317 pr_info("XLP AHCI Init Done.\n");
318}
319
320static int __init nlm_ahci_init(void)
321{
322 int node;
323
324 if (!cpu_is_xlp9xx())
325 return 0;
326 for (node = 0; node < NLM_NR_NODES; node++)
327 if (nlm_node_present(node))
328 nlm_sata_firmware_init(node);
329 return 0;
330}
331
332static void nlm_sata_intr_ack(struct irq_data *data)
333{
334 u64 regbase;
335 u32 val;
336 int node;
337
338 node = data->irq / NLM_IRQS_PER_NODE;
339 regbase = nlm_get_sata_regbase(node);
340 val = nlm_read_sata_reg(regbase, SATA_INT);
341 sata_set_glue_reg(regbase, SATA_INT, val);
342}
343
344static void nlm_sata_fixup_bar(struct pci_dev *dev)
345{
346 dev->resource[5] = dev->resource[0];
347 memset(&dev->resource[0], 0, sizeof(dev->resource[0]));
348}
349
350static void nlm_sata_fixup_final(struct pci_dev *dev)
351{
352 u32 val;
353 u64 regbase;
354 int node;
355
356 /* Find end bridge function to find node */
357 node = xlp_socdev_to_node(dev);
358 regbase = nlm_get_sata_regbase(node);
359
360 /* clear pending interrupts and then enable them */
361 val = nlm_read_sata_reg(regbase, SATA_INT);
362 sata_set_glue_reg(regbase, SATA_INT, val);
363
364 /* Enable only the core interrupt */
365 sata_set_glue_reg(regbase, SATA_INT_MASK, 0x1);
366
367 dev->irq = nlm_irq_to_xirq(node, PIC_SATA_IRQ);
368 nlm_set_pic_extra_ack(node, PIC_SATA_IRQ, nlm_sata_intr_ack);
369}
370
371arch_initcall(nlm_ahci_init);
372
373DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_XLP9XX_SATA,
374 nlm_sata_fixup_bar);
375
376DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_XLP9XX_SATA,
377 nlm_sata_fixup_final);
diff --git a/arch/mips/netlogic/xlp/ahci-init.c b/arch/mips/netlogic/xlp/ahci-init.c
new file mode 100644
index 000000000000..a9d0fae02103
--- /dev/null
+++ b/arch/mips/netlogic/xlp/ahci-init.c
@@ -0,0 +1,209 @@
1/*
2 * Copyright (c) 2003-2014 Broadcom Corporation
3 * All Rights Reserved
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the Broadcom
9 * license below:
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
29 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
31 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
32 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <linux/dma-mapping.h>
36#include <linux/kernel.h>
37#include <linux/delay.h>
38#include <linux/init.h>
39#include <linux/pci.h>
40#include <linux/irq.h>
41#include <linux/bitops.h>
42
43#include <asm/cpu.h>
44#include <asm/mipsregs.h>
45
46#include <asm/netlogic/haldefs.h>
47#include <asm/netlogic/xlp-hal/xlp.h>
48#include <asm/netlogic/common.h>
49#include <asm/netlogic/xlp-hal/iomap.h>
50#include <asm/netlogic/mips-extns.h>
51
52#define SATA_CTL 0x0
53#define SATA_STATUS 0x1 /* Status Reg */
54#define SATA_INT 0x2 /* Interrupt Reg */
55#define SATA_INT_MASK 0x3 /* Interrupt Mask Reg */
56#define SATA_CR_REG_TIMER 0x4 /* PHY Control Timer Reg */
57#define SATA_CORE_ID 0x5 /* Core ID Reg */
58#define SATA_AXI_SLAVE_OPT1 0x6 /* AXI Slave Options Reg */
59#define SATA_PHY_LOS_LEV 0x7 /* PHY LOS Level Reg */
60#define SATA_PHY_MULTI 0x8 /* PHY Multiplier Reg */
61#define SATA_PHY_CLK_SEL 0x9 /* Clock Select Reg */
62#define SATA_PHY_AMP1_GEN1 0xa /* PHY Transmit Amplitude Reg 1 */
63#define SATA_PHY_AMP1_GEN2 0xb /* PHY Transmit Amplitude Reg 2 */
64#define SATA_PHY_AMP1_GEN3 0xc /* PHY Transmit Amplitude Reg 3 */
65#define SATA_PHY_PRE1 0xd /* PHY Transmit Preemphasis Reg 1 */
66#define SATA_PHY_PRE2 0xe /* PHY Transmit Preemphasis Reg 2 */
67#define SATA_PHY_PRE3 0xf /* PHY Transmit Preemphasis Reg 3 */
68#define SATA_SPDMODE 0x10 /* Speed Mode Reg */
69#define SATA_REFCLK 0x11 /* Reference Clock Control Reg */
70#define SATA_BYTE_SWAP_DIS 0x12 /* byte swap disable */
71
72/*SATA_CTL Bits */
73#define SATA_RST_N BIT(0)
74#define PHY0_RESET_N BIT(16)
75#define PHY1_RESET_N BIT(17)
76#define PHY2_RESET_N BIT(18)
77#define PHY3_RESET_N BIT(19)
78#define M_CSYSREQ BIT(2)
79#define S_CSYSREQ BIT(3)
80
81/*SATA_STATUS Bits */
82#define P0_PHY_READY BIT(4)
83#define P1_PHY_READY BIT(5)
84#define P2_PHY_READY BIT(6)
85#define P3_PHY_READY BIT(7)
86
87#define nlm_read_sata_reg(b, r) nlm_read_reg(b, r)
88#define nlm_write_sata_reg(b, r, v) nlm_write_reg(b, r, v)
89#define nlm_get_sata_pcibase(node) \
90 nlm_pcicfg_base(XLP_IO_SATA_OFFSET(node))
91/* SATA device specific configuration registers start at offset 0x900 */
92#define nlm_get_sata_regbase(node) \
93 (nlm_get_sata_pcibase(node) + 0x900)
94
95static void sata_clear_glue_reg(uint64_t regbase, uint32_t off, uint32_t bit)
96{
97 uint32_t reg_val;
98
99 reg_val = nlm_read_sata_reg(regbase, off);
100 nlm_write_sata_reg(regbase, off, (reg_val & ~bit));
101}
102
103static void sata_set_glue_reg(uint64_t regbase, uint32_t off, uint32_t bit)
104{
105 uint32_t reg_val;
106
107 reg_val = nlm_read_sata_reg(regbase, off);
108 nlm_write_sata_reg(regbase, off, (reg_val | bit));
109}
110
111static void nlm_sata_firmware_init(int node)
112{
113 uint32_t reg_val;
114 uint64_t regbase;
115 int i;
116
117 pr_info("XLP AHCI Initialization started.\n");
118 regbase = nlm_get_sata_regbase(node);
119
120 /* Reset SATA */
121 sata_clear_glue_reg(regbase, SATA_CTL, SATA_RST_N);
122 /* Reset PHY */
123 sata_clear_glue_reg(regbase, SATA_CTL,
124 (PHY3_RESET_N | PHY2_RESET_N
125 | PHY1_RESET_N | PHY0_RESET_N));
126
127 /* Set SATA */
128 sata_set_glue_reg(regbase, SATA_CTL, SATA_RST_N);
129 /* Set PHY */
130 sata_set_glue_reg(regbase, SATA_CTL,
131 (PHY3_RESET_N | PHY2_RESET_N
132 | PHY1_RESET_N | PHY0_RESET_N));
133
134 pr_debug("Waiting for PHYs to come up.\n");
135 i = 0;
136 do {
137 reg_val = nlm_read_sata_reg(regbase, SATA_STATUS);
138 i++;
139 } while (((reg_val & 0xF0) != 0xF0) && (i < 10000));
140
141 for (i = 0; i < 4; i++) {
142 if (reg_val & (P0_PHY_READY << i))
143 pr_info("PHY%d is up.\n", i);
144 else
145 pr_info("PHY%d is down.\n", i);
146 }
147
148 pr_info("XLP AHCI init done.\n");
149}
150
151static int __init nlm_ahci_init(void)
152{
153 int node = 0;
154 int chip = read_c0_prid() & PRID_REV_MASK;
155
156 if (chip == PRID_IMP_NETLOGIC_XLP3XX)
157 nlm_sata_firmware_init(node);
158 return 0;
159}
160
161static void nlm_sata_intr_ack(struct irq_data *data)
162{
163 uint32_t val = 0;
164 uint64_t regbase;
165
166 regbase = nlm_get_sata_regbase(nlm_nodeid());
167 val = nlm_read_sata_reg(regbase, SATA_INT);
168 sata_set_glue_reg(regbase, SATA_INT, val);
169}
170
171static void nlm_sata_fixup_bar(struct pci_dev *dev)
172{
173 /*
174 * The AHCI resource is in BAR 0, move it to
175 * BAR 5, where it is expected
176 */
177 dev->resource[5] = dev->resource[0];
178 memset(&dev->resource[0], 0, sizeof(dev->resource[0]));
179}
180
181static void nlm_sata_fixup_final(struct pci_dev *dev)
182{
183 uint32_t val;
184 uint64_t regbase;
185 int node = 0; /* XLP3XX does not support multi-node */
186
187 regbase = nlm_get_sata_regbase(node);
188
189 /* clear pending interrupts and then enable them */
190 val = nlm_read_sata_reg(regbase, SATA_INT);
191 sata_set_glue_reg(regbase, SATA_INT, val);
192
193 /* Enable only the core interrupt in the mask:
194 * enabling all the interrupts causes spurious
195 * interrupt flow, so everything else is left
196 * masked.
197 */
198 sata_set_glue_reg(regbase, SATA_INT_MASK, 0x1);
199
200 dev->irq = PIC_SATA_IRQ;
201 nlm_set_pic_extra_ack(node, PIC_SATA_IRQ, nlm_sata_intr_ack);
202}
203
204arch_initcall(nlm_ahci_init);
205
206DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_SATA,
207 nlm_sata_fixup_bar);
208DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_SATA,
209 nlm_sata_fixup_final);
diff --git a/arch/mips/netlogic/xlp/dt.c b/arch/mips/netlogic/xlp/dt.c
index 5754097b9cde..bba993a5b1b0 100644
--- a/arch/mips/netlogic/xlp/dt.c
+++ b/arch/mips/netlogic/xlp/dt.c
@@ -48,9 +48,10 @@ static void *xlp_fdt_blob;
48void __init *xlp_dt_init(void *fdtp) 48void __init *xlp_dt_init(void *fdtp)
49{ 49{
50 if (!fdtp) { 50 if (!fdtp) {
51 switch (current_cpu_data.processor_id & 0xff00) { 51 switch (current_cpu_data.processor_id & PRID_IMP_MASK) {
52#ifdef CONFIG_DT_XLP_GVP 52#ifdef CONFIG_DT_XLP_GVP
53 case PRID_IMP_NETLOGIC_XLP9XX: 53 case PRID_IMP_NETLOGIC_XLP9XX:
54 case PRID_IMP_NETLOGIC_XLP5XX:
54 fdtp = __dtb_xlp_gvp_begin; 55 fdtp = __dtb_xlp_gvp_begin;
55 break; 56 break;
56#endif 57#endif
diff --git a/arch/mips/netlogic/xlp/nlm_hal.c b/arch/mips/netlogic/xlp/nlm_hal.c
index 997cd9ee10de..bc24beb3a426 100644
--- a/arch/mips/netlogic/xlp/nlm_hal.c
+++ b/arch/mips/netlogic/xlp/nlm_hal.c
@@ -54,6 +54,8 @@ void nlm_node_init(int node)
54 struct nlm_soc_info *nodep; 54 struct nlm_soc_info *nodep;
55 55
56 nodep = nlm_get_node(node); 56 nodep = nlm_get_node(node);
57 if (node == 0)
58 nodep->coremask = 1; /* node 0, boot cpu */
57 nodep->sysbase = nlm_get_sys_regbase(node); 59 nodep->sysbase = nlm_get_sys_regbase(node);
58 nodep->picbase = nlm_get_pic_regbase(node); 60 nodep->picbase = nlm_get_pic_regbase(node);
59 nodep->ebase = read_c0_ebase() & (~((1 << 12) - 1)); 61 nodep->ebase = read_c0_ebase() & (~((1 << 12) - 1));
@@ -64,31 +66,39 @@ void nlm_node_init(int node)
64 spin_lock_init(&nodep->piclock); 66 spin_lock_init(&nodep->piclock);
65} 67}
66 68
67int nlm_irq_to_irt(int irq) 69static int xlp9xx_irq_to_irt(int irq)
70{
71 switch (irq) {
72 case PIC_GPIO_IRQ:
73 return 12;
74 case PIC_9XX_XHCI_0_IRQ:
75 return 114;
76 case PIC_9XX_XHCI_1_IRQ:
77 return 115;
78 case PIC_UART_0_IRQ:
79 return 133;
80 case PIC_UART_1_IRQ:
81 return 134;
82 case PIC_SATA_IRQ:
83 return 143;
84 case PIC_SPI_IRQ:
85 return 152;
86 case PIC_MMC_IRQ:
87 return 153;
88 case PIC_PCIE_LINK_LEGACY_IRQ(0):
89 case PIC_PCIE_LINK_LEGACY_IRQ(1):
90 case PIC_PCIE_LINK_LEGACY_IRQ(2):
91 case PIC_PCIE_LINK_LEGACY_IRQ(3):
92 return 191 + irq - PIC_PCIE_LINK_LEGACY_IRQ_BASE;
93 }
94 return -1;
95}
96
97static int xlp_irq_to_irt(int irq)
68{ 98{
69 uint64_t pcibase; 99 uint64_t pcibase;
70 int devoff, irt; 100 int devoff, irt;
71 101
72 /* bypass for 9xx */
73 if (cpu_is_xlp9xx()) {
74 switch (irq) {
75 case PIC_9XX_XHCI_0_IRQ:
76 return 114;
77 case PIC_9XX_XHCI_1_IRQ:
78 return 115;
79 case PIC_UART_0_IRQ:
80 return 133;
81 case PIC_UART_1_IRQ:
82 return 134;
83 case PIC_PCIE_LINK_LEGACY_IRQ(0):
84 case PIC_PCIE_LINK_LEGACY_IRQ(1):
85 case PIC_PCIE_LINK_LEGACY_IRQ(2):
86 case PIC_PCIE_LINK_LEGACY_IRQ(3):
87 return 191 + irq - PIC_PCIE_LINK_LEGACY_IRQ_BASE;
88 }
89 return -1;
90 }
91
92 devoff = 0; 102 devoff = 0;
93 switch (irq) { 103 switch (irq) {
94 case PIC_UART_0_IRQ: 104 case PIC_UART_0_IRQ:
@@ -98,7 +108,7 @@ int nlm_irq_to_irt(int irq)
98 devoff = XLP_IO_UART1_OFFSET(0); 108 devoff = XLP_IO_UART1_OFFSET(0);
99 break; 109 break;
100 case PIC_MMC_IRQ: 110 case PIC_MMC_IRQ:
101 devoff = XLP_IO_SD_OFFSET(0); 111 devoff = XLP_IO_MMC_OFFSET(0);
102 break; 112 break;
103 case PIC_I2C_0_IRQ: /* I2C will be fixed up */ 113 case PIC_I2C_0_IRQ: /* I2C will be fixed up */
104 case PIC_I2C_1_IRQ: 114 case PIC_I2C_1_IRQ:
@@ -109,6 +119,18 @@ int nlm_irq_to_irt(int irq)
109 else 119 else
110 devoff = XLP_IO_I2C0_OFFSET(0); 120 devoff = XLP_IO_I2C0_OFFSET(0);
111 break; 121 break;
122 case PIC_SATA_IRQ:
123 devoff = XLP_IO_SATA_OFFSET(0);
124 break;
125 case PIC_GPIO_IRQ:
126 devoff = XLP_IO_GPIO_OFFSET(0);
127 break;
128 case PIC_NAND_IRQ:
129 devoff = XLP_IO_NAND_OFFSET(0);
130 break;
131 case PIC_SPI_IRQ:
132 devoff = XLP_IO_SPI_OFFSET(0);
133 break;
112 default: 134 default:
113 if (cpu_is_xlpii()) { 135 if (cpu_is_xlpii()) {
114 switch (irq) { 136 switch (irq) {
@@ -164,61 +186,123 @@ int nlm_irq_to_irt(int irq)
164 /* HW bug, PCI IRT entries are bad on early silicon, fix */ 186 /* HW bug, PCI IRT entries are bad on early silicon, fix */
165 irt = PIC_IRT_PCIE_LINK_INDEX(irq - 187 irt = PIC_IRT_PCIE_LINK_INDEX(irq -
166 PIC_PCIE_LINK_LEGACY_IRQ_BASE); 188 PIC_PCIE_LINK_LEGACY_IRQ_BASE);
167 } else if (irq >= PIC_PCIE_LINK_MSI_IRQ(0) &&
168 irq <= PIC_PCIE_LINK_MSI_IRQ(3)) {
169 irt = -2;
170 } else if (irq >= PIC_PCIE_MSIX_IRQ(0) &&
171 irq <= PIC_PCIE_MSIX_IRQ(3)) {
172 irt = -2;
173 } else { 189 } else {
174 irt = -1; 190 irt = -1;
175 } 191 }
176 return irt; 192 return irt;
177} 193}
178 194
179unsigned int nlm_get_core_frequency(int node, int core) 195int nlm_irq_to_irt(int irq)
180{ 196{
181 unsigned int pll_divf, pll_divr, dfs_div, ext_div; 197 /* return -2 for irqs without 1-1 mapping */
182 unsigned int rstval, dfsval, denom; 198 if (irq >= PIC_PCIE_LINK_MSI_IRQ(0) && irq <= PIC_PCIE_LINK_MSI_IRQ(3))
183 uint64_t num, sysbase; 199 return -2;
200 if (irq >= PIC_PCIE_MSIX_IRQ(0) && irq <= PIC_PCIE_MSIX_IRQ(3))
201 return -2;
184 202
185 sysbase = nlm_get_node(node)->sysbase;
186 if (cpu_is_xlp9xx()) 203 if (cpu_is_xlp9xx())
187 rstval = nlm_read_sys_reg(sysbase, SYS_9XX_POWER_ON_RESET_CFG); 204 return xlp9xx_irq_to_irt(irq);
188 else 205 else
189 rstval = nlm_read_sys_reg(sysbase, SYS_POWER_ON_RESET_CFG); 206 return xlp_irq_to_irt(irq);
190 if (cpu_is_xlpii()) { 207}
191 num = 1000000ULL * (400 * 3 + 100 * (rstval >> 26)); 208
192 denom = 3; 209static unsigned int nlm_xlp2_get_core_frequency(int node, int core)
210{
211 unsigned int pll_post_div, ctrl_val0, ctrl_val1, denom;
212 uint64_t num, sysbase, clockbase;
213
214 if (cpu_is_xlp9xx()) {
215 clockbase = nlm_get_clock_regbase(node);
216 ctrl_val0 = nlm_read_sys_reg(clockbase,
217 SYS_9XX_CPU_PLL_CTRL0(core));
218 ctrl_val1 = nlm_read_sys_reg(clockbase,
219 SYS_9XX_CPU_PLL_CTRL1(core));
193 } else { 220 } else {
194 dfsval = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIV_VALUE); 221 sysbase = nlm_get_node(node)->sysbase;
195 pll_divf = ((rstval >> 10) & 0x7f) + 1; 222 ctrl_val0 = nlm_read_sys_reg(sysbase,
196 pll_divr = ((rstval >> 8) & 0x3) + 1; 223 SYS_CPU_PLL_CTRL0(core));
197 ext_div = ((rstval >> 30) & 0x3) + 1; 224 ctrl_val1 = nlm_read_sys_reg(sysbase,
198 dfs_div = ((dfsval >> (core * 4)) & 0xf) + 1; 225 SYS_CPU_PLL_CTRL1(core));
199 226 }
200 num = 800000000ULL * pll_divf; 227
201 denom = 3 * pll_divr * ext_div * dfs_div; 228 /* Find PLL post divider value */
229 switch ((ctrl_val0 >> 24) & 0x7) {
230 case 1:
231 pll_post_div = 2;
232 break;
233 case 3:
234 pll_post_div = 4;
235 break;
236 case 7:
237 pll_post_div = 8;
238 break;
239 case 6:
240 pll_post_div = 16;
241 break;
242 case 0:
243 default:
244 pll_post_div = 1;
245 break;
202 } 246 }
247
248 num = 1000000ULL * (400 * 3 + 100 * (ctrl_val1 & 0x3f));
249 denom = 3 * pll_post_div;
203 do_div(num, denom); 250 do_div(num, denom);
251
204 return (unsigned int)num; 252 return (unsigned int)num;
205} 253}
206 254
207/* Calculate Frequency to the PIC from PLL. 255static unsigned int nlm_xlp_get_core_frequency(int node, int core)
208 * freq_out = ( ref_freq/2 * (6 + ctrl2[7:0]) + ctrl2[20:8]/2^13 ) / 256{
209 * ((2^ctrl0[7:5]) * Table(ctrl0[26:24])) 257 unsigned int pll_divf, pll_divr, dfs_div, ext_div;
258 unsigned int rstval, dfsval, denom;
259 uint64_t num, sysbase;
260
261 sysbase = nlm_get_node(node)->sysbase;
262 rstval = nlm_read_sys_reg(sysbase, SYS_POWER_ON_RESET_CFG);
263 dfsval = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIV_VALUE);
264 pll_divf = ((rstval >> 10) & 0x7f) + 1;
265 pll_divr = ((rstval >> 8) & 0x3) + 1;
266 ext_div = ((rstval >> 30) & 0x3) + 1;
267 dfs_div = ((dfsval >> (core * 4)) & 0xf) + 1;
268
269 num = 800000000ULL * pll_divf;
270 denom = 3 * pll_divr * ext_div * dfs_div;
271 do_div(num, denom);
272
273 return (unsigned int)num;
274}
275
276unsigned int nlm_get_core_frequency(int node, int core)
277{
278 if (cpu_is_xlpii())
279 return nlm_xlp2_get_core_frequency(node, core);
280 else
281 return nlm_xlp_get_core_frequency(node, core);
282}
283
284/*
285 * Calculate PIC frequency from PLL registers.
286 * freq_out = (ref_freq/2 * (6 + ctrl2[7:0]) + ctrl2[20:8]/2^13) /
287 * ((2^ctrl0[7:5]) * Table(ctrl0[26:24]))
210 */ 288 */
211static unsigned int nlm_2xx_get_pic_frequency(int node) 289static unsigned int nlm_xlp2_get_pic_frequency(int node)
212{ 290{
213 u32 ctrl_val0, ctrl_val2, vco_post_div, pll_post_div; 291 u32 ctrl_val0, ctrl_val2, vco_post_div, pll_post_div, cpu_xlp9xx;
214 u32 mdiv, fdiv, pll_out_freq_den, reg_select, ref_div, pic_div; 292 u32 mdiv, fdiv, pll_out_freq_den, reg_select, ref_div, pic_div;
215 u64 ref_clk, sysbase, pll_out_freq_num, ref_clk_select; 293 u64 sysbase, pll_out_freq_num, ref_clk_select, clockbase, ref_clk;
216 294
217 sysbase = nlm_get_node(node)->sysbase; 295 sysbase = nlm_get_node(node)->sysbase;
296 clockbase = nlm_get_clock_regbase(node);
297 cpu_xlp9xx = cpu_is_xlp9xx();
218 298
219 /* Find ref_clk_base */ 299 /* Find ref_clk_base */
220 ref_clk_select = 300 if (cpu_xlp9xx)
221 (nlm_read_sys_reg(sysbase, SYS_POWER_ON_RESET_CFG) >> 18) & 0x3; 301 ref_clk_select = (nlm_read_sys_reg(sysbase,
302 SYS_9XX_POWER_ON_RESET_CFG) >> 18) & 0x3;
303 else
304 ref_clk_select = (nlm_read_sys_reg(sysbase,
305 SYS_POWER_ON_RESET_CFG) >> 18) & 0x3;
222 switch (ref_clk_select) { 306 switch (ref_clk_select) {
223 case 0: 307 case 0:
224 ref_clk = 200000000ULL; 308 ref_clk = 200000000ULL;
@@ -239,30 +323,70 @@ static unsigned int nlm_2xx_get_pic_frequency(int node)
239 } 323 }
240 324
241 /* Find the clock source PLL device for PIC */ 325 /* Find the clock source PLL device for PIC */
242 reg_select = (nlm_read_sys_reg(sysbase, SYS_CLK_DEV_SEL) >> 22) & 0x3; 326 if (cpu_xlp9xx) {
243 switch (reg_select) { 327 reg_select = nlm_read_sys_reg(clockbase,
244 case 0: 328 SYS_9XX_CLK_DEV_SEL) & 0x3;
245 ctrl_val0 = nlm_read_sys_reg(sysbase, SYS_PLL_CTRL0); 329 switch (reg_select) {
246 ctrl_val2 = nlm_read_sys_reg(sysbase, SYS_PLL_CTRL2); 330 case 0:
247 break; 331 ctrl_val0 = nlm_read_sys_reg(clockbase,
248 case 1: 332 SYS_9XX_PLL_CTRL0);
249 ctrl_val0 = nlm_read_sys_reg(sysbase, SYS_PLL_CTRL0_DEVX(0)); 333 ctrl_val2 = nlm_read_sys_reg(clockbase,
250 ctrl_val2 = nlm_read_sys_reg(sysbase, SYS_PLL_CTRL2_DEVX(0)); 334 SYS_9XX_PLL_CTRL2);
251 break; 335 break;
252 case 2: 336 case 1:
253 ctrl_val0 = nlm_read_sys_reg(sysbase, SYS_PLL_CTRL0_DEVX(1)); 337 ctrl_val0 = nlm_read_sys_reg(clockbase,
254 ctrl_val2 = nlm_read_sys_reg(sysbase, SYS_PLL_CTRL2_DEVX(1)); 338 SYS_9XX_PLL_CTRL0_DEVX(0));
255 break; 339 ctrl_val2 = nlm_read_sys_reg(clockbase,
256 case 3: 340 SYS_9XX_PLL_CTRL2_DEVX(0));
257 ctrl_val0 = nlm_read_sys_reg(sysbase, SYS_PLL_CTRL0_DEVX(2)); 341 break;
258 ctrl_val2 = nlm_read_sys_reg(sysbase, SYS_PLL_CTRL2_DEVX(2)); 342 case 2:
259 break; 343 ctrl_val0 = nlm_read_sys_reg(clockbase,
344 SYS_9XX_PLL_CTRL0_DEVX(1));
345 ctrl_val2 = nlm_read_sys_reg(clockbase,
346 SYS_9XX_PLL_CTRL2_DEVX(1));
347 break;
348 case 3:
349 ctrl_val0 = nlm_read_sys_reg(clockbase,
350 SYS_9XX_PLL_CTRL0_DEVX(2));
351 ctrl_val2 = nlm_read_sys_reg(clockbase,
352 SYS_9XX_PLL_CTRL2_DEVX(2));
353 break;
354 }
355 } else {
356 reg_select = (nlm_read_sys_reg(sysbase,
357 SYS_CLK_DEV_SEL) >> 22) & 0x3;
358 switch (reg_select) {
359 case 0:
360 ctrl_val0 = nlm_read_sys_reg(sysbase,
361 SYS_PLL_CTRL0);
362 ctrl_val2 = nlm_read_sys_reg(sysbase,
363 SYS_PLL_CTRL2);
364 break;
365 case 1:
366 ctrl_val0 = nlm_read_sys_reg(sysbase,
367 SYS_PLL_CTRL0_DEVX(0));
368 ctrl_val2 = nlm_read_sys_reg(sysbase,
369 SYS_PLL_CTRL2_DEVX(0));
370 break;
371 case 2:
372 ctrl_val0 = nlm_read_sys_reg(sysbase,
373 SYS_PLL_CTRL0_DEVX(1));
374 ctrl_val2 = nlm_read_sys_reg(sysbase,
375 SYS_PLL_CTRL2_DEVX(1));
376 break;
377 case 3:
378 ctrl_val0 = nlm_read_sys_reg(sysbase,
379 SYS_PLL_CTRL0_DEVX(2));
380 ctrl_val2 = nlm_read_sys_reg(sysbase,
381 SYS_PLL_CTRL2_DEVX(2));
382 break;
383 }
260 } 384 }
261 385
262 vco_post_div = (ctrl_val0 >> 5) & 0x7; 386 vco_post_div = (ctrl_val0 >> 5) & 0x7;
263 pll_post_div = (ctrl_val0 >> 24) & 0x7; 387 pll_post_div = (ctrl_val0 >> 24) & 0x7;
264 mdiv = ctrl_val2 & 0xff; 388 mdiv = ctrl_val2 & 0xff;
265 fdiv = (ctrl_val2 >> 8) & 0xfff; 389 fdiv = (ctrl_val2 >> 8) & 0x1fff;
266 390
267 /* Find PLL post divider value */ 391 /* Find PLL post divider value */
268 switch (pll_post_div) { 392 switch (pll_post_div) {
@@ -292,7 +416,12 @@ static unsigned int nlm_2xx_get_pic_frequency(int node)
292 do_div(pll_out_freq_num, pll_out_freq_den); 416 do_div(pll_out_freq_num, pll_out_freq_den);
293 417
294 /* PIC post divider, which happens after PLL */ 418 /* PIC post divider, which happens after PLL */
295 pic_div = (nlm_read_sys_reg(sysbase, SYS_CLK_DEV_DIV) >> 22) & 0x3; 419 if (cpu_xlp9xx)
420 pic_div = nlm_read_sys_reg(clockbase,
421 SYS_9XX_CLK_DEV_DIV) & 0x3;
422 else
423 pic_div = (nlm_read_sys_reg(sysbase,
424 SYS_CLK_DEV_DIV) >> 22) & 0x3;
296 do_div(pll_out_freq_num, 1 << pic_div); 425 do_div(pll_out_freq_num, 1 << pic_div);
297 426
298 return pll_out_freq_num; 427 return pll_out_freq_num;
@@ -300,12 +429,8 @@ static unsigned int nlm_2xx_get_pic_frequency(int node)
300 429
301unsigned int nlm_get_pic_frequency(int node) 430unsigned int nlm_get_pic_frequency(int node)
302{ 431{
303 /* TODO Has to calculate freq as like 2xx */
304 if (cpu_is_xlp9xx())
305 return 250000000;
306
307 if (cpu_is_xlpii()) 432 if (cpu_is_xlpii())
308 return nlm_2xx_get_pic_frequency(node); 433 return nlm_xlp2_get_pic_frequency(node);
309 else 434 else
310 return 133333333; 435 return 133333333;
311} 436}
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c
index 8c60a2dd9ef6..4fdd9fd29d1d 100644
--- a/arch/mips/netlogic/xlp/setup.c
+++ b/arch/mips/netlogic/xlp/setup.c
@@ -121,8 +121,9 @@ void __init plat_mem_setup(void)
121 121
122const char *get_system_type(void) 122const char *get_system_type(void)
123{ 123{
124 switch (read_c0_prid() & 0xff00) { 124 switch (read_c0_prid() & PRID_IMP_MASK) {
125 case PRID_IMP_NETLOGIC_XLP9XX: 125 case PRID_IMP_NETLOGIC_XLP9XX:
126 case PRID_IMP_NETLOGIC_XLP5XX:
126 case PRID_IMP_NETLOGIC_XLP2XX: 127 case PRID_IMP_NETLOGIC_XLP2XX:
127 return "Broadcom XLPII Series"; 128 return "Broadcom XLPII Series";
128 default: 129 default:
diff --git a/arch/mips/netlogic/xlp/wakeup.c b/arch/mips/netlogic/xlp/wakeup.c
index 9a92617a2af5..e5f44d2605a8 100644
--- a/arch/mips/netlogic/xlp/wakeup.c
+++ b/arch/mips/netlogic/xlp/wakeup.c
@@ -135,11 +135,19 @@ static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
135 if (cpu_is_xlp9xx()) { 135 if (cpu_is_xlp9xx()) {
136 fusebase = nlm_get_fuse_regbase(n); 136 fusebase = nlm_get_fuse_regbase(n);
137 fusemask = nlm_read_reg(fusebase, FUSE_9XX_DEVCFG6); 137 fusemask = nlm_read_reg(fusebase, FUSE_9XX_DEVCFG6);
138 mask = 0xfffff; 138 switch (read_c0_prid() & PRID_IMP_MASK) {
139 case PRID_IMP_NETLOGIC_XLP5XX:
140 mask = 0xff;
141 break;
142 case PRID_IMP_NETLOGIC_XLP9XX:
143 default:
144 mask = 0xfffff;
145 break;
146 }
139 } else { 147 } else {
140 fusemask = nlm_read_sys_reg(nodep->sysbase, 148 fusemask = nlm_read_sys_reg(nodep->sysbase,
141 SYS_EFUSE_DEVICE_CFG_STATUS0); 149 SYS_EFUSE_DEVICE_CFG_STATUS0);
142 switch (read_c0_prid() & 0xff00) { 150 switch (read_c0_prid() & PRID_IMP_MASK) {
143 case PRID_IMP_NETLOGIC_XLP3XX: 151 case PRID_IMP_NETLOGIC_XLP3XX:
144 mask = 0xf; 152 mask = 0xf;
145 break; 153 break;
@@ -159,10 +167,6 @@ static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
159 */ 167 */
160 syscoremask = (1 << hweight32(~fusemask & mask)) - 1; 168 syscoremask = (1 << hweight32(~fusemask & mask)) - 1;
161 169
162 /* The boot cpu */
163 if (n == 0)
164 nodep->coremask = 1;
165
166 pr_info("Node %d - SYS/FUSE coremask %x\n", n, syscoremask); 170 pr_info("Node %d - SYS/FUSE coremask %x\n", n, syscoremask);
167 for (core = 0; core < nlm_cores_per_node(); core++) { 171 for (core = 0; core < nlm_cores_per_node(); core++) {
168 /* we will be on node 0 core 0 */ 172 /* we will be on node 0 core 0 */
diff --git a/arch/mips/paravirt/Kconfig b/arch/mips/paravirt/Kconfig
new file mode 100644
index 000000000000..ecae5861b601
--- /dev/null
+++ b/arch/mips/paravirt/Kconfig
@@ -0,0 +1,6 @@
1if MIPS_PARAVIRT
2
3config MIPS_PCI_VIRTIO
4 def_bool y
5
6endif # MIPS_PARAVIRT
diff --git a/arch/mips/paravirt/Makefile b/arch/mips/paravirt/Makefile
new file mode 100644
index 000000000000..5023af733a35
--- /dev/null
+++ b/arch/mips/paravirt/Makefile
@@ -0,0 +1,14 @@
1#
2# Makefile for MIPS para-virtualized specific kernel interface routines
3# under Linux.
4#
5# This file is subject to the terms and conditions of the GNU General Public
6# License. See the file "COPYING" in the main directory of this archive
7# for more details.
8#
9# Copyright (C) 2013 Cavium, Inc.
10#
11
12obj-y := setup.o serial.o paravirt-irq.o
13
14obj-$(CONFIG_SMP) += paravirt-smp.o
diff --git a/arch/mips/paravirt/Platform b/arch/mips/paravirt/Platform
new file mode 100644
index 000000000000..7e76ef25ea17
--- /dev/null
+++ b/arch/mips/paravirt/Platform
@@ -0,0 +1,8 @@
1#
2# Generic para-virtualized guest.
3#
4platform-$(CONFIG_MIPS_PARAVIRT) += paravirt/
5cflags-$(CONFIG_MIPS_PARAVIRT) += \
6 -I$(srctree)/arch/mips/include/asm/mach-paravirt
7
8load-$(CONFIG_MIPS_PARAVIRT) = 0xffffffff80010000
diff --git a/arch/mips/paravirt/paravirt-irq.c b/arch/mips/paravirt/paravirt-irq.c
new file mode 100644
index 000000000000..8987b06c9de9
--- /dev/null
+++ b/arch/mips/paravirt/paravirt-irq.c
@@ -0,0 +1,368 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2013 Cavium, Inc.
7 */
8
9#include <linux/interrupt.h>
10#include <linux/cpumask.h>
11#include <linux/kernel.h>
12#include <linux/mutex.h>
13
14#include <asm/io.h>
15
16#define MBOX_BITS_PER_CPU 2
17
18static int cpunum_for_cpu(int cpu)
19{
20#ifdef CONFIG_SMP
21 return cpu_logical_map(cpu);
22#else
23 return get_ebase_cpunum();
24#endif
25}
26
27struct core_chip_data {
28 struct mutex core_irq_mutex;
29 bool current_en;
30 bool desired_en;
31 u8 bit;
32};
33
34static struct core_chip_data irq_core_chip_data[8];
35
36static void irq_core_ack(struct irq_data *data)
37{
38 struct core_chip_data *cd = irq_data_get_irq_chip_data(data);
39 unsigned int bit = cd->bit;
40
41 /*
42 * We don't need to disable IRQs to make these atomic since
43 * they are already disabled earlier in the low level
44 * interrupt code.
45 */
46 clear_c0_status(0x100 << bit);
47 /* The two user interrupts must be cleared manually. */
48 if (bit < 2)
49 clear_c0_cause(0x100 << bit);
50}
51
52static void irq_core_eoi(struct irq_data *data)
53{
54 struct core_chip_data *cd = irq_data_get_irq_chip_data(data);
55
56 /*
57 * We don't need to disable IRQs to make these atomic since
58 * they are already disabled earlier in the low level
59 * interrupt code.
60 */
61 set_c0_status(0x100 << cd->bit);
62}
63
64static void irq_core_set_enable_local(void *arg)
65{
66 struct irq_data *data = arg;
67 struct core_chip_data *cd = irq_data_get_irq_chip_data(data);
68 unsigned int mask = 0x100 << cd->bit;
69
70 /*
71 * Interrupts are already disabled, so these are atomic.
72 */
73 if (cd->desired_en)
74 set_c0_status(mask);
75 else
76 clear_c0_status(mask);
77
78}
79
80static void irq_core_disable(struct irq_data *data)
81{
82 struct core_chip_data *cd = irq_data_get_irq_chip_data(data);
83 cd->desired_en = false;
84}
85
86static void irq_core_enable(struct irq_data *data)
87{
88 struct core_chip_data *cd = irq_data_get_irq_chip_data(data);
89 cd->desired_en = true;
90}
91
92static void irq_core_bus_lock(struct irq_data *data)
93{
94 struct core_chip_data *cd = irq_data_get_irq_chip_data(data);
95
96 mutex_lock(&cd->core_irq_mutex);
97}
98
99static void irq_core_bus_sync_unlock(struct irq_data *data)
100{
101 struct core_chip_data *cd = irq_data_get_irq_chip_data(data);
102
103 if (cd->desired_en != cd->current_en) {
104 on_each_cpu(irq_core_set_enable_local, data, 1);
105 cd->current_en = cd->desired_en;
106 }
107
108 mutex_unlock(&cd->core_irq_mutex);
109}
110
111static struct irq_chip irq_chip_core = {
112 .name = "Core",
113 .irq_enable = irq_core_enable,
114 .irq_disable = irq_core_disable,
115 .irq_ack = irq_core_ack,
116 .irq_eoi = irq_core_eoi,
117 .irq_bus_lock = irq_core_bus_lock,
118 .irq_bus_sync_unlock = irq_core_bus_sync_unlock,
119
120 .irq_cpu_online = irq_core_eoi,
121 .irq_cpu_offline = irq_core_ack,
122 .flags = IRQCHIP_ONOFFLINE_ENABLED,
123};
124
125static void __init irq_init_core(void)
126{
127 int i;
128 int irq;
129 struct core_chip_data *cd;
130
131 /* Start with a clean slate */
132 clear_c0_status(ST0_IM);
133 clear_c0_cause(CAUSEF_IP0 | CAUSEF_IP1);
134
135 for (i = 0; i < ARRAY_SIZE(irq_core_chip_data); i++) {
136 cd = irq_core_chip_data + i;
137 cd->current_en = false;
138 cd->desired_en = false;
139 cd->bit = i;
140 mutex_init(&cd->core_irq_mutex);
141
142 irq = MIPS_CPU_IRQ_BASE + i;
143
144 switch (i) {
145 case 0: /* SW0 */
146 case 1: /* SW1 */
147 case 5: /* IP5 */
148 case 6: /* IP6 */
149 case 7: /* IP7 */
150 irq_set_chip_data(irq, cd);
151 irq_set_chip_and_handler(irq, &irq_chip_core,
152 handle_percpu_irq);
153 break;
154 default:
155 break;
156 }
157 }
158}
159
160static void __iomem *mips_irq_chip;
161#define MIPS_IRQ_CHIP_NUM_BITS 0
162#define MIPS_IRQ_CHIP_REGS 8
163
164static int mips_irq_cpu_stride;
165static int mips_irq_chip_reg_raw;
166static int mips_irq_chip_reg_src;
167static int mips_irq_chip_reg_en;
168static int mips_irq_chip_reg_raw_w1s;
169static int mips_irq_chip_reg_raw_w1c;
170static int mips_irq_chip_reg_en_w1s;
171static int mips_irq_chip_reg_en_w1c;
172
173static void irq_pci_enable(struct irq_data *data)
174{
175 u32 mask = 1u << data->irq;
176
177 __raw_writel(mask, mips_irq_chip + mips_irq_chip_reg_en_w1s);
178}
179
180static void irq_pci_disable(struct irq_data *data)
181{
182 u32 mask = 1u << data->irq;
183
184 __raw_writel(mask, mips_irq_chip + mips_irq_chip_reg_en_w1c);
185}
186
187static void irq_pci_ack(struct irq_data *data)
188{
189}
190
191static void irq_pci_mask(struct irq_data *data)
192{
193 u32 mask = 1u << data->irq;
194
195 __raw_writel(mask, mips_irq_chip + mips_irq_chip_reg_en_w1c);
196}
197
198static void irq_pci_unmask(struct irq_data *data)
199{
200 u32 mask = 1u << data->irq;
201
202 __raw_writel(mask, mips_irq_chip + mips_irq_chip_reg_en_w1s);
203}
204
205static struct irq_chip irq_chip_pci = {
206 .name = "PCI",
207 .irq_enable = irq_pci_enable,
208 .irq_disable = irq_pci_disable,
209 .irq_ack = irq_pci_ack,
210 .irq_mask = irq_pci_mask,
211 .irq_unmask = irq_pci_unmask,
212};
213
214static void irq_mbox_all(struct irq_data *data, void __iomem *base)
215{
216 int cpu;
217 unsigned int mbox = data->irq - MIPS_IRQ_MBOX0;
218 u32 mask;
219
220 WARN_ON(mbox >= MBOX_BITS_PER_CPU);
221
222 for_each_online_cpu(cpu) {
223 unsigned int cpuid = cpunum_for_cpu(cpu);
224 mask = 1 << (cpuid * MBOX_BITS_PER_CPU + mbox);
225 __raw_writel(mask, base + (cpuid * mips_irq_cpu_stride));
226 }
227}
228
229static void irq_mbox_enable(struct irq_data *data)
230{
231 irq_mbox_all(data, mips_irq_chip + mips_irq_chip_reg_en_w1s + sizeof(u32));
232}
233
234static void irq_mbox_disable(struct irq_data *data)
235{
236 irq_mbox_all(data, mips_irq_chip + mips_irq_chip_reg_en_w1c + sizeof(u32));
237}
238
239static void irq_mbox_ack(struct irq_data *data)
240{
241 u32 mask;
242 unsigned int mbox = data->irq - MIPS_IRQ_MBOX0;
243
244 WARN_ON(mbox >= MBOX_BITS_PER_CPU);
245
246 mask = 1 << (get_ebase_cpunum() * MBOX_BITS_PER_CPU + mbox);
247 __raw_writel(mask, mips_irq_chip + mips_irq_chip_reg_raw_w1c + sizeof(u32));
248}
249
250void irq_mbox_ipi(int cpu, unsigned int actions)
251{
252 unsigned int cpuid = cpunum_for_cpu(cpu);
253 u32 mask;
254
255 WARN_ON(actions >= (1 << MBOX_BITS_PER_CPU));
256
257 mask = actions << (cpuid * MBOX_BITS_PER_CPU);
258 __raw_writel(mask, mips_irq_chip + mips_irq_chip_reg_raw_w1s + sizeof(u32));
259}
260
261static void irq_mbox_cpu_onoffline(struct irq_data *data, void __iomem *base)
262{
263 unsigned int mbox = data->irq - MIPS_IRQ_MBOX0;
264 unsigned int cpuid = get_ebase_cpunum();
265 u32 mask;
266
267 WARN_ON(mbox >= MBOX_BITS_PER_CPU);
268
269 mask = 1 << (cpuid * MBOX_BITS_PER_CPU + mbox);
270 __raw_writel(mask, base + (cpuid * mips_irq_cpu_stride));
271
272}
273
274static void irq_mbox_cpu_online(struct irq_data *data)
275{
276 irq_mbox_cpu_onoffline(data, mips_irq_chip + mips_irq_chip_reg_en_w1s + sizeof(u32));
277}
278
279static void irq_mbox_cpu_offline(struct irq_data *data)
280{
281 irq_mbox_cpu_onoffline(data, mips_irq_chip + mips_irq_chip_reg_en_w1c + sizeof(u32));
282}
283
284static struct irq_chip irq_chip_mbox = {
285 .name = "MBOX",
286 .irq_enable = irq_mbox_enable,
287 .irq_disable = irq_mbox_disable,
288 .irq_ack = irq_mbox_ack,
289 .irq_cpu_online = irq_mbox_cpu_online,
290 .irq_cpu_offline = irq_mbox_cpu_offline,
291 .flags = IRQCHIP_ONOFFLINE_ENABLED,
292};
293
294static void __init irq_pci_init(void)
295{
296 int i, stride;
297 u32 num_bits;
298
299 mips_irq_chip = ioremap(0x1e010000, 4096);
300
301 num_bits = __raw_readl(mips_irq_chip + MIPS_IRQ_CHIP_NUM_BITS);
302 stride = 8 * (1 + ((num_bits - 1) / 64));
303
304
305 pr_notice("mips_irq_chip: %u bits, reg stride: %d\n", num_bits, stride);
306 mips_irq_chip_reg_raw = MIPS_IRQ_CHIP_REGS + 0 * stride;
307 mips_irq_chip_reg_raw_w1s = MIPS_IRQ_CHIP_REGS + 1 * stride;
308 mips_irq_chip_reg_raw_w1c = MIPS_IRQ_CHIP_REGS + 2 * stride;
309 mips_irq_chip_reg_src = MIPS_IRQ_CHIP_REGS + 3 * stride;
310 mips_irq_chip_reg_en = MIPS_IRQ_CHIP_REGS + 4 * stride;
311 mips_irq_chip_reg_en_w1s = MIPS_IRQ_CHIP_REGS + 5 * stride;
312 mips_irq_chip_reg_en_w1c = MIPS_IRQ_CHIP_REGS + 6 * stride;
313 mips_irq_cpu_stride = stride * 4;
314
315 for (i = 0; i < 4; i++)
316 irq_set_chip_and_handler(i + MIPS_IRQ_PCIA, &irq_chip_pci, handle_level_irq);
317
318 for (i = 0; i < 2; i++)
319 irq_set_chip_and_handler(i + MIPS_IRQ_MBOX0, &irq_chip_mbox, handle_percpu_irq);
320
321
322 set_c0_status(STATUSF_IP2);
323}
324
325static void irq_pci_dispatch(void)
326{
327 unsigned int cpuid = get_ebase_cpunum();
328 u32 en;
329
330 en = __raw_readl(mips_irq_chip + mips_irq_chip_reg_src +
331 (cpuid * mips_irq_cpu_stride));
332
333 if (!en) {
334 en = __raw_readl(mips_irq_chip + mips_irq_chip_reg_src + (cpuid * mips_irq_cpu_stride) + sizeof(u32));
335 en = (en >> (2 * cpuid)) & 3;
336
337 if (!en)
338 spurious_interrupt();
339 else
340 do_IRQ(__ffs(en) + MIPS_IRQ_MBOX0); /* MBOX type */
341 } else {
342 do_IRQ(__ffs(en));
343 }
344}
345
346
347void __init arch_init_irq(void)
348{
349 irq_init_core();
350 irq_pci_init();
351}
352
353asmlinkage void plat_irq_dispatch(void)
354{
355 unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
356 int ip;
357
358 if (unlikely(!pending)) {
359 spurious_interrupt();
360 return;
361 }
362
363 ip = ffs(pending) - 1 - STATUSB_IP0;
364 if (ip == 2)
365 irq_pci_dispatch();
366 else
367 do_IRQ(MIPS_CPU_IRQ_BASE + ip);
368}
diff --git a/arch/mips/paravirt/paravirt-smp.c b/arch/mips/paravirt/paravirt-smp.c
new file mode 100644
index 000000000000..0164b0c48352
--- /dev/null
+++ b/arch/mips/paravirt/paravirt-smp.c
@@ -0,0 +1,143 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2013 Cavium, Inc.
7 */
8
9#include <linux/interrupt.h>
10#include <linux/cpumask.h>
11#include <linux/kernel.h>
12#include <linux/sched.h>
13
14#include <asm/mipsregs.h>
15#include <asm/setup.h>
16#include <asm/time.h>
17#include <asm/smp.h>
18
19/*
20 * Writing the sp releases the CPU, so writes must be ordered, gp
21 * first, then sp.
22 */
23unsigned long paravirt_smp_sp[NR_CPUS];
24unsigned long paravirt_smp_gp[NR_CPUS];
25
26static int numcpus = 1;
27
28static int __init set_numcpus(char *str)
29{
30 int newval;
31
32 if (get_option(&str, &newval)) {
33 if (newval < 1 || newval >= NR_CPUS)
34 goto bad;
35 numcpus = newval;
36 return 0;
37 }
38bad:
39 return -EINVAL;
40}
41early_param("numcpus", set_numcpus);
42
43
44static void paravirt_smp_setup(void)
45{
46 int id;
47 unsigned int cpunum = get_ebase_cpunum();
48
49 if (WARN_ON(cpunum >= NR_CPUS))
50 return;
51
52 /* The present CPUs are initially just the boot cpu (CPU 0). */
53 for (id = 0; id < NR_CPUS; id++) {
54 set_cpu_possible(id, id == 0);
55 set_cpu_present(id, id == 0);
56 }
57 __cpu_number_map[cpunum] = 0;
58 __cpu_logical_map[0] = cpunum;
59
60 for (id = 0; id < numcpus; id++) {
61 set_cpu_possible(id, true);
62 set_cpu_present(id, true);
63 __cpu_number_map[id] = id;
64 __cpu_logical_map[id] = id;
65 }
66}
67
68void irq_mbox_ipi(int cpu, unsigned int actions);
69static void paravirt_send_ipi_single(int cpu, unsigned int action)
70{
71 irq_mbox_ipi(cpu, action);
72}
73
74static void paravirt_send_ipi_mask(const struct cpumask *mask, unsigned int action)
75{
76 unsigned int cpu;
77
78 for_each_cpu_mask(cpu, *mask)
79 paravirt_send_ipi_single(cpu, action);
80}
81
82static void paravirt_init_secondary(void)
83{
84 unsigned int sr;
85
86 sr = set_c0_status(ST0_BEV);
87 write_c0_ebase((u32)ebase);
88
89 sr |= STATUSF_IP2; /* Interrupt controller on IP2 */
90 write_c0_status(sr);
91
92 irq_cpu_online();
93}
94
95static void paravirt_smp_finish(void)
96{
97 /* to generate the first CPU timer interrupt */
98 write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
99 local_irq_enable();
100}
101
102static void paravirt_boot_secondary(int cpu, struct task_struct *idle)
103{
104 paravirt_smp_gp[cpu] = (unsigned long)task_thread_info(idle);
105 smp_wmb();
106 paravirt_smp_sp[cpu] = __KSTK_TOS(idle);
107}
108
109static irqreturn_t paravirt_reched_interrupt(int irq, void *dev_id)
110{
111 scheduler_ipi();
112 return IRQ_HANDLED;
113}
114
115static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id)
116{
117 smp_call_function_interrupt();
118 return IRQ_HANDLED;
119}
120
121static void paravirt_prepare_cpus(unsigned int max_cpus)
122{
123 if (request_irq(MIPS_IRQ_MBOX0, paravirt_reched_interrupt,
124 IRQF_PERCPU | IRQF_NO_THREAD, "Scheduler",
125 paravirt_reched_interrupt)) {
126 panic("Cannot request_irq for SchedulerIPI");
127 }
128 if (request_irq(MIPS_IRQ_MBOX1, paravirt_function_interrupt,
129 IRQF_PERCPU | IRQF_NO_THREAD, "SMP-Call",
130 paravirt_function_interrupt)) {
131 panic("Cannot request_irq for SMP-Call");
132 }
133}
134
135struct plat_smp_ops paravirt_smp_ops = {
136 .send_ipi_single = paravirt_send_ipi_single,
137 .send_ipi_mask = paravirt_send_ipi_mask,
138 .init_secondary = paravirt_init_secondary,
139 .smp_finish = paravirt_smp_finish,
140 .boot_secondary = paravirt_boot_secondary,
141 .smp_setup = paravirt_smp_setup,
142 .prepare_cpus = paravirt_prepare_cpus,
143};
diff --git a/arch/mips/paravirt/serial.c b/arch/mips/paravirt/serial.c
new file mode 100644
index 000000000000..02b665c02272
--- /dev/null
+++ b/arch/mips/paravirt/serial.c
@@ -0,0 +1,40 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2013 Cavium, Inc.
7 */
8
9#include <linux/kernel.h>
10#include <linux/virtio_console.h>
11#include <linux/kvm_para.h>
12
13/*
14 * Emit one character to the boot console.
15 */
16int prom_putchar(char c)
17{
18 kvm_hypercall3(KVM_HC_MIPS_CONSOLE_OUTPUT, 0 /* port 0 */,
19 (unsigned long)&c, 1 /* len == 1 */);
20
21 return 1;
22}
23
24#ifdef CONFIG_VIRTIO_CONSOLE
25static int paravirt_put_chars(u32 vtermno, const char *buf, int count)
26{
27 kvm_hypercall3(KVM_HC_MIPS_CONSOLE_OUTPUT, vtermno,
28 (unsigned long)buf, count);
29
30 return count;
31}
32
33static int __init paravirt_cons_init(void)
34{
35 virtio_cons_early_init(paravirt_put_chars);
36 return 0;
37}
38core_initcall(paravirt_cons_init);
39
40#endif
diff --git a/arch/mips/paravirt/setup.c b/arch/mips/paravirt/setup.c
new file mode 100644
index 000000000000..cb8448b373a7
--- /dev/null
+++ b/arch/mips/paravirt/setup.c
@@ -0,0 +1,67 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2013 Cavium, Inc.
7 */
8
9#include <linux/kernel.h>
10#include <linux/kvm_para.h>
11
12#include <asm/reboot.h>
13#include <asm/bootinfo.h>
14#include <asm/smp-ops.h>
15#include <asm/time.h>
16
17extern struct plat_smp_ops paravirt_smp_ops;
18
19const char *get_system_type(void)
20{
21 return "MIPS Para-Virtualized Guest";
22}
23
24void __init plat_time_init(void)
25{
26 mips_hpt_frequency = kvm_hypercall0(KVM_HC_MIPS_GET_CLOCK_FREQ);
27
28 preset_lpj = mips_hpt_frequency / (2 * HZ);
29}
30
31static void pv_machine_halt(void)
32{
33 kvm_hypercall0(KVM_HC_MIPS_EXIT_VM);
34}
35
36/*
37 * Early entry point for arch setup
38 */
39void __init prom_init(void)
40{
41 int i;
42 int argc = fw_arg0;
43 char **argv = (char **)fw_arg1;
44
45#ifdef CONFIG_32BIT
46 set_io_port_base(KSEG1ADDR(0x1e000000));
47#else /* CONFIG_64BIT */
48 set_io_port_base(PHYS_TO_XKSEG_UNCACHED(0x1e000000));
49#endif
50
51 for (i = 0; i < argc; i++) {
52 strlcat(arcs_cmdline, argv[i], COMMAND_LINE_SIZE);
53 if (i < argc - 1)
54 strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
55 }
56 _machine_halt = pv_machine_halt;
57 register_smp_ops(&paravirt_smp_ops);
58}
59
60void __init plat_mem_setup(void)
61{
62 /* Do nothing, the "mem=???" parser handles our memory. */
63}
64
65void __init prom_free_prom_memory(void)
66{
67}
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index d61138a177cc..ff8a5539b363 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -21,7 +21,7 @@ obj-$(CONFIG_BCM63XX) += pci-bcm63xx.o fixup-bcm63xx.o \
21obj-$(CONFIG_MIPS_ALCHEMY) += pci-alchemy.o 21obj-$(CONFIG_MIPS_ALCHEMY) += pci-alchemy.o
22obj-$(CONFIG_SOC_AR71XX) += pci-ar71xx.o 22obj-$(CONFIG_SOC_AR71XX) += pci-ar71xx.o
23obj-$(CONFIG_PCI_AR724X) += pci-ar724x.o 23obj-$(CONFIG_PCI_AR724X) += pci-ar724x.o
24 24obj-$(CONFIG_MIPS_PCI_VIRTIO) += pci-virtio-guest.o
25# 25#
26# These are still pretty much in the old state, watch, go blind. 26# These are still pretty much in the old state, watch, go blind.
27# 27#
diff --git a/arch/mips/pci/fixup-malta.c b/arch/mips/pci/fixup-malta.c
index 2f9e52a1a750..40e920c653cc 100644
--- a/arch/mips/pci/fixup-malta.c
+++ b/arch/mips/pci/fixup-malta.c
@@ -68,6 +68,7 @@ static void malta_piix_func0_fixup(struct pci_dev *pdev)
68{ 68{
69 unsigned char reg_val; 69 unsigned char reg_val;
70 u32 reg_val32; 70 u32 reg_val32;
71 u16 reg_val16;
71 /* PIIX PIRQC[A:D] irq mappings */ 72 /* PIIX PIRQC[A:D] irq mappings */
72 static int piixirqmap[PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MAX] = { 73 static int piixirqmap[PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MAX] = {
73 0, 0, 0, 3, 74 0, 0, 0, 3,
@@ -107,6 +108,11 @@ static void malta_piix_func0_fixup(struct pci_dev *pdev)
107 pci_read_config_byte(pdev, PIIX4_FUNC0_SERIRQC, &reg_val); 108 pci_read_config_byte(pdev, PIIX4_FUNC0_SERIRQC, &reg_val);
108 reg_val |= PIIX4_FUNC0_SERIRQC_EN | PIIX4_FUNC0_SERIRQC_CONT; 109 reg_val |= PIIX4_FUNC0_SERIRQC_EN | PIIX4_FUNC0_SERIRQC_CONT;
109 pci_write_config_byte(pdev, PIIX4_FUNC0_SERIRQC, reg_val); 110 pci_write_config_byte(pdev, PIIX4_FUNC0_SERIRQC, reg_val);
111
112 /* Enable response to special cycles */
113 pci_read_config_word(pdev, PCI_COMMAND, &reg_val16);
114 pci_write_config_word(pdev, PCI_COMMAND,
115 reg_val16 | PCI_COMMAND_SPECIAL);
110} 116}
111 117
112DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0, 118DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
index 2b91b0e61566..ab0c5d14c6f7 100644
--- a/arch/mips/pci/msi-octeon.c
+++ b/arch/mips/pci/msi-octeon.c
@@ -15,6 +15,7 @@
15#include <asm/octeon/cvmx-npi-defs.h> 15#include <asm/octeon/cvmx-npi-defs.h>
16#include <asm/octeon/cvmx-pci-defs.h> 16#include <asm/octeon/cvmx-pci-defs.h>
17#include <asm/octeon/cvmx-npei-defs.h> 17#include <asm/octeon/cvmx-npei-defs.h>
18#include <asm/octeon/cvmx-sli-defs.h>
18#include <asm/octeon/cvmx-pexp-defs.h> 19#include <asm/octeon/cvmx-pexp-defs.h>
19#include <asm/octeon/pci-octeon.h> 20#include <asm/octeon/pci-octeon.h>
20 21
@@ -162,6 +163,11 @@ msi_irq_allocated:
162 msg.address_lo = (0 + CVMX_NPEI_PCIE_MSI_RCV) & 0xffffffff; 163 msg.address_lo = (0 + CVMX_NPEI_PCIE_MSI_RCV) & 0xffffffff;
163 msg.address_hi = (0 + CVMX_NPEI_PCIE_MSI_RCV) >> 32; 164 msg.address_hi = (0 + CVMX_NPEI_PCIE_MSI_RCV) >> 32;
164 break; 165 break;
166 case OCTEON_DMA_BAR_TYPE_PCIE2:
167 /* When using PCIe2, Bar 0 is based at 0 */
168 msg.address_lo = (0 + CVMX_SLI_PCIE_MSI_RCV) & 0xffffffff;
169 msg.address_hi = (0 + CVMX_SLI_PCIE_MSI_RCV) >> 32;
170 break;
165 default: 171 default:
166 panic("arch_setup_msi_irq: Invalid octeon_dma_bar_type"); 172 panic("arch_setup_msi_irq: Invalid octeon_dma_bar_type");
167 } 173 }
diff --git a/arch/mips/pci/msi-xlp.c b/arch/mips/pci/msi-xlp.c
index afd8405e0188..1ef3ed607f47 100644
--- a/arch/mips/pci/msi-xlp.c
+++ b/arch/mips/pci/msi-xlp.c
@@ -56,8 +56,8 @@
56#include <asm/netlogic/xlp-hal/bridge.h> 56#include <asm/netlogic/xlp-hal/bridge.h>
57 57
58#define XLP_MSIVEC_PER_LINK 32 58#define XLP_MSIVEC_PER_LINK 32
59#define XLP_MSIXVEC_TOTAL 32 59#define XLP_MSIXVEC_TOTAL (cpu_is_xlp9xx() ? 128 : 32)
60#define XLP_MSIXVEC_PER_LINK 8 60#define XLP_MSIXVEC_PER_LINK (cpu_is_xlp9xx() ? 32 : 8)
61 61
62/* 128 MSI irqs per node, mapped starting at NLM_MSI_VEC_BASE */ 62/* 128 MSI irqs per node, mapped starting at NLM_MSI_VEC_BASE */
63static inline int nlm_link_msiirq(int link, int msivec) 63static inline int nlm_link_msiirq(int link, int msivec)
@@ -65,35 +65,44 @@ static inline int nlm_link_msiirq(int link, int msivec)
65 return NLM_MSI_VEC_BASE + link * XLP_MSIVEC_PER_LINK + msivec; 65 return NLM_MSI_VEC_BASE + link * XLP_MSIVEC_PER_LINK + msivec;
66} 66}
67 67
68/* get the link MSI vector from irq number */
68static inline int nlm_irq_msivec(int irq) 69static inline int nlm_irq_msivec(int irq)
69{ 70{
70 return irq % XLP_MSIVEC_PER_LINK; 71 return (irq - NLM_MSI_VEC_BASE) % XLP_MSIVEC_PER_LINK;
71} 72}
72 73
74/* get the link from the irq number */
73static inline int nlm_irq_msilink(int irq) 75static inline int nlm_irq_msilink(int irq)
74{ 76{
75 return (irq % (XLP_MSIVEC_PER_LINK * PCIE_NLINKS)) / 77 int total_msivec = XLP_MSIVEC_PER_LINK * PCIE_NLINKS;
76 XLP_MSIVEC_PER_LINK; 78
79 return ((irq - NLM_MSI_VEC_BASE) % total_msivec) /
80 XLP_MSIVEC_PER_LINK;
77} 81}
78 82
79/* 83/*
80 * Only 32 MSI-X vectors are possible because there are only 32 PIC 84 * For XLP 8xx/4xx/3xx/2xx, only 32 MSI-X vectors are possible because
81 * interrupts for MSI. We split them statically and use 8 MSI-X vectors 85 * there are only 32 PIC interrupts for MSI. We split them statically
82 * per link - this keeps the allocation and lookup simple. 86 * and use 8 MSI-X vectors per link - this keeps the allocation and
87 * lookup simple.
88 * On XLP 9xx, there are 32 vectors per link, and the interrupts are
89 * not routed thru PIC, so we can use all 128 MSI-X vectors.
83 */ 90 */
84static inline int nlm_link_msixirq(int link, int bit) 91static inline int nlm_link_msixirq(int link, int bit)
85{ 92{
86 return NLM_MSIX_VEC_BASE + link * XLP_MSIXVEC_PER_LINK + bit; 93 return NLM_MSIX_VEC_BASE + link * XLP_MSIXVEC_PER_LINK + bit;
87} 94}
88 95
96/* get the link MSI vector from irq number */
89static inline int nlm_irq_msixvec(int irq) 97static inline int nlm_irq_msixvec(int irq)
90{ 98{
91 return irq % XLP_MSIXVEC_TOTAL; /* works when given xirq */ 99 return (irq - NLM_MSIX_VEC_BASE) % XLP_MSIXVEC_TOTAL;
92} 100}
93 101
94static inline int nlm_irq_msixlink(int irq) 102/* get the link from MSIX vec */
103static inline int nlm_irq_msixlink(int msixvec)
95{ 104{
96 return nlm_irq_msixvec(irq) / XLP_MSIXVEC_PER_LINK; 105 return msixvec / XLP_MSIXVEC_PER_LINK;
97} 106}
98 107
99/* 108/*
@@ -129,7 +138,11 @@ static void xlp_msi_enable(struct irq_data *d)
129 vec = nlm_irq_msivec(d->irq); 138 vec = nlm_irq_msivec(d->irq);
130 spin_lock_irqsave(&md->msi_lock, flags); 139 spin_lock_irqsave(&md->msi_lock, flags);
131 md->msi_enabled_mask |= 1u << vec; 140 md->msi_enabled_mask |= 1u << vec;
132 nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask); 141 if (cpu_is_xlp9xx())
142 nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_EN,
143 md->msi_enabled_mask);
144 else
145 nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask);
133 spin_unlock_irqrestore(&md->msi_lock, flags); 146 spin_unlock_irqrestore(&md->msi_lock, flags);
134} 147}
135 148
@@ -142,7 +155,11 @@ static void xlp_msi_disable(struct irq_data *d)
142 vec = nlm_irq_msivec(d->irq); 155 vec = nlm_irq_msivec(d->irq);
143 spin_lock_irqsave(&md->msi_lock, flags); 156 spin_lock_irqsave(&md->msi_lock, flags);
144 md->msi_enabled_mask &= ~(1u << vec); 157 md->msi_enabled_mask &= ~(1u << vec);
145 nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask); 158 if (cpu_is_xlp9xx())
159 nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_EN,
160 md->msi_enabled_mask);
161 else
162 nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask);
146 spin_unlock_irqrestore(&md->msi_lock, flags); 163 spin_unlock_irqrestore(&md->msi_lock, flags);
147} 164}
148 165
@@ -156,11 +173,18 @@ static void xlp_msi_mask_ack(struct irq_data *d)
156 xlp_msi_disable(d); 173 xlp_msi_disable(d);
157 174
158 /* Ack MSI on bridge */ 175 /* Ack MSI on bridge */
159 nlm_write_reg(md->lnkbase, PCIE_MSI_STATUS, 1u << vec); 176 if (cpu_is_xlp9xx())
177 nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_STATUS, 1u << vec);
178 else
179 nlm_write_reg(md->lnkbase, PCIE_MSI_STATUS, 1u << vec);
160 180
161 /* Ack at eirr and PIC */ 181 /* Ack at eirr and PIC */
162 ack_c0_eirr(PIC_PCIE_LINK_MSI_IRQ(link)); 182 ack_c0_eirr(PIC_PCIE_LINK_MSI_IRQ(link));
163 nlm_pic_ack(md->node->picbase, PIC_IRT_PCIE_LINK_INDEX(link)); 183 if (cpu_is_xlp9xx())
184 nlm_pic_ack(md->node->picbase,
185 PIC_9XX_IRT_PCIE_LINK_INDEX(link));
186 else
187 nlm_pic_ack(md->node->picbase, PIC_IRT_PCIE_LINK_INDEX(link));
164} 188}
165 189
166static struct irq_chip xlp_msi_chip = { 190static struct irq_chip xlp_msi_chip = {
@@ -172,30 +196,45 @@ static struct irq_chip xlp_msi_chip = {
172}; 196};
173 197
174/* 198/*
175 * The MSI-X interrupt handling is different from MSI, there are 32 199 * XLP8XX/4XX/3XX/2XX:
176 * MSI-X interrupts generated by the PIC and each of these correspond 200 * The MSI-X interrupt handling is different from MSI, there are 32 MSI-X
177 * to a MSI-X vector (0-31) that can be assigned. 201 * interrupts generated by the PIC and each of these correspond to a MSI-X
202 * vector (0-31) that can be assigned.
178 * 203 *
179 * We divide the MSI-X vectors to 8 per link and do a per-link 204 * We divide the MSI-X vectors to 8 per link and do a per-link allocation
180 * allocation 205 *
206 * XLP9XX:
207 * 32 MSI-X vectors are available per link, and the interrupts are not routed
208 * thru the PIC. PIC ack not needed.
181 * 209 *
182 * Enable and disable done using standard MSI functions. 210 * Enable and disable done using standard MSI functions.
183 */ 211 */
184static void xlp_msix_mask_ack(struct irq_data *d) 212static void xlp_msix_mask_ack(struct irq_data *d)
185{ 213{
186 struct xlp_msi_data *md = irq_data_get_irq_handler_data(d); 214 struct xlp_msi_data *md;
187 int link, msixvec; 215 int link, msixvec;
216 uint32_t status_reg, bit;
188 217
189 msixvec = nlm_irq_msixvec(d->irq); 218 msixvec = nlm_irq_msixvec(d->irq);
190 link = nlm_irq_msixlink(d->irq); 219 link = nlm_irq_msixlink(msixvec);
191 mask_msi_irq(d); 220 mask_msi_irq(d);
221 md = irq_data_get_irq_handler_data(d);
192 222
193 /* Ack MSI on bridge */ 223 /* Ack MSI on bridge */
194 nlm_write_reg(md->lnkbase, PCIE_MSIX_STATUS, 1u << msixvec); 224 if (cpu_is_xlp9xx()) {
225 status_reg = PCIE_9XX_MSIX_STATUSX(link);
226 bit = msixvec % XLP_MSIXVEC_PER_LINK;
227 } else {
228 status_reg = PCIE_MSIX_STATUS;
229 bit = msixvec;
230 }
231 nlm_write_reg(md->lnkbase, status_reg, 1u << bit);
195 232
196 /* Ack at eirr and PIC */ 233 /* Ack at eirr and PIC */
197 ack_c0_eirr(PIC_PCIE_MSIX_IRQ(link)); 234 ack_c0_eirr(PIC_PCIE_MSIX_IRQ(link));
198 nlm_pic_ack(md->node->picbase, PIC_IRT_PCIE_MSIX_INDEX(msixvec)); 235 if (!cpu_is_xlp9xx())
236 nlm_pic_ack(md->node->picbase,
237 PIC_IRT_PCIE_MSIX_INDEX(msixvec));
199} 238}
200 239
201static struct irq_chip xlp_msix_chip = { 240static struct irq_chip xlp_msix_chip = {
@@ -225,10 +264,18 @@ static void xlp_config_link_msi(uint64_t lnkbase, int lirq, uint64_t msiaddr)
225{ 264{
226 u32 val; 265 u32 val;
227 266
228 val = nlm_read_reg(lnkbase, PCIE_INT_EN0); 267 if (cpu_is_xlp9xx()) {
229 if ((val & 0x200) == 0) { 268 val = nlm_read_reg(lnkbase, PCIE_9XX_INT_EN0);
230 val |= 0x200; /* MSI Interrupt enable */ 269 if ((val & 0x200) == 0) {
231 nlm_write_reg(lnkbase, PCIE_INT_EN0, val); 270 val |= 0x200; /* MSI Interrupt enable */
271 nlm_write_reg(lnkbase, PCIE_9XX_INT_EN0, val);
272 }
273 } else {
274 val = nlm_read_reg(lnkbase, PCIE_INT_EN0);
275 if ((val & 0x200) == 0) {
276 val |= 0x200;
277 nlm_write_reg(lnkbase, PCIE_INT_EN0, val);
278 }
232 } 279 }
233 280
234 val = nlm_read_reg(lnkbase, 0x1); /* CMD */ 281 val = nlm_read_reg(lnkbase, 0x1); /* CMD */
@@ -275,9 +322,12 @@ static int xlp_setup_msi(uint64_t lnkbase, int node, int link,
275 322
276 spin_lock_irqsave(&md->msi_lock, flags); 323 spin_lock_irqsave(&md->msi_lock, flags);
277 if (md->msi_alloc_mask == 0) { 324 if (md->msi_alloc_mask == 0) {
278 /* switch the link IRQ to MSI range */
279 xlp_config_link_msi(lnkbase, lirq, msiaddr); 325 xlp_config_link_msi(lnkbase, lirq, msiaddr);
280 irt = PIC_IRT_PCIE_LINK_INDEX(link); 326 /* switch the link IRQ to MSI range */
327 if (cpu_is_xlp9xx())
328 irt = PIC_9XX_IRT_PCIE_LINK_INDEX(link);
329 else
330 irt = PIC_IRT_PCIE_LINK_INDEX(link);
281 nlm_setup_pic_irq(node, lirq, lirq, irt); 331 nlm_setup_pic_irq(node, lirq, lirq, irt);
282 nlm_pic_init_irt(nlm_get_node(node)->picbase, irt, lirq, 332 nlm_pic_init_irt(nlm_get_node(node)->picbase, irt, lirq,
283 node * nlm_threads_per_node(), 1 /*en */); 333 node * nlm_threads_per_node(), 1 /*en */);
@@ -319,10 +369,19 @@ static void xlp_config_link_msix(uint64_t lnkbase, int lirq, uint64_t msixaddr)
319 val |= 0x80000000U; 369 val |= 0x80000000U;
320 nlm_write_reg(lnkbase, 0x2C, val); 370 nlm_write_reg(lnkbase, 0x2C, val);
321 } 371 }
322 val = nlm_read_reg(lnkbase, PCIE_INT_EN0); 372
323 if ((val & 0x200) == 0) { 373 if (cpu_is_xlp9xx()) {
324 val |= 0x200; /* MSI Interrupt enable */ 374 val = nlm_read_reg(lnkbase, PCIE_9XX_INT_EN0);
325 nlm_write_reg(lnkbase, PCIE_INT_EN0, val); 375 if ((val & 0x200) == 0) {
376 val |= 0x200; /* MSI Interrupt enable */
377 nlm_write_reg(lnkbase, PCIE_9XX_INT_EN0, val);
378 }
379 } else {
380 val = nlm_read_reg(lnkbase, PCIE_INT_EN0);
381 if ((val & 0x200) == 0) {
382 val |= 0x200; /* MSI Interrupt enable */
383 nlm_write_reg(lnkbase, PCIE_INT_EN0, val);
384 }
326 } 385 }
327 386
328 val = nlm_read_reg(lnkbase, 0x1); /* CMD */ 387 val = nlm_read_reg(lnkbase, 0x1); /* CMD */
@@ -337,10 +396,19 @@ static void xlp_config_link_msix(uint64_t lnkbase, int lirq, uint64_t msixaddr)
337 val |= (1 << 8) | lirq; 396 val |= (1 << 8) | lirq;
338 nlm_write_pci_reg(lnkbase, 0xf, val); 397 nlm_write_pci_reg(lnkbase, 0xf, val);
339 398
340 /* MSI-X addresses */ 399 if (cpu_is_xlp9xx()) {
341 nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_BASE, msixaddr >> 8); 400 /* MSI-X addresses */
342 nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_LIMIT, 401 nlm_write_reg(lnkbase, PCIE_9XX_BRIDGE_MSIX_ADDR_BASE,
343 (msixaddr + MSI_ADDR_SZ) >> 8); 402 msixaddr >> 8);
403 nlm_write_reg(lnkbase, PCIE_9XX_BRIDGE_MSIX_ADDR_LIMIT,
404 (msixaddr + MSI_ADDR_SZ) >> 8);
405 } else {
406 /* MSI-X addresses */
407 nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_BASE,
408 msixaddr >> 8);
409 nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_LIMIT,
410 (msixaddr + MSI_ADDR_SZ) >> 8);
411 }
344} 412}
345 413
346/* 414/*
@@ -377,6 +445,7 @@ static int xlp_setup_msix(uint64_t lnkbase, int node, int link,
377 445
378 xirq += t; 446 xirq += t;
379 msixvec = nlm_irq_msixvec(xirq); 447 msixvec = nlm_irq_msixvec(xirq);
448
380 msg.address_hi = msixaddr >> 32; 449 msg.address_hi = msixaddr >> 32;
381 msg.address_lo = msixaddr & 0xffffffff; 450 msg.address_lo = msixaddr & 0xffffffff;
382 msg.data = 0xc00 | msixvec; 451 msg.data = 0xc00 | msixvec;
@@ -417,7 +486,7 @@ void __init xlp_init_node_msi_irqs(int node, int link)
417{ 486{
418 struct nlm_soc_info *nodep; 487 struct nlm_soc_info *nodep;
419 struct xlp_msi_data *md; 488 struct xlp_msi_data *md;
420 int irq, i, irt, msixvec; 489 int irq, i, irt, msixvec, val;
421 490
422 pr_info("[%d %d] Init node PCI IRT\n", node, link); 491 pr_info("[%d %d] Init node PCI IRT\n", node, link);
423 nodep = nlm_get_node(node); 492 nodep = nlm_get_node(node);
@@ -438,19 +507,28 @@ void __init xlp_init_node_msi_irqs(int node, int link)
438 irq_set_handler_data(i, md); 507 irq_set_handler_data(i, md);
439 } 508 }
440 509
441 for (i = 0; i < XLP_MSIXVEC_PER_LINK; i++) { 510 for (i = 0; i < XLP_MSIXVEC_PER_LINK ; i++) {
442 /* Initialize MSI-X irts to generate one interrupt per link */ 511 if (cpu_is_xlp9xx()) {
443 msixvec = link * XLP_MSIXVEC_PER_LINK + i; 512 val = ((node * nlm_threads_per_node()) << 7 |
444 irt = PIC_IRT_PCIE_MSIX_INDEX(msixvec); 513 PIC_PCIE_MSIX_IRQ(link) << 1 | 0 << 0);
445 nlm_pic_init_irt(nodep->picbase, irt, PIC_PCIE_MSIX_IRQ(link), 514 nlm_write_pcie_reg(md->lnkbase, PCIE_9XX_MSIX_VECX(i +
446 node * nlm_threads_per_node(), 1 /* enable */); 515 (link * XLP_MSIXVEC_PER_LINK)), val);
516 } else {
517 /* Initialize MSI-X irts to generate one interrupt
518 * per link
519 */
520 msixvec = link * XLP_MSIXVEC_PER_LINK + i;
521 irt = PIC_IRT_PCIE_MSIX_INDEX(msixvec);
522 nlm_pic_init_irt(nodep->picbase, irt,
523 PIC_PCIE_MSIX_IRQ(link),
524 node * nlm_threads_per_node(), 1);
525 }
447 526
448 /* Initialize MSI-X extended irq space for the link */ 527 /* Initialize MSI-X extended irq space for the link */
449 irq = nlm_irq_to_xirq(node, nlm_link_msixirq(link, i)); 528 irq = nlm_irq_to_xirq(node, nlm_link_msixirq(link, i));
450 irq_set_chip_and_handler(irq, &xlp_msix_chip, handle_level_irq); 529 irq_set_chip_and_handler(irq, &xlp_msix_chip, handle_level_irq);
451 irq_set_handler_data(irq, md); 530 irq_set_handler_data(irq, md);
452 } 531 }
453
454} 532}
455 533
456void nlm_dispatch_msi(int node, int lirq) 534void nlm_dispatch_msi(int node, int lirq)
@@ -462,7 +540,11 @@ void nlm_dispatch_msi(int node, int lirq)
462 link = lirq - PIC_PCIE_LINK_MSI_IRQ_BASE; 540 link = lirq - PIC_PCIE_LINK_MSI_IRQ_BASE;
463 irqbase = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0)); 541 irqbase = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
464 md = irq_get_handler_data(irqbase); 542 md = irq_get_handler_data(irqbase);
465 status = nlm_read_reg(md->lnkbase, PCIE_MSI_STATUS) & 543 if (cpu_is_xlp9xx())
544 status = nlm_read_reg(md->lnkbase, PCIE_9XX_MSI_STATUS) &
545 md->msi_enabled_mask;
546 else
547 status = nlm_read_reg(md->lnkbase, PCIE_MSI_STATUS) &
466 md->msi_enabled_mask; 548 md->msi_enabled_mask;
467 while (status) { 549 while (status) {
468 i = __ffs(status); 550 i = __ffs(status);
@@ -480,10 +562,14 @@ void nlm_dispatch_msix(int node, int lirq)
480 link = lirq - PIC_PCIE_MSIX_IRQ_BASE; 562 link = lirq - PIC_PCIE_MSIX_IRQ_BASE;
481 irqbase = nlm_irq_to_xirq(node, nlm_link_msixirq(link, 0)); 563 irqbase = nlm_irq_to_xirq(node, nlm_link_msixirq(link, 0));
482 md = irq_get_handler_data(irqbase); 564 md = irq_get_handler_data(irqbase);
483 status = nlm_read_reg(md->lnkbase, PCIE_MSIX_STATUS); 565 if (cpu_is_xlp9xx())
566 status = nlm_read_reg(md->lnkbase, PCIE_9XX_MSIX_STATUSX(link));
567 else
568 status = nlm_read_reg(md->lnkbase, PCIE_MSIX_STATUS);
484 569
485 /* narrow it down to the MSI-x vectors for our link */ 570 /* narrow it down to the MSI-x vectors for our link */
486 status = (status >> (link * XLP_MSIXVEC_PER_LINK)) & 571 if (!cpu_is_xlp9xx())
572 status = (status >> (link * XLP_MSIXVEC_PER_LINK)) &
487 ((1 << XLP_MSIXVEC_PER_LINK) - 1); 573 ((1 << XLP_MSIXVEC_PER_LINK) - 1);
488 574
489 while (status) { 575 while (status) {
diff --git a/arch/mips/pci/ops-pmcmsp.c b/arch/mips/pci/ops-pmcmsp.c
index 3d27800edba2..50034f985be1 100644
--- a/arch/mips/pci/ops-pmcmsp.c
+++ b/arch/mips/pci/ops-pmcmsp.c
@@ -7,7 +7,7 @@
7 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net 7 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
8 * 8 *
9 * Much of the code is derived from the original DDB5074 port by 9 * Much of the code is derived from the original DDB5074 port by
10 * Geert Uytterhoeven <geert@sonycom.com> 10 * Geert Uytterhoeven <geert@linux-m68k.org>
11 * 11 *
12 * This program is free software; you can redistribute it and/or modify it 12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the 13 * under the terms of the GNU General Public License as published by the
diff --git a/arch/mips/pci/ops-tx3927.c b/arch/mips/pci/ops-tx3927.c
index 02d64f77e967..d35dc9c9ab9d 100644
--- a/arch/mips/pci/ops-tx3927.c
+++ b/arch/mips/pci/ops-tx3927.c
@@ -11,7 +11,7 @@
11 * Define the pci_ops for TX3927. 11 * Define the pci_ops for TX3927.
12 * 12 *
13 * Much of the code is derived from the original DDB5074 port by 13 * Much of the code is derived from the original DDB5074 port by
14 * Geert Uytterhoeven <geert@sonycom.com> 14 * Geert Uytterhoeven <geert@linux-m68k.org>
15 * 15 *
16 * This program is free software; you can redistribute it and/or modify it 16 * This program is free software; you can redistribute it and/or modify it
17 * under the terms of the GNU General Public License as published by the 17 * under the terms of the GNU General Public License as published by the
diff --git a/arch/mips/pci/ops-tx4927.c b/arch/mips/pci/ops-tx4927.c
index 3d5df514d024..0e046d82e4e3 100644
--- a/arch/mips/pci/ops-tx4927.c
+++ b/arch/mips/pci/ops-tx4927.c
@@ -202,17 +202,20 @@ char *tx4927_pcibios_setup(char *str)
202 unsigned long val; 202 unsigned long val;
203 203
204 if (!strncmp(str, "trdyto=", 7)) { 204 if (!strncmp(str, "trdyto=", 7)) {
205 if (strict_strtoul(str + 7, 0, &val) == 0) 205 u8 val = 0;
206 if (kstrtou8(str + 7, 0, &val) == 0)
206 tx4927_pci_opts.trdyto = val; 207 tx4927_pci_opts.trdyto = val;
207 return NULL; 208 return NULL;
208 } 209 }
209 if (!strncmp(str, "retryto=", 8)) { 210 if (!strncmp(str, "retryto=", 8)) {
210 if (strict_strtoul(str + 8, 0, &val) == 0) 211 u8 val = 0;
212 if (kstrtou8(str + 8, 0, &val) == 0)
211 tx4927_pci_opts.retryto = val; 213 tx4927_pci_opts.retryto = val;
212 return NULL; 214 return NULL;
213 } 215 }
214 if (!strncmp(str, "gbwc=", 5)) { 216 if (!strncmp(str, "gbwc=", 5)) {
215 if (strict_strtoul(str + 5, 0, &val) == 0) 217 u16 val;
218 if (kstrtou16(str + 5, 0, &val) == 0)
216 tx4927_pci_opts.gbwc = val; 219 tx4927_pci_opts.gbwc = val;
217 return NULL; 220 return NULL;
218 } 221 }
diff --git a/arch/mips/pci/pci-virtio-guest.c b/arch/mips/pci/pci-virtio-guest.c
new file mode 100644
index 000000000000..40a078bc4617
--- /dev/null
+++ b/arch/mips/pci/pci-virtio-guest.c
@@ -0,0 +1,131 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2013 Cavium, Inc.
7 */
8
9#include <linux/kernel.h>
10#include <linux/init.h>
11#include <linux/interrupt.h>
12#include <linux/pci.h>
13
14#include <uapi/asm/bitfield.h>
15#include <asm/byteorder.h>
16#include <asm/io.h>
17
18#define PCI_CONFIG_ADDRESS 0xcf8
19#define PCI_CONFIG_DATA 0xcfc
20
21union pci_config_address {
22 struct {
23 __BITFIELD_FIELD(unsigned enable_bit : 1, /* 31 */
24 __BITFIELD_FIELD(unsigned reserved : 7, /* 30 .. 24 */
25 __BITFIELD_FIELD(unsigned bus_number : 8, /* 23 .. 16 */
26 __BITFIELD_FIELD(unsigned devfn_number : 8, /* 15 .. 8 */
27 __BITFIELD_FIELD(unsigned register_number : 8, /* 7 .. 0 */
28 )))));
29 };
30 u32 w;
31};
32
33int pcibios_plat_dev_init(struct pci_dev *dev)
34{
35 return 0;
36}
37
38int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
39{
40 return ((pin + slot) % 4)+ MIPS_IRQ_PCIA;
41}
42
43static void pci_virtio_guest_write_config_addr(struct pci_bus *bus,
44 unsigned int devfn, int reg)
45{
46 union pci_config_address pca = { .w = 0 };
47
48 pca.register_number = reg;
49 pca.devfn_number = devfn;
50 pca.bus_number = bus->number;
51 pca.enable_bit = 1;
52
53 outl(pca.w, PCI_CONFIG_ADDRESS);
54}
55
56static int pci_virtio_guest_write_config(struct pci_bus *bus,
57 unsigned int devfn, int reg, int size, u32 val)
58{
59 pci_virtio_guest_write_config_addr(bus, devfn, reg);
60
61 switch (size) {
62 case 1:
63 outb(val, PCI_CONFIG_DATA + (reg & 3));
64 break;
65 case 2:
66 outw(val, PCI_CONFIG_DATA + (reg & 2));
67 break;
68 case 4:
69 outl(val, PCI_CONFIG_DATA);
70 break;
71 }
72
73 return PCIBIOS_SUCCESSFUL;
74}
75
76static int pci_virtio_guest_read_config(struct pci_bus *bus, unsigned int devfn,
77 int reg, int size, u32 *val)
78{
79 pci_virtio_guest_write_config_addr(bus, devfn, reg);
80
81 switch (size) {
82 case 1:
83 *val = inb(PCI_CONFIG_DATA + (reg & 3));
84 break;
85 case 2:
86 *val = inw(PCI_CONFIG_DATA + (reg & 2));
87 break;
88 case 4:
89 *val = inl(PCI_CONFIG_DATA);
90 break;
91 }
92 return PCIBIOS_SUCCESSFUL;
93}
94
95static struct pci_ops pci_virtio_guest_ops = {
96 .read = pci_virtio_guest_read_config,
97 .write = pci_virtio_guest_write_config,
98};
99
100static struct resource pci_virtio_guest_mem_resource = {
101 .name = "Virtio MEM",
102 .flags = IORESOURCE_MEM,
103 .start = 0x10000000,
104 .end = 0x1dffffff
105};
106
107static struct resource pci_virtio_guest_io_resource = {
108 .name = "Virtio IO",
109 .flags = IORESOURCE_IO,
110 .start = 0,
111 .end = 0xffff
112};
113
114static struct pci_controller pci_virtio_guest_controller = {
115 .pci_ops = &pci_virtio_guest_ops,
116 .mem_resource = &pci_virtio_guest_mem_resource,
117 .io_resource = &pci_virtio_guest_io_resource,
118};
119
120static int __init pci_virtio_guest_setup(void)
121{
122 pr_err("pci_virtio_guest_setup\n");
123
124 /* Virtio comes pre-assigned */
125 pci_set_flags(PCI_PROBE_ONLY);
126
127 pci_virtio_guest_controller.io_map_base = mips_io_port_base;
128 register_pci_controller(&pci_virtio_guest_controller);
129 return 0;
130}
131arch_initcall(pci_virtio_guest_setup);
diff --git a/arch/mips/pmcs-msp71xx/Makefile b/arch/mips/pmcs-msp71xx/Makefile
index 9201c8b3858d..d4f7220f2485 100644
--- a/arch/mips/pmcs-msp71xx/Makefile
+++ b/arch/mips/pmcs-msp71xx/Makefile
@@ -10,4 +10,3 @@ obj-$(CONFIG_PCI) += msp_pci.o
10obj-$(CONFIG_MSP_HAS_MAC) += msp_eth.o 10obj-$(CONFIG_MSP_HAS_MAC) += msp_eth.o
11obj-$(CONFIG_MSP_HAS_USB) += msp_usb.o 11obj-$(CONFIG_MSP_HAS_USB) += msp_usb.o
12obj-$(CONFIG_MIPS_MT_SMP) += msp_smp.o 12obj-$(CONFIG_MIPS_MT_SMP) += msp_smp.o
13obj-$(CONFIG_MIPS_MT_SMTC) += msp_smtc.o
diff --git a/arch/mips/pmcs-msp71xx/msp_eth.c b/arch/mips/pmcs-msp71xx/msp_eth.c
index c584df393de2..15679b427f44 100644
--- a/arch/mips/pmcs-msp71xx/msp_eth.c
+++ b/arch/mips/pmcs-msp71xx/msp_eth.c
@@ -38,73 +38,6 @@
38#define MSP_ETHERNET_GPIO1 15 38#define MSP_ETHERNET_GPIO1 15
39#define MSP_ETHERNET_GPIO2 16 39#define MSP_ETHERNET_GPIO2 16
40 40
41#ifdef CONFIG_MSP_HAS_TSMAC
42#define MSP_TSMAC_SIZE 0x10020
43#define MSP_TSMAC_ID "pmc_tsmac"
44
45static struct resource msp_tsmac0_resources[] = {
46 [0] = {
47 .start = MSP_MAC0_BASE,
48 .end = MSP_MAC0_BASE + MSP_TSMAC_SIZE - 1,
49 .flags = IORESOURCE_MEM,
50 },
51 [1] = {
52 .start = MSP_INT_MAC0,
53 .end = MSP_INT_MAC0,
54 .flags = IORESOURCE_IRQ,
55 },
56};
57
58static struct resource msp_tsmac1_resources[] = {
59 [0] = {
60 .start = MSP_MAC1_BASE,
61 .end = MSP_MAC1_BASE + MSP_TSMAC_SIZE - 1,
62 .flags = IORESOURCE_MEM,
63 },
64 [1] = {
65 .start = MSP_INT_MAC1,
66 .end = MSP_INT_MAC1,
67 .flags = IORESOURCE_IRQ,
68 },
69};
70static struct resource msp_tsmac2_resources[] = {
71 [0] = {
72 .start = MSP_MAC2_BASE,
73 .end = MSP_MAC2_BASE + MSP_TSMAC_SIZE - 1,
74 .flags = IORESOURCE_MEM,
75 },
76 [1] = {
77 .start = MSP_INT_SAR,
78 .end = MSP_INT_SAR,
79 .flags = IORESOURCE_IRQ,
80 },
81};
82
83
84static struct platform_device tsmac_device[] = {
85 [0] = {
86 .name = MSP_TSMAC_ID,
87 .id = 0,
88 .num_resources = ARRAY_SIZE(msp_tsmac0_resources),
89 .resource = msp_tsmac0_resources,
90 },
91 [1] = {
92 .name = MSP_TSMAC_ID,
93 .id = 1,
94 .num_resources = ARRAY_SIZE(msp_tsmac1_resources),
95 .resource = msp_tsmac1_resources,
96 },
97 [2] = {
98 .name = MSP_TSMAC_ID,
99 .id = 2,
100 .num_resources = ARRAY_SIZE(msp_tsmac2_resources),
101 .resource = msp_tsmac2_resources,
102 },
103};
104#define msp_eth_devs tsmac_device
105
106#else
107/* If it is not TSMAC assume MSP_ETH (100Mbps) */
108#define MSP_ETH_ID "pmc_mspeth" 41#define MSP_ETH_ID "pmc_mspeth"
109#define MSP_ETH_SIZE 0xE0 42#define MSP_ETH_SIZE 0xE0
110static struct resource msp_eth0_resources[] = { 43static struct resource msp_eth0_resources[] = {
@@ -152,7 +85,6 @@ static struct platform_device mspeth_device[] = {
152}; 85};
153#define msp_eth_devs mspeth_device 86#define msp_eth_devs mspeth_device
154 87
155#endif
156int __init msp_eth_setup(void) 88int __init msp_eth_setup(void)
157{ 89{
158 int i, ret = 0; 90 int i, ret = 0;
@@ -161,14 +93,6 @@ int __init msp_eth_setup(void)
161 msp_gpio_pin_mode(MSP_GPIO_OUTPUT, MSP_ETHERNET_GPIO0); 93 msp_gpio_pin_mode(MSP_GPIO_OUTPUT, MSP_ETHERNET_GPIO0);
162 msp_gpio_pin_hi(MSP_ETHERNET_GPIO0); 94 msp_gpio_pin_hi(MSP_ETHERNET_GPIO0);
163 95
164#ifdef CONFIG_MSP_HAS_TSMAC
165 /* 3 phys on boards with TSMAC */
166 msp_gpio_pin_mode(MSP_GPIO_OUTPUT, MSP_ETHERNET_GPIO1);
167 msp_gpio_pin_hi(MSP_ETHERNET_GPIO1);
168
169 msp_gpio_pin_mode(MSP_GPIO_OUTPUT, MSP_ETHERNET_GPIO2);
170 msp_gpio_pin_hi(MSP_ETHERNET_GPIO2);
171#endif
172 for (i = 0; i < ARRAY_SIZE(msp_eth_devs); i++) { 96 for (i = 0; i < ARRAY_SIZE(msp_eth_devs); i++) {
173 ret = platform_device_register(&msp_eth_devs[i]); 97 ret = platform_device_register(&msp_eth_devs[i]);
174 printk(KERN_INFO "device: %d, return value = %d\n", i, ret); 98 printk(KERN_INFO "device: %d, return value = %d\n", i, ret);
diff --git a/arch/mips/pmcs-msp71xx/msp_irq.c b/arch/mips/pmcs-msp71xx/msp_irq.c
index 9da5619c00a5..941744aabb51 100644
--- a/arch/mips/pmcs-msp71xx/msp_irq.c
+++ b/arch/mips/pmcs-msp71xx/msp_irq.c
@@ -32,7 +32,7 @@ extern void msp_vsmp_int_init(void);
32 32
33/* vectored interrupt implementation */ 33/* vectored interrupt implementation */
34 34
35/* SW0/1 interrupts are used for SMP/SMTC */ 35/* SW0/1 interrupts are used for SMP */
36static inline void mac0_int_dispatch(void) { do_IRQ(MSP_INT_MAC0); } 36static inline void mac0_int_dispatch(void) { do_IRQ(MSP_INT_MAC0); }
37static inline void mac1_int_dispatch(void) { do_IRQ(MSP_INT_MAC1); } 37static inline void mac1_int_dispatch(void) { do_IRQ(MSP_INT_MAC1); }
38static inline void mac2_int_dispatch(void) { do_IRQ(MSP_INT_SAR); } 38static inline void mac2_int_dispatch(void) { do_IRQ(MSP_INT_SAR); }
@@ -138,14 +138,6 @@ void __init arch_init_irq(void)
138 set_vi_handler(MSP_INT_SEC, sec_int_dispatch); 138 set_vi_handler(MSP_INT_SEC, sec_int_dispatch);
139#ifdef CONFIG_MIPS_MT_SMP 139#ifdef CONFIG_MIPS_MT_SMP
140 msp_vsmp_int_init(); 140 msp_vsmp_int_init();
141#elif defined CONFIG_MIPS_MT_SMTC
142 /*Set hwmask for all platform devices */
143 irq_hwmask[MSP_INT_MAC0] = C_IRQ0;
144 irq_hwmask[MSP_INT_MAC1] = C_IRQ1;
145 irq_hwmask[MSP_INT_USB] = C_IRQ2;
146 irq_hwmask[MSP_INT_SAR] = C_IRQ3;
147 irq_hwmask[MSP_INT_SEC] = C_IRQ5;
148
149#endif /* CONFIG_MIPS_MT_SMP */ 141#endif /* CONFIG_MIPS_MT_SMP */
150#endif /* CONFIG_MIPS_MT */ 142#endif /* CONFIG_MIPS_MT */
151 /* setup the cascaded interrupts */ 143 /* setup the cascaded interrupts */
@@ -153,8 +145,10 @@ void __init arch_init_irq(void)
153 setup_irq(MSP_INT_PER, &per_cascade_msp); 145 setup_irq(MSP_INT_PER, &per_cascade_msp);
154 146
155#else 147#else
156 /* setup the 2nd-level SLP register based interrupt controller */ 148 /*
157 /* VSMP /SMTC support support is not enabled for SLP */ 149 * Setup the 2nd-level SLP register based interrupt controller.
150 * VSMP support support is not enabled for SLP.
151 */
158 msp_slp_irq_init(); 152 msp_slp_irq_init();
159 153
160 /* setup the cascaded SLP/PER interrupts */ 154 /* setup the cascaded SLP/PER interrupts */
diff --git a/arch/mips/pmcs-msp71xx/msp_irq_cic.c b/arch/mips/pmcs-msp71xx/msp_irq_cic.c
index e49b499f66db..b8df2f7b3328 100644
--- a/arch/mips/pmcs-msp71xx/msp_irq_cic.c
+++ b/arch/mips/pmcs-msp71xx/msp_irq_cic.c
@@ -120,10 +120,9 @@ static void msp_cic_irq_ack(struct irq_data *d)
120 * hurt for the others 120 * hurt for the others
121 */ 121 */
122 *CIC_STS_REG = (1 << (d->irq - MSP_CIC_INTBASE)); 122 *CIC_STS_REG = (1 << (d->irq - MSP_CIC_INTBASE));
123 smtc_im_ack_irq(d->irq);
124} 123}
125 124
126/*Note: Limiting to VSMP . Not tested in SMTC */ 125/* Note: Limiting to VSMP. */
127 126
128#ifdef CONFIG_MIPS_MT_SMP 127#ifdef CONFIG_MIPS_MT_SMP
129static int msp_cic_irq_set_affinity(struct irq_data *d, 128static int msp_cic_irq_set_affinity(struct irq_data *d,
@@ -183,10 +182,6 @@ void __init msp_cic_irq_init(void)
183 for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) { 182 for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) {
184 irq_set_chip_and_handler(i, &msp_cic_irq_controller, 183 irq_set_chip_and_handler(i, &msp_cic_irq_controller,
185 handle_level_irq); 184 handle_level_irq);
186#ifdef CONFIG_MIPS_MT_SMTC
187 /* Mask of CIC interrupt */
188 irq_hwmask[i] = C_IRQ4;
189#endif
190 } 185 }
191 186
192 /* Initialize the PER interrupt sub-system */ 187 /* Initialize the PER interrupt sub-system */
diff --git a/arch/mips/pmcs-msp71xx/msp_irq_per.c b/arch/mips/pmcs-msp71xx/msp_irq_per.c
index d1fd530479d4..a111836bcec2 100644
--- a/arch/mips/pmcs-msp71xx/msp_irq_per.c
+++ b/arch/mips/pmcs-msp71xx/msp_irq_per.c
@@ -113,9 +113,6 @@ void __init msp_per_irq_init(void)
113 /* initialize all the IRQ descriptors */ 113 /* initialize all the IRQ descriptors */
114 for (i = MSP_PER_INTBASE; i < MSP_PER_INTBASE + 32; i++) { 114 for (i = MSP_PER_INTBASE; i < MSP_PER_INTBASE + 32; i++) {
115 irq_set_chip(i, &msp_per_irq_controller); 115 irq_set_chip(i, &msp_per_irq_controller);
116#ifdef CONFIG_MIPS_MT_SMTC
117 irq_hwmask[i] = C_IRQ4;
118#endif
119 } 116 }
120} 117}
121 118
diff --git a/arch/mips/pmcs-msp71xx/msp_setup.c b/arch/mips/pmcs-msp71xx/msp_setup.c
index 7e980767679c..4f925e06c414 100644
--- a/arch/mips/pmcs-msp71xx/msp_setup.c
+++ b/arch/mips/pmcs-msp71xx/msp_setup.c
@@ -27,7 +27,6 @@
27#endif 27#endif
28 28
29extern void msp_serial_setup(void); 29extern void msp_serial_setup(void);
30extern void pmctwiled_setup(void);
31 30
32#if defined(CONFIG_PMC_MSP7120_EVAL) || \ 31#if defined(CONFIG_PMC_MSP7120_EVAL) || \
33 defined(CONFIG_PMC_MSP7120_GW) || \ 32 defined(CONFIG_PMC_MSP7120_GW) || \
@@ -148,8 +147,6 @@ void __init plat_mem_setup(void)
148 pm_power_off = msp_power_off; 147 pm_power_off = msp_power_off;
149} 148}
150 149
151extern struct plat_smp_ops msp_smtc_smp_ops;
152
153void __init prom_init(void) 150void __init prom_init(void)
154{ 151{
155 unsigned long family; 152 unsigned long family;
@@ -230,17 +227,5 @@ void __init prom_init(void)
230 */ 227 */
231 msp_serial_setup(); 228 msp_serial_setup();
232 229
233 if (register_vsmp_smp_ops()) { 230 register_vsmp_smp_ops();
234#ifdef CONFIG_MIPS_MT_SMTC
235 register_smp_ops(&msp_smtc_smp_ops);
236#endif
237 }
238
239#ifdef CONFIG_PMCTWILED
240 /*
241 * Setup LED states before the subsys_initcall loads other
242 * dependent drivers/modules.
243 */
244 pmctwiled_setup();
245#endif
246} 231}
diff --git a/arch/mips/pmcs-msp71xx/msp_smtc.c b/arch/mips/pmcs-msp71xx/msp_smtc.c
deleted file mode 100644
index 6b5607fce279..000000000000
--- a/arch/mips/pmcs-msp71xx/msp_smtc.c
+++ /dev/null
@@ -1,104 +0,0 @@
1/*
2 * MSP71xx Platform-specific hooks for SMP operation
3 */
4#include <linux/irq.h>
5#include <linux/init.h>
6
7#include <asm/mipsmtregs.h>
8#include <asm/mipsregs.h>
9#include <asm/smtc.h>
10#include <asm/smtc_ipi.h>
11
12/* VPE/SMP Prototype implements platform interfaces directly */
13
14/*
15 * Cause the specified action to be performed on a targeted "CPU"
16 */
17
18static void msp_smtc_send_ipi_single(int cpu, unsigned int action)
19{
20 /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
21 smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
22}
23
24static void msp_smtc_send_ipi_mask(const struct cpumask *mask,
25 unsigned int action)
26{
27 unsigned int i;
28
29 for_each_cpu(i, mask)
30 msp_smtc_send_ipi_single(i, action);
31}
32
33/*
34 * Post-config but pre-boot cleanup entry point
35 */
36static void msp_smtc_init_secondary(void)
37{
38 int myvpe;
39
40 /* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */
41 myvpe = read_c0_tcbind() & TCBIND_CURVPE;
42 if (myvpe > 0)
43 change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
44 STATUSF_IP6 | STATUSF_IP7);
45 smtc_init_secondary();
46}
47
48/*
49 * Platform "CPU" startup hook
50 */
51static void msp_smtc_boot_secondary(int cpu, struct task_struct *idle)
52{
53 smtc_boot_secondary(cpu, idle);
54}
55
56/*
57 * SMP initialization finalization entry point
58 */
59static void msp_smtc_smp_finish(void)
60{
61 smtc_smp_finish();
62}
63
64/*
65 * Hook for after all CPUs are online
66 */
67
68static void msp_smtc_cpus_done(void)
69{
70}
71
72/*
73 * Platform SMP pre-initialization
74 *
75 * As noted above, we can assume a single CPU for now
76 * but it may be multithreaded.
77 */
78
79static void __init msp_smtc_smp_setup(void)
80{
81 /*
82 * we won't get the definitive value until
83 * we've run smtc_prepare_cpus later, but
84 */
85
86 if (read_c0_config3() & (1 << 2))
87 smp_num_siblings = smtc_build_cpu_map(0);
88}
89
90static void __init msp_smtc_prepare_cpus(unsigned int max_cpus)
91{
92 smtc_prepare_cpus(max_cpus);
93}
94
95struct plat_smp_ops msp_smtc_smp_ops = {
96 .send_ipi_single = msp_smtc_send_ipi_single,
97 .send_ipi_mask = msp_smtc_send_ipi_mask,
98 .init_secondary = msp_smtc_init_secondary,
99 .smp_finish = msp_smtc_smp_finish,
100 .cpus_done = msp_smtc_cpus_done,
101 .boot_secondary = msp_smtc_boot_secondary,
102 .smp_setup = msp_smtc_smp_setup,
103 .prepare_cpus = msp_smtc_prepare_cpus,
104};
diff --git a/arch/mips/pmcs-msp71xx/msp_usb.c b/arch/mips/pmcs-msp71xx/msp_usb.c
index 4dab915696e7..c87c5f810cd1 100644
--- a/arch/mips/pmcs-msp71xx/msp_usb.c
+++ b/arch/mips/pmcs-msp71xx/msp_usb.c
@@ -75,47 +75,6 @@ static struct mspusb_device msp_usbhost0_device = {
75 .resource = msp_usbhost0_resources, 75 .resource = msp_usbhost0_resources,
76 }, 76 },
77}; 77};
78
79/* MSP7140/MSP82XX has two USB2 hosts. */
80#ifdef CONFIG_MSP_HAS_DUAL_USB
81static u64 msp_usbhost1_dma_mask = 0xffffffffUL;
82
83static struct resource msp_usbhost1_resources[] = {
84 [0] = { /* EHCI-HS operational and capabilities registers */
85 .start = MSP_USB1_HS_START,
86 .end = MSP_USB1_HS_END,
87 .flags = IORESOURCE_MEM,
88 },
89 [1] = {
90 .start = MSP_INT_USB,
91 .end = MSP_INT_USB,
92 .flags = IORESOURCE_IRQ,
93 },
94 [2] = { /* MSBus-to-AMBA bridge register space */
95 .start = MSP_USB1_MAB_START,
96 .end = MSP_USB1_MAB_END,
97 .flags = IORESOURCE_MEM,
98 },
99 [3] = { /* Identification and general hardware parameters */
100 .start = MSP_USB1_ID_START,
101 .end = MSP_USB1_ID_END,
102 .flags = IORESOURCE_MEM,
103 },
104};
105
106static struct mspusb_device msp_usbhost1_device = {
107 .dev = {
108 .name = "pmcmsp-ehci",
109 .id = 1,
110 .dev = {
111 .dma_mask = &msp_usbhost1_dma_mask,
112 .coherent_dma_mask = 0xffffffffUL,
113 },
114 .num_resources = ARRAY_SIZE(msp_usbhost1_resources),
115 .resource = msp_usbhost1_resources,
116 },
117};
118#endif /* CONFIG_MSP_HAS_DUAL_USB */
119#endif /* CONFIG_USB_EHCI_HCD */ 78#endif /* CONFIG_USB_EHCI_HCD */
120 79
121#if defined(CONFIG_USB_GADGET) 80#if defined(CONFIG_USB_GADGET)
@@ -157,46 +116,6 @@ static struct mspusb_device msp_usbdev0_device = {
157 .resource = msp_usbdev0_resources, 116 .resource = msp_usbdev0_resources,
158 }, 117 },
159}; 118};
160
161#ifdef CONFIG_MSP_HAS_DUAL_USB
162static struct resource msp_usbdev1_resources[] = {
163 [0] = { /* EHCI-HS operational and capabilities registers */
164 .start = MSP_USB1_HS_START,
165 .end = MSP_USB1_HS_END,
166 .flags = IORESOURCE_MEM,
167 },
168 [1] = {
169 .start = MSP_INT_USB,
170 .end = MSP_INT_USB,
171 .flags = IORESOURCE_IRQ,
172 },
173 [2] = { /* MSBus-to-AMBA bridge register space */
174 .start = MSP_USB1_MAB_START,
175 .end = MSP_USB1_MAB_END,
176 .flags = IORESOURCE_MEM,
177 },
178 [3] = { /* Identification and general hardware parameters */
179 .start = MSP_USB1_ID_START,
180 .end = MSP_USB1_ID_END,
181 .flags = IORESOURCE_MEM,
182 },
183};
184
185/* This may need to be converted to a mspusb_device, too. */
186static struct mspusb_device msp_usbdev1_device = {
187 .dev = {
188 .name = "msp71xx_udc",
189 .id = 0,
190 .dev = {
191 .dma_mask = &msp_usbdev_dma_mask,
192 .coherent_dma_mask = 0xffffffffUL,
193 },
194 .num_resources = ARRAY_SIZE(msp_usbdev1_resources),
195 .resource = msp_usbdev1_resources,
196 },
197};
198
199#endif /* CONFIG_MSP_HAS_DUAL_USB */
200#endif /* CONFIG_USB_GADGET */ 119#endif /* CONFIG_USB_GADGET */
201 120
202static int __init msp_usb_setup(void) 121static int __init msp_usb_setup(void)
@@ -231,10 +150,6 @@ static int __init msp_usb_setup(void)
231#if defined(CONFIG_USB_EHCI_HCD) 150#if defined(CONFIG_USB_EHCI_HCD)
232 msp_devs[0] = &msp_usbhost0_device.dev; 151 msp_devs[0] = &msp_usbhost0_device.dev;
233 ppfinit("platform add USB HOST done %s.\n", msp_devs[0]->name); 152 ppfinit("platform add USB HOST done %s.\n", msp_devs[0]->name);
234#ifdef CONFIG_MSP_HAS_DUAL_USB
235 msp_devs[1] = &msp_usbhost1_device.dev;
236 ppfinit("platform add USB HOST done %s.\n", msp_devs[1]->name);
237#endif
238#else 153#else
239 ppfinit("%s: echi_hcd not supported\n", __FILE__); 154 ppfinit("%s: echi_hcd not supported\n", __FILE__);
240#endif /* CONFIG_USB_EHCI_HCD */ 155#endif /* CONFIG_USB_EHCI_HCD */
@@ -244,11 +159,6 @@ static int __init msp_usb_setup(void)
244 msp_devs[0] = &msp_usbdev0_device.dev; 159 msp_devs[0] = &msp_usbdev0_device.dev;
245 ppfinit("platform add USB DEVICE done %s.\n" 160 ppfinit("platform add USB DEVICE done %s.\n"
246 , msp_devs[0]->name); 161 , msp_devs[0]->name);
247#ifdef CONFIG_MSP_HAS_DUAL_USB
248 msp_devs[1] = &msp_usbdev1_device.dev;
249 ppfinit("platform add USB DEVICE done %s.\n"
250 , msp_devs[1]->name);
251#endif
252#else 162#else
253 ppfinit("%s: usb_gadget not supported\n", __FILE__); 163 ppfinit("%s: usb_gadget not supported\n", __FILE__);
254#endif /* CONFIG_USB_GADGET */ 164#endif /* CONFIG_USB_GADGET */
diff --git a/arch/mips/pnx833x/common/platform.c b/arch/mips/pnx833x/common/platform.c
index 2b7e837dc2e2..b4b774bc3178 100644
--- a/arch/mips/pnx833x/common/platform.c
+++ b/arch/mips/pnx833x/common/platform.c
@@ -33,11 +33,6 @@
33#include <linux/mtd/nand.h> 33#include <linux/mtd/nand.h>
34#include <linux/mtd/partitions.h> 34#include <linux/mtd/partitions.h>
35 35
36#ifdef CONFIG_I2C_PNX0105
37/* Until i2c driver available in kernel.*/
38#include <linux/i2c-pnx0105.h>
39#endif
40
41#include <irq.h> 36#include <irq.h>
42#include <irq-mapping.h> 37#include <irq-mapping.h>
43#include <pnx833x.h> 38#include <pnx833x.h>
@@ -134,70 +129,6 @@ static struct platform_device pnx833x_usb_ehci_device = {
134 .resource = pnx833x_usb_ehci_resources, 129 .resource = pnx833x_usb_ehci_resources,
135}; 130};
136 131
137#ifdef CONFIG_I2C_PNX0105
138static struct resource pnx833x_i2c0_resources[] = {
139 {
140 .start = PNX833X_I2C0_PORTS_START,
141 .end = PNX833X_I2C0_PORTS_END,
142 .flags = IORESOURCE_MEM,
143 },
144 {
145 .start = PNX833X_PIC_I2C0_INT,
146 .end = PNX833X_PIC_I2C0_INT,
147 .flags = IORESOURCE_IRQ,
148 },
149};
150
151static struct resource pnx833x_i2c1_resources[] = {
152 {
153 .start = PNX833X_I2C1_PORTS_START,
154 .end = PNX833X_I2C1_PORTS_END,
155 .flags = IORESOURCE_MEM,
156 },
157 {
158 .start = PNX833X_PIC_I2C1_INT,
159 .end = PNX833X_PIC_I2C1_INT,
160 .flags = IORESOURCE_IRQ,
161 },
162};
163
164static struct i2c_pnx0105_dev pnx833x_i2c_dev[] = {
165 {
166 .base = PNX833X_I2C0_PORTS_START,
167 .irq = -1, /* should be PNX833X_PIC_I2C0_INT but polling is faster */
168 .clock = 6, /* 0 == 400 kHz, 4 == 100 kHz(Maximum HDMI), 6 = 50kHz(Preferred HDCP) */
169 .bus_addr = 0, /* no slave support */
170 },
171 {
172 .base = PNX833X_I2C1_PORTS_START,
173 .irq = -1, /* on high freq, polling is faster */
174 /*.irq = PNX833X_PIC_I2C1_INT,*/
175 .clock = 4, /* 0 == 400 kHz, 4 == 100 kHz. 100 kHz seems a safe default for now */
176 .bus_addr = 0, /* no slave support */
177 },
178};
179
180static struct platform_device pnx833x_i2c0_device = {
181 .name = "i2c-pnx0105",
182 .id = 0,
183 .dev = {
184 .platform_data = &pnx833x_i2c_dev[0],
185 },
186 .num_resources = ARRAY_SIZE(pnx833x_i2c0_resources),
187 .resource = pnx833x_i2c0_resources,
188};
189
190static struct platform_device pnx833x_i2c1_device = {
191 .name = "i2c-pnx0105",
192 .id = 1,
193 .dev = {
194 .platform_data = &pnx833x_i2c_dev[1],
195 },
196 .num_resources = ARRAY_SIZE(pnx833x_i2c1_resources),
197 .resource = pnx833x_i2c1_resources,
198};
199#endif
200
201static u64 ethernet_dmamask = DMA_BIT_MASK(32); 132static u64 ethernet_dmamask = DMA_BIT_MASK(32);
202 133
203static struct resource pnx833x_ethernet_resources[] = { 134static struct resource pnx833x_ethernet_resources[] = {
@@ -294,10 +225,6 @@ static struct platform_device pnx833x_flash_nand = {
294static struct platform_device *pnx833x_platform_devices[] __initdata = { 225static struct platform_device *pnx833x_platform_devices[] __initdata = {
295 &pnx833x_uart_device, 226 &pnx833x_uart_device,
296 &pnx833x_usb_ehci_device, 227 &pnx833x_usb_ehci_device,
297#ifdef CONFIG_I2C_PNX0105
298 &pnx833x_i2c0_device,
299 &pnx833x_i2c1_device,
300#endif
301 &pnx833x_ethernet_device, 228 &pnx833x_ethernet_device,
302 &pnx833x_sata_device, 229 &pnx833x_sata_device,
303 &pnx833x_flash_nand, 230 &pnx833x_flash_nand,
diff --git a/arch/mips/sgi-ip22/ip22-gio.c b/arch/mips/sgi-ip22/ip22-gio.c
index ab0e379dc7e0..8e52446286ca 100644
--- a/arch/mips/sgi-ip22/ip22-gio.c
+++ b/arch/mips/sgi-ip22/ip22-gio.c
@@ -19,6 +19,9 @@ static struct {
19} gio_name_table[] = { 19} gio_name_table[] = {
20 { .name = "SGI Impact", .id = 0x10 }, 20 { .name = "SGI Impact", .id = 0x10 },
21 { .name = "Phobos G160", .id = 0x35 }, 21 { .name = "Phobos G160", .id = 0x35 },
22 { .name = "Phobos G130", .id = 0x36 },
23 { .name = "Phobos G100", .id = 0x37 },
24 { .name = "Set Engineering GFE", .id = 0x38 },
22 /* fake IDs */ 25 /* fake IDs */
23 { .name = "SGI Newport", .id = 0x7e }, 26 { .name = "SGI Newport", .id = 0x7e },
24 { .name = "SGI GR2/GR3", .id = 0x7f }, 27 { .name = "SGI GR2/GR3", .id = 0x7f },
@@ -293,7 +296,16 @@ static int ip22_gio_id(unsigned long addr, u32 *res)
293 * data matches 296 * data matches
294 */ 297 */
295 ptr8 = (void *)CKSEG1ADDR(addr + 3); 298 ptr8 = (void *)CKSEG1ADDR(addr + 3);
296 get_dbe(tmp8, ptr8); 299 if (get_dbe(tmp8, ptr8)) {
300 /*
301 * 32bit access worked, but 8bit doesn't
302 * so we don't see phantom reads on
303 * a pipelined bus, but a real card which
304 * doesn't support 8 bit reads
305 */
306 *res = tmp32;
307 return 1;
308 }
297 ptr16 = (void *)CKSEG1ADDR(addr + 2); 309 ptr16 = (void *)CKSEG1ADDR(addr + 2);
298 get_dbe(tmp16, ptr16); 310 get_dbe(tmp16, ptr16);
299 if (tmp8 == (tmp16 & 0xff) && 311 if (tmp8 == (tmp16 & 0xff) &&
@@ -324,7 +336,7 @@ static int ip22_is_gr2(unsigned long addr)
324} 336}
325 337
326 338
327static void ip22_check_gio(int slotno, unsigned long addr) 339static void ip22_check_gio(int slotno, unsigned long addr, int irq)
328{ 340{
329 const char *name = "Unknown"; 341 const char *name = "Unknown";
330 struct gio_device *gio_dev; 342 struct gio_device *gio_dev;
@@ -338,9 +350,9 @@ static void ip22_check_gio(int slotno, unsigned long addr)
338 else { 350 else {
339 if (!ip22_gio_id(addr, &tmp)) { 351 if (!ip22_gio_id(addr, &tmp)) {
340 /* 352 /*
341 * no GIO signature at start address of slot, but 353 * no GIO signature at start address of slot
342 * Newport doesn't have one, so let's check usea 354 * since Newport doesn't have one, we check if
343 * status register 355 * user status register is readable
344 */ 356 */
345 if (ip22_gio_id(addr + NEWPORT_USTATUS_OFFS, &tmp)) 357 if (ip22_gio_id(addr + NEWPORT_USTATUS_OFFS, &tmp))
346 tmp = 0x7e; 358 tmp = 0x7e;
@@ -369,6 +381,7 @@ static void ip22_check_gio(int slotno, unsigned long addr)
369 gio_dev->resource.start = addr; 381 gio_dev->resource.start = addr;
370 gio_dev->resource.end = addr + 0x3fffff; 382 gio_dev->resource.end = addr + 0x3fffff;
371 gio_dev->resource.flags = IORESOURCE_MEM; 383 gio_dev->resource.flags = IORESOURCE_MEM;
384 gio_dev->irq = irq;
372 dev_set_name(&gio_dev->dev, "%d", slotno); 385 dev_set_name(&gio_dev->dev, "%d", slotno);
373 gio_device_register(gio_dev); 386 gio_device_register(gio_dev);
374 } else 387 } else
@@ -408,16 +421,17 @@ int __init ip22_gio_init(void)
408 request_resource(&iomem_resource, &gio_bus_resource); 421 request_resource(&iomem_resource, &gio_bus_resource);
409 printk(KERN_INFO "GIO: Probing bus...\n"); 422 printk(KERN_INFO "GIO: Probing bus...\n");
410 423
411 if (ip22_is_fullhouse() || 424 if (ip22_is_fullhouse()) {
412 !get_dbe(pbdma, (unsigned int *)&hpc3c1->pbdma[1])) { 425 /* Indigo2 */
413 /* Indigo2 and ChallengeS */ 426 ip22_check_gio(0, GIO_SLOT_GFX_BASE, SGI_GIO_1_IRQ);
414 ip22_check_gio(0, GIO_SLOT_GFX_BASE); 427 ip22_check_gio(1, GIO_SLOT_EXP0_BASE, SGI_GIO_1_IRQ);
415 ip22_check_gio(1, GIO_SLOT_EXP0_BASE);
416 } else { 428 } else {
417 /* Indy */ 429 /* Indy/Challenge S */
418 ip22_check_gio(0, GIO_SLOT_GFX_BASE); 430 if (get_dbe(pbdma, (unsigned int *)&hpc3c1->pbdma[1]))
419 ip22_check_gio(1, GIO_SLOT_EXP0_BASE); 431 ip22_check_gio(0, GIO_SLOT_GFX_BASE,
420 ip22_check_gio(2, GIO_SLOT_EXP1_BASE); 432 SGI_GIO_0_IRQ);
433 ip22_check_gio(1, GIO_SLOT_EXP0_BASE, SGI_GIOEXP0_IRQ);
434 ip22_check_gio(2, GIO_SLOT_EXP1_BASE, SGI_GIOEXP1_IRQ);
421 } 435 }
422 } else 436 } else
423 device_unregister(&gio_bus); 437 device_unregister(&gio_bus);
diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c
index 58b40ae59335..c66889fc4913 100644
--- a/arch/mips/sgi-ip22/ip22-int.c
+++ b/arch/mips/sgi-ip22/ip22-int.c
@@ -119,9 +119,14 @@ static void indy_local0_irqdispatch(void)
119 } else 119 } else
120 irq = lc0msk_to_irqnr[mask]; 120 irq = lc0msk_to_irqnr[mask];
121 121
122 /* if irq == 0, then the interrupt has already been cleared */ 122 /*
123 * workaround for INT2 bug; if irq == 0, INT2 has seen a fifo full
124 * irq, but failed to latch it into status register
125 */
123 if (irq) 126 if (irq)
124 do_IRQ(irq); 127 do_IRQ(irq);
128 else
129 do_IRQ(SGINT_LOCAL0 + 0);
125} 130}
126 131
127static void indy_local1_irqdispatch(void) 132static void indy_local1_irqdispatch(void)
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index f4ea8aa79ba2..f9ae6a8fa7c7 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -186,10 +186,6 @@ static void ip27_smp_finish(void)
186 local_irq_enable(); 186 local_irq_enable();
187} 187}
188 188
189static void __init ip27_cpus_done(void)
190{
191}
192
193/* 189/*
194 * Launch a slave into smp_bootstrap(). It doesn't take an argument, and we 190 * Launch a slave into smp_bootstrap(). It doesn't take an argument, and we
195 * set sp to the kernel stack of the newly created idle process, gp to the proc 191 * set sp to the kernel stack of the newly created idle process, gp to the proc
@@ -236,7 +232,6 @@ struct plat_smp_ops ip27_smp_ops = {
236 .send_ipi_mask = ip27_send_ipi_mask, 232 .send_ipi_mask = ip27_send_ipi_mask,
237 .init_secondary = ip27_init_secondary, 233 .init_secondary = ip27_init_secondary,
238 .smp_finish = ip27_smp_finish, 234 .smp_finish = ip27_smp_finish,
239 .cpus_done = ip27_cpus_done,
240 .boot_secondary = ip27_boot_secondary, 235 .boot_secondary = ip27_boot_secondary,
241 .smp_setup = ip27_smp_setup, 236 .smp_setup = ip27_smp_setup,
242 .prepare_cpus = ip27_prepare_cpus, 237 .prepare_cpus = ip27_prepare_cpus,
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c
index 59cfe2659771..373fbbc8425c 100644
--- a/arch/mips/sibyte/bcm1480/irq.c
+++ b/arch/mips/sibyte/bcm1480/irq.c
@@ -347,19 +347,8 @@ asmlinkage void plat_irq_dispatch(void)
347 unsigned int cpu = smp_processor_id(); 347 unsigned int cpu = smp_processor_id();
348 unsigned int pending; 348 unsigned int pending;
349 349
350#ifdef CONFIG_SIBYTE_BCM1480_PROF
351 /* Set compare to count to silence count/compare timer interrupts */
352 write_c0_compare(read_c0_count());
353#endif
354
355 pending = read_c0_cause() & read_c0_status(); 350 pending = read_c0_cause() & read_c0_status();
356 351
357#ifdef CONFIG_SIBYTE_BCM1480_PROF
358 if (pending & CAUSEF_IP7) /* Cpu performance counter interrupt */
359 sbprof_cpu_intr();
360 else
361#endif
362
363 if (pending & CAUSEF_IP4) 352 if (pending & CAUSEF_IP4)
364 do_IRQ(K_BCM1480_INT_TIMER_0 + cpu); 353 do_IRQ(K_BCM1480_INT_TIMER_0 + cpu);
365#ifdef CONFIG_SMP 354#ifdef CONFIG_SMP
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index 70d9182b26f1..af7d44edd9a8 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -115,13 +115,6 @@ static void bcm1480_smp_finish(void)
115} 115}
116 116
117/* 117/*
118 * Final cleanup after all secondaries booted
119 */
120static void bcm1480_cpus_done(void)
121{
122}
123
124/*
125 * Setup the PC, SP, and GP of a secondary processor and start it 118 * Setup the PC, SP, and GP of a secondary processor and start it
126 * running! 119 * running!
127 */ 120 */
@@ -170,7 +163,6 @@ struct plat_smp_ops bcm1480_smp_ops = {
170 .send_ipi_mask = bcm1480_send_ipi_mask, 163 .send_ipi_mask = bcm1480_send_ipi_mask,
171 .init_secondary = bcm1480_init_secondary, 164 .init_secondary = bcm1480_init_secondary,
172 .smp_finish = bcm1480_smp_finish, 165 .smp_finish = bcm1480_smp_finish,
173 .cpus_done = bcm1480_cpus_done,
174 .boot_secondary = bcm1480_boot_secondary, 166 .boot_secondary = bcm1480_boot_secondary,
175 .smp_setup = bcm1480_smp_setup, 167 .smp_setup = bcm1480_smp_setup,
176 .prepare_cpus = bcm1480_prepare_cpus, 168 .prepare_cpus = bcm1480_prepare_cpus,
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index db976117dd4d..c0c4b3f88a08 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -103,13 +103,6 @@ static void sb1250_smp_finish(void)
103} 103}
104 104
105/* 105/*
106 * Final cleanup after all secondaries booted
107 */
108static void sb1250_cpus_done(void)
109{
110}
111
112/*
113 * Setup the PC, SP, and GP of a secondary processor and start it 106 * Setup the PC, SP, and GP of a secondary processor and start it
114 * running! 107 * running!
115 */ 108 */
@@ -158,7 +151,6 @@ struct plat_smp_ops sb_smp_ops = {
158 .send_ipi_mask = sb1250_send_ipi_mask, 151 .send_ipi_mask = sb1250_send_ipi_mask,
159 .init_secondary = sb1250_init_secondary, 152 .init_secondary = sb1250_init_secondary,
160 .smp_finish = sb1250_smp_finish, 153 .smp_finish = sb1250_smp_finish,
161 .cpus_done = sb1250_cpus_done,
162 .boot_secondary = sb1250_boot_secondary, 154 .boot_secondary = sb1250_boot_secondary,
163 .smp_setup = sb1250_smp_setup, 155 .smp_setup = sb1250_smp_setup,
164 .prepare_cpus = sb1250_prepare_cpus, 156 .prepare_cpus = sb1250_prepare_cpus,
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 2b0b83c171e0..dd2cf25b5ae5 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -309,8 +309,8 @@ static void __init preprocess_cmdline(void)
309 txx9_board_vec = find_board_byname(str + 6); 309 txx9_board_vec = find_board_byname(str + 6);
310 continue; 310 continue;
311 } else if (strncmp(str, "masterclk=", 10) == 0) { 311 } else if (strncmp(str, "masterclk=", 10) == 0) {
312 unsigned long val; 312 unsigned int val;
313 if (strict_strtoul(str + 10, 10, &val) == 0) 313 if (kstrtouint(str + 10, 10, &val) == 0)
314 txx9_master_clock = val; 314 txx9_master_clock = val;
315 continue; 315 continue;
316 } else if (strcmp(str, "icdisable") == 0) { 316 } else if (strcmp(str, "icdisable") == 0) {