author    Jens Axboe <axboe@kernel.dk>  2013-07-02 02:31:48 -0400
committer Jens Axboe <axboe@kernel.dk>  2013-07-02 02:31:48 -0400
commit    5f0e5afa0de4522abb3ea7d1369039b94e740ec5 (patch)
tree      6a5be3db9ecfed8ef2150c6146f6d1e0d658ac8b /arch/mips
parent    d752b2696072ed52fd5afab08b601e2220a3b87e (diff)
parent    9e895ace5d82df8929b16f58e9f515f6d54ab82d (diff)
Merge tag 'v3.10-rc7' into for-3.11/drivers
Linux 3.10-rc7

Pull this in early to avoid doing it with the bcache merge, since there are a number of changes to bcache between my old base (3.10-rc1) and the new pull request.
Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/alchemy/board-gpr.c | 1
-rw-r--r--  arch/mips/alchemy/common/time.c | 1
-rw-r--r--  arch/mips/ath79/setup.c | 1
-rw-r--r--  arch/mips/cavium-octeon/setup.c | 15
-rw-r--r--  arch/mips/cobalt/reset.c | 1
-rw-r--r--  arch/mips/configs/db1000_defconfig | 1
-rw-r--r--  arch/mips/configs/db1235_defconfig | 1
-rw-r--r--  arch/mips/configs/lemote2f_defconfig | 1
-rw-r--r--  arch/mips/include/asm/clock.h | 2
-rw-r--r--  arch/mips/include/asm/idle.h | 23
-rw-r--r--  arch/mips/include/asm/io.h | 2
-rw-r--r--  arch/mips/include/asm/kvm.h | 55
-rw-r--r--  arch/mips/include/asm/kvm_host.h | 6
-rw-r--r--  arch/mips/include/asm/mmu_context.h | 97
-rw-r--r--  arch/mips/include/asm/page.h | 17
-rw-r--r--  arch/mips/include/asm/processor.h | 1
-rw-r--r--  arch/mips/include/asm/ptrace.h | 32
-rw-r--r--  arch/mips/include/uapi/asm/kvm.h | 135
-rw-r--r--  arch/mips/include/uapi/asm/ptrace.h | 17
-rw-r--r--  arch/mips/include/uapi/asm/unistd.h | 5
-rw-r--r--  arch/mips/kernel/Makefile | 2
-rw-r--r--  arch/mips/kernel/binfmt_elfn32.c | 11
-rw-r--r--  arch/mips/kernel/binfmt_elfo32.c | 11
-rw-r--r--  arch/mips/kernel/cpu-probe.c | 198
-rw-r--r--  arch/mips/kernel/crash_dump.c | 1
-rw-r--r--  arch/mips/kernel/ftrace.c | 4
-rw-r--r--  arch/mips/kernel/genex.S | 8
-rw-r--r--  arch/mips/kernel/idle.c | 245
-rw-r--r--  arch/mips/kernel/kprobes.c | 5
-rw-r--r--  arch/mips/kernel/proc.c | 1
-rw-r--r--  arch/mips/kernel/process.c | 53
-rw-r--r--  arch/mips/kernel/rtlx.c | 1
-rw-r--r--  arch/mips/kernel/scall64-64.S | 1
-rw-r--r--  arch/mips/kernel/smp.c | 1
-rw-r--r--  arch/mips/kernel/smtc.c | 15
-rw-r--r--  arch/mips/kernel/traps.c | 43
-rw-r--r--  arch/mips/kvm/kvm_mips.c | 305
-rw-r--r--  arch/mips/kvm/kvm_mips_emul.c | 29
-rw-r--r--  arch/mips/kvm/kvm_tlb.c | 61
-rw-r--r--  arch/mips/kvm/kvm_trap_emul.c | 50
-rw-r--r--  arch/mips/lantiq/xway/gptu.c | 6
-rw-r--r--  arch/mips/lib/dump_tlb.c | 5
-rw-r--r--  arch/mips/lib/r3k_dump_tlb.c | 7
-rw-r--r--  arch/mips/loongson/common/reset.c | 1
-rw-r--r--  arch/mips/loongson1/common/reset.c | 1
-rw-r--r--  arch/mips/mm/tlb-r3k.c | 20
-rw-r--r--  arch/mips/mm/tlb-r4k.c | 2
-rw-r--r--  arch/mips/mm/tlb-r8k.c | 2
-rw-r--r--  arch/mips/mm/tlbex.c | 83
-rw-r--r--  arch/mips/netlogic/xlp/setup.c | 1
-rw-r--r--  arch/mips/netlogic/xlr/setup.c | 1
-rw-r--r--  arch/mips/pmcs-msp71xx/msp_prom.c | 2
-rw-r--r--  arch/mips/pmcs-msp71xx/msp_setup.c | 1
-rw-r--r--  arch/mips/ralink/dts/rt3050.dtsi | 10
-rw-r--r--  arch/mips/ralink/dts/rt3052_eval.dts | 4
-rw-r--r--  arch/mips/ralink/of.c | 2
-rw-r--r--  arch/mips/txx9/generic/setup.c | 1
-rw-r--r--  arch/mips/vr41xx/common/pmu.c | 1
-rw-r--r--  arch/mips/wrppmc/reset.c | 1
59 files changed, 988 insertions(+), 622 deletions(-)
diff --git a/arch/mips/alchemy/board-gpr.c b/arch/mips/alchemy/board-gpr.c
index cb0f6afb7389..9edc35ff8cf1 100644
--- a/arch/mips/alchemy/board-gpr.c
+++ b/arch/mips/alchemy/board-gpr.c
@@ -31,6 +31,7 @@
 #include <linux/i2c.h>
 #include <linux/i2c-gpio.h>
 #include <asm/bootinfo.h>
+#include <asm/idle.h>
 #include <asm/reboot.h>
 #include <asm/mach-au1x00/au1000.h>
 #include <prom.h>
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c
index 38afb11ba2c4..93fa586d52e2 100644
--- a/arch/mips/alchemy/common/time.c
+++ b/arch/mips/alchemy/common/time.c
@@ -36,6 +36,7 @@
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
 
+#include <asm/idle.h>
 #include <asm/processor.h>
 #include <asm/time.h>
 #include <asm/mach-au1x00/au1000.h>
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index a0233a2c1988..8be4e856b8b8 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -19,6 +19,7 @@
 #include <linux/clk.h>
 
 #include <asm/bootinfo.h>
+#include <asm/idle.h>
 #include <asm/time.h>		/* for mips_hpt_frequency */
 #include <asm/reboot.h>		/* for _machine_{restart,halt} */
 #include <asm/mips_machine.h>
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index b0baa299f899..01b1b3f94feb 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -428,13 +428,16 @@ static void octeon_restart(char *command)
  */
 static void octeon_kill_core(void *arg)
 {
-	mb();
-	if (octeon_is_simulation()) {
-		/* The simulator needs the watchdog to stop for dead cores */
-		cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
+	if (octeon_is_simulation())
 		/* A break instruction causes the simulator stop a core */
-		asm volatile ("sync\nbreak");
-	}
+		asm volatile ("break" ::: "memory");
+
+	local_irq_disable();
+	/* Disable watchdog on this core. */
+	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
+	/* Spin in a low power mode. */
+	while (true)
+		asm volatile ("wait" ::: "memory");
 }
 
 
diff --git a/arch/mips/cobalt/reset.c b/arch/mips/cobalt/reset.c
index 516b4428df4e..4eedd481dd00 100644
--- a/arch/mips/cobalt/reset.c
+++ b/arch/mips/cobalt/reset.c
@@ -12,6 +12,7 @@
 #include <linux/io.h>
 #include <linux/leds.h>
 
+#include <asm/idle.h>
 #include <asm/processor.h>
 
 #include <cobalt.h>
diff --git a/arch/mips/configs/db1000_defconfig b/arch/mips/configs/db1000_defconfig
index face9d26e6d5..bac26b971c5e 100644
--- a/arch/mips/configs/db1000_defconfig
+++ b/arch/mips/configs/db1000_defconfig
@@ -228,7 +228,6 @@ CONFIG_HIDRAW=y
 CONFIG_USB_HID=y
 CONFIG_USB_SUPPORT=y
 CONFIG_USB=y
-CONFIG_USB_SUSPEND=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
 CONFIG_USB_EHCI_TT_NEWSCHED=y
diff --git a/arch/mips/configs/db1235_defconfig b/arch/mips/configs/db1235_defconfig
index 14752dde7540..e2b4ad55462f 100644
--- a/arch/mips/configs/db1235_defconfig
+++ b/arch/mips/configs/db1235_defconfig
@@ -344,7 +344,6 @@ CONFIG_UHID=y
 CONFIG_USB_HIDDEV=y
 CONFIG_USB=y
 CONFIG_USB_DYNAMIC_MINORS=y
-CONFIG_USB_SUSPEND=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index b6acd2f256b6..343bebc4b63b 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -300,7 +300,6 @@ CONFIG_USB=y
 CONFIG_USB_DEVICEFS=y
 # CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_DYNAMIC_MINORS=y
-CONFIG_USB_SUSPEND=y
 CONFIG_USB_OTG_WHITELIST=y
 CONFIG_USB_MON=y
 CONFIG_USB_EHCI_HCD=y
diff --git a/arch/mips/include/asm/clock.h b/arch/mips/include/asm/clock.h
index c9456e7a7283..778e32d817bc 100644
--- a/arch/mips/include/asm/clock.h
+++ b/arch/mips/include/asm/clock.h
@@ -6,8 +6,6 @@
 #include <linux/seq_file.h>
 #include <linux/clk.h>
 
-extern void (*cpu_wait) (void);
-
 struct clk;
 
 struct clk_ops {
diff --git a/arch/mips/include/asm/idle.h b/arch/mips/include/asm/idle.h
new file mode 100644
index 000000000000..d192158886b1
--- /dev/null
+++ b/arch/mips/include/asm/idle.h
@@ -0,0 +1,23 @@
+#ifndef __ASM_IDLE_H
+#define __ASM_IDLE_H
+
+#include <linux/linkage.h>
+
+extern void (*cpu_wait)(void);
+extern void r4k_wait(void);
+extern asmlinkage void __r4k_wait(void);
+extern void r4k_wait_irqoff(void);
+extern void __pastwait(void);
+
+static inline int using_rollback_handler(void)
+{
+	return cpu_wait == r4k_wait;
+}
+
+static inline int address_is_in_r4k_wait_irqoff(unsigned long addr)
+{
+	return addr >= (unsigned long)r4k_wait_irqoff &&
+	       addr < (unsigned long)__pastwait;
+}
+
+#endif /* __ASM_IDLE_H */
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 1be13727323f..b7e59853fd33 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -118,7 +118,7 @@ static inline void set_io_port_base(unsigned long base)
  */
 static inline unsigned long virt_to_phys(volatile const void *address)
 {
-	return (unsigned long)address - PAGE_OFFSET + PHYS_OFFSET;
+	return __pa(address);
 }
 
 /*
diff --git a/arch/mips/include/asm/kvm.h b/arch/mips/include/asm/kvm.h
deleted file mode 100644
index 85789eacbf18..000000000000
--- a/arch/mips/include/asm/kvm.h
+++ /dev/null
@@ -1,55 +0,0 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7* Authors: Sanjay Lal <sanjayl@kymasys.com>
8*/
9
10#ifndef __LINUX_KVM_MIPS_H
11#define __LINUX_KVM_MIPS_H
12
13#include <linux/types.h>
14
15#define __KVM_MIPS
16
17#define N_MIPS_COPROC_REGS 32
18#define N_MIPS_COPROC_SEL 8
19
20/* for KVM_GET_REGS and KVM_SET_REGS */
21struct kvm_regs {
22 __u32 gprs[32];
23 __u32 hi;
24 __u32 lo;
25 __u32 pc;
26
27 __u32 cp0reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
28};
29
30/* for KVM_GET_SREGS and KVM_SET_SREGS */
31struct kvm_sregs {
32};
33
34/* for KVM_GET_FPU and KVM_SET_FPU */
35struct kvm_fpu {
36};
37
38struct kvm_debug_exit_arch {
39};
40
41/* for KVM_SET_GUEST_DEBUG */
42struct kvm_guest_debug_arch {
43};
44
45struct kvm_mips_interrupt {
46 /* in */
47 __u32 cpu;
48 __u32 irq;
49};
50
51/* definition of registers in kvm_run */
52struct kvm_sync_regs {
53};
54
55#endif /* __LINUX_KVM_MIPS_H */
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index e68781e18387..4d6fa0bf1305 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -336,7 +336,7 @@ enum emulation_result {
 #define VPN2_MASK 0xffffe000
 #define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G))
 #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
-#define TLB_ASID(x) (ASID_MASK((x).tlb_hi))
+#define TLB_ASID(x) ((x).tlb_hi & ASID_MASK)
 #define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V))
 
 struct kvm_mips_tlb {
@@ -496,10 +496,6 @@ struct kvm_mips_callbacks {
 			   uint32_t cause);
 	int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority,
 			  uint32_t cause);
-	int (*vcpu_ioctl_get_regs) (struct kvm_vcpu *vcpu,
-				    struct kvm_regs *regs);
-	int (*vcpu_ioctl_set_regs) (struct kvm_vcpu *vcpu,
-				    struct kvm_regs *regs);
 };
 extern struct kvm_mips_callbacks *kvm_mips_callbacks;
 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 1554721e4808..516e6e9a5594 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -67,68 +67,45 @@ extern unsigned long pgd_current[];
67 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) 67 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
68#endif 68#endif
69#endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/ 69#endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
70#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
70 71
71#define ASID_INC(asid) \ 72#define ASID_INC 0x40
72({ \ 73#define ASID_MASK 0xfc0
73 unsigned long __asid = asid; \ 74
74 __asm__("1:\taddiu\t%0,1\t\t\t\t# patched\n\t" \ 75#elif defined(CONFIG_CPU_R8000)
75 ".section\t__asid_inc,\"a\"\n\t" \ 76
76 ".word\t1b\n\t" \ 77#define ASID_INC 0x10
77 ".previous" \ 78#define ASID_MASK 0xff0
78 :"=r" (__asid) \ 79
79 :"0" (__asid)); \ 80#elif defined(CONFIG_MIPS_MT_SMTC)
80 __asid; \ 81
81}) 82#define ASID_INC 0x1
82#define ASID_MASK(asid) \ 83extern unsigned long smtc_asid_mask;
83({ \ 84#define ASID_MASK (smtc_asid_mask)
84 unsigned long __asid = asid; \ 85#define HW_ASID_MASK 0xff
85 __asm__("1:\tandi\t%0,%1,0xfc0\t\t\t# patched\n\t" \ 86/* End SMTC/34K debug hack */
86 ".section\t__asid_mask,\"a\"\n\t" \ 87#else /* FIXME: not correct for R6000 */
87 ".word\t1b\n\t" \ 88
88 ".previous" \ 89#define ASID_INC 0x1
89 :"=r" (__asid) \ 90#define ASID_MASK 0xff
90 :"r" (__asid)); \
91 __asid; \
92})
93#define ASID_VERSION_MASK \
94({ \
95 unsigned long __asid; \
96 __asm__("1:\taddiu\t%0,$0,0xff00\t\t\t\t# patched\n\t" \
97 ".section\t__asid_version_mask,\"a\"\n\t" \
98 ".word\t1b\n\t" \
99 ".previous" \
100 :"=r" (__asid)); \
101 __asid; \
102})
103#define ASID_FIRST_VERSION \
104({ \
105 unsigned long __asid = asid; \
106 __asm__("1:\tli\t%0,0x100\t\t\t\t# patched\n\t" \
107 ".section\t__asid_first_version,\"a\"\n\t" \
108 ".word\t1b\n\t" \
109 ".previous" \
110 :"=r" (__asid)); \
111 __asid; \
112})
113
114#define ASID_FIRST_VERSION_R3000 0x1000
115#define ASID_FIRST_VERSION_R4000 0x100
116#define ASID_FIRST_VERSION_R8000 0x1000
117#define ASID_FIRST_VERSION_RM9000 0x1000
118 91
119#ifdef CONFIG_MIPS_MT_SMTC
120#define SMTC_HW_ASID_MASK 0xff
121extern unsigned int smtc_asid_mask;
122#endif 92#endif
123 93
124#define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) 94#define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
125#define cpu_asid(cpu, mm) ASID_MASK(cpu_context((cpu), (mm))) 95#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK)
126#define asid_cache(cpu) (cpu_data[cpu].asid_cache) 96#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
127 97
128static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 98static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
129{ 99{
130} 100}
131 101
102/*
103 * All unused by hardware upper bits will be considered
104 * as a software asid extension.
105 */
106#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
107#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
108
132#ifndef CONFIG_MIPS_MT_SMTC 109#ifndef CONFIG_MIPS_MT_SMTC
133/* Normal, classic MIPS get_new_mmu_context */ 110/* Normal, classic MIPS get_new_mmu_context */
134static inline void 111static inline void
@@ -137,10 +114,10 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
137 extern void kvm_local_flush_tlb_all(void); 114 extern void kvm_local_flush_tlb_all(void);
138 unsigned long asid = asid_cache(cpu); 115 unsigned long asid = asid_cache(cpu);
139 116
140 if (!ASID_MASK((asid = ASID_INC(asid)))) { 117 if (! ((asid += ASID_INC) & ASID_MASK) ) {
141 if (cpu_has_vtag_icache) 118 if (cpu_has_vtag_icache)
142 flush_icache_all(); 119 flush_icache_all();
143#ifdef CONFIG_VIRTUALIZATION 120#ifdef CONFIG_KVM
144 kvm_local_flush_tlb_all(); /* start new asid cycle */ 121 kvm_local_flush_tlb_all(); /* start new asid cycle */
145#else 122#else
146 local_flush_tlb_all(); /* start new asid cycle */ 123 local_flush_tlb_all(); /* start new asid cycle */
@@ -200,7 +177,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
200 * free up the ASID value for use and flush any old 177 * free up the ASID value for use and flush any old
201 * instances of it from the TLB. 178 * instances of it from the TLB.
202 */ 179 */
203 oldasid = ASID_MASK(read_c0_entryhi()); 180 oldasid = (read_c0_entryhi() & ASID_MASK);
204 if(smtc_live_asid[mytlb][oldasid]) { 181 if(smtc_live_asid[mytlb][oldasid]) {
205 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); 182 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
206 if(smtc_live_asid[mytlb][oldasid] == 0) 183 if(smtc_live_asid[mytlb][oldasid] == 0)
@@ -211,7 +188,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
211 * having ASID_MASK smaller than the hardware maximum, 188 * having ASID_MASK smaller than the hardware maximum,
212 * make sure no "soft" bits become "hard"... 189 * make sure no "soft" bits become "hard"...
213 */ 190 */
214 write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) | 191 write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
215 cpu_asid(cpu, next)); 192 cpu_asid(cpu, next));
216 ehb(); /* Make sure it propagates to TCStatus */ 193 ehb(); /* Make sure it propagates to TCStatus */
217 evpe(mtflags); 194 evpe(mtflags);
@@ -264,15 +241,15 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
264#ifdef CONFIG_MIPS_MT_SMTC 241#ifdef CONFIG_MIPS_MT_SMTC
265 /* See comments for similar code above */ 242 /* See comments for similar code above */
266 mtflags = dvpe(); 243 mtflags = dvpe();
267 oldasid = ASID_MASK(read_c0_entryhi()); 244 oldasid = read_c0_entryhi() & ASID_MASK;
268 if(smtc_live_asid[mytlb][oldasid]) { 245 if(smtc_live_asid[mytlb][oldasid]) {
269 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); 246 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
270 if(smtc_live_asid[mytlb][oldasid] == 0) 247 if(smtc_live_asid[mytlb][oldasid] == 0)
271 smtc_flush_tlb_asid(oldasid); 248 smtc_flush_tlb_asid(oldasid);
272 } 249 }
273 /* See comments for similar code above */ 250 /* See comments for similar code above */
274 write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) | 251 write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
275 cpu_asid(cpu, next)); 252 cpu_asid(cpu, next));
276 ehb(); /* Make sure it propagates to TCStatus */ 253 ehb(); /* Make sure it propagates to TCStatus */
277 evpe(mtflags); 254 evpe(mtflags);
278#else 255#else
@@ -309,14 +286,14 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
309#ifdef CONFIG_MIPS_MT_SMTC 286#ifdef CONFIG_MIPS_MT_SMTC
310 /* See comments for similar code above */ 287 /* See comments for similar code above */
311 prevvpe = dvpe(); 288 prevvpe = dvpe();
312 oldasid = ASID_MASK(read_c0_entryhi()); 289 oldasid = (read_c0_entryhi() & ASID_MASK);
313 if (smtc_live_asid[mytlb][oldasid]) { 290 if (smtc_live_asid[mytlb][oldasid]) {
314 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); 291 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
315 if(smtc_live_asid[mytlb][oldasid] == 0) 292 if(smtc_live_asid[mytlb][oldasid] == 0)
316 smtc_flush_tlb_asid(oldasid); 293 smtc_flush_tlb_asid(oldasid);
317 } 294 }
318 /* See comments for similar code above */ 295 /* See comments for similar code above */
319 write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) 296 write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
320 | cpu_asid(cpu, mm)); 297 | cpu_asid(cpu, mm));
321 ehb(); /* Make sure it propagates to TCStatus */ 298 ehb(); /* Make sure it propagates to TCStatus */
322 evpe(prevvpe); 299 evpe(prevvpe);
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index eab99e536b5c..f59552fae917 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -46,7 +46,6 @@
46#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ 46#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
47 47
48#include <linux/pfn.h> 48#include <linux/pfn.h>
49#include <asm/io.h>
50 49
51extern void build_clear_page(void); 50extern void build_clear_page(void);
52extern void build_copy_page(void); 51extern void build_copy_page(void);
@@ -151,6 +150,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
151 ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) 150 ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
152#endif 151#endif
153#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) 152#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
153#include <asm/io.h>
154 154
155/* 155/*
156 * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad 156 * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
@@ -171,14 +171,13 @@ typedef struct { unsigned long pgprot; } pgprot_t;
171 171
172#ifdef CONFIG_FLATMEM 172#ifdef CONFIG_FLATMEM
173 173
174#define pfn_valid(pfn) \ 174static inline int pfn_valid(unsigned long pfn)
175({ \ 175{
176 unsigned long __pfn = (pfn); \ 176 /* avoid <linux/mm.h> include hell */
177 /* avoid <linux/bootmem.h> include hell */ \ 177 extern unsigned long max_mapnr;
178 extern unsigned long min_low_pfn; \ 178
179 \ 179 return pfn >= ARCH_PFN_OFFSET && pfn < max_mapnr;
180 __pfn >= min_low_pfn && __pfn < max_mapnr; \ 180}
181})
182 181
183#elif defined(CONFIG_SPARSEMEM) 182#elif defined(CONFIG_SPARSEMEM)
184 183
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 71686c897dea..1470b7b68b0e 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -28,7 +28,6 @@
 /*
  * System setup and hardware flags..
  */
-extern void (*cpu_wait)(void);
 
 extern unsigned int vced_count, vcei_count;
 
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index a3186f2bb8a0..5e6cd0947393 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -16,6 +16,38 @@
16#include <asm/isadep.h> 16#include <asm/isadep.h>
17#include <uapi/asm/ptrace.h> 17#include <uapi/asm/ptrace.h>
18 18
19/*
20 * This struct defines the way the registers are stored on the stack during a
21 * system call/exception. As usual the registers k0/k1 aren't being saved.
22 */
23struct pt_regs {
24#ifdef CONFIG_32BIT
25 /* Pad bytes for argument save space on the stack. */
26 unsigned long pad0[6];
27#endif
28
29 /* Saved main processor registers. */
30 unsigned long regs[32];
31
32 /* Saved special registers. */
33 unsigned long cp0_status;
34 unsigned long hi;
35 unsigned long lo;
36#ifdef CONFIG_CPU_HAS_SMARTMIPS
37 unsigned long acx;
38#endif
39 unsigned long cp0_badvaddr;
40 unsigned long cp0_cause;
41 unsigned long cp0_epc;
42#ifdef CONFIG_MIPS_MT_SMTC
43 unsigned long cp0_tcstatus;
44#endif /* CONFIG_MIPS_MT_SMTC */
45#ifdef CONFIG_CPU_CAVIUM_OCTEON
46 unsigned long long mpl[3]; /* MTM{0,1,2} */
47 unsigned long long mtp[3]; /* MTP{0,1,2} */
48#endif
49} __aligned(8);
50
19struct task_struct; 51struct task_struct;
20 52
21extern int ptrace_getregs(struct task_struct *child, __s64 __user *data); 53extern int ptrace_getregs(struct task_struct *child, __s64 __user *data);
diff --git a/arch/mips/include/uapi/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h
new file mode 100644
index 000000000000..f09ff5ae2059
--- /dev/null
+++ b/arch/mips/include/uapi/asm/kvm.h
@@ -0,0 +1,135 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7 * Copyright (C) 2013 Cavium, Inc.
8 * Authors: Sanjay Lal <sanjayl@kymasys.com>
9 */
10
11#ifndef __LINUX_KVM_MIPS_H
12#define __LINUX_KVM_MIPS_H
13
14#include <linux/types.h>
15
16/*
17 * KVM MIPS specific structures and definitions.
18 *
19 * Some parts derived from the x86 version of this file.
20 */
21
22/*
23 * for KVM_GET_REGS and KVM_SET_REGS
24 *
25 * If Config[AT] is zero (32-bit CPU), the register contents are
26 * stored in the lower 32-bits of the struct kvm_regs fields and sign
27 * extended to 64-bits.
28 */
29struct kvm_regs {
30 /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
31 __u64 gpr[32];
32 __u64 hi;
33 __u64 lo;
34 __u64 pc;
35};
36
37/*
38 * for KVM_GET_FPU and KVM_SET_FPU
39 *
40 * If Status[FR] is zero (32-bit FPU), the upper 32-bits of the FPRs
41 * are zero filled.
42 */
43struct kvm_fpu {
44 __u64 fpr[32];
45 __u32 fir;
46 __u32 fccr;
47 __u32 fexr;
48 __u32 fenr;
49 __u32 fcsr;
50 __u32 pad;
51};
52
53
54/*
55 * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access CP0
56 * registers. The id field is broken down as follows:
57 *
58 * bits[2..0] - Register 'sel' index.
59 * bits[7..3] - Register 'rd' index.
60 * bits[15..8] - Must be zero.
61 * bits[31..16] - 1 -> CP0 registers.
62 * bits[51..32] - Must be zero.
63 * bits[63..52] - As per linux/kvm.h
64 *
65 * Other sets registers may be added in the future. Each set would
66 * have its own identifier in bits[31..16].
67 *
68 * The registers defined in struct kvm_regs are also accessible, the
69 * id values for these are below.
70 */
71
72#define KVM_REG_MIPS_R0 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0)
73#define KVM_REG_MIPS_R1 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 1)
74#define KVM_REG_MIPS_R2 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 2)
75#define KVM_REG_MIPS_R3 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 3)
76#define KVM_REG_MIPS_R4 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 4)
77#define KVM_REG_MIPS_R5 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 5)
78#define KVM_REG_MIPS_R6 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 6)
79#define KVM_REG_MIPS_R7 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 7)
80#define KVM_REG_MIPS_R8 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 8)
81#define KVM_REG_MIPS_R9 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 9)
82#define KVM_REG_MIPS_R10 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 10)
83#define KVM_REG_MIPS_R11 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 11)
84#define KVM_REG_MIPS_R12 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 12)
85#define KVM_REG_MIPS_R13 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 13)
86#define KVM_REG_MIPS_R14 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 14)
87#define KVM_REG_MIPS_R15 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 15)
88#define KVM_REG_MIPS_R16 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 16)
89#define KVM_REG_MIPS_R17 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 17)
90#define KVM_REG_MIPS_R18 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 18)
91#define KVM_REG_MIPS_R19 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 19)
92#define KVM_REG_MIPS_R20 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 20)
93#define KVM_REG_MIPS_R21 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 21)
94#define KVM_REG_MIPS_R22 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 22)
95#define KVM_REG_MIPS_R23 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 23)
96#define KVM_REG_MIPS_R24 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 24)
97#define KVM_REG_MIPS_R25 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 25)
98#define KVM_REG_MIPS_R26 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 26)
99#define KVM_REG_MIPS_R27 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 27)
100#define KVM_REG_MIPS_R28 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 28)
101#define KVM_REG_MIPS_R29 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 29)
102#define KVM_REG_MIPS_R30 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 30)
103#define KVM_REG_MIPS_R31 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 31)
104
105#define KVM_REG_MIPS_HI (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 32)
106#define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33)
107#define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34)
108
109/*
110 * KVM MIPS specific structures and definitions
111 *
112 */
113struct kvm_debug_exit_arch {
114 __u64 epc;
115};
116
117/* for KVM_SET_GUEST_DEBUG */
118struct kvm_guest_debug_arch {
119};
120
121/* definition of registers in kvm_run */
122struct kvm_sync_regs {
123};
124
125/* dummy definition */
126struct kvm_sregs {
127};
128
129struct kvm_mips_interrupt {
130 /* in */
131 __u32 cpu;
132 __u32 irq;
133};
134
135#endif /* __LINUX_KVM_MIPS_H */
diff --git a/arch/mips/include/uapi/asm/ptrace.h b/arch/mips/include/uapi/asm/ptrace.h
index 4d58d8468705..b26f7e317279 100644
--- a/arch/mips/include/uapi/asm/ptrace.h
+++ b/arch/mips/include/uapi/asm/ptrace.h
@@ -22,16 +22,12 @@
22#define DSP_CONTROL 77 22#define DSP_CONTROL 77
23#define ACX 78 23#define ACX 78
24 24
25#ifndef __KERNEL__
25/* 26/*
26 * This struct defines the way the registers are stored on the stack during a 27 * This struct defines the way the registers are stored on the stack during a
27 * system call/exception. As usual the registers k0/k1 aren't being saved. 28 * system call/exception. As usual the registers k0/k1 aren't being saved.
28 */ 29 */
29struct pt_regs { 30struct pt_regs {
30#ifdef CONFIG_32BIT
31 /* Pad bytes for argument save space on the stack. */
32 unsigned long pad0[6];
33#endif
34
35 /* Saved main processor registers. */ 31 /* Saved main processor registers. */
36 unsigned long regs[32]; 32 unsigned long regs[32];
37 33
@@ -39,20 +35,11 @@ struct pt_regs {
39 unsigned long cp0_status; 35 unsigned long cp0_status;
40 unsigned long hi; 36 unsigned long hi;
41 unsigned long lo; 37 unsigned long lo;
42#ifdef CONFIG_CPU_HAS_SMARTMIPS
43 unsigned long acx;
44#endif
45 unsigned long cp0_badvaddr; 38 unsigned long cp0_badvaddr;
46 unsigned long cp0_cause; 39 unsigned long cp0_cause;
47 unsigned long cp0_epc; 40 unsigned long cp0_epc;
48#ifdef CONFIG_MIPS_MT_SMTC
49 unsigned long cp0_tcstatus;
50#endif /* CONFIG_MIPS_MT_SMTC */
51#ifdef CONFIG_CPU_CAVIUM_OCTEON
52 unsigned long long mpl[3]; /* MTM{0,1,2} */
53 unsigned long long mtp[3]; /* MTP{0,1,2} */
54#endif
55} __attribute__ ((aligned (8))); 41} __attribute__ ((aligned (8)));
42#endif /* __KERNEL__ */
56 43
57/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ 44/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
58#define PTRACE_GETREGS 12 45#define PTRACE_GETREGS 12
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 16338b84fa79..1dee279f9665 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -694,16 +694,17 @@
 #define __NR_process_vm_writev (__NR_Linux + 305)
 #define __NR_kcmp (__NR_Linux + 306)
 #define __NR_finit_module (__NR_Linux + 307)
+#define __NR_getdents64 (__NR_Linux + 308)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls 307
+#define __NR_Linux_syscalls 308
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 307
+#define __NR_64_Linux_syscalls 308
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 6ad9e04bdf62..423d871a946b 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -4,7 +4,7 @@
 
 extra-y := head.o vmlinux.lds
 
-obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
+obj-y += cpu-probe.o branch.o entry.o genex.o idle.o irq.o process.o \
 	prom.o ptrace.o reset.o setup.o signal.o syscall.o \
 	time.o topology.o traps.o unaligned.o watch.o vdso.o
 
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index e06f777e9c49..1188e00bb120 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -119,4 +119,15 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
 #undef TASK_SIZE
 #define TASK_SIZE TASK_SIZE32
 
+#undef cputime_to_timeval
+#define cputime_to_timeval cputime_to_compat_timeval
+static __inline__ void
+cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
+{
+	unsigned long jiffies = cputime_to_jiffies(cputime);
+
+	value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
+	value->tv_sec = jiffies / HZ;
+}
+
 #include "../../../fs/binfmt_elf.c"
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index 97c5a1668e53..202e581e6096 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -162,4 +162,15 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
 #undef TASK_SIZE
 #define TASK_SIZE TASK_SIZE32
 
+#undef cputime_to_timeval
+#define cputime_to_timeval cputime_to_compat_timeval
+static __inline__ void
+cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
+{
+	unsigned long jiffies = cputime_to_jiffies(cputime);
+
+	value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
+	value->tv_sec = jiffies / HZ;
+}
+
 #include "../../../fs/binfmt_elf.c"
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 4bbffdb9024f..c6568bf4b1b0 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -27,105 +27,6 @@
27#include <asm/spram.h> 27#include <asm/spram.h>
28#include <asm/uaccess.h> 28#include <asm/uaccess.h>
29 29
30/*
31 * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
32 * the implementation of the "wait" feature differs between CPU families. This
33 * points to the function that implements CPU specific wait.
34 * The wait instruction stops the pipeline and reduces the power consumption of
35 * the CPU very much.
36 */
37void (*cpu_wait)(void);
38EXPORT_SYMBOL(cpu_wait);
39
40static void r3081_wait(void)
41{
42 unsigned long cfg = read_c0_conf();
43 write_c0_conf(cfg | R30XX_CONF_HALT);
44}
45
46static void r39xx_wait(void)
47{
48 local_irq_disable();
49 if (!need_resched())
50 write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
51 local_irq_enable();
52}
53
54extern void r4k_wait(void);
55
56/*
57 * This variant is preferable as it allows testing need_resched and going to
58 * sleep depending on the outcome atomically. Unfortunately the "It is
59 * implementation-dependent whether the pipeline restarts when a non-enabled
60 * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
61 * using this version a gamble.
62 */
63void r4k_wait_irqoff(void)
64{
65 local_irq_disable();
66 if (!need_resched())
67 __asm__(" .set push \n"
68 " .set mips3 \n"
69 " wait \n"
70 " .set pop \n");
71 local_irq_enable();
72 __asm__(" .globl __pastwait \n"
73 "__pastwait: \n");
74}
75
76/*
77 * The RM7000 variant has to handle erratum 38. The workaround is to not
78 * have any pending stores when the WAIT instruction is executed.
79 */
80static void rm7k_wait_irqoff(void)
81{
82 local_irq_disable();
83 if (!need_resched())
84 __asm__(
85 " .set push \n"
86 " .set mips3 \n"
87 " .set noat \n"
88 " mfc0 $1, $12 \n"
89 " sync \n"
90 " mtc0 $1, $12 # stalls until W stage \n"
91 " wait \n"
92 " mtc0 $1, $12 # stalls until W stage \n"
93 " .set pop \n");
94 local_irq_enable();
95}
96
97/*
98 * The Au1xxx wait is available only if using 32khz counter or
99 * external timer source, but specifically not CP0 Counter.
100 * alchemy/common/time.c may override cpu_wait!
101 */
102static void au1k_wait(void)
103{
104 __asm__(" .set mips3 \n"
105 " cache 0x14, 0(%0) \n"
106 " cache 0x14, 32(%0) \n"
107 " sync \n"
108 " nop \n"
109 " wait \n"
110 " nop \n"
111 " nop \n"
112 " nop \n"
113 " nop \n"
114 " .set mips0 \n"
115 : : "r" (au1k_wait));
116}
117
118static int __initdata nowait;
119
120static int __init wait_disable(char *s)
121{
122 nowait = 1;
123
124 return 1;
125}
126
127__setup("nowait", wait_disable);
128
129static int __cpuinitdata mips_fpu_disabled; 30static int __cpuinitdata mips_fpu_disabled;
130 31
131static int __init fpu_disable(char *s) 32static int __init fpu_disable(char *s)
@@ -150,105 +51,6 @@ static int __init dsp_disable(char *s)
150 51
151__setup("nodsp", dsp_disable); 52__setup("nodsp", dsp_disable);
152 53
153void __init check_wait(void)
154{
155 struct cpuinfo_mips *c = &current_cpu_data;
156
157 if (nowait) {
158 printk("Wait instruction disabled.\n");
159 return;
160 }
161
162 switch (c->cputype) {
163 case CPU_R3081:
164 case CPU_R3081E:
165 cpu_wait = r3081_wait;
166 break;
167 case CPU_TX3927:
168 cpu_wait = r39xx_wait;
169 break;
170 case CPU_R4200:
171/* case CPU_R4300: */
172 case CPU_R4600:
173 case CPU_R4640:
174 case CPU_R4650:
175 case CPU_R4700:
176 case CPU_R5000:
177 case CPU_R5500:
178 case CPU_NEVADA:
179 case CPU_4KC:
180 case CPU_4KEC:
181 case CPU_4KSC:
182 case CPU_5KC:
183 case CPU_25KF:
184 case CPU_PR4450:
185 case CPU_BMIPS3300:
186 case CPU_BMIPS4350:
187 case CPU_BMIPS4380:
188 case CPU_BMIPS5000:
189 case CPU_CAVIUM_OCTEON:
190 case CPU_CAVIUM_OCTEON_PLUS:
191 case CPU_CAVIUM_OCTEON2:
192 case CPU_JZRISC:
193 case CPU_LOONGSON1:
194 case CPU_XLR:
195 case CPU_XLP:
196 cpu_wait = r4k_wait;
197 break;
198
199 case CPU_RM7000:
200 cpu_wait = rm7k_wait_irqoff;
201 break;
202
203 case CPU_M14KC:
204 case CPU_M14KEC:
205 case CPU_24K:
206 case CPU_34K:
207 case CPU_1004K:
208 cpu_wait = r4k_wait;
209 if (read_c0_config7() & MIPS_CONF7_WII)
210 cpu_wait = r4k_wait_irqoff;
211 break;
212
213 case CPU_74K:
214 cpu_wait = r4k_wait;
215 if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
216 cpu_wait = r4k_wait_irqoff;
217 break;
218
219 case CPU_TX49XX:
220 cpu_wait = r4k_wait_irqoff;
221 break;
222 case CPU_ALCHEMY:
223 cpu_wait = au1k_wait;
224 break;
225 case CPU_20KC:
226 /*
227 * WAIT on Rev1.0 has E1, E2, E3 and E16.
228 * WAIT on Rev2.0 and Rev3.0 has E16.
229 * Rev3.1 WAIT is nop, why bother
230 */
231 if ((c->processor_id & 0xff) <= 0x64)
232 break;
233
234 /*
235 * Another rev is incremeting c0_count at a reduced clock
236 * rate while in WAIT mode. So we basically have the choice
237 * between using the cp0 timer as clocksource or avoiding
238 * the WAIT instruction. Until more details are known,
239 * disable the use of WAIT for 20Kc entirely.
240 cpu_wait = r4k_wait;
241 */
242 break;
243 case CPU_RM9000:
244 if ((c->processor_id & 0x00ff) >= 0x40)
245 cpu_wait = r4k_wait;
246 break;
247 default:
248 break;
249 }
250}
251
252static inline void check_errata(void) 54static inline void check_errata(void)
253{ 55{
254 struct cpuinfo_mips *c = &current_cpu_data; 56 struct cpuinfo_mips *c = &current_cpu_data;
diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c
index 35bed0d2342c..3be9e7bb30ff 100644
--- a/arch/mips/kernel/crash_dump.c
+++ b/arch/mips/kernel/crash_dump.c
@@ -2,6 +2,7 @@
 #include <linux/bootmem.h>
 #include <linux/crash_dump.h>
 #include <asm/uaccess.h>
+#include <linux/slab.h>
 
 static int __init parse_savemaxmem(char *p)
 {
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index cf5509f13dd5..dba90ec0dc38 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -25,12 +25,16 @@
 #define MCOUNT_OFFSET_INSNS 4
 #endif
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+
 /* Arch override because MIPS doesn't need to run this from stop_machine() */
 void arch_ftrace_update_code(int command)
 {
 	ftrace_modify_all_code(command);
 }
 
+#endif
+
 /*
  * Check if the address is in kernel space
  *
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 5c2ba9f08a80..31fa856829cb 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -122,7 +122,7 @@ handle_vcei:
 	__FINIT
 
 	.align	5	/* 32 byte rollback region */
-LEAF(r4k_wait)
+LEAF(__r4k_wait)
 	.set	push
 	.set	noreorder
 	/* start of rollback region */
@@ -146,14 +146,14 @@ LEAF(r4k_wait)
 	jr	ra
 	nop
 	.set	pop
-	END(r4k_wait)
+	END(__r4k_wait)
 
 	.macro	BUILD_ROLLBACK_PROLOGUE handler
 	FEXPORT(rollback_\handler)
 	.set	push
 	.set	noat
 	MFC0	k0, CP0_EPC
-	PTR_LA	k1, r4k_wait
+	PTR_LA	k1, __r4k_wait
 	ori	k0, 0x1f	/* 32 byte rollback region */
 	xori	k0, 0x1f
 	bne	k0, k1, 9f
@@ -493,7 +493,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
 	.set	noreorder
 	/* check if TLB contains a entry for EPC */
 	MFC0	k1, CP0_ENTRYHI
-	andi	k1, 0xff	/* ASID_MASK patched at run-time!! */
+	andi	k1, 0xff	/* ASID_MASK */
 	MFC0	k0, CP0_EPC
 	PTR_SRL	k0, _PAGE_SHIFT + 1
 	PTR_SLL	k0, _PAGE_SHIFT + 1
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
new file mode 100644
index 000000000000..0c655deeea4a
--- /dev/null
+++ b/arch/mips/kernel/idle.c
@@ -0,0 +1,245 @@
1/*
2 * MIPS idle loop and WAIT instruction support.
3 *
4 * Copyright (C) xxxx the Anonymous
5 * Copyright (C) 1994 - 2006 Ralf Baechle
6 * Copyright (C) 2003, 2004 Maciej W. Rozycki
7 * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14#include <linux/export.h>
15#include <linux/init.h>
16#include <linux/irqflags.h>
17#include <linux/printk.h>
18#include <linux/sched.h>
19#include <asm/cpu.h>
20#include <asm/cpu-info.h>
21#include <asm/idle.h>
22#include <asm/mipsregs.h>
23
24/*
25 * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
26 * the implementation of the "wait" feature differs between CPU families. This
27 * points to the function that implements CPU specific wait.
28 * The wait instruction stops the pipeline and reduces the power consumption of
29 * the CPU very much.
30 */
31void (*cpu_wait)(void);
32EXPORT_SYMBOL(cpu_wait);
33
34static void r3081_wait(void)
35{
36 unsigned long cfg = read_c0_conf();
37 write_c0_conf(cfg | R30XX_CONF_HALT);
38 local_irq_enable();
39}
40
41static void r39xx_wait(void)
42{
43 if (!need_resched())
44 write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
45 local_irq_enable();
46}
47
48void r4k_wait(void)
49{
50 local_irq_enable();
51 __r4k_wait();
52}
53
54/*
55 * This variant is preferable as it allows testing need_resched and going to
56 * sleep depending on the outcome atomically. Unfortunately the "It is
57 * implementation-dependent whether the pipeline restarts when a non-enabled
58 * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
59 * using this version a gamble.
60 */
61void r4k_wait_irqoff(void)
62{
63 if (!need_resched())
64 __asm__(
65 " .set push \n"
66 " .set mips3 \n"
67 " wait \n"
68 " .set pop \n");
69 local_irq_enable();
70 __asm__(
71 " .globl __pastwait \n"
72 "__pastwait: \n");
73}
74
75/*
76 * The RM7000 variant has to handle erratum 38. The workaround is to not
77 * have any pending stores when the WAIT instruction is executed.
78 */
79static void rm7k_wait_irqoff(void)
80{
81 if (!need_resched())
82 __asm__(
83 " .set push \n"
84 " .set mips3 \n"
85 " .set noat \n"
86 " mfc0 $1, $12 \n"
87 " sync \n"
88 " mtc0 $1, $12 # stalls until W stage \n"
89 " wait \n"
90 " mtc0 $1, $12 # stalls until W stage \n"
91 " .set pop \n");
92 local_irq_enable();
93}
94
95/*
96 * Au1 'wait' is only useful when the 32kHz counter is used as timer,
97 * since coreclock (and the cp0 counter) stops upon executing it. Only an
98 * interrupt can wake it, so they must be enabled before entering idle modes.
99 */
100static void au1k_wait(void)
101{
102 unsigned long c0status = read_c0_status() | 1; /* irqs on */
103
104 __asm__(
105 " .set mips3 \n"
106 " cache 0x14, 0(%0) \n"
107 " cache 0x14, 32(%0) \n"
108 " sync \n"
109 " mtc0 %1, $12 \n" /* wr c0status */
110 " wait \n"
111 " nop \n"
112 " nop \n"
113 " nop \n"
114 " nop \n"
115 " .set mips0 \n"
116 : : "r" (au1k_wait), "r" (c0status));
117}
118
119static int __initdata nowait;
120
121static int __init wait_disable(char *s)
122{
123 nowait = 1;
124
125 return 1;
126}
127
128__setup("nowait", wait_disable);
129
130void __init check_wait(void)
131{
132 struct cpuinfo_mips *c = &current_cpu_data;
133
134 if (nowait) {
135 printk("Wait instruction disabled.\n");
136 return;
137 }
138
139 switch (c->cputype) {
140 case CPU_R3081:
141 case CPU_R3081E:
142 cpu_wait = r3081_wait;
143 break;
144 case CPU_TX3927:
145 cpu_wait = r39xx_wait;
146 break;
147 case CPU_R4200:
148/* case CPU_R4300: */
149 case CPU_R4600:
150 case CPU_R4640:
151 case CPU_R4650:
152 case CPU_R4700:
153 case CPU_R5000:
154 case CPU_R5500:
155 case CPU_NEVADA:
156 case CPU_4KC:
157 case CPU_4KEC:
158 case CPU_4KSC:
159 case CPU_5KC:
160 case CPU_25KF:
161 case CPU_PR4450:
162 case CPU_BMIPS3300:
163 case CPU_BMIPS4350:
164 case CPU_BMIPS4380:
165 case CPU_BMIPS5000:
166 case CPU_CAVIUM_OCTEON:
167 case CPU_CAVIUM_OCTEON_PLUS:
168 case CPU_CAVIUM_OCTEON2:
169 case CPU_JZRISC:
170 case CPU_LOONGSON1:
171 case CPU_XLR:
172 case CPU_XLP:
173 cpu_wait = r4k_wait;
174 break;
175
176 case CPU_RM7000:
177 cpu_wait = rm7k_wait_irqoff;
178 break;
179
180 case CPU_M14KC:
181 case CPU_M14KEC:
182 case CPU_24K:
183 case CPU_34K:
184 case CPU_1004K:
185 cpu_wait = r4k_wait;
186 if (read_c0_config7() & MIPS_CONF7_WII)
187 cpu_wait = r4k_wait_irqoff;
188 break;
189
190 case CPU_74K:
191 cpu_wait = r4k_wait;
192 if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
193 cpu_wait = r4k_wait_irqoff;
194 break;
195
196 case CPU_TX49XX:
197 cpu_wait = r4k_wait_irqoff;
198 break;
199 case CPU_ALCHEMY:
200 cpu_wait = au1k_wait;
201 break;
202 case CPU_20KC:
203 /*
204 * WAIT on Rev1.0 has E1, E2, E3 and E16.
205 * WAIT on Rev2.0 and Rev3.0 has E16.
206 * Rev3.1 WAIT is nop, why bother
207 */
208 if ((c->processor_id & 0xff) <= 0x64)
209 break;
210
211 /*
212 * Another rev is incremeting c0_count at a reduced clock
213 * rate while in WAIT mode. So we basically have the choice
214 * between using the cp0 timer as clocksource or avoiding
215 * the WAIT instruction. Until more details are known,
216 * disable the use of WAIT for 20Kc entirely.
217 cpu_wait = r4k_wait;
218 */
219 break;
220 case CPU_RM9000:
221 if ((c->processor_id & 0x00ff) >= 0x40)
222 cpu_wait = r4k_wait;
223 break;
224 default:
225 break;
226 }
227}
228
229static void smtc_idle_hook(void)
230{
231#ifdef CONFIG_MIPS_MT_SMTC
232 void smtc_idle_loop_hook(void);
233
234 smtc_idle_loop_hook();
235#endif
236}
237
238void arch_cpu_idle(void)
239{
240 smtc_idle_hook();
241 if (cpu_wait)
242 cpu_wait();
243 else
244 local_irq_enable();
245}
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index 12bc4ebdf55b..1f8187ab0997 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -207,7 +207,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-	free_insn_slot(p->ainsn.insn, 0);
+	if (p->ainsn.insn) {
+		free_insn_slot(p->ainsn.insn, 0);
+		p->ainsn.insn = NULL;
+	}
 }
 
 static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index a3e461408b7e..acb34373679e 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -10,6 +10,7 @@
 #include <asm/bootinfo.h>
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
+#include <asm/idle.h>
 #include <asm/mipsregs.h>
 #include <asm/processor.h>
 #include <asm/prom.h>
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index eb902c1f0cad..c6a041d9d05d 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -51,19 +51,6 @@ void arch_cpu_idle_dead(void)
51} 51}
52#endif 52#endif
53 53
54void arch_cpu_idle(void)
55{
56#ifdef CONFIG_MIPS_MT_SMTC
57 extern void smtc_idle_loop_hook(void);
58
59 smtc_idle_loop_hook();
60#endif
61 if (cpu_wait)
62 (*cpu_wait)();
63 else
64 local_irq_enable();
65}
66
67asmlinkage void ret_from_fork(void); 54asmlinkage void ret_from_fork(void);
68asmlinkage void ret_from_kernel_thread(void); 55asmlinkage void ret_from_kernel_thread(void);
69 56
@@ -224,6 +211,9 @@ struct mips_frame_info {
224 int pc_offset; 211 int pc_offset;
225}; 212};
226 213
214#define J_TARGET(pc,target) \
215 (((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
216
227static inline int is_ra_save_ins(union mips_instruction *ip) 217static inline int is_ra_save_ins(union mips_instruction *ip)
228{ 218{
229#ifdef CONFIG_CPU_MICROMIPS 219#ifdef CONFIG_CPU_MICROMIPS
@@ -264,7 +254,7 @@ static inline int is_ra_save_ins(union mips_instruction *ip)
264#endif 254#endif
265} 255}
266 256
267static inline int is_jal_jalr_jr_ins(union mips_instruction *ip) 257static inline int is_jump_ins(union mips_instruction *ip)
268{ 258{
269#ifdef CONFIG_CPU_MICROMIPS 259#ifdef CONFIG_CPU_MICROMIPS
270 /* 260 /*
@@ -288,6 +278,8 @@ static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
288 return 0; 278 return 0;
289 return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op); 279 return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op);
290#else 280#else
281 if (ip->j_format.opcode == j_op)
282 return 1;
291 if (ip->j_format.opcode == jal_op) 283 if (ip->j_format.opcode == jal_op)
292 return 1; 284 return 1;
293 if (ip->r_format.opcode != spec_op) 285 if (ip->r_format.opcode != spec_op)
@@ -350,7 +342,7 @@ static int get_frame_info(struct mips_frame_info *info)
350 342
351 for (i = 0; i < max_insns; i++, ip++) { 343 for (i = 0; i < max_insns; i++, ip++) {
352 344
353 if (is_jal_jalr_jr_ins(ip)) 345 if (is_jump_ins(ip))
354 break; 346 break;
355 if (!info->frame_size) { 347 if (!info->frame_size) {
356 if (is_sp_move_ins(ip)) 348 if (is_sp_move_ins(ip))
@@ -393,15 +385,42 @@ err:
393 385
394static struct mips_frame_info schedule_mfi __read_mostly; 386static struct mips_frame_info schedule_mfi __read_mostly;
395 387
388#ifdef CONFIG_KALLSYMS
389static unsigned long get___schedule_addr(void)
390{
391 return kallsyms_lookup_name("__schedule");
392}
393#else
394static unsigned long get___schedule_addr(void)
395{
396 union mips_instruction *ip = (void *)schedule;
397 int max_insns = 8;
398 int i;
399
400 for (i = 0; i < max_insns; i++, ip++) {
401 if (ip->j_format.opcode == j_op)
402 return J_TARGET(ip, ip->j_format.target);
403 }
404 return 0;
405}
406#endif
407
396static int __init frame_info_init(void) 408static int __init frame_info_init(void)
397{ 409{
398 unsigned long size = 0; 410 unsigned long size = 0;
399#ifdef CONFIG_KALLSYMS 411#ifdef CONFIG_KALLSYMS
400 unsigned long ofs; 412 unsigned long ofs;
413#endif
414 unsigned long addr;
415
416 addr = get___schedule_addr();
417 if (!addr)
418 addr = (unsigned long)schedule;
401 419
402 kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs); 420#ifdef CONFIG_KALLSYMS
421 kallsyms_lookup_size_offset(addr, &size, &ofs);
403#endif 422#endif
404 schedule_mfi.func = schedule; 423 schedule_mfi.func = (void *)addr;
405 schedule_mfi.func_size = size; 424 schedule_mfi.func_size = size;
406 425
407 get_frame_info(&schedule_mfi); 426 get_frame_info(&schedule_mfi);
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 93c070b41b0d..6fa198db8999 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -40,6 +40,7 @@
 #include <asm/processor.h>
 #include <asm/vpe.h>
 #include <asm/rtlx.h>
+#include <asm/setup.h>
 
 static struct rtlx_info *rtlx;
 static int major;
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 36cfd4060e1f..97a5909a61cf 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -423,4 +423,5 @@ sys_call_table:
 	PTR	sys_process_vm_writev		/* 5305 */
 	PTR	sys_kcmp
 	PTR	sys_finit_module
+	PTR	sys_getdents64
 	.size	sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index c17619fe18e3..6e7862ab46cc 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -37,6 +37,7 @@
 #include <linux/atomic.h>
 #include <asm/cpu.h>
 #include <asm/processor.h>
+#include <asm/idle.h>
 #include <asm/r4k-timer.h>
 #include <asm/mmu_context.h>
 #include <asm/time.h>
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 31d22f3121c9..75a4fd709841 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -34,6 +34,7 @@
34#include <asm/hardirq.h> 34#include <asm/hardirq.h>
35#include <asm/hazards.h> 35#include <asm/hazards.h>
36#include <asm/irq.h> 36#include <asm/irq.h>
37#include <asm/idle.h>
37#include <asm/mmu_context.h> 38#include <asm/mmu_context.h>
38#include <asm/mipsregs.h> 39#include <asm/mipsregs.h>
39#include <asm/cacheflush.h> 40#include <asm/cacheflush.h>
@@ -111,7 +112,7 @@ static int vpe0limit;
111static int ipibuffers; 112static int ipibuffers;
112static int nostlb; 113static int nostlb;
113static int asidmask; 114static int asidmask;
114unsigned int smtc_asid_mask = 0xff; 115unsigned long smtc_asid_mask = 0xff;
115 116
116static int __init vpe0tcs(char *str) 117static int __init vpe0tcs(char *str)
117{ 118{
@@ -858,7 +859,6 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
858 unsigned long flags; 859 unsigned long flags;
859 int mtflags; 860 int mtflags;
860 unsigned long tcrestart; 861 unsigned long tcrestart;
861 extern void r4k_wait_irqoff(void), __pastwait(void);
862 int set_resched_flag = (type == LINUX_SMP_IPI && 862 int set_resched_flag = (type == LINUX_SMP_IPI &&
863 action == SMP_RESCHEDULE_YOURSELF); 863 action == SMP_RESCHEDULE_YOURSELF);
864 864
@@ -914,8 +914,7 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
914 */ 914 */
915 if (cpu_wait == r4k_wait_irqoff) { 915 if (cpu_wait == r4k_wait_irqoff) {
916 tcrestart = read_tc_c0_tcrestart(); 916 tcrestart = read_tc_c0_tcrestart();
917 if (tcrestart >= (unsigned long)r4k_wait_irqoff 917 if (address_is_in_r4k_wait_irqoff(tcrestart)) {
918 && tcrestart < (unsigned long)__pastwait) {
919 write_tc_c0_tcrestart(__pastwait); 918 write_tc_c0_tcrestart(__pastwait);
920 tcstatus &= ~TCSTATUS_IXMT; 919 tcstatus &= ~TCSTATUS_IXMT;
921 write_tc_c0_tcstatus(tcstatus); 920 write_tc_c0_tcstatus(tcstatus);
@@ -1395,7 +1394,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1395 asid = asid_cache(cpu); 1394 asid = asid_cache(cpu);
1396 1395
1397 do { 1396 do {
1398 if (!ASID_MASK(ASID_INC(asid))) { 1397 if (!((asid += ASID_INC) & ASID_MASK) ) {
1399 if (cpu_has_vtag_icache) 1398 if (cpu_has_vtag_icache)
1400 flush_icache_all(); 1399 flush_icache_all();
1401 /* Traverse all online CPUs (hack requires contiguous range) */ 1400 /* Traverse all online CPUs (hack requires contiguous range) */
@@ -1414,7 +1413,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1414 mips_ihb(); 1413 mips_ihb();
1415 } 1414 }
1416 tcstat = read_tc_c0_tcstatus(); 1415 tcstat = read_tc_c0_tcstatus();
1417 smtc_live_asid[tlb][ASID_MASK(tcstat)] |= (asiduse)(0x1 << i); 1416 smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
1418 if (!prevhalt) 1417 if (!prevhalt)
1419 write_tc_c0_tchalt(0); 1418 write_tc_c0_tchalt(0);
1420 } 1419 }
@@ -1423,7 +1422,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1423 asid = ASID_FIRST_VERSION; 1422 asid = ASID_FIRST_VERSION;
1424 local_flush_tlb_all(); /* start new asid cycle */ 1423 local_flush_tlb_all(); /* start new asid cycle */
1425 } 1424 }
1426 } while (smtc_live_asid[tlb][ASID_MASK(asid)]); 1425 } while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
1427 1426
1428 /* 1427 /*
1429 * SMTC shares the TLB within VPEs and possibly across all VPEs. 1428 * SMTC shares the TLB within VPEs and possibly across all VPEs.
@@ -1461,7 +1460,7 @@ void smtc_flush_tlb_asid(unsigned long asid)
1461 tlb_read(); 1460 tlb_read();
1462 ehb(); 1461 ehb();
1463 ehi = read_c0_entryhi(); 1462 ehi = read_c0_entryhi();
1464 if (ASID_MASK(ehi) == asid) { 1463 if ((ehi & ASID_MASK) == asid) {
1465 /* 1464 /*
1466 * Invalidate only entries with specified ASID, 1465 * Invalidate only entries with specified ASID,
1467 * makiing sure all entries differ. 1466 * makiing sure all entries differ.
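
Note on the smtc.c hunk above: the open-coded range test on tcrestart is replaced by a call to address_is_in_r4k_wait_irqoff(), a helper this series introduces in <asm/idle.h> (visible in the diffstat near the top of the page). The exact definition is in the idle.h hunk, which is not shown here; the sketch below is only a reconstruction of what the helper amounts to, based on the removed comparison, with the extern declarations mirroring the ones dropped from smtc_send_ipi().

/* Sketch only: assumed shape of the helper, derived from the removed test. */
extern void r4k_wait_irqoff(void), __pastwait(void);

static inline int address_is_in_r4k_wait_irqoff(unsigned long addr)
{
	/* True if addr falls inside the r4k_wait_irqoff idle sequence. */
	return addr >= (unsigned long)r4k_wait_irqoff &&
	       addr < (unsigned long)__pastwait;
}
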
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 77cff1f6d050..a75ae40184aa 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -41,6 +41,7 @@
41#include <asm/dsp.h> 41#include <asm/dsp.h>
42#include <asm/fpu.h> 42#include <asm/fpu.h>
43#include <asm/fpu_emulator.h> 43#include <asm/fpu_emulator.h>
44#include <asm/idle.h>
44#include <asm/mipsregs.h> 45#include <asm/mipsregs.h>
45#include <asm/mipsmtregs.h> 46#include <asm/mipsmtregs.h>
46#include <asm/module.h> 47#include <asm/module.h>
@@ -57,7 +58,6 @@
57#include <asm/uasm.h> 58#include <asm/uasm.h>
58 59
59extern void check_wait(void); 60extern void check_wait(void);
60extern asmlinkage void r4k_wait(void);
61extern asmlinkage void rollback_handle_int(void); 61extern asmlinkage void rollback_handle_int(void);
62extern asmlinkage void handle_int(void); 62extern asmlinkage void handle_int(void);
63extern u32 handle_tlbl[]; 63extern u32 handle_tlbl[];
@@ -897,22 +897,24 @@ out_sigsegv:
897 897
898asmlinkage void do_tr(struct pt_regs *regs) 898asmlinkage void do_tr(struct pt_regs *regs)
899{ 899{
900 unsigned int opcode, tcode = 0; 900 u32 opcode, tcode = 0;
901 u16 instr[2]; 901 u16 instr[2];
902 unsigned long epc = exception_epc(regs); 902 unsigned long epc = msk_isa16_mode(exception_epc(regs));
903 903
904 if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) || 904 if (get_isa16_mode(regs->cp0_epc)) {
905 (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))) 905 if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
906 __get_user(instr[1], (u16 __user *)(epc + 2)))
906 goto out_sigsegv; 907 goto out_sigsegv;
907 opcode = (instr[0] << 16) | instr[1]; 908 opcode = (instr[0] << 16) | instr[1];
908 909 /* Immediate versions don't provide a code. */
909 /* Immediate versions don't provide a code. */ 910 if (!(opcode & OPCODE))
910 if (!(opcode & OPCODE)) { 911 tcode = (opcode >> 12) & ((1 << 4) - 1);
911 if (get_isa16_mode(regs->cp0_epc)) 912 } else {
912 /* microMIPS */ 913 if (__get_user(opcode, (u32 __user *)epc))
913 tcode = (opcode >> 12) & 0x1f; 914 goto out_sigsegv;
914 else 915 /* Immediate versions don't provide a code. */
915 tcode = ((opcode >> 6) & ((1 << 10) - 1)); 916 if (!(opcode & OPCODE))
917 tcode = (opcode >> 6) & ((1 << 10) - 1);
916 } 918 }
917 919
918 do_trap_or_bp(regs, tcode, "Trap"); 920 do_trap_or_bp(regs, tcode, "Trap");
@@ -1542,7 +1544,7 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1542 extern char except_vec_vi, except_vec_vi_lui; 1544 extern char except_vec_vi, except_vec_vi_lui;
1543 extern char except_vec_vi_ori, except_vec_vi_end; 1545 extern char except_vec_vi_ori, except_vec_vi_end;
1544 extern char rollback_except_vec_vi; 1546 extern char rollback_except_vec_vi;
1545 char *vec_start = (cpu_wait == r4k_wait) ? 1547 char *vec_start = using_rollback_handler() ?
1546 &rollback_except_vec_vi : &except_vec_vi; 1548 &rollback_except_vec_vi : &except_vec_vi;
1547#ifdef CONFIG_MIPS_MT_SMTC 1549#ifdef CONFIG_MIPS_MT_SMTC
1548 /* 1550 /*
@@ -1656,7 +1658,6 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
1656 unsigned int cpu = smp_processor_id(); 1658 unsigned int cpu = smp_processor_id();
1657 unsigned int status_set = ST0_CU0; 1659 unsigned int status_set = ST0_CU0;
1658 unsigned int hwrena = cpu_hwrena_impl_bits; 1660 unsigned int hwrena = cpu_hwrena_impl_bits;
1659 unsigned long asid = 0;
1660#ifdef CONFIG_MIPS_MT_SMTC 1661#ifdef CONFIG_MIPS_MT_SMTC
1661 int secondaryTC = 0; 1662 int secondaryTC = 0;
1662 int bootTC = (cpu == 0); 1663 int bootTC = (cpu == 0);
@@ -1740,9 +1741,8 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
1740 } 1741 }
1741#endif /* CONFIG_MIPS_MT_SMTC */ 1742#endif /* CONFIG_MIPS_MT_SMTC */
1742 1743
1743 asid = ASID_FIRST_VERSION; 1744 if (!cpu_data[cpu].asid_cache)
1744 cpu_data[cpu].asid_cache = asid; 1745 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
1745 TLBMISS_HANDLER_SETUP();
1746 1746
1747 atomic_inc(&init_mm.mm_count); 1747 atomic_inc(&init_mm.mm_count);
1748 current->active_mm = &init_mm; 1748 current->active_mm = &init_mm;
@@ -1814,10 +1814,8 @@ void __init trap_init(void)
1814 extern char except_vec4; 1814 extern char except_vec4;
1815 extern char except_vec3_r4000; 1815 extern char except_vec3_r4000;
1816 unsigned long i; 1816 unsigned long i;
1817 int rollback;
1818 1817
1819 check_wait(); 1818 check_wait();
1820 rollback = (cpu_wait == r4k_wait);
1821 1819
1822#if defined(CONFIG_KGDB) 1820#if defined(CONFIG_KGDB)
1823 if (kgdb_early_setup) 1821 if (kgdb_early_setup)
@@ -1894,7 +1892,8 @@ void __init trap_init(void)
1894 if (board_be_init) 1892 if (board_be_init)
1895 board_be_init(); 1893 board_be_init();
1896 1894
1897 set_except_vector(0, rollback ? rollback_handle_int : handle_int); 1895 set_except_vector(0, using_rollback_handler() ? rollback_handle_int
1896 : handle_int);
1898 set_except_vector(1, handle_tlbm); 1897 set_except_vector(1, handle_tlbm);
1899 set_except_vector(2, handle_tlbl); 1898 set_except_vector(2, handle_tlbl);
1900 set_except_vector(3, handle_tlbs); 1899 set_except_vector(3, handle_tlbs);
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index e0dad0289797..dd203e59e6fd 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -195,7 +195,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
195long 195long
196kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) 196kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
197{ 197{
198 return -EINVAL; 198 return -ENOIOCTLCMD;
199} 199}
200 200
201void kvm_arch_free_memslot(struct kvm_memory_slot *free, 201void kvm_arch_free_memslot(struct kvm_memory_slot *free,
@@ -401,7 +401,7 @@ int
401kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 401kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
402 struct kvm_guest_debug *dbg) 402 struct kvm_guest_debug *dbg)
403{ 403{
404 return -EINVAL; 404 return -ENOIOCTLCMD;
405} 405}
406 406
407int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) 407int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
@@ -475,14 +475,248 @@ int
475kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 475kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
476 struct kvm_mp_state *mp_state) 476 struct kvm_mp_state *mp_state)
477{ 477{
478 return -EINVAL; 478 return -ENOIOCTLCMD;
479} 479}
480 480
481int 481int
482kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 482kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
483 struct kvm_mp_state *mp_state) 483 struct kvm_mp_state *mp_state)
484{ 484{
485 return -EINVAL; 485 return -ENOIOCTLCMD;
486}
487
488#define MIPS_CP0_32(_R, _S) \
489 (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S)))
490
491#define MIPS_CP0_64(_R, _S) \
492 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S)))
493
494#define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0)
495#define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0)
496#define KVM_REG_MIPS_CP0_ENTRYLO1 MIPS_CP0_64(3, 0)
497#define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0)
498#define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2)
499#define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0)
500#define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1)
501#define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0)
502#define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0)
503#define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0)
504#define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0)
505#define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0)
506#define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0)
507#define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0)
508#define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0)
509#define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1)
510#define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0)
511#define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1)
512#define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2)
513#define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
514#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
515#define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
516#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
517
518static u64 kvm_mips_get_one_regs[] = {
519 KVM_REG_MIPS_R0,
520 KVM_REG_MIPS_R1,
521 KVM_REG_MIPS_R2,
522 KVM_REG_MIPS_R3,
523 KVM_REG_MIPS_R4,
524 KVM_REG_MIPS_R5,
525 KVM_REG_MIPS_R6,
526 KVM_REG_MIPS_R7,
527 KVM_REG_MIPS_R8,
528 KVM_REG_MIPS_R9,
529 KVM_REG_MIPS_R10,
530 KVM_REG_MIPS_R11,
531 KVM_REG_MIPS_R12,
532 KVM_REG_MIPS_R13,
533 KVM_REG_MIPS_R14,
534 KVM_REG_MIPS_R15,
535 KVM_REG_MIPS_R16,
536 KVM_REG_MIPS_R17,
537 KVM_REG_MIPS_R18,
538 KVM_REG_MIPS_R19,
539 KVM_REG_MIPS_R20,
540 KVM_REG_MIPS_R21,
541 KVM_REG_MIPS_R22,
542 KVM_REG_MIPS_R23,
543 KVM_REG_MIPS_R24,
544 KVM_REG_MIPS_R25,
545 KVM_REG_MIPS_R26,
546 KVM_REG_MIPS_R27,
547 KVM_REG_MIPS_R28,
548 KVM_REG_MIPS_R29,
549 KVM_REG_MIPS_R30,
550 KVM_REG_MIPS_R31,
551
552 KVM_REG_MIPS_HI,
553 KVM_REG_MIPS_LO,
554 KVM_REG_MIPS_PC,
555
556 KVM_REG_MIPS_CP0_INDEX,
557 KVM_REG_MIPS_CP0_CONTEXT,
558 KVM_REG_MIPS_CP0_PAGEMASK,
559 KVM_REG_MIPS_CP0_WIRED,
560 KVM_REG_MIPS_CP0_BADVADDR,
561 KVM_REG_MIPS_CP0_ENTRYHI,
562 KVM_REG_MIPS_CP0_STATUS,
563 KVM_REG_MIPS_CP0_CAUSE,
564 /* EPC set via kvm_regs, et al. */
565 KVM_REG_MIPS_CP0_CONFIG,
566 KVM_REG_MIPS_CP0_CONFIG1,
567 KVM_REG_MIPS_CP0_CONFIG2,
568 KVM_REG_MIPS_CP0_CONFIG3,
569 KVM_REG_MIPS_CP0_CONFIG7,
570 KVM_REG_MIPS_CP0_ERROREPC
571};
572
573static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
574 const struct kvm_one_reg *reg)
575{
576 struct mips_coproc *cop0 = vcpu->arch.cop0;
577 s64 v;
578
579 switch (reg->id) {
580 case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
581 v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
582 break;
583 case KVM_REG_MIPS_HI:
584 v = (long)vcpu->arch.hi;
585 break;
586 case KVM_REG_MIPS_LO:
587 v = (long)vcpu->arch.lo;
588 break;
589 case KVM_REG_MIPS_PC:
590 v = (long)vcpu->arch.pc;
591 break;
592
593 case KVM_REG_MIPS_CP0_INDEX:
594 v = (long)kvm_read_c0_guest_index(cop0);
595 break;
596 case KVM_REG_MIPS_CP0_CONTEXT:
597 v = (long)kvm_read_c0_guest_context(cop0);
598 break;
599 case KVM_REG_MIPS_CP0_PAGEMASK:
600 v = (long)kvm_read_c0_guest_pagemask(cop0);
601 break;
602 case KVM_REG_MIPS_CP0_WIRED:
603 v = (long)kvm_read_c0_guest_wired(cop0);
604 break;
605 case KVM_REG_MIPS_CP0_BADVADDR:
606 v = (long)kvm_read_c0_guest_badvaddr(cop0);
607 break;
608 case KVM_REG_MIPS_CP0_ENTRYHI:
609 v = (long)kvm_read_c0_guest_entryhi(cop0);
610 break;
611 case KVM_REG_MIPS_CP0_STATUS:
612 v = (long)kvm_read_c0_guest_status(cop0);
613 break;
614 case KVM_REG_MIPS_CP0_CAUSE:
615 v = (long)kvm_read_c0_guest_cause(cop0);
616 break;
617 case KVM_REG_MIPS_CP0_ERROREPC:
618 v = (long)kvm_read_c0_guest_errorepc(cop0);
619 break;
620 case KVM_REG_MIPS_CP0_CONFIG:
621 v = (long)kvm_read_c0_guest_config(cop0);
622 break;
623 case KVM_REG_MIPS_CP0_CONFIG1:
624 v = (long)kvm_read_c0_guest_config1(cop0);
625 break;
626 case KVM_REG_MIPS_CP0_CONFIG2:
627 v = (long)kvm_read_c0_guest_config2(cop0);
628 break;
629 case KVM_REG_MIPS_CP0_CONFIG3:
630 v = (long)kvm_read_c0_guest_config3(cop0);
631 break;
632 case KVM_REG_MIPS_CP0_CONFIG7:
633 v = (long)kvm_read_c0_guest_config7(cop0);
634 break;
635 default:
636 return -EINVAL;
637 }
638 if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
639 u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
640 return put_user(v, uaddr64);
641 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
642 u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
643 u32 v32 = (u32)v;
644 return put_user(v32, uaddr32);
645 } else {
646 return -EINVAL;
647 }
648}
649
650static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
651 const struct kvm_one_reg *reg)
652{
653 struct mips_coproc *cop0 = vcpu->arch.cop0;
654 u64 v;
655
656 if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
657 u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
658
659 if (get_user(v, uaddr64) != 0)
660 return -EFAULT;
661 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
662 u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
663 s32 v32;
664
665 if (get_user(v32, uaddr32) != 0)
666 return -EFAULT;
667 v = (s64)v32;
668 } else {
669 return -EINVAL;
670 }
671
672 switch (reg->id) {
673 case KVM_REG_MIPS_R0:
674 /* Silently ignore requests to set $0 */
675 break;
676 case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
677 vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
678 break;
679 case KVM_REG_MIPS_HI:
680 vcpu->arch.hi = v;
681 break;
682 case KVM_REG_MIPS_LO:
683 vcpu->arch.lo = v;
684 break;
685 case KVM_REG_MIPS_PC:
686 vcpu->arch.pc = v;
687 break;
688
689 case KVM_REG_MIPS_CP0_INDEX:
690 kvm_write_c0_guest_index(cop0, v);
691 break;
692 case KVM_REG_MIPS_CP0_CONTEXT:
693 kvm_write_c0_guest_context(cop0, v);
694 break;
695 case KVM_REG_MIPS_CP0_PAGEMASK:
696 kvm_write_c0_guest_pagemask(cop0, v);
697 break;
698 case KVM_REG_MIPS_CP0_WIRED:
699 kvm_write_c0_guest_wired(cop0, v);
700 break;
701 case KVM_REG_MIPS_CP0_BADVADDR:
702 kvm_write_c0_guest_badvaddr(cop0, v);
703 break;
704 case KVM_REG_MIPS_CP0_ENTRYHI:
705 kvm_write_c0_guest_entryhi(cop0, v);
706 break;
707 case KVM_REG_MIPS_CP0_STATUS:
708 kvm_write_c0_guest_status(cop0, v);
709 break;
710 case KVM_REG_MIPS_CP0_CAUSE:
711 kvm_write_c0_guest_cause(cop0, v);
712 break;
713 case KVM_REG_MIPS_CP0_ERROREPC:
714 kvm_write_c0_guest_errorepc(cop0, v);
715 break;
716 default:
717 return -EINVAL;
718 }
719 return 0;
486} 720}
487 721
488long 722long
@@ -491,9 +725,38 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
491 struct kvm_vcpu *vcpu = filp->private_data; 725 struct kvm_vcpu *vcpu = filp->private_data;
492 void __user *argp = (void __user *)arg; 726 void __user *argp = (void __user *)arg;
493 long r; 727 long r;
494 int intr;
495 728
496 switch (ioctl) { 729 switch (ioctl) {
730 case KVM_SET_ONE_REG:
731 case KVM_GET_ONE_REG: {
732 struct kvm_one_reg reg;
733 if (copy_from_user(&reg, argp, sizeof(reg)))
734 return -EFAULT;
735 if (ioctl == KVM_SET_ONE_REG)
736 return kvm_mips_set_reg(vcpu, &reg);
737 else
738 return kvm_mips_get_reg(vcpu, &reg);
739 }
740 case KVM_GET_REG_LIST: {
741 struct kvm_reg_list __user *user_list = argp;
742 u64 __user *reg_dest;
743 struct kvm_reg_list reg_list;
744 unsigned n;
745
746 if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
747 return -EFAULT;
748 n = reg_list.n;
749 reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
750 if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
751 return -EFAULT;
752 if (n < reg_list.n)
753 return -E2BIG;
754 reg_dest = user_list->reg;
755 if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
756 sizeof(kvm_mips_get_one_regs)))
757 return -EFAULT;
758 return 0;
759 }
497 case KVM_NMI: 760 case KVM_NMI:
498 /* Treat the NMI as a CPU reset */ 761 /* Treat the NMI as a CPU reset */
499 r = kvm_mips_reset_vcpu(vcpu); 762 r = kvm_mips_reset_vcpu(vcpu);
@@ -505,8 +768,6 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
505 if (copy_from_user(&irq, argp, sizeof(irq))) 768 if (copy_from_user(&irq, argp, sizeof(irq)))
506 goto out; 769 goto out;
507 770
508 intr = (int)irq.irq;
509
510 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, 771 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
511 irq.irq); 772 irq.irq);
512 773
@@ -514,7 +775,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
514 break; 775 break;
515 } 776 }
516 default: 777 default:
517 r = -EINVAL; 778 r = -ENOIOCTLCMD;
518 } 779 }
519 780
520out: 781out:
@@ -565,7 +826,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
565 826
566 switch (ioctl) { 827 switch (ioctl) {
567 default: 828 default:
568 r = -EINVAL; 829 r = -ENOIOCTLCMD;
569 } 830 }
570 831
571 return r; 832 return r;
@@ -593,13 +854,13 @@ void kvm_arch_exit(void)
593int 854int
594kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 855kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
595{ 856{
596 return -ENOTSUPP; 857 return -ENOIOCTLCMD;
597} 858}
598 859
599int 860int
600kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 861kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
601{ 862{
602 return -ENOTSUPP; 863 return -ENOIOCTLCMD;
603} 864}
604 865
605int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) 866int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
@@ -609,12 +870,12 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
609 870
610int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 871int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
611{ 872{
612 return -ENOTSUPP; 873 return -ENOIOCTLCMD;
613} 874}
614 875
615int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 876int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
616{ 877{
617 return -ENOTSUPP; 878 return -ENOIOCTLCMD;
618} 879}
619 880
620int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 881int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
@@ -627,6 +888,9 @@ int kvm_dev_ioctl_check_extension(long ext)
627 int r; 888 int r;
628 889
629 switch (ext) { 890 switch (ext) {
891 case KVM_CAP_ONE_REG:
892 r = 1;
893 break;
630 case KVM_CAP_COALESCED_MMIO: 894 case KVM_CAP_COALESCED_MMIO:
631 r = KVM_COALESCED_MMIO_PAGE_OFFSET; 895 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
632 break; 896 break;
@@ -635,7 +899,6 @@ int kvm_dev_ioctl_check_extension(long ext)
635 break; 899 break;
636 } 900 }
637 return r; 901 return r;
638
639} 902}
640 903
641int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) 904int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
@@ -677,28 +940,28 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
677{ 940{
678 int i; 941 int i;
679 942
680 for (i = 0; i < 32; i++) 943 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
681 vcpu->arch.gprs[i] = regs->gprs[i]; 944 vcpu->arch.gprs[i] = regs->gpr[i];
682 945 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
683 vcpu->arch.hi = regs->hi; 946 vcpu->arch.hi = regs->hi;
684 vcpu->arch.lo = regs->lo; 947 vcpu->arch.lo = regs->lo;
685 vcpu->arch.pc = regs->pc; 948 vcpu->arch.pc = regs->pc;
686 949
687 return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs); 950 return 0;
688} 951}
689 952
690int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 953int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
691{ 954{
692 int i; 955 int i;
693 956
694 for (i = 0; i < 32; i++) 957 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
695 regs->gprs[i] = vcpu->arch.gprs[i]; 958 regs->gpr[i] = vcpu->arch.gprs[i];
696 959
697 regs->hi = vcpu->arch.hi; 960 regs->hi = vcpu->arch.hi;
698 regs->lo = vcpu->arch.lo; 961 regs->lo = vcpu->arch.lo;
699 regs->pc = vcpu->arch.pc; 962 regs->pc = vcpu->arch.pc;
700 963
701 return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs); 964 return 0;
702} 965}
703 966
704void kvm_mips_comparecount_func(unsigned long data) 967void kvm_mips_comparecount_func(unsigned long data)
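
The kvm_mips.c hunks above replace the old cp0reg-array callbacks with the generic KVM_GET_REG_LIST and KVM_{GET,SET}_ONE_REG interface. For orientation, here is a minimal userspace sketch of how a VMM might drive it. This is an illustration, not part of the patch: it assumes a MIPS host with the uapi headers from this series installed (for KVM_REG_MIPS_PC and friends) and a vcpu_fd already obtained through the usual KVM_CREATE_VM / KVM_CREATE_VCPU sequence.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>		/* pulls in asm/kvm.h: KVM_REG_MIPS_* IDs */

/* Read a single register (the guest PC) via KVM_GET_ONE_REG. */
static int dump_guest_pc(int vcpu_fd)
{
	uint64_t pc;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_MIPS_PC,
		.addr = (uintptr_t)&pc,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0) {
		perror("KVM_GET_ONE_REG");
		return -1;
	}
	printf("guest pc = %#llx\n", (unsigned long long)pc);
	return 0;
}

/* Enumerate the register IDs the kernel exposes via KVM_GET_REG_LIST. */
static int dump_reg_list(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 };
	struct kvm_reg_list *list;
	uint64_t i;

	/* With n == 0 the kernel writes back the real count and fails with E2BIG. */
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe) == 0)
		return 0;		/* no registers reported */
	if (errno != E2BIG) {
		perror("KVM_GET_REG_LIST");
		return -1;
	}
	list = calloc(1, sizeof(*list) + probe.n * sizeof(uint64_t));
	if (!list)
		return -1;
	list->n = probe.n;
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
		perror("KVM_GET_REG_LIST");
		free(list);
		return -1;
	}
	for (i = 0; i < list->n; i++)
		printf("reg id %#llx\n", (unsigned long long)list->reg[i]);
	free(list);
	return 0;
}

The write side is symmetric: KVM_SET_ONE_REG takes the same struct kvm_one_reg, with addr pointing at the value to load, which is how the set-$0-is-ignored case in kvm_mips_set_reg() above gets exercised.
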
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
index 2b2bac9a40aa..4b6274b47f33 100644
--- a/arch/mips/kvm/kvm_mips_emul.c
+++ b/arch/mips/kvm/kvm_mips_emul.c
@@ -525,16 +525,18 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
525 printk("MTCz, cop0->reg[EBASE]: %#lx\n", 525 printk("MTCz, cop0->reg[EBASE]: %#lx\n",
526 kvm_read_c0_guest_ebase(cop0)); 526 kvm_read_c0_guest_ebase(cop0));
527 } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { 527 } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
528 uint32_t nasid = ASID_MASK(vcpu->arch.gprs[rt]); 528 uint32_t nasid =
529 vcpu->arch.gprs[rt] & ASID_MASK;
529 if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) 530 if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
530 && 531 &&
531 (ASID_MASK(kvm_read_c0_guest_entryhi(cop0)) 532 ((kvm_read_c0_guest_entryhi(cop0) &
532 != nasid)) { 533 ASID_MASK) != nasid)) {
533 534
534 kvm_debug 535 kvm_debug
535 ("MTCz, change ASID from %#lx to %#lx\n", 536 ("MTCz, change ASID from %#lx to %#lx\n",
536 ASID_MASK(kvm_read_c0_guest_entryhi(cop0)), 537 kvm_read_c0_guest_entryhi(cop0) &
537 ASID_MASK(vcpu->arch.gprs[rt])); 538 ASID_MASK,
539 vcpu->arch.gprs[rt] & ASID_MASK);
538 540
539 /* Blow away the shadow host TLBs */ 541 /* Blow away the shadow host TLBs */
540 kvm_mips_flush_host_tlb(1); 542 kvm_mips_flush_host_tlb(1);
@@ -986,7 +988,8 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
986 * resulting handler will do the right thing 988 * resulting handler will do the right thing
987 */ 989 */
988 index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | 990 index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
989 ASID_MASK(kvm_read_c0_guest_entryhi(cop0))); 991 (kvm_read_c0_guest_entryhi
992 (cop0) & ASID_MASK));
990 993
991 if (index < 0) { 994 if (index < 0) {
992 vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK); 995 vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
@@ -1151,7 +1154,7 @@ kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
1151 struct kvm_vcpu_arch *arch = &vcpu->arch; 1154 struct kvm_vcpu_arch *arch = &vcpu->arch;
1152 enum emulation_result er = EMULATE_DONE; 1155 enum emulation_result er = EMULATE_DONE;
1153 unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) | 1156 unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) |
1154 ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); 1157 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1155 1158
1156 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 1159 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1157 /* save old pc */ 1160 /* save old pc */
@@ -1198,7 +1201,7 @@ kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
1198 enum emulation_result er = EMULATE_DONE; 1201 enum emulation_result er = EMULATE_DONE;
1199 unsigned long entryhi = 1202 unsigned long entryhi =
1200 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 1203 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1201 ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); 1204 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1202 1205
1203 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 1206 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1204 /* save old pc */ 1207 /* save old pc */
@@ -1243,7 +1246,7 @@ kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
1243 struct kvm_vcpu_arch *arch = &vcpu->arch; 1246 struct kvm_vcpu_arch *arch = &vcpu->arch;
1244 enum emulation_result er = EMULATE_DONE; 1247 enum emulation_result er = EMULATE_DONE;
1245 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 1248 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1246 ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); 1249 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1247 1250
1248 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 1251 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1249 /* save old pc */ 1252 /* save old pc */
@@ -1287,7 +1290,7 @@ kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
1287 struct kvm_vcpu_arch *arch = &vcpu->arch; 1290 struct kvm_vcpu_arch *arch = &vcpu->arch;
1288 enum emulation_result er = EMULATE_DONE; 1291 enum emulation_result er = EMULATE_DONE;
1289 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 1292 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1290 ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); 1293 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1291 1294
1292 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 1295 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1293 /* save old pc */ 1296 /* save old pc */
@@ -1356,7 +1359,7 @@ kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
1356{ 1359{
1357 struct mips_coproc *cop0 = vcpu->arch.cop0; 1360 struct mips_coproc *cop0 = vcpu->arch.cop0;
1358 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 1361 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1359 ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); 1362 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1360 struct kvm_vcpu_arch *arch = &vcpu->arch; 1363 struct kvm_vcpu_arch *arch = &vcpu->arch;
1361 enum emulation_result er = EMULATE_DONE; 1364 enum emulation_result er = EMULATE_DONE;
1362 1365
@@ -1783,8 +1786,8 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
1783 */ 1786 */
1784 index = kvm_mips_guest_tlb_lookup(vcpu, 1787 index = kvm_mips_guest_tlb_lookup(vcpu,
1785 (va & VPN2_MASK) | 1788 (va & VPN2_MASK) |
1786 ASID_MASK(kvm_read_c0_guest_entryhi 1789 (kvm_read_c0_guest_entryhi
1787 (vcpu->arch.cop0))); 1790 (vcpu->arch.cop0) & ASID_MASK));
1788 if (index < 0) { 1791 if (index < 0) {
1789 if (exccode == T_TLB_LD_MISS) { 1792 if (exccode == T_TLB_LD_MISS) {
1790 er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu); 1793 er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
index 89511a9258d3..c777dd36d4a8 100644
--- a/arch/mips/kvm/kvm_tlb.c
+++ b/arch/mips/kvm/kvm_tlb.c
@@ -17,6 +17,8 @@
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/kvm_host.h> 19#include <linux/kvm_host.h>
20#include <linux/srcu.h>
21
20 22
21#include <asm/cpu.h> 23#include <asm/cpu.h>
22#include <asm/bootinfo.h> 24#include <asm/bootinfo.h>
@@ -51,13 +53,13 @@ EXPORT_SYMBOL(kvm_mips_is_error_pfn);
51 53
52uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) 54uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
53{ 55{
54 return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]); 56 return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
55} 57}
56 58
57 59
58uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) 60uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
59{ 61{
60 return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]); 62 return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
61} 63}
62 64
63inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu) 65inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu)
@@ -84,7 +86,7 @@ void kvm_mips_dump_host_tlbs(void)
84 old_pagemask = read_c0_pagemask(); 86 old_pagemask = read_c0_pagemask();
85 87
86 printk("HOST TLBs:\n"); 88 printk("HOST TLBs:\n");
87 printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi())); 89 printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
88 90
89 for (i = 0; i < current_cpu_data.tlbsize; i++) { 91 for (i = 0; i < current_cpu_data.tlbsize; i++) {
90 write_c0_index(i); 92 write_c0_index(i);
@@ -169,21 +171,27 @@ void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
169 } 171 }
170} 172}
171 173
172static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) 174static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
173{ 175{
176 int srcu_idx, err = 0;
174 pfn_t pfn; 177 pfn_t pfn;
175 178
176 if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE) 179 if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
177 return; 180 return 0;
178 181
182 srcu_idx = srcu_read_lock(&kvm->srcu);
179 pfn = kvm_mips_gfn_to_pfn(kvm, gfn); 183 pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
180 184
181 if (kvm_mips_is_error_pfn(pfn)) { 185 if (kvm_mips_is_error_pfn(pfn)) {
182 panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn); 186 kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
187 err = -EFAULT;
188 goto out;
183 } 189 }
184 190
185 kvm->arch.guest_pmap[gfn] = pfn; 191 kvm->arch.guest_pmap[gfn] = pfn;
186 return; 192out:
193 srcu_read_unlock(&kvm->srcu, srcu_idx);
194 return err;
187} 195}
188 196
189/* Translate guest KSEG0 addresses to Host PA */ 197/* Translate guest KSEG0 addresses to Host PA */
@@ -207,7 +215,10 @@ unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
207 gva); 215 gva);
208 return KVM_INVALID_PAGE; 216 return KVM_INVALID_PAGE;
209 } 217 }
210 kvm_mips_map_page(vcpu->kvm, gfn); 218
219 if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
220 return KVM_INVALID_ADDR;
221
211 return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset; 222 return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
212} 223}
213 224
@@ -310,8 +321,11 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
310 even = !(gfn & 0x1); 321 even = !(gfn & 0x1);
311 vaddr = badvaddr & (PAGE_MASK << 1); 322 vaddr = badvaddr & (PAGE_MASK << 1);
312 323
313 kvm_mips_map_page(vcpu->kvm, gfn); 324 if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
314 kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1); 325 return -1;
326
327 if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
328 return -1;
315 329
316 if (even) { 330 if (even) {
317 pfn0 = kvm->arch.guest_pmap[gfn]; 331 pfn0 = kvm->arch.guest_pmap[gfn];
@@ -389,8 +403,11 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
389 pfn0 = 0; 403 pfn0 = 0;
390 pfn1 = 0; 404 pfn1 = 0;
391 } else { 405 } else {
392 kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT); 406 if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0)
393 kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT); 407 return -1;
408
409 if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0)
410 return -1;
394 411
395 pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT]; 412 pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
396 pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT]; 413 pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
@@ -428,7 +445,7 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
428 445
429 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { 446 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
430 if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) && 447 if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
431 (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) { 448 (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
432 index = i; 449 index = i;
433 break; 450 break;
434 } 451 }
@@ -626,7 +643,7 @@ kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
626{ 643{
627 unsigned long asid = asid_cache(cpu); 644 unsigned long asid = asid_cache(cpu);
628 645
629 if (!(ASID_MASK(ASID_INC(asid)))) { 646 if (!((asid += ASID_INC) & ASID_MASK)) {
630 if (cpu_has_vtag_icache) { 647 if (cpu_has_vtag_icache) {
631 flush_icache_all(); 648 flush_icache_all();
632 } 649 }
@@ -804,7 +821,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
804 if (!newasid) { 821 if (!newasid) {
805 /* If we preempted while the guest was executing, then reload the pre-empted ASID */ 822 /* If we preempted while the guest was executing, then reload the pre-empted ASID */
806 if (current->flags & PF_VCPU) { 823 if (current->flags & PF_VCPU) {
807 write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi)); 824 write_c0_entryhi(vcpu->arch.
825 preempt_entryhi & ASID_MASK);
808 ehb(); 826 ehb();
809 } 827 }
810 } else { 828 } else {
@@ -816,11 +834,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
816 */ 834 */
817 if (current->flags & PF_VCPU) { 835 if (current->flags & PF_VCPU) {
818 if (KVM_GUEST_KERNEL_MODE(vcpu)) 836 if (KVM_GUEST_KERNEL_MODE(vcpu))
819 write_c0_entryhi(ASID_MASK(vcpu->arch. 837 write_c0_entryhi(vcpu->arch.
820 guest_kernel_asid[cpu])); 838 guest_kernel_asid[cpu] &
839 ASID_MASK);
821 else 840 else
822 write_c0_entryhi(ASID_MASK(vcpu->arch. 841 write_c0_entryhi(vcpu->arch.
823 guest_user_asid[cpu])); 842 guest_user_asid[cpu] &
843 ASID_MASK);
824 ehb(); 844 ehb();
825 } 845 }
826 } 846 }
@@ -879,7 +899,8 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
879 kvm_mips_guest_tlb_lookup(vcpu, 899 kvm_mips_guest_tlb_lookup(vcpu,
880 ((unsigned long) opc & VPN2_MASK) 900 ((unsigned long) opc & VPN2_MASK)
881 | 901 |
882 ASID_MASK(kvm_read_c0_guest_entryhi(cop0))); 902 (kvm_read_c0_guest_entryhi
903 (cop0) & ASID_MASK));
883 if (index < 0) { 904 if (index < 0) {
884 kvm_err 905 kvm_err
885 ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n", 906 ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c
index 466aeef044bd..30d725321db1 100644
--- a/arch/mips/kvm/kvm_trap_emul.c
+++ b/arch/mips/kvm/kvm_trap_emul.c
@@ -345,54 +345,6 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
345 return ret; 345 return ret;
346} 346}
347 347
348static int
349kvm_trap_emul_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
350{
351 struct mips_coproc *cop0 = vcpu->arch.cop0;
352
353 kvm_write_c0_guest_index(cop0, regs->cp0reg[MIPS_CP0_TLB_INDEX][0]);
354 kvm_write_c0_guest_context(cop0, regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0]);
355 kvm_write_c0_guest_badvaddr(cop0, regs->cp0reg[MIPS_CP0_BAD_VADDR][0]);
356 kvm_write_c0_guest_entryhi(cop0, regs->cp0reg[MIPS_CP0_TLB_HI][0]);
357 kvm_write_c0_guest_epc(cop0, regs->cp0reg[MIPS_CP0_EXC_PC][0]);
358
359 kvm_write_c0_guest_status(cop0, regs->cp0reg[MIPS_CP0_STATUS][0]);
360 kvm_write_c0_guest_cause(cop0, regs->cp0reg[MIPS_CP0_CAUSE][0]);
361 kvm_write_c0_guest_pagemask(cop0,
362 regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0]);
363 kvm_write_c0_guest_wired(cop0, regs->cp0reg[MIPS_CP0_TLB_WIRED][0]);
364 kvm_write_c0_guest_errorepc(cop0, regs->cp0reg[MIPS_CP0_ERROR_PC][0]);
365
366 return 0;
367}
368
369static int
370kvm_trap_emul_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
371{
372 struct mips_coproc *cop0 = vcpu->arch.cop0;
373
374 regs->cp0reg[MIPS_CP0_TLB_INDEX][0] = kvm_read_c0_guest_index(cop0);
375 regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0] = kvm_read_c0_guest_context(cop0);
376 regs->cp0reg[MIPS_CP0_BAD_VADDR][0] = kvm_read_c0_guest_badvaddr(cop0);
377 regs->cp0reg[MIPS_CP0_TLB_HI][0] = kvm_read_c0_guest_entryhi(cop0);
378 regs->cp0reg[MIPS_CP0_EXC_PC][0] = kvm_read_c0_guest_epc(cop0);
379
380 regs->cp0reg[MIPS_CP0_STATUS][0] = kvm_read_c0_guest_status(cop0);
381 regs->cp0reg[MIPS_CP0_CAUSE][0] = kvm_read_c0_guest_cause(cop0);
382 regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0] =
383 kvm_read_c0_guest_pagemask(cop0);
384 regs->cp0reg[MIPS_CP0_TLB_WIRED][0] = kvm_read_c0_guest_wired(cop0);
385 regs->cp0reg[MIPS_CP0_ERROR_PC][0] = kvm_read_c0_guest_errorepc(cop0);
386
387 regs->cp0reg[MIPS_CP0_CONFIG][0] = kvm_read_c0_guest_config(cop0);
388 regs->cp0reg[MIPS_CP0_CONFIG][1] = kvm_read_c0_guest_config1(cop0);
389 regs->cp0reg[MIPS_CP0_CONFIG][2] = kvm_read_c0_guest_config2(cop0);
390 regs->cp0reg[MIPS_CP0_CONFIG][3] = kvm_read_c0_guest_config3(cop0);
391 regs->cp0reg[MIPS_CP0_CONFIG][7] = kvm_read_c0_guest_config7(cop0);
392
393 return 0;
394}
395
396static int kvm_trap_emul_vm_init(struct kvm *kvm) 348static int kvm_trap_emul_vm_init(struct kvm *kvm)
397{ 349{
398 return 0; 350 return 0;
@@ -471,8 +423,6 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
471 .dequeue_io_int = kvm_mips_dequeue_io_int_cb, 423 .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
472 .irq_deliver = kvm_mips_irq_deliver_cb, 424 .irq_deliver = kvm_mips_irq_deliver_cb,
473 .irq_clear = kvm_mips_irq_clear_cb, 425 .irq_clear = kvm_mips_irq_clear_cb,
474 .vcpu_ioctl_get_regs = kvm_trap_emul_ioctl_get_regs,
475 .vcpu_ioctl_set_regs = kvm_trap_emul_ioctl_set_regs,
476}; 426};
477 427
478int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) 428int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
diff --git a/arch/mips/lantiq/xway/gptu.c b/arch/mips/lantiq/xway/gptu.c
index 9861c8669fab..850821df924c 100644
--- a/arch/mips/lantiq/xway/gptu.c
+++ b/arch/mips/lantiq/xway/gptu.c
@@ -144,10 +144,6 @@ static int gptu_probe(struct platform_device *pdev)
144 } 144 }
145 145
146 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 146 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
147 if (!res) {
148 dev_err(&pdev->dev, "Failed to get resource\n");
149 return -ENOMEM;
150 }
151 147
152 /* remap gptu register range */ 148 /* remap gptu register range */
153 gptu_membase = devm_ioremap_resource(&pdev->dev, res); 149 gptu_membase = devm_ioremap_resource(&pdev->dev, res);
@@ -169,6 +165,8 @@ static int gptu_probe(struct platform_device *pdev)
169 if (((gptu_r32(GPTU_ID) >> 8) & 0xff) != GPTU_MAGIC) { 165 if (((gptu_r32(GPTU_ID) >> 8) & 0xff) != GPTU_MAGIC) {
170 dev_err(&pdev->dev, "Failed to find magic\n"); 166 dev_err(&pdev->dev, "Failed to find magic\n");
171 gptu_hwexit(); 167 gptu_hwexit();
168 clk_disable(clk);
169 clk_put(clk);
172 return -ENAVAIL; 170 return -ENAVAIL;
173 } 171 }
174 172
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 8a12d00908e0..32b9f21bfd85 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -11,7 +11,6 @@
11#include <asm/page.h> 11#include <asm/page.h>
12#include <asm/pgtable.h> 12#include <asm/pgtable.h>
13#include <asm/tlbdebug.h> 13#include <asm/tlbdebug.h>
14#include <asm/mmu_context.h>
15 14
16static inline const char *msk2str(unsigned int mask) 15static inline const char *msk2str(unsigned int mask)
17{ 16{
@@ -56,7 +55,7 @@ static void dump_tlb(int first, int last)
56 s_pagemask = read_c0_pagemask(); 55 s_pagemask = read_c0_pagemask();
57 s_entryhi = read_c0_entryhi(); 56 s_entryhi = read_c0_entryhi();
58 s_index = read_c0_index(); 57 s_index = read_c0_index();
59 asid = ASID_MASK(s_entryhi); 58 asid = s_entryhi & 0xff;
60 59
61 for (i = first; i <= last; i++) { 60 for (i = first; i <= last; i++) {
62 write_c0_index(i); 61 write_c0_index(i);
@@ -86,7 +85,7 @@ static void dump_tlb(int first, int last)
86 85
87 printk("va=%0*lx asid=%02lx\n", 86 printk("va=%0*lx asid=%02lx\n",
88 width, (entryhi & ~0x1fffUL), 87 width, (entryhi & ~0x1fffUL),
89 ASID_MASK(entryhi)); 88 entryhi & 0xff);
90 printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ", 89 printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ",
91 width, 90 width,
92 (entrylo0 << 6) & PAGE_MASK, c0, 91 (entrylo0 << 6) & PAGE_MASK, c0,
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 8327698b9937..91615c2ef0cf 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -9,7 +9,6 @@
9#include <linux/mm.h> 9#include <linux/mm.h>
10 10
11#include <asm/mipsregs.h> 11#include <asm/mipsregs.h>
12#include <asm/mmu_context.h>
13#include <asm/page.h> 12#include <asm/page.h>
14#include <asm/pgtable.h> 13#include <asm/pgtable.h>
15#include <asm/tlbdebug.h> 14#include <asm/tlbdebug.h>
@@ -22,7 +21,7 @@ static void dump_tlb(int first, int last)
22 unsigned int asid; 21 unsigned int asid;
23 unsigned long entryhi, entrylo0; 22 unsigned long entryhi, entrylo0;
24 23
25 asid = ASID_MASK(read_c0_entryhi()); 24 asid = read_c0_entryhi() & 0xfc0;
26 25
27 for (i = first; i <= last; i++) { 26 for (i = first; i <= last; i++) {
28 write_c0_index(i<<8); 27 write_c0_index(i<<8);
@@ -36,7 +35,7 @@ static void dump_tlb(int first, int last)
36 35
37 /* Unused entries have a virtual address of KSEG0. */ 36 /* Unused entries have a virtual address of KSEG0. */
38 if ((entryhi & 0xffffe000) != 0x80000000 37 if ((entryhi & 0xffffe000) != 0x80000000
39 && (ASID_MASK(entryhi) == asid)) { 38 && (entryhi & 0xfc0) == asid) {
40 /* 39 /*
41 * Only print entries in use 40 * Only print entries in use
42 */ 41 */
@@ -45,7 +44,7 @@ static void dump_tlb(int first, int last)
45 printk("va=%08lx asid=%08lx" 44 printk("va=%08lx asid=%08lx"
46 " [pa=%06lx n=%d d=%d v=%d g=%d]", 45 " [pa=%06lx n=%d d=%d v=%d g=%d]",
47 (entryhi & 0xffffe000), 46 (entryhi & 0xffffe000),
48 ASID_MASK(entryhi), 47 entryhi & 0xfc0,
49 entrylo0 & PAGE_MASK, 48 entrylo0 & PAGE_MASK,
50 (entrylo0 & (1 << 11)) ? 1 : 0, 49 (entrylo0 & (1 << 11)) ? 1 : 0,
51 (entrylo0 & (1 << 10)) ? 1 : 0, 50 (entrylo0 & (1 << 10)) ? 1 : 0,
diff --git a/arch/mips/loongson/common/reset.c b/arch/mips/loongson/common/reset.c
index 35c8c6468494..65bfbb5d06f4 100644
--- a/arch/mips/loongson/common/reset.c
+++ b/arch/mips/loongson/common/reset.c
@@ -12,6 +12,7 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/pm.h> 13#include <linux/pm.h>
14 14
15#include <asm/idle.h>
15#include <asm/reboot.h> 16#include <asm/reboot.h>
16 17
17#include <loongson.h> 18#include <loongson.h>
diff --git a/arch/mips/loongson1/common/reset.c b/arch/mips/loongson1/common/reset.c
index d4f610f9604a..547f34b69e4c 100644
--- a/arch/mips/loongson1/common/reset.c
+++ b/arch/mips/loongson1/common/reset.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/io.h> 10#include <linux/io.h>
11#include <linux/pm.h> 11#include <linux/pm.h>
12#include <asm/idle.h>
12#include <asm/reboot.h> 13#include <asm/reboot.h>
13 14
14#include <loongson1.h> 15#include <loongson1.h>
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index 4a13c150f31b..a63d1ed0827f 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -51,7 +51,7 @@ void local_flush_tlb_all(void)
51#endif 51#endif
52 52
53 local_irq_save(flags); 53 local_irq_save(flags);
54 old_ctx = ASID_MASK(read_c0_entryhi()); 54 old_ctx = read_c0_entryhi() & ASID_MASK;
55 write_c0_entrylo0(0); 55 write_c0_entrylo0(0);
56 entry = r3k_have_wired_reg ? read_c0_wired() : 8; 56 entry = r3k_have_wired_reg ? read_c0_wired() : 8;
57 for (; entry < current_cpu_data.tlbsize; entry++) { 57 for (; entry < current_cpu_data.tlbsize; entry++) {
@@ -87,13 +87,13 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
87 87
88#ifdef DEBUG_TLB 88#ifdef DEBUG_TLB
89 printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", 89 printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
90 ASID_MASK(cpu_context(cpu, mm)), start, end); 90 cpu_context(cpu, mm) & ASID_MASK, start, end);
91#endif 91#endif
92 local_irq_save(flags); 92 local_irq_save(flags);
93 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 93 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
94 if (size <= current_cpu_data.tlbsize) { 94 if (size <= current_cpu_data.tlbsize) {
95 int oldpid = ASID_MASK(read_c0_entryhi()); 95 int oldpid = read_c0_entryhi() & ASID_MASK;
96 int newpid = ASID_MASK(cpu_context(cpu, mm)); 96 int newpid = cpu_context(cpu, mm) & ASID_MASK;
97 97
98 start &= PAGE_MASK; 98 start &= PAGE_MASK;
99 end += PAGE_SIZE - 1; 99 end += PAGE_SIZE - 1;
@@ -166,10 +166,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
166#ifdef DEBUG_TLB 166#ifdef DEBUG_TLB
167 printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page); 167 printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
168#endif 168#endif
169 newpid = ASID_MASK(cpu_context(cpu, vma->vm_mm)); 169 newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
170 page &= PAGE_MASK; 170 page &= PAGE_MASK;
171 local_irq_save(flags); 171 local_irq_save(flags);
172 oldpid = ASID_MASK(read_c0_entryhi()); 172 oldpid = read_c0_entryhi() & ASID_MASK;
173 write_c0_entryhi(page | newpid); 173 write_c0_entryhi(page | newpid);
174 BARRIER; 174 BARRIER;
175 tlb_probe(); 175 tlb_probe();
@@ -197,10 +197,10 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
197 if (current->active_mm != vma->vm_mm) 197 if (current->active_mm != vma->vm_mm)
198 return; 198 return;
199 199
200 pid = ASID_MASK(read_c0_entryhi()); 200 pid = read_c0_entryhi() & ASID_MASK;
201 201
202#ifdef DEBUG_TLB 202#ifdef DEBUG_TLB
203 if ((pid != ASID_MASK(cpu_context(cpu, vma->vm_mm))) || (cpu_context(cpu, vma->vm_mm) == 0)) { 203 if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
204 printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n", 204 printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
205 (cpu_context(cpu, vma->vm_mm)), pid); 205 (cpu_context(cpu, vma->vm_mm)), pid);
206 } 206 }
@@ -241,7 +241,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
241 241
242 local_irq_save(flags); 242 local_irq_save(flags);
243 /* Save old context and create impossible VPN2 value */ 243 /* Save old context and create impossible VPN2 value */
244 old_ctx = ASID_MASK(read_c0_entryhi()); 244 old_ctx = read_c0_entryhi() & ASID_MASK;
245 old_pagemask = read_c0_pagemask(); 245 old_pagemask = read_c0_pagemask();
246 w = read_c0_wired(); 246 w = read_c0_wired();
247 write_c0_wired(w + 1); 247 write_c0_wired(w + 1);
@@ -264,7 +264,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
264#endif 264#endif
265 265
266 local_irq_save(flags); 266 local_irq_save(flags);
267 old_ctx = ASID_MASK(read_c0_entryhi()); 267 old_ctx = read_c0_entryhi() & ASID_MASK;
268 write_c0_entrylo0(entrylo0); 268 write_c0_entrylo0(entrylo0);
269 write_c0_entryhi(entryhi); 269 write_c0_entryhi(entryhi);
270 write_c0_index(wired); 270 write_c0_index(wired);
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 09653b290d53..c643de4c473a 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -287,7 +287,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
287 287
288 ENTER_CRITICAL(flags); 288 ENTER_CRITICAL(flags);
289 289
290 pid = ASID_MASK(read_c0_entryhi()); 290 pid = read_c0_entryhi() & ASID_MASK;
291 address &= (PAGE_MASK << 1); 291 address &= (PAGE_MASK << 1);
292 write_c0_entryhi(address | pid); 292 write_c0_entryhi(address | pid);
293 pgdp = pgd_offset(vma->vm_mm, address); 293 pgdp = pgd_offset(vma->vm_mm, address);
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 122f9207f49e..91c2499f806a 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -195,7 +195,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
195 if (current->active_mm != vma->vm_mm) 195 if (current->active_mm != vma->vm_mm)
196 return; 196 return;
197 197
198 pid = ASID_MASK(read_c0_entryhi()); 198 pid = read_c0_entryhi() & ASID_MASK;
199 199
200 local_irq_save(flags); 200 local_irq_save(flags);
201 address &= PAGE_MASK; 201 address &= PAGE_MASK;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 4d46d3787576..afeef93f81a7 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -29,7 +29,6 @@
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/cache.h> 30#include <linux/cache.h>
31 31
32#include <asm/mmu_context.h>
33#include <asm/cacheflush.h> 32#include <asm/cacheflush.h>
34#include <asm/pgtable.h> 33#include <asm/pgtable.h>
35#include <asm/war.h> 34#include <asm/war.h>
@@ -302,82 +301,6 @@ static u32 tlb_handler[128] __cpuinitdata;
302static struct uasm_label labels[128] __cpuinitdata; 301static struct uasm_label labels[128] __cpuinitdata;
303static struct uasm_reloc relocs[128] __cpuinitdata; 302static struct uasm_reloc relocs[128] __cpuinitdata;
304 303
305#ifdef CONFIG_64BIT
306static int check_for_high_segbits __cpuinitdata;
307#endif
308
309static void __cpuinit insn_fixup(unsigned int **start, unsigned int **stop,
310 unsigned int i_const)
311{
312 unsigned int **p;
313
314 for (p = start; p < stop; p++) {
315#ifndef CONFIG_CPU_MICROMIPS
316 unsigned int *ip;
317
318 ip = *p;
319 *ip = (*ip & 0xffff0000) | i_const;
320#else
321 unsigned short *ip;
322
323 ip = ((unsigned short *)((unsigned int)*p - 1));
324 if ((*ip & 0xf000) == 0x4000) {
325 *ip &= 0xfff1;
326 *ip |= (i_const << 1);
327 } else if ((*ip & 0xf000) == 0x6000) {
328 *ip &= 0xfff1;
329 *ip |= ((i_const >> 2) << 1);
330 } else {
331 ip++;
332 *ip = i_const;
333 }
334#endif
335 local_flush_icache_range((unsigned long)ip,
336 (unsigned long)ip + sizeof(*ip));
337 }
338}
339
340#define asid_insn_fixup(section, const) \
341do { \
342 extern unsigned int *__start_ ## section; \
343 extern unsigned int *__stop_ ## section; \
344 insn_fixup(&__start_ ## section, &__stop_ ## section, const); \
345} while(0)
346
347/*
348 * Caller is assumed to flush the caches before the first context switch.
349 */
350static void __cpuinit setup_asid(unsigned int inc, unsigned int mask,
351 unsigned int version_mask,
352 unsigned int first_version)
353{
354 extern asmlinkage void handle_ri_rdhwr_vivt(void);
355 unsigned long *vivt_exc;
356
357#ifdef CONFIG_CPU_MICROMIPS
358 /*
359 * Worst case optimised microMIPS addiu instructions support
360 * only a 3-bit immediate value.
361 */
362 if(inc > 7)
363 panic("Invalid ASID increment value!");
364#endif
365 asid_insn_fixup(__asid_inc, inc);
366 asid_insn_fixup(__asid_mask, mask);
367 asid_insn_fixup(__asid_version_mask, version_mask);
368 asid_insn_fixup(__asid_first_version, first_version);
369
370 /* Patch up the 'handle_ri_rdhwr_vivt' handler. */
371 vivt_exc = (unsigned long *) &handle_ri_rdhwr_vivt;
372#ifdef CONFIG_CPU_MICROMIPS
373 vivt_exc = (unsigned long *)((unsigned long) vivt_exc - 1);
374#endif
375 vivt_exc++;
376 *vivt_exc = (*vivt_exc & ~mask) | mask;
377
378 current_cpu_data.asid_cache = first_version;
379}
380
381static int check_for_high_segbits __cpuinitdata; 304static int check_for_high_segbits __cpuinitdata;
382 305
383static unsigned int kscratch_used_mask __cpuinitdata; 306static unsigned int kscratch_used_mask __cpuinitdata;
@@ -2256,7 +2179,6 @@ void __cpuinit build_tlb_refill_handler(void)
2256 case CPU_TX3922: 2179 case CPU_TX3922:
2257 case CPU_TX3927: 2180 case CPU_TX3927:
2258#ifndef CONFIG_MIPS_PGD_C0_CONTEXT 2181#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
2259 setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000);
2260 if (cpu_has_local_ebase) 2182 if (cpu_has_local_ebase)
2261 build_r3000_tlb_refill_handler(); 2183 build_r3000_tlb_refill_handler();
2262 if (!run_once) { 2184 if (!run_once) {
@@ -2282,11 +2204,6 @@ void __cpuinit build_tlb_refill_handler(void)
2282 break; 2204 break;
2283 2205
2284 default: 2206 default:
2285#ifndef CONFIG_MIPS_MT_SMTC
2286 setup_asid(0x1, 0xff, 0xff00, ASID_FIRST_VERSION_R4000);
2287#else
2288 setup_asid(0x1, smtc_asid_mask, 0xff00, ASID_FIRST_VERSION_R4000);
2289#endif
2290 if (!run_once) { 2207 if (!run_once) {
2291 scratch_reg = allocate_kscratch(); 2208 scratch_reg = allocate_kscratch();
2292#ifdef CONFIG_MIPS_PGD_C0_CONTEXT 2209#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c
index af319143b591..eaa99d28cb8e 100644
--- a/arch/mips/netlogic/xlp/setup.c
+++ b/arch/mips/netlogic/xlp/setup.c
@@ -37,6 +37,7 @@
37#include <linux/pm.h> 37#include <linux/pm.h>
38#include <linux/bootmem.h> 38#include <linux/bootmem.h>
39 39
40#include <asm/idle.h>
40#include <asm/reboot.h> 41#include <asm/reboot.h>
41#include <asm/time.h> 42#include <asm/time.h>
42#include <asm/bootinfo.h> 43#include <asm/bootinfo.h>
diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c
index e3e094100e3e..89c8c1066632 100644
--- a/arch/mips/netlogic/xlr/setup.c
+++ b/arch/mips/netlogic/xlr/setup.c
@@ -36,6 +36,7 @@
36#include <linux/serial_8250.h> 36#include <linux/serial_8250.h>
37#include <linux/pm.h> 37#include <linux/pm.h>
38 38
39#include <asm/idle.h>
39#include <asm/reboot.h> 40#include <asm/reboot.h>
40#include <asm/time.h> 41#include <asm/time.h>
41#include <asm/bootinfo.h> 42#include <asm/bootinfo.h>
diff --git a/arch/mips/pmcs-msp71xx/msp_prom.c b/arch/mips/pmcs-msp71xx/msp_prom.c
index 0edb89a63516..1c9897531660 100644
--- a/arch/mips/pmcs-msp71xx/msp_prom.c
+++ b/arch/mips/pmcs-msp71xx/msp_prom.c
@@ -83,7 +83,7 @@ static inline unsigned char str2hexnum(unsigned char c)
83 return 0; /* foo */ 83 return 0; /* foo */
84} 84}
85 85
86static inline int str2eaddr(unsigned char *ea, unsigned char *str) 86int str2eaddr(unsigned char *ea, unsigned char *str)
87{ 87{
88 int index = 0; 88 int index = 0;
89 unsigned char num = 0; 89 unsigned char num = 0;
diff --git a/arch/mips/pmcs-msp71xx/msp_setup.c b/arch/mips/pmcs-msp71xx/msp_setup.c
index 1651cfdbfe7b..396b2967ad85 100644
--- a/arch/mips/pmcs-msp71xx/msp_setup.c
+++ b/arch/mips/pmcs-msp71xx/msp_setup.c
@@ -12,6 +12,7 @@
12 12
13#include <asm/bootinfo.h> 13#include <asm/bootinfo.h>
14#include <asm/cacheflush.h> 14#include <asm/cacheflush.h>
15#include <asm/idle.h>
15#include <asm/r4kcache.h> 16#include <asm/r4kcache.h>
16#include <asm/reboot.h> 17#include <asm/reboot.h>
17#include <asm/smp-ops.h> 18#include <asm/smp-ops.h>
diff --git a/arch/mips/ralink/dts/rt3050.dtsi b/arch/mips/ralink/dts/rt3050.dtsi
index ef7da1e227e6..e3203d414fee 100644
--- a/arch/mips/ralink/dts/rt3050.dtsi
+++ b/arch/mips/ralink/dts/rt3050.dtsi
@@ -55,4 +55,14 @@
55 reg-shift = <2>; 55 reg-shift = <2>;
56 }; 56 };
57 }; 57 };
58
59 usb@101c0000 {
60 compatible = "ralink,rt3050-usb", "snps,dwc2";
61 reg = <0x101c0000 40000>;
62
63 interrupt-parent = <&intc>;
64 interrupts = <18>;
65
66 status = "disabled";
67 };
58}; 68};
diff --git a/arch/mips/ralink/dts/rt3052_eval.dts b/arch/mips/ralink/dts/rt3052_eval.dts
index c18c9a84f4c4..0ac73ea28198 100644
--- a/arch/mips/ralink/dts/rt3052_eval.dts
+++ b/arch/mips/ralink/dts/rt3052_eval.dts
@@ -43,4 +43,8 @@
43 reg = <0x50000 0x7b0000>; 43 reg = <0x50000 0x7b0000>;
44 }; 44 };
45 }; 45 };
46
47 usb@101c0000 {
48 status = "ok";
49 };
46}; 50};
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
index fb1569580def..6b5f3406f414 100644
--- a/arch/mips/ralink/of.c
+++ b/arch/mips/ralink/of.c
@@ -88,7 +88,7 @@ void __init plat_mem_setup(void)
88 __dt_setup_arch(&__dtb_start); 88 __dt_setup_arch(&__dtb_start);
89 89
90 if (soc_info.mem_size) 90 if (soc_info.mem_size)
91 add_memory_region(soc_info.mem_base, soc_info.mem_size, 91 add_memory_region(soc_info.mem_base, soc_info.mem_size * SZ_1M,
92 BOOT_MEM_RAM); 92 BOOT_MEM_RAM);
93 else 93 else
94 detect_memory_region(soc_info.mem_base, 94 detect_memory_region(soc_info.mem_base,
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 5364aabc2102..681e7f86c080 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -26,6 +26,7 @@
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/irq.h> 27#include <linux/irq.h>
28#include <asm/bootinfo.h> 28#include <asm/bootinfo.h>
29#include <asm/idle.h>
29#include <asm/time.h> 30#include <asm/time.h>
30#include <asm/reboot.h> 31#include <asm/reboot.h>
31#include <asm/r4kcache.h> 32#include <asm/r4kcache.h>
diff --git a/arch/mips/vr41xx/common/pmu.c b/arch/mips/vr41xx/common/pmu.c
index 70a3f90131d8..d7f755833c3f 100644
--- a/arch/mips/vr41xx/common/pmu.c
+++ b/arch/mips/vr41xx/common/pmu.c
@@ -27,6 +27,7 @@
27 27
28#include <asm/cacheflush.h> 28#include <asm/cacheflush.h>
29#include <asm/cpu.h> 29#include <asm/cpu.h>
30#include <asm/idle.h>
30#include <asm/io.h> 31#include <asm/io.h>
31#include <asm/processor.h> 32#include <asm/processor.h>
32#include <asm/reboot.h> 33#include <asm/reboot.h>
diff --git a/arch/mips/wrppmc/reset.c b/arch/mips/wrppmc/reset.c
index cc5474b24f06..80beb188ed47 100644
--- a/arch/mips/wrppmc/reset.c
+++ b/arch/mips/wrppmc/reset.c
@@ -9,6 +9,7 @@
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10 10
11#include <asm/cacheflush.h> 11#include <asm/cacheflush.h>
12#include <asm/idle.h>
12#include <asm/mipsregs.h> 13#include <asm/mipsregs.h>
13#include <asm/processor.h> 14#include <asm/processor.h>
14 15