author    David Daney <david.daney@cavium.com>     2013-05-13 16:56:44 -0400
committer Ralf Baechle <ralf@linux-mips.org>       2013-05-16 14:35:42 -0400
commit    48c4ac976ae995f263cde8f09578de86bc8e9f1d (patch)
tree      48e9b3753b951ea2c276546679264c3030a8ea7d
parent    8ea6cd7af124ad070b44a7f60e225e45e3f38f79 (diff)
Revert "MIPS: Allow ASID size to be determined at boot time."
This reverts commit d532f3d26716a39dfd4b88d687bd344fbe77e390.

The original commit has several problems:

1) Doesn't work with 64-bit kernels.

2) Calls TLBMISS_HANDLER_SETUP() before the code is generated.

3) Calls TLBMISS_HANDLER_SETUP() twice in per_cpu_trap_init() when
   only one call is needed.

[ralf@linux-mips.org: Also revert the bits of the ASID patch which
 were hidden in the KVM merge.]

Signed-off-by: David Daney <david.daney@cavium.com>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Cc: "Steven J. Hill" <Steven.Hill@imgtec.com>
Cc: David Daney <david.daney@cavium.com>
Patchwork: https://patchwork.linux-mips.org/patch/5242/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
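For context on what the revert restores: mmu_context.h goes back to compile-time ASID constants, with the upper bits of the per-CPU asid_cache counter (unused by the hardware ASID field) acting as a software version number. The following is a minimal standalone sketch of that scheme, not kernel code; it assumes the R4000-style values (8 hardware ASID bits, ASID_INC 0x1) purely for illustration, and omits the kernel's overflow-to-zero handling and actual TLB flush.

/*
 * Sketch of the classic MIPS ASID allocation scheme restored by this
 * revert.  Constants below assume the R4000 case; the kernel selects
 * them per CPU family at compile time (e.g. R3000 uses ASID_INC 0x40
 * and ASID_MASK 0xfc0 because its EntryHi ASID field is bits 6..11).
 */
#include <stdio.h>

#define ASID_INC	0x1
#define ASID_MASK	0xffUL

/* All upper bits unused by hardware act as a software "version". */
#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK | (ASID_MASK - 1)))
#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)

int main(void)
{
	/* 0x100: version 1, hardware ASID 0. */
	unsigned long asid_cache = ASID_FIRST_VERSION;

	for (int i = 0; i < 260; i++) {
		asid_cache += ASID_INC;
		if (!(asid_cache & ASID_MASK)) {
			/* Hardware bits wrapped: here the real kernel
			 * flushes the TLB and starts a new version, so
			 * every mm must fetch a fresh ASID. */
			printf("wrap -> flush TLB, version now %#lx\n",
			       asid_cache & ASID_VERSION_MASK);
		}
	}
	printf("raw %#lx, hw asid %#lx, version %#lx\n",
	       asid_cache, asid_cache & ASID_MASK,
	       asid_cache & ASID_VERSION_MASK);
	return 0;
}

Note how ASID_VERSION_MASK and ASID_FIRST_VERSION fall out of ASID_MASK alone: for the R3000 mask 0xfc0 the same formulas yield a first version of 0x1000, matching the old ASID_FIRST_VERSION_R3000 constant removed below.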
-rw-r--r--  arch/mips/include/asm/kvm_host.h    |   2
-rw-r--r--  arch/mips/include/asm/mmu_context.h |  95
-rw-r--r--  arch/mips/kernel/genex.S            |   2
-rw-r--r--  arch/mips/kernel/smtc.c             |  10
-rw-r--r--  arch/mips/kernel/traps.c            |   6
-rw-r--r--  arch/mips/kvm/kvm_mips_emul.c       |  29
-rw-r--r--  arch/mips/kvm/kvm_tlb.c             |  26
-rw-r--r--  arch/mips/lib/dump_tlb.c            |   5
-rw-r--r--  arch/mips/lib/r3k_dump_tlb.c        |   7
-rw-r--r--  arch/mips/mm/tlb-r3k.c              |  20
-rw-r--r--  arch/mips/mm/tlb-r4k.c              |   2
-rw-r--r--  arch/mips/mm/tlb-r8k.c              |   2
-rw-r--r--  arch/mips/mm/tlbex.c                |  49
13 files changed, 93 insertions(+), 162 deletions(-)
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index e68781e18387..143875c6c95a 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -336,7 +336,7 @@ enum emulation_result {
 #define VPN2_MASK 0xffffe000
 #define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G))
 #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
-#define TLB_ASID(x) (ASID_MASK((x).tlb_hi))
+#define TLB_ASID(x) ((x).tlb_hi & ASID_MASK)
 #define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V))
 
 struct kvm_mips_tlb {
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 1554721e4808..820116067c10 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -67,68 +67,45 @@ extern unsigned long pgd_current[];
 	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
 #endif
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 
-#define ASID_INC(asid)						\
-({								\
-	unsigned long __asid = asid;				\
-	__asm__("1:\taddiu\t%0,1\t\t\t\t# patched\n\t"		\
-	".section\t__asid_inc,\"a\"\n\t"			\
-	".word\t1b\n\t"						\
-	".previous"						\
-	:"=r" (__asid)						\
-	:"0" (__asid));						\
-	__asid;							\
-})
-#define ASID_MASK(asid)						\
-({								\
-	unsigned long __asid = asid;				\
-	__asm__("1:\tandi\t%0,%1,0xfc0\t\t\t# patched\n\t"	\
-	".section\t__asid_mask,\"a\"\n\t"			\
-	".word\t1b\n\t"						\
-	".previous"						\
-	:"=r" (__asid)						\
-	:"r" (__asid));						\
-	__asid;							\
-})
-#define ASID_VERSION_MASK					\
-({								\
-	unsigned long __asid;					\
-	__asm__("1:\taddiu\t%0,$0,0xff00\t\t\t\t# patched\n\t"	\
-	".section\t__asid_version_mask,\"a\"\n\t"		\
-	".word\t1b\n\t"						\
-	".previous"						\
-	:"=r" (__asid));					\
-	__asid;							\
-})
-#define ASID_FIRST_VERSION					\
-({								\
-	unsigned long __asid = asid;				\
-	__asm__("1:\tli\t%0,0x100\t\t\t\t# patched\n\t"		\
-	".section\t__asid_first_version,\"a\"\n\t"		\
-	".word\t1b\n\t"						\
-	".previous"						\
-	:"=r" (__asid));					\
-	__asid;							\
-})
-
-#define ASID_FIRST_VERSION_R3000	0x1000
-#define ASID_FIRST_VERSION_R4000	0x100
-#define ASID_FIRST_VERSION_R8000	0x1000
-#define ASID_FIRST_VERSION_RM9000	0x1000
+#define ASID_INC	0x40
+#define ASID_MASK	0xfc0
+
+#elif defined(CONFIG_CPU_R8000)
+
+#define ASID_INC	0x10
+#define ASID_MASK	0xff0
+
+#elif defined(CONFIG_MIPS_MT_SMTC)
+
+#define ASID_INC	0x1
+extern unsigned long smtc_asid_mask;
+#define ASID_MASK	(smtc_asid_mask)
+#define HW_ASID_MASK	0xff
+/* End SMTC/34K debug hack */
+#else /* FIXME: not correct for R6000 */
+
+#define ASID_INC	0x1
+#define ASID_MASK	0xff
 
-#ifdef CONFIG_MIPS_MT_SMTC
-#define SMTC_HW_ASID_MASK		0xff
-extern unsigned int smtc_asid_mask;
 #endif
 
 #define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
-#define cpu_asid(cpu, mm)	ASID_MASK(cpu_context((cpu), (mm)))
+#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & ASID_MASK)
 #define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
 
+/*
+ * All unused by hardware upper bits will be considered
+ * as a software asid extension.
+ */
+#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
+#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
+
 #ifndef CONFIG_MIPS_MT_SMTC
 /* Normal, classic MIPS get_new_mmu_context */
 static inline void
@@ -137,7 +114,7 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 	extern void kvm_local_flush_tlb_all(void);
 	unsigned long asid = asid_cache(cpu);
 
-	if (!ASID_MASK((asid = ASID_INC(asid)))) {
+	if (! ((asid += ASID_INC) & ASID_MASK) ) {
 		if (cpu_has_vtag_icache)
 			flush_icache_all();
 #ifdef CONFIG_VIRTUALIZATION
@@ -200,7 +177,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	 * free up the ASID value for use and flush any old
 	 * instances of it from the TLB.
 	 */
-	oldasid = ASID_MASK(read_c0_entryhi());
+	oldasid = (read_c0_entryhi() & ASID_MASK);
 	if(smtc_live_asid[mytlb][oldasid]) {
 		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
 		if(smtc_live_asid[mytlb][oldasid] == 0)
@@ -211,7 +188,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	 * having ASID_MASK smaller than the hardware maximum,
 	 * make sure no "soft" bits become "hard"...
 	 */
-	write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
+	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
 			 cpu_asid(cpu, next));
 	ehb(); /* Make sure it propagates to TCStatus */
 	evpe(mtflags);
@@ -264,15 +241,15 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
 #ifdef CONFIG_MIPS_MT_SMTC
 	/* See comments for similar code above */
 	mtflags = dvpe();
-	oldasid = ASID_MASK(read_c0_entryhi());
+	oldasid = read_c0_entryhi() & ASID_MASK;
 	if(smtc_live_asid[mytlb][oldasid]) {
 		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
 		if(smtc_live_asid[mytlb][oldasid] == 0)
 			smtc_flush_tlb_asid(oldasid);
 	}
 	/* See comments for similar code above */
-	write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
+	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
 			 cpu_asid(cpu, next));
 	ehb(); /* Make sure it propagates to TCStatus */
 	evpe(mtflags);
 #else
@@ -309,14 +286,14 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
 #ifdef CONFIG_MIPS_MT_SMTC
 	/* See comments for similar code above */
 	prevvpe = dvpe();
-	oldasid = ASID_MASK(read_c0_entryhi());
+	oldasid = (read_c0_entryhi() & ASID_MASK);
 	if (smtc_live_asid[mytlb][oldasid]) {
 		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
 		if(smtc_live_asid[mytlb][oldasid] == 0)
 			smtc_flush_tlb_asid(oldasid);
 	}
 	/* See comments for similar code above */
-	write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK)
+	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
 			 | cpu_asid(cpu, mm));
 	ehb(); /* Make sure it propagates to TCStatus */
 	evpe(prevvpe);
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 5c2ba9f08a80..9098829bfcb0 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -493,7 +493,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
 	.set	noreorder
 	/* check if TLB contains a entry for EPC */
 	MFC0	k1, CP0_ENTRYHI
-	andi	k1, 0xff	/* ASID_MASK patched at run-time!! */
+	andi	k1, 0xff	/* ASID_MASK */
 	MFC0	k0, CP0_EPC
 	PTR_SRL	k0, _PAGE_SHIFT + 1
 	PTR_SLL	k0, _PAGE_SHIFT + 1
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 31d22f3121c9..7186222dc5bb 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -111,7 +111,7 @@ static int vpe0limit;
 static int ipibuffers;
 static int nostlb;
 static int asidmask;
-unsigned int smtc_asid_mask = 0xff;
+unsigned long smtc_asid_mask = 0xff;
 
 static int __init vpe0tcs(char *str)
 {
@@ -1395,7 +1395,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 	asid = asid_cache(cpu);
 
 	do {
-		if (!ASID_MASK(ASID_INC(asid))) {
+		if (!((asid += ASID_INC) & ASID_MASK) ) {
 			if (cpu_has_vtag_icache)
 				flush_icache_all();
 			/* Traverse all online CPUs (hack requires contiguous range) */
@@ -1414,7 +1414,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 				mips_ihb();
 			}
 			tcstat = read_tc_c0_tcstatus();
-			smtc_live_asid[tlb][ASID_MASK(tcstat)] |= (asiduse)(0x1 << i);
+			smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
 			if (!prevhalt)
 				write_tc_c0_tchalt(0);
 		}
@@ -1423,7 +1423,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 			asid = ASID_FIRST_VERSION;
 			local_flush_tlb_all();	/* start new asid cycle */
 		}
-	} while (smtc_live_asid[tlb][ASID_MASK(asid)]);
+	} while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
 
 	/*
 	 * SMTC shares the TLB within VPEs and possibly across all VPEs.
@@ -1461,7 +1461,7 @@ void smtc_flush_tlb_asid(unsigned long asid)
 		tlb_read();
 		ehb();
 		ehi = read_c0_entryhi();
-		if (ASID_MASK(ehi) == asid) {
+		if ((ehi & ASID_MASK) == asid) {
 			/*
 			 * Invalidate only entries with specified ASID,
 			 * makiing sure all entries differ.
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 77cff1f6d050..cb14db3c5764 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1656,7 +1656,6 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
 	unsigned int cpu = smp_processor_id();
 	unsigned int status_set = ST0_CU0;
 	unsigned int hwrena = cpu_hwrena_impl_bits;
-	unsigned long asid = 0;
 #ifdef CONFIG_MIPS_MT_SMTC
 	int secondaryTC = 0;
 	int bootTC = (cpu == 0);
@@ -1740,9 +1739,8 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
 	}
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-	asid = ASID_FIRST_VERSION;
-	cpu_data[cpu].asid_cache = asid;
-	TLBMISS_HANDLER_SETUP();
+	if (!cpu_data[cpu].asid_cache)
+		cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
 
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
index 2b2bac9a40aa..4b6274b47f33 100644
--- a/arch/mips/kvm/kvm_mips_emul.c
+++ b/arch/mips/kvm/kvm_mips_emul.c
@@ -525,16 +525,18 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
 				printk("MTCz, cop0->reg[EBASE]: %#lx\n",
 				       kvm_read_c0_guest_ebase(cop0));
 			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
-				uint32_t nasid = ASID_MASK(vcpu->arch.gprs[rt]);
+				uint32_t nasid =
+				    vcpu->arch.gprs[rt] & ASID_MASK;
 				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
 				    &&
-				    (ASID_MASK(kvm_read_c0_guest_entryhi(cop0))
-				     != nasid)) {
+				    ((kvm_read_c0_guest_entryhi(cop0) &
+				      ASID_MASK) != nasid)) {
 
 					kvm_debug
 					    ("MTCz, change ASID from %#lx to %#lx\n",
-					     ASID_MASK(kvm_read_c0_guest_entryhi(cop0)),
-					     ASID_MASK(vcpu->arch.gprs[rt]));
+					     kvm_read_c0_guest_entryhi(cop0) &
+					     ASID_MASK,
+					     vcpu->arch.gprs[rt] & ASID_MASK);
 
 					/* Blow away the shadow host TLBs */
 					kvm_mips_flush_host_tlb(1);
@@ -986,7 +988,8 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
 		 * resulting handler will do the right thing
 		 */
 		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
-						  ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
+						  (kvm_read_c0_guest_entryhi
+						   (cop0) & ASID_MASK));
 
 		if (index < 0) {
 			vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
@@ -1151,7 +1154,7 @@ kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
 	enum emulation_result er = EMULATE_DONE;
 	unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) |
-			ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+			(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
 
 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 		/* save old pc */
@@ -1198,7 +1201,7 @@ kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
 	enum emulation_result er = EMULATE_DONE;
 	unsigned long entryhi =
 		(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-		ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+		(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
 
 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 		/* save old pc */
@@ -1243,7 +1246,7 @@ kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
 	enum emulation_result er = EMULATE_DONE;
 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-				ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
 
 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 		/* save old pc */
@@ -1287,7 +1290,7 @@ kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
 	enum emulation_result er = EMULATE_DONE;
 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-				ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
 
 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 		/* save old pc */
@@ -1356,7 +1359,7 @@ kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-				ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
 	enum emulation_result er = EMULATE_DONE;
 
@@ -1783,8 +1786,8 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
 		 */
 		index = kvm_mips_guest_tlb_lookup(vcpu,
 					  (va & VPN2_MASK) |
-					  ASID_MASK(kvm_read_c0_guest_entryhi
-					  (vcpu->arch.cop0)));
+					  (kvm_read_c0_guest_entryhi
+					   (vcpu->arch.cop0) & ASID_MASK));
 		if (index < 0) {
 			if (exccode == T_TLB_LD_MISS) {
 				er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
index 89511a9258d3..e3f0d9b8b6c5 100644
--- a/arch/mips/kvm/kvm_tlb.c
+++ b/arch/mips/kvm/kvm_tlb.c
@@ -51,13 +51,13 @@ EXPORT_SYMBOL(kvm_mips_is_error_pfn);
 
 uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
 {
-	return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]);
+	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
 }
 
 
 uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
 {
-	return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]);
+	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
 }
 
 inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu)
@@ -84,7 +84,7 @@ void kvm_mips_dump_host_tlbs(void)
 	old_pagemask = read_c0_pagemask();
 
 	printk("HOST TLBs:\n");
-	printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi()));
+	printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
 
 	for (i = 0; i < current_cpu_data.tlbsize; i++) {
 		write_c0_index(i);
@@ -428,7 +428,7 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
 
 	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
 		if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
-		    (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) {
+		    (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
 			index = i;
 			break;
 		}
@@ -626,7 +626,7 @@ kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
 {
 	unsigned long asid = asid_cache(cpu);
 
-	if (!(ASID_MASK(ASID_INC(asid)))) {
+	if (!((asid += ASID_INC) & ASID_MASK)) {
 		if (cpu_has_vtag_icache) {
 			flush_icache_all();
 		}
@@ -804,7 +804,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (!newasid) {
 		/* If we preempted while the guest was executing, then reload the pre-empted ASID */
 		if (current->flags & PF_VCPU) {
-			write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi));
+			write_c0_entryhi(vcpu->arch.
+					 preempt_entryhi & ASID_MASK);
 			ehb();
 		}
 	} else {
@@ -816,11 +817,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		 */
 		if (current->flags & PF_VCPU) {
 			if (KVM_GUEST_KERNEL_MODE(vcpu))
-				write_c0_entryhi(ASID_MASK(vcpu->arch.
-						 guest_kernel_asid[cpu]));
+				write_c0_entryhi(vcpu->arch.
+						 guest_kernel_asid[cpu] &
+						 ASID_MASK);
 			else
-				write_c0_entryhi(ASID_MASK(vcpu->arch.
-						 guest_user_asid[cpu]));
+				write_c0_entryhi(vcpu->arch.
+						 guest_user_asid[cpu] &
+						 ASID_MASK);
 			ehb();
 		}
 	}
@@ -879,7 +882,8 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
 		    kvm_mips_guest_tlb_lookup(vcpu,
 			      ((unsigned long) opc & VPN2_MASK)
 			      |
-			      ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
+			      (kvm_read_c0_guest_entryhi
+			       (cop0) & ASID_MASK));
 		if (index < 0) {
 			kvm_err
 			    ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 8a12d00908e0..32b9f21bfd85 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -11,7 +11,6 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbdebug.h>
-#include <asm/mmu_context.h>
 
 static inline const char *msk2str(unsigned int mask)
 {
@@ -56,7 +55,7 @@ static void dump_tlb(int first, int last)
 	s_pagemask = read_c0_pagemask();
 	s_entryhi = read_c0_entryhi();
 	s_index = read_c0_index();
-	asid = ASID_MASK(s_entryhi);
+	asid = s_entryhi & 0xff;
 
 	for (i = first; i <= last; i++) {
 		write_c0_index(i);
@@ -86,7 +85,7 @@ static void dump_tlb(int first, int last)
 
 			printk("va=%0*lx asid=%02lx\n",
 			       width, (entryhi & ~0x1fffUL),
-			       ASID_MASK(entryhi));
+			       entryhi & 0xff);
 			printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ",
 			       width,
 			       (entrylo0 << 6) & PAGE_MASK, c0,
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 8327698b9937..91615c2ef0cf 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -9,7 +9,6 @@
 #include <linux/mm.h>
 
 #include <asm/mipsregs.h>
-#include <asm/mmu_context.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbdebug.h>
@@ -22,7 +21,7 @@ static void dump_tlb(int first, int last)
 	unsigned int asid;
 	unsigned long entryhi, entrylo0;
 
-	asid = ASID_MASK(read_c0_entryhi());
+	asid = read_c0_entryhi() & 0xfc0;
 
 	for (i = first; i <= last; i++) {
 		write_c0_index(i<<8);
@@ -36,7 +35,7 @@ static void dump_tlb(int first, int last)
 
 		/* Unused entries have a virtual address of KSEG0. */
 		if ((entryhi & 0xffffe000) != 0x80000000
-		    && (ASID_MASK(entryhi) == asid)) {
+		    && (entryhi & 0xfc0) == asid) {
 			/*
 			 * Only print entries in use
 			 */
@@ -45,7 +44,7 @@ static void dump_tlb(int first, int last)
 			printk("va=%08lx asid=%08lx"
 			       " [pa=%06lx n=%d d=%d v=%d g=%d]",
 			       (entryhi & 0xffffe000),
-			       ASID_MASK(entryhi),
+			       entryhi & 0xfc0,
 			       entrylo0 & PAGE_MASK,
 			       (entrylo0 & (1 << 11)) ? 1 : 0,
 			       (entrylo0 & (1 << 10)) ? 1 : 0,
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index 4a13c150f31b..a63d1ed0827f 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -51,7 +51,7 @@ void local_flush_tlb_all(void)
 #endif
 
 	local_irq_save(flags);
-	old_ctx = ASID_MASK(read_c0_entryhi());
+	old_ctx = read_c0_entryhi() & ASID_MASK;
 	write_c0_entrylo0(0);
 	entry = r3k_have_wired_reg ? read_c0_wired() : 8;
 	for (; entry < current_cpu_data.tlbsize; entry++) {
@@ -87,13 +87,13 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 
 #ifdef DEBUG_TLB
 	printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
-	       ASID_MASK(cpu_context(cpu, mm)), start, end);
+	       cpu_context(cpu, mm) & ASID_MASK, start, end);
 #endif
 	local_irq_save(flags);
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	if (size <= current_cpu_data.tlbsize) {
-		int oldpid = ASID_MASK(read_c0_entryhi());
-		int newpid = ASID_MASK(cpu_context(cpu, mm));
+		int oldpid = read_c0_entryhi() & ASID_MASK;
+		int newpid = cpu_context(cpu, mm) & ASID_MASK;
 
 		start &= PAGE_MASK;
 		end += PAGE_SIZE - 1;
@@ -166,10 +166,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 #ifdef DEBUG_TLB
 	printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
 #endif
-	newpid = ASID_MASK(cpu_context(cpu, vma->vm_mm));
+	newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
 	page &= PAGE_MASK;
 	local_irq_save(flags);
-	oldpid = ASID_MASK(read_c0_entryhi());
+	oldpid = read_c0_entryhi() & ASID_MASK;
 	write_c0_entryhi(page | newpid);
 	BARRIER;
 	tlb_probe();
@@ -197,10 +197,10 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 	if (current->active_mm != vma->vm_mm)
 		return;
 
-	pid = ASID_MASK(read_c0_entryhi());
+	pid = read_c0_entryhi() & ASID_MASK;
 
 #ifdef DEBUG_TLB
-	if ((pid != ASID_MASK(cpu_context(cpu, vma->vm_mm))) || (cpu_context(cpu, vma->vm_mm) == 0)) {
+	if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
 		printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
 		       (cpu_context(cpu, vma->vm_mm)), pid);
 	}
@@ -241,7 +241,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 
 		local_irq_save(flags);
 		/* Save old context and create impossible VPN2 value */
-		old_ctx = ASID_MASK(read_c0_entryhi());
+		old_ctx = read_c0_entryhi() & ASID_MASK;
 		old_pagemask = read_c0_pagemask();
 		w = read_c0_wired();
 		write_c0_wired(w + 1);
@@ -264,7 +264,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 #endif
 
 		local_irq_save(flags);
-		old_ctx = ASID_MASK(read_c0_entryhi());
+		old_ctx = read_c0_entryhi() & ASID_MASK;
 		write_c0_entrylo0(entrylo0);
 		write_c0_entryhi(entryhi);
 		write_c0_index(wired);
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 09653b290d53..c643de4c473a 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -287,7 +287,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 
 	ENTER_CRITICAL(flags);
 
-	pid = ASID_MASK(read_c0_entryhi());
+	pid = read_c0_entryhi() & ASID_MASK;
 	address &= (PAGE_MASK << 1);
 	write_c0_entryhi(address | pid);
 	pgdp = pgd_offset(vma->vm_mm, address);
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 122f9207f49e..91c2499f806a 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -195,7 +195,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	if (current->active_mm != vma->vm_mm)
 		return;
 
-	pid = ASID_MASK(read_c0_entryhi());
+	pid = read_c0_entryhi() & ASID_MASK;
 
 	local_irq_save(flags);
 	address &= PAGE_MASK;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 2ad41e94394e..ce9818eef7d3 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -29,7 +29,6 @@
 #include <linux/init.h>
 #include <linux/cache.h>
 
-#include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
 #include <asm/war.h>
@@ -306,48 +305,6 @@ static struct uasm_reloc relocs[128] __cpuinitdata;
 static int check_for_high_segbits __cpuinitdata;
 #endif
 
-static void __cpuinit insn_fixup(unsigned int **start, unsigned int **stop,
-					unsigned int i_const)
-{
-	unsigned int **p, *ip;
-
-	for (p = start; p < stop; p++) {
-		ip = *p;
-		*ip = (*ip & 0xffff0000) | i_const;
-	}
-	local_flush_icache_range((unsigned long)*p, (unsigned long)((*p) + 1));
-}
-
-#define asid_insn_fixup(section, const)					\
-do {									\
-	extern unsigned int *__start_ ## section;			\
-	extern unsigned int *__stop_ ## section;			\
-	insn_fixup(&__start_ ## section, &__stop_ ## section, const);	\
-} while(0)
-
-/*
- * Caller is assumed to flush the caches before the first context switch.
- */
-static void __cpuinit setup_asid(unsigned int inc, unsigned int mask,
-				 unsigned int version_mask,
-				 unsigned int first_version)
-{
-	extern asmlinkage void handle_ri_rdhwr_vivt(void);
-	unsigned long *vivt_exc;
-
-	asid_insn_fixup(__asid_inc, inc);
-	asid_insn_fixup(__asid_mask, mask);
-	asid_insn_fixup(__asid_version_mask, version_mask);
-	asid_insn_fixup(__asid_first_version, first_version);
-
-	/* Patch up the 'handle_ri_rdhwr_vivt' handler. */
-	vivt_exc = (unsigned long *) &handle_ri_rdhwr_vivt;
-	vivt_exc++;
-	*vivt_exc = (*vivt_exc & ~mask) | mask;
-
-	current_cpu_data.asid_cache = first_version;
-}
-
 static int check_for_high_segbits __cpuinitdata;
 
 static unsigned int kscratch_used_mask __cpuinitdata;
@@ -2226,7 +2183,6 @@ void __cpuinit build_tlb_refill_handler(void)
 	case CPU_TX3922:
 	case CPU_TX3927:
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
-		setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000);
 		if (cpu_has_local_ebase)
 			build_r3000_tlb_refill_handler();
 		if (!run_once) {
@@ -2252,11 +2208,6 @@ void __cpuinit build_tlb_refill_handler(void)
 		break;
 
 	default:
-#ifndef CONFIG_MIPS_MT_SMTC
-		setup_asid(0x1, 0xff, 0xff00, ASID_FIRST_VERSION_R4000);
-#else
-		setup_asid(0x1, smtc_asid_mask, 0xff00, ASID_FIRST_VERSION_R4000);
-#endif
 		if (!run_once) {
 			scratch_reg = allocate_kscratch();
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT