author		David Daney <david.daney@cavium.com>	2013-05-13 16:56:44 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2013-05-16 14:35:42 -0400
commit		48c4ac976ae995f263cde8f09578de86bc8e9f1d (patch)
tree		48e9b3753b951ea2c276546679264c3030a8ea7d /arch/mips/mm
parent		8ea6cd7af124ad070b44a7f60e225e45e3f38f79 (diff)
Revert "MIPS: Allow ASID size to be determined at boot time."
This reverts commit d532f3d26716a39dfd4b88d687bd344fbe77e390.

The original commit has several problems:

1) Doesn't work with 64-bit kernels.

2) Calls TLBMISS_HANDLER_SETUP() before the code is generated.

3) Calls TLBMISS_HANDLER_SETUP() twice in per_cpu_trap_init() when
   only one call is needed.

[ralf@linux-mips.org: Also revert the bits of the ASID patch which
were hidden in the KVM merge.]

Signed-off-by: David Daney <david.daney@cavium.com>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Cc: "Steven J. Hill" <Steven.Hill@imgtec.com>
Cc: David Daney <david.daney@cavium.com>
Patchwork: https://patchwork.linux-mips.org/patch/5242/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
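The change visible throughout the hunks below is mechanical: the reverted patch had turned ASID_MASK into a function-style macro whose value was patched into the code at boot, and the revert restores the plain compile-time constant mask. A minimal sketch of the restored usage (the helper name and mask value here are illustrative, not the kernel's definitions):

	#define ASID_MASK	0xff	/* illustrative R4000-style value, not the kernel's definition */

	static inline unsigned long get_current_asid(unsigned long entryhi)
	{
		/* restored form: mask with a compile-time constant */
		return entryhi & ASID_MASK;
	}

	/* reverted form (boot-time sized): old_ctx = ASID_MASK(read_c0_entryhi()); */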
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--	arch/mips/mm/tlb-r3k.c	20
-rw-r--r--	arch/mips/mm/tlb-r4k.c	2
-rw-r--r--	arch/mips/mm/tlb-r8k.c	2
-rw-r--r--	arch/mips/mm/tlbex.c	49
4 files changed, 12 insertions, 61 deletions
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index 4a13c150f31b..a63d1ed0827f 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -51,7 +51,7 @@ void local_flush_tlb_all(void)
 #endif
 
 	local_irq_save(flags);
-	old_ctx = ASID_MASK(read_c0_entryhi());
+	old_ctx = read_c0_entryhi() & ASID_MASK;
 	write_c0_entrylo0(0);
 	entry = r3k_have_wired_reg ? read_c0_wired() : 8;
 	for (; entry < current_cpu_data.tlbsize; entry++) {
@@ -87,13 +87,13 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 
 #ifdef DEBUG_TLB
 	printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
-		ASID_MASK(cpu_context(cpu, mm)), start, end);
+		cpu_context(cpu, mm) & ASID_MASK, start, end);
 #endif
 	local_irq_save(flags);
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	if (size <= current_cpu_data.tlbsize) {
-		int oldpid = ASID_MASK(read_c0_entryhi());
-		int newpid = ASID_MASK(cpu_context(cpu, mm));
+		int oldpid = read_c0_entryhi() & ASID_MASK;
+		int newpid = cpu_context(cpu, mm) & ASID_MASK;
 
 		start &= PAGE_MASK;
 		end += PAGE_SIZE - 1;
@@ -166,10 +166,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 #ifdef DEBUG_TLB
 	printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
 #endif
-	newpid = ASID_MASK(cpu_context(cpu, vma->vm_mm));
+	newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
 	page &= PAGE_MASK;
 	local_irq_save(flags);
-	oldpid = ASID_MASK(read_c0_entryhi());
+	oldpid = read_c0_entryhi() & ASID_MASK;
 	write_c0_entryhi(page | newpid);
 	BARRIER;
 	tlb_probe();
@@ -197,10 +197,10 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 	if (current->active_mm != vma->vm_mm)
 		return;
 
-	pid = ASID_MASK(read_c0_entryhi());
+	pid = read_c0_entryhi() & ASID_MASK;
 
 #ifdef DEBUG_TLB
-	if ((pid != ASID_MASK(cpu_context(cpu, vma->vm_mm))) || (cpu_context(cpu, vma->vm_mm) == 0)) {
+	if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
 		printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
 		       (cpu_context(cpu, vma->vm_mm)), pid);
 	}
@@ -241,7 +241,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 
 	local_irq_save(flags);
 	/* Save old context and create impossible VPN2 value */
-	old_ctx = ASID_MASK(read_c0_entryhi());
+	old_ctx = read_c0_entryhi() & ASID_MASK;
 	old_pagemask = read_c0_pagemask();
 	w = read_c0_wired();
 	write_c0_wired(w + 1);
@@ -264,7 +264,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 #endif
 
 	local_irq_save(flags);
-	old_ctx = ASID_MASK(read_c0_entryhi());
+	old_ctx = read_c0_entryhi() & ASID_MASK;
 	write_c0_entrylo0(entrylo0);
 	write_c0_entryhi(entryhi);
 	write_c0_index(wired);
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 09653b290d53..c643de4c473a 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -287,7 +287,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 
 	ENTER_CRITICAL(flags);
 
-	pid = ASID_MASK(read_c0_entryhi());
+	pid = read_c0_entryhi() & ASID_MASK;
 	address &= (PAGE_MASK << 1);
 	write_c0_entryhi(address | pid);
 	pgdp = pgd_offset(vma->vm_mm, address);
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 122f9207f49e..91c2499f806a 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -195,7 +195,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	if (current->active_mm != vma->vm_mm)
 		return;
 
-	pid = ASID_MASK(read_c0_entryhi());
+	pid = read_c0_entryhi() & ASID_MASK;
 
 	local_irq_save(flags);
 	address &= PAGE_MASK;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 2ad41e94394e..ce9818eef7d3 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -29,7 +29,6 @@
 #include <linux/init.h>
 #include <linux/cache.h>
 
-#include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
 #include <asm/war.h>
@@ -306,48 +305,6 @@ static struct uasm_reloc relocs[128] __cpuinitdata;
 static int check_for_high_segbits __cpuinitdata;
 #endif
 
-static void __cpuinit insn_fixup(unsigned int **start, unsigned int **stop,
-				 unsigned int i_const)
-{
-	unsigned int **p, *ip;
-
-	for (p = start; p < stop; p++) {
-		ip = *p;
-		*ip = (*ip & 0xffff0000) | i_const;
-	}
-	local_flush_icache_range((unsigned long)*p, (unsigned long)((*p) + 1));
-}
-
-#define asid_insn_fixup(section, const)					\
-do {									\
-	extern unsigned int *__start_ ## section;			\
-	extern unsigned int *__stop_ ## section;			\
-	insn_fixup(&__start_ ## section, &__stop_ ## section, const);	\
-} while(0)
-
-/*
- * Caller is assumed to flush the caches before the first context switch.
- */
-static void __cpuinit setup_asid(unsigned int inc, unsigned int mask,
-				 unsigned int version_mask,
-				 unsigned int first_version)
-{
-	extern asmlinkage void handle_ri_rdhwr_vivt(void);
-	unsigned long *vivt_exc;
-
-	asid_insn_fixup(__asid_inc, inc);
-	asid_insn_fixup(__asid_mask, mask);
-	asid_insn_fixup(__asid_version_mask, version_mask);
-	asid_insn_fixup(__asid_first_version, first_version);
-
-	/* Patch up the 'handle_ri_rdhwr_vivt' handler. */
-	vivt_exc = (unsigned long *) &handle_ri_rdhwr_vivt;
-	vivt_exc++;
-	*vivt_exc = (*vivt_exc & ~mask) | mask;
-
-	current_cpu_data.asid_cache = first_version;
-}
-
 static int check_for_high_segbits __cpuinitdata;
 
 static unsigned int kscratch_used_mask __cpuinitdata;
@@ -2226,7 +2183,6 @@ void __cpuinit build_tlb_refill_handler(void)
 	case CPU_TX3922:
 	case CPU_TX3927:
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
-		setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000);
 		if (cpu_has_local_ebase)
 			build_r3000_tlb_refill_handler();
 		if (!run_once) {
@@ -2252,11 +2208,6 @@ void __cpuinit build_tlb_refill_handler(void)
 		break;
 
 	default:
-#ifndef CONFIG_MIPS_MT_SMTC
-		setup_asid(0x1, 0xff, 0xff00, ASID_FIRST_VERSION_R4000);
-#else
-		setup_asid(0x1, smtc_asid_mask, 0xff00, ASID_FIRST_VERSION_R4000);
-#endif
 		if (!run_once) {
 			scratch_reg = allocate_kscratch();
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
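For reference, the removed insn_fixup()/setup_asid() machinery in tlbex.c worked by rewriting the 16-bit immediate field of tagged instruction words at boot and then flushing the instruction cache so the patched ASID parameters took effect. A minimal standalone sketch of that patching idea (function and parameter names here are illustrative, not the kernel's):

	#include <stdint.h>

	/* Rewrite the low 16-bit immediate of each tagged instruction word,
	 * keeping the opcode and register fields intact.  In the removed
	 * kernel code this was followed by local_flush_icache_range() so the
	 * CPU would fetch the patched immediates.
	 */
	static void fixup_immediates(uint32_t **start, uint32_t **stop, uint16_t imm)
	{
		uint32_t **p;

		for (p = start; p < stop; p++)
			**p = (**p & 0xffff0000u) | imm;
	}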