-rw-r--r--  arch/mips/include/asm/cpu-features.h   13
-rw-r--r--  arch/mips/include/asm/cpu.h             1
-rw-r--r--  arch/mips/include/asm/mipsregs.h        4
-rw-r--r--  arch/mips/include/asm/mmu.h             6
-rw-r--r--  arch/mips/include/asm/mmu_context.h    54
-rw-r--r--  arch/mips/kernel/cpu-probe.c           55
-rw-r--r--  arch/mips/kernel/smp.c                 57
-rw-r--r--  arch/mips/kernel/traps.c                4
-rw-r--r--  arch/mips/kernel/unaligned.c            1
-rw-r--r--  arch/mips/kvm/mips.c                    5
-rw-r--r--  arch/mips/lib/dump_tlb.c               22
-rw-r--r--  arch/mips/mm/c-r4k.c                    3
-rw-r--r--  arch/mips/mm/context.c                256
-rw-r--r--  arch/mips/mm/init.c                     7
-rw-r--r--  arch/mips/mm/tlb-r4k.c                 52
15 files changed, 509 insertions(+), 31 deletions(-)
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 701e525641b8..6998a9796499 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -591,6 +591,19 @@
 #endif /* CONFIG_MIPS_MT_SMP */
 
 /*
+ * We only enable MMID support for configurations which natively support 64 bit
+ * atomics because getting good performance from the allocator relies upon
+ * efficient atomic64_*() functions.
+ */
+#ifndef cpu_has_mmid
+# ifdef CONFIG_GENERIC_ATOMIC64
+#  define cpu_has_mmid	0
+# else
+#  define cpu_has_mmid	__isa_ge_and_opt(6, MIPS_CPU_MMID)
+# endif
+#endif
+
+/*
  * Guest capabilities
  */
 #ifndef cpu_guest_has_conf1
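
The hunk above follows the usual cpu-features.h convention: a platform's cpu-feature-overrides.h may define cpu_has_mmid to a constant before this default takes effect, which is what lets the probe code later in this patch use __builtin_constant_p() to distinguish a hard-coded value from a runtime-probed one. A standalone sketch of that pattern, using a hypothetical cpu_has_widget macro and a stubbed options word (not part of the patch):

/* Standalone sketch of the override-or-probe pattern; names are hypothetical. */
#include <stdio.h>

#define OPT_WIDGET	(1ull << 3)		/* stand-in for a MIPS_CPU_* bit */
static unsigned long long cpu_options = OPT_WIDGET;	/* "probed" at runtime */

/* A platform header could hard-code this to 0 or 1 before this point;
 * otherwise fall back to the runtime options word, mirroring cpu_has_mmid. */
#ifndef cpu_has_widget
# define cpu_has_widget	(!!(cpu_options & OPT_WIDGET))
#endif

int main(void)
{
	if (__builtin_constant_p(cpu_has_widget))
		printf("feature hard-coded to %d\n", (int)cpu_has_widget);
	else
		printf("feature probed at runtime: %d\n", (int)cpu_has_widget);
	return 0;
}
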
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 532b49b1dbb3..6ad7d3cabd91 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -422,6 +422,7 @@ enum cpu_type_enum {
 				MBIT_ULL(55)	/* CPU shares FTLB entries with another */
 #define MIPS_CPU_MT_PER_TC_PERF_COUNTERS \
 				MBIT_ULL(56)	/* CPU has perf counters implemented per TC (MIPSMT ASE) */
+#define MIPS_CPU_MMID		MBIT_ULL(57)	/* CPU supports MemoryMapIDs */
 
 /*
  * CPU ASE encodings
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 900a47581dd1..1e6966e8527e 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -667,6 +667,7 @@
 #define MIPS_CONF5_FRE		(_ULCAST_(1) << 8)
 #define MIPS_CONF5_UFE		(_ULCAST_(1) << 9)
 #define MIPS_CONF5_CA2		(_ULCAST_(1) << 14)
+#define MIPS_CONF5_MI		(_ULCAST_(1) << 17)
 #define MIPS_CONF5_CRCP		(_ULCAST_(1) << 18)
 #define MIPS_CONF5_MSAEN	(_ULCAST_(1) << 27)
 #define MIPS_CONF5_EVA		(_ULCAST_(1) << 28)
@@ -1610,6 +1611,9 @@ do { \
 #define read_c0_xcontextconfig()	__read_ulong_c0_register($4, 3)
 #define write_c0_xcontextconfig(val)	__write_ulong_c0_register($4, 3, val)
 
+#define read_c0_memorymapid()		__read_32bit_c0_register($4, 5)
+#define write_c0_memorymapid(val)	__write_32bit_c0_register($4, 5, val)
+
 #define read_c0_pagemask()	__read_32bit_c0_register($5, 0)
 #define write_c0_pagemask(val)	__write_32bit_c0_register($5, 0, val)
 
diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
index 88a108ce62c1..5df0238f639b 100644
--- a/arch/mips/include/asm/mmu.h
+++ b/arch/mips/include/asm/mmu.h
@@ -7,7 +7,11 @@
 #include <linux/wait.h>
 
 typedef struct {
-	u64 asid[NR_CPUS];
+	union {
+		u64 asid[NR_CPUS];
+		atomic64_t mmid;
+	};
+
 	void *vdso;
 
 	/* lock to be held whilst modifying fp_bd_emupage_allocmap */
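
Context for the union above: an ASID is tracked per CPU for each mm, while an MMID is a single global value per mm, so the two can overlay the same storage because only one scheme is ever active on a given system. A minimal user-space sketch of the idea with stand-in types (not the kernel structures):

/* Standalone sketch: one storage area, two interpretations. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4
static int cpu_has_mmid = 1;		/* pretend the CPU supports MMIDs */

struct mm_context {
	union {
		uint64_t asid[NR_CPUS];		/* per-CPU ASID + version */
		_Atomic uint64_t mmid;		/* single global MMID */
	};
};

static uint64_t cpu_context(int cpu, struct mm_context *ctx)
{
	if (cpu_has_mmid)
		return atomic_load(&ctx->mmid);
	return ctx->asid[cpu];
}

static void set_cpu_context(int cpu, struct mm_context *ctx, uint64_t val)
{
	if (cpu_has_mmid)
		atomic_store(&ctx->mmid, val);
	else
		ctx->asid[cpu] = val;
}

int main(void)
{
	struct mm_context ctx = { 0 };

	set_cpu_context(0, &ctx, 42);
	/* With MMIDs the value is global, so every CPU sees it: prints 42. */
	printf("context on cpu1: %llu\n", (unsigned long long)cpu_context(1, &ctx));
	return 0;
}
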
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index a0f29df8ced8..cddead91acd4 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -17,8 +17,10 @@
 #include <linux/smp.h>
 #include <linux/slab.h>
 
+#include <asm/barrier.h>
 #include <asm/cacheflush.h>
 #include <asm/dsemul.h>
+#include <asm/ginvt.h>
 #include <asm/hazards.h>
 #include <asm/tlbflush.h>
 #include <asm-generic/mm_hooks.h>
@@ -73,6 +75,19 @@ extern unsigned long pgd_current[];
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
 
 /*
+ * The ginvt instruction will invalidate wired entries when its type field
+ * targets anything other than the entire TLB. That means that if we were to
+ * allow the kernel to create wired entries with the MMID of current->active_mm
+ * then those wired entries could be invalidated when we later use ginvt to
+ * invalidate TLB entries with that MMID.
+ *
+ * In order to prevent ginvt from trashing wired entries, we reserve one MMID
+ * for use by the kernel when creating wired entries. This MMID will never be
+ * assigned to a struct mm, and we'll never target it with a ginvt instruction.
+ */
+#define MMID_KERNEL_WIRED	0
+
+/*
  * All unused by hardware upper bits will be considered
  * as a software asid extension.
  */
@@ -90,13 +105,19 @@ static inline u64 asid_first_version(unsigned int cpu)
 
 static inline u64 cpu_context(unsigned int cpu, const struct mm_struct *mm)
 {
+	if (cpu_has_mmid)
+		return atomic64_read(&mm->context.mmid);
+
 	return mm->context.asid[cpu];
 }
 
 static inline void set_cpu_context(unsigned int cpu,
 				   struct mm_struct *mm, u64 ctx)
 {
-	mm->context.asid[cpu] = ctx;
+	if (cpu_has_mmid)
+		atomic64_set(&mm->context.mmid, ctx);
+	else
+		mm->context.asid[cpu] = ctx;
 }
 
 #define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
@@ -120,8 +141,12 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	int i;
 
-	for_each_possible_cpu(i)
-		set_cpu_context(i, mm, 0);
+	if (cpu_has_mmid) {
+		set_cpu_context(0, mm, 0);
+	} else {
+		for_each_possible_cpu(i)
+			set_cpu_context(i, mm, 0);
+	}
 
 	mm->context.bd_emupage_allocmap = NULL;
 	spin_lock_init(&mm->context.bd_emupage_lock);
@@ -168,12 +193,33 @@ drop_mmu_context(struct mm_struct *mm)
 {
 	unsigned long flags;
 	unsigned int cpu;
+	u32 old_mmid;
+	u64 ctx;
 
 	local_irq_save(flags);
 
 	cpu = smp_processor_id();
-	if (!cpu_context(cpu, mm)) {
+	ctx = cpu_context(cpu, mm);
+
+	if (!ctx) {
 		/* no-op */
+	} else if (cpu_has_mmid) {
+		/*
+		 * Globally invalidating TLB entries associated with the MMID
+		 * is pretty cheap using the GINVT instruction, so we'll do
+		 * that rather than incur the overhead of allocating a new
+		 * MMID. The latter would be especially difficult since MMIDs
+		 * are global & other CPUs may be actively using ctx.
+		 */
+		htw_stop();
+		old_mmid = read_c0_memorymapid();
+		write_c0_memorymapid(ctx & cpu_asid_mask(&cpu_data[cpu]));
+		mtc0_tlbw_hazard();
+		ginvt_mmid();
+		sync_ginv();
+		write_c0_memorymapid(old_mmid);
+		instruction_hazard();
+		htw_start();
 	} else if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
 		/*
 		 * mm is currently active, so we can't really drop it.
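
To summarise the control flow that drop_mmu_context() now has, here is an illustrative sketch of the cases it distinguishes, with the hardware access stubbed out; the last two branches follow the pre-existing ASID-only behaviour, and ginvt_mmid()/sync_ginv() come from asm/ginvt.h in the real code:

/* Illustrative sketch (not kernel code): the cases drop_mmu_context() handles. */
#include <stdint.h>
#include <stdio.h>

enum drop_action { DROP_NOTHING, DROP_GINVT_MMID, DROP_NEW_ASID, DROP_CLEAR_CTX };

static enum drop_action classify_drop(uint64_t ctx, int has_mmid, int mm_active_here)
{
	if (!ctx)
		return DROP_NOTHING;	/* mm never ran here: nothing to invalidate */
	if (has_mmid)
		return DROP_GINVT_MMID;	/* globally invalidate this MMID's TLB entries in place */
	if (mm_active_here)
		return DROP_NEW_ASID;	/* mm is live on this CPU: roll it to a new ASID */
	return DROP_CLEAR_CTX;		/* lazily invalidate by zeroing the stored ASID */
}

int main(void)
{
	printf("%d\n", classify_drop(0x100, 1, 0));	/* prints 1: DROP_GINVT_MMID */
	return 0;
}
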
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 95b18a194f53..d5e335e6846a 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -872,10 +872,19 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
 
 static inline unsigned int decode_config5(struct cpuinfo_mips *c)
 {
-	unsigned int config5;
+	unsigned int config5, max_mmid_width;
+	unsigned long asid_mask;
 
 	config5 = read_c0_config5();
 	config5 &= ~(MIPS_CONF5_UFR | MIPS_CONF5_UFE);
+
+	if (cpu_has_mips_r6) {
+		if (!__builtin_constant_p(cpu_has_mmid) || cpu_has_mmid)
+			config5 |= MIPS_CONF5_MI;
+		else
+			config5 &= ~MIPS_CONF5_MI;
+	}
+
 	write_c0_config5(config5);
 
 	if (config5 & MIPS_CONF5_EVA)
@@ -894,6 +903,50 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
 	if (config5 & MIPS_CONF5_CRCP)
 		elf_hwcap |= HWCAP_MIPS_CRC32;
 
+	if (cpu_has_mips_r6) {
+		/* Ensure the write to config5 above takes effect */
+		back_to_back_c0_hazard();
+
+		/* Check whether we successfully enabled MMID support */
+		config5 = read_c0_config5();
+		if (config5 & MIPS_CONF5_MI)
+			c->options |= MIPS_CPU_MMID;
+
+		/*
+		 * Warn if we've hardcoded cpu_has_mmid to a value unsuitable
+		 * for the CPU we're running on, or if CPUs in an SMP system
+		 * have inconsistent MMID support.
+		 */
+		WARN_ON(!!cpu_has_mmid != !!(config5 & MIPS_CONF5_MI));
+
+		if (cpu_has_mmid) {
+			write_c0_memorymapid(~0ul);
+			back_to_back_c0_hazard();
+			asid_mask = read_c0_memorymapid();
+
+			/*
+			 * We maintain a bitmap to track MMID allocation, and
+			 * need a sensible upper bound on the size of that
+			 * bitmap. The initial CPU with MMID support (I6500)
+			 * supports 16 bit MMIDs, which gives us an 8KiB
+			 * bitmap. The architecture recommends that hardware
+			 * support 32 bit MMIDs, which would give us a 512MiB
+			 * bitmap - that's too big in most cases.
+			 *
+			 * Cap MMID width at 16 bits for now & we can revisit
+			 * this if & when hardware supports anything wider.
+			 */
+			max_mmid_width = 16;
+			if (asid_mask > GENMASK(max_mmid_width - 1, 0)) {
+				pr_info("Capping MMID width at %d bits",
+					max_mmid_width);
+				asid_mask = GENMASK(max_mmid_width - 1, 0);
+			}
+
+			set_cpu_asid_mask(c, asid_mask);
+		}
+	}
+
 	return config5 & MIPS_CONF_M;
 }
 
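
The probe above discovers the implemented MMID width by writing all-ones to the MemoryMapID register and reading back the mask, then caps the width at 16 bits so the allocator bitmap stays at 8KiB. A standalone sketch of that arithmetic with a simulated register (hypothetical values, not kernel code):

/* Standalone sketch of the width probe and 16-bit cap. */
#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)	((((~0u) >> (31 - (h))) & (~0u << (l))))

static uint32_t hw_mmid_bits = 32;	/* pretend the CPU implements 32-bit MMIDs */

static uint32_t write_then_read_memorymapid(uint32_t val)
{
	/* a real CPU only keeps the implemented low bits */
	return hw_mmid_bits >= 32 ? val : (val & ((1u << hw_mmid_bits) - 1));
}

int main(void)
{
	uint32_t asid_mask = write_then_read_memorymapid(~0u);
	unsigned int max_mmid_width = 16;

	if (asid_mask > GENMASK32(max_mmid_width - 1, 0)) {
		printf("Capping MMID width at %u bits\n", max_mmid_width);
		asid_mask = GENMASK32(max_mmid_width - 1, 0);
	}

	/* 16 bits -> 65536 MMIDs -> an 8KiB allocation bitmap */
	printf("asid_mask=0x%x, bitmap size=%u bytes\n",
	       (unsigned)asid_mask, (unsigned)((asid_mask + 1) / 8));
	return 0;
}
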
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index f9dbd95e1d68..6fd9e94fc87e 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -39,6 +39,7 @@
 
 #include <linux/atomic.h>
 #include <asm/cpu.h>
+#include <asm/ginvt.h>
 #include <asm/processor.h>
 #include <asm/idle.h>
 #include <asm/r4k-timer.h>
@@ -482,6 +483,15 @@ static void flush_tlb_all_ipi(void *info)
 
 void flush_tlb_all(void)
 {
+	if (cpu_has_mmid) {
+		htw_stop();
+		ginvt_full();
+		sync_ginv();
+		instruction_hazard();
+		htw_start();
+		return;
+	}
+
 	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
 }
 
@@ -530,7 +540,12 @@ void flush_tlb_mm(struct mm_struct *mm)
 {
 	preempt_disable();
 
-	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+	if (cpu_has_mmid) {
+		/*
+		 * No need to worry about other CPUs - the ginvt in
+		 * drop_mmu_context() will be globalized.
+		 */
+	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
 	} else {
 		unsigned int cpu;
@@ -561,9 +576,26 @@ static void flush_tlb_range_ipi(void *info)
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	unsigned long addr;
+	u32 old_mmid;
 
 	preempt_disable();
-	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+	if (cpu_has_mmid) {
+		htw_stop();
+		old_mmid = read_c0_memorymapid();
+		write_c0_memorymapid(cpu_asid(0, mm));
+		mtc0_tlbw_hazard();
+		addr = round_down(start, PAGE_SIZE * 2);
+		end = round_up(end, PAGE_SIZE * 2);
+		do {
+			ginvt_va_mmid(addr);
+			sync_ginv();
+			addr += PAGE_SIZE * 2;
+		} while (addr < end);
+		write_c0_memorymapid(old_mmid);
+		instruction_hazard();
+		htw_start();
+	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		struct flush_tlb_data fd = {
 			.vma = vma,
 			.addr1 = start,
@@ -571,6 +603,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
 		};
 
 		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
+		local_flush_tlb_range(vma, start, end);
 	} else {
 		unsigned int cpu;
 		int exec = vma->vm_flags & VM_EXEC;
@@ -585,8 +618,8 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
 			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
 				set_cpu_context(cpu, mm, !exec);
 		}
+		local_flush_tlb_range(vma, start, end);
 	}
-	local_flush_tlb_range(vma, start, end);
 	preempt_enable();
 }
 
@@ -616,14 +649,28 @@ static void flush_tlb_page_ipi(void *info)
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
+	u32 old_mmid;
+
 	preempt_disable();
-	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
+	if (cpu_has_mmid) {
+		htw_stop();
+		old_mmid = read_c0_memorymapid();
+		write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
+		mtc0_tlbw_hazard();
+		ginvt_va_mmid(page);
+		sync_ginv();
+		write_c0_memorymapid(old_mmid);
+		instruction_hazard();
+		htw_start();
+	} else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
+		   (current->mm != vma->vm_mm)) {
 		struct flush_tlb_data fd = {
 			.vma = vma,
 			.addr1 = page,
 		};
 
 		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
+		local_flush_tlb_page(vma, page);
 	} else {
 		unsigned int cpu;
 
@@ -637,8 +684,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
 				set_cpu_context(cpu, vma->vm_mm, 1);
 		}
+		local_flush_tlb_page(vma, page);
 	}
-	local_flush_tlb_page(vma, page);
 	preempt_enable();
 }
 
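
The ginvt-based range flush above strides by two pages because each MIPS TLB entry maps an even/odd virtual page pair, so one invalidate per pair covers the range. A small standalone sketch of the address arithmetic, assuming 4KiB pages (no hardware access):

/* Standalone sketch: which addresses the ginvt loop in flush_tlb_range()
 * would touch for a given range, assuming 4KiB pages. */
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define round_down(x, a)	((x) & ~((a) - 1))
#define round_up(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long start = 0x12345678, end = 0x12351234;
	unsigned long addr = round_down(start, PAGE_SIZE * 2);

	end = round_up(end, PAGE_SIZE * 2);
	do {
		printf("ginvt_va_mmid(0x%lx)	/* covers the even/odd page pair */\n", addr);
		addr += PAGE_SIZE * 2;
	} while (addr < end);
	return 0;
}
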
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index c91097f7b32f..995249be64f1 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2223,7 +2223,9 @@ void per_cpu_trap_init(bool is_boot_cpu)
 		cp0_fdc_irq = -1;
 	}
 
-	if (!cpu_data[cpu].asid_cache)
+	if (cpu_has_mmid)
+		cpu_data[cpu].asid_cache = 0;
+	else if (!cpu_data[cpu].asid_cache)
 		cpu_data[cpu].asid_cache = asid_first_version(cpu);
 
 	mmgrab(&init_mm);
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 0ed20a64b285..76e33f940971 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -89,6 +89,7 @@
 #include <asm/fpu.h>
 #include <asm/fpu_emulator.h>
 #include <asm/inst.h>
+#include <asm/mmu_context.h>
 #include <linux/uaccess.h>
 
 #define STR(x)	__STR(x)
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 3734cd58895e..6d0517ac18e5 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -1723,6 +1723,11 @@ static int __init kvm_mips_init(void)
 {
 	int ret;
 
+	if (cpu_has_mmid) {
+		pr_warn("KVM does not yet support MMIDs. KVM Disabled\n");
+		return -EOPNOTSUPP;
+	}
+
 	ret = kvm_mips_entry_setup();
 	if (ret)
 		return ret;
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 781ad96b78c4..83ed37298e66 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -10,6 +10,7 @@
 
 #include <asm/hazards.h>
 #include <asm/mipsregs.h>
+#include <asm/mmu_context.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbdebug.h>
@@ -73,12 +74,13 @@ static inline const char *msk2str(unsigned int mask)
 
 static void dump_tlb(int first, int last)
 {
-	unsigned long s_entryhi, entryhi, asid;
+	unsigned long s_entryhi, entryhi, asid, mmid;
 	unsigned long long entrylo0, entrylo1, pa;
 	unsigned int s_index, s_pagemask, s_guestctl1 = 0;
 	unsigned int pagemask, guestctl1 = 0, c0, c1, i;
 	unsigned long asidmask = cpu_asid_mask(&current_cpu_data);
 	int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4);
+	unsigned long uninitialized_var(s_mmid);
 #ifdef CONFIG_32BIT
 	bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA);
 	int pwidth = xpa ? 11 : 8;
@@ -92,7 +94,12 @@ static void dump_tlb(int first, int last)
 	s_pagemask = read_c0_pagemask();
 	s_entryhi = read_c0_entryhi();
 	s_index = read_c0_index();
-	asid = s_entryhi & asidmask;
+
+	if (cpu_has_mmid)
+		asid = s_mmid = read_c0_memorymapid();
+	else
+		asid = s_entryhi & asidmask;
+
 	if (cpu_has_guestid)
 		s_guestctl1 = read_c0_guestctl1();
 
@@ -105,6 +112,12 @@ static void dump_tlb(int first, int last)
 		entryhi	 = read_c0_entryhi();
 		entrylo0 = read_c0_entrylo0();
 		entrylo1 = read_c0_entrylo1();
+
+		if (cpu_has_mmid)
+			mmid = read_c0_memorymapid();
+		else
+			mmid = entryhi & asidmask;
+
 		if (cpu_has_guestid)
 			guestctl1 = read_c0_guestctl1();
 
@@ -124,8 +137,7 @@ static void dump_tlb(int first, int last)
 		 * leave only a single G bit set after a machine check exception
 		 * due to duplicate TLB entry.
 		 */
-		if (!((entrylo0 | entrylo1) & ENTRYLO_G) &&
-		    (entryhi & asidmask) != asid)
+		if (!((entrylo0 | entrylo1) & ENTRYLO_G) && (mmid != asid))
 			continue;
 
 		/*
@@ -138,7 +150,7 @@ static void dump_tlb(int first, int last)
 
 		pr_cont("va=%0*lx asid=%0*lx",
 			vwidth, (entryhi & ~0x1fffUL),
-			asidwidth, entryhi & asidmask);
+			asidwidth, mmid);
 		if (cpu_has_guestid)
 			pr_cont(" gid=%02lx",
 				(guestctl1 & MIPS_GCTL1_RID)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 248d9e8263cf..cc4e17caeb26 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -540,6 +540,9 @@ static inline int has_valid_asid(const struct mm_struct *mm, unsigned int type)
 	unsigned int i;
 	const cpumask_t *mask = cpu_present_mask;
 
+	if (cpu_has_mmid)
+		return cpu_context(0, mm) != 0;
+
 	/* cpu_sibling_map[] undeclared when !CONFIG_SMP */
 #ifdef CONFIG_SMP
 	/*
diff --git a/arch/mips/mm/context.c b/arch/mips/mm/context.c
index dcaceee179f7..a6adae550788 100644
--- a/arch/mips/mm/context.c
+++ b/arch/mips/mm/context.c
@@ -1,11 +1,35 @@
 // SPDX-License-Identifier: GPL-2.0
+#include <linux/atomic.h>
 #include <linux/mmu_context.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+
+static DEFINE_RAW_SPINLOCK(cpu_mmid_lock);
+
+static atomic64_t mmid_version;
+static unsigned int num_mmids;
+static unsigned long *mmid_map;
+
+static DEFINE_PER_CPU(u64, reserved_mmids);
+static cpumask_t tlb_flush_pending;
+
+static bool asid_versions_eq(int cpu, u64 a, u64 b)
+{
+	return ((a ^ b) & asid_version_mask(cpu)) == 0;
+}
 
 void get_new_mmu_context(struct mm_struct *mm)
 {
 	unsigned int cpu;
 	u64 asid;
 
+	/*
+	 * This function is specific to ASIDs, and should not be called when
+	 * MMIDs are in use.
+	 */
+	if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
+		return;
+
 	cpu = smp_processor_id();
 	asid = asid_cache(cpu);
 
@@ -23,16 +47,242 @@ void check_mmu_context(struct mm_struct *mm)
 {
 	unsigned int cpu = smp_processor_id();
 
+	/*
+	 * This function is specific to ASIDs, and should not be called when
+	 * MMIDs are in use.
+	 */
+	if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
+		return;
+
 	/* Check if our ASID is of an older version and thus invalid */
-	if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & asid_version_mask(cpu))
+	if (!asid_versions_eq(cpu, cpu_context(cpu, mm), asid_cache(cpu)))
 		get_new_mmu_context(mm);
 }
 
+static void flush_context(void)
+{
+	u64 mmid;
+	int cpu;
+
+	/* Update the list of reserved MMIDs and the MMID bitmap */
+	bitmap_clear(mmid_map, 0, num_mmids);
+
+	/* Reserve an MMID for kmap/wired entries */
+	__set_bit(MMID_KERNEL_WIRED, mmid_map);
+
+	for_each_possible_cpu(cpu) {
+		mmid = xchg_relaxed(&cpu_data[cpu].asid_cache, 0);
+
+		/*
+		 * If this CPU has already been through a
+		 * rollover, but hasn't run another task in
+		 * the meantime, we must preserve its reserved
+		 * MMID, as this is the only trace we have of
+		 * the process it is still running.
+		 */
+		if (mmid == 0)
+			mmid = per_cpu(reserved_mmids, cpu);
+
+		__set_bit(mmid & cpu_asid_mask(&cpu_data[cpu]), mmid_map);
+		per_cpu(reserved_mmids, cpu) = mmid;
+	}
+
+	/*
+	 * Queue a TLB invalidation for each CPU to perform on next
+	 * context-switch
+	 */
+	cpumask_setall(&tlb_flush_pending);
+}
+
+static bool check_update_reserved_mmid(u64 mmid, u64 newmmid)
+{
+	bool hit;
+	int cpu;
+
+	/*
+	 * Iterate over the set of reserved MMIDs looking for a match.
+	 * If we find one, then we can update our mm to use newmmid
+	 * (i.e. the same MMID in the current generation) but we can't
+	 * exit the loop early, since we need to ensure that all copies
+	 * of the old MMID are updated to reflect the mm. Failure to do
+	 * so could result in us missing the reserved MMID in a future
+	 * generation.
+	 */
+	hit = false;
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(reserved_mmids, cpu) == mmid) {
+			hit = true;
+			per_cpu(reserved_mmids, cpu) = newmmid;
+		}
+	}
+
+	return hit;
+}
+
+static u64 get_new_mmid(struct mm_struct *mm)
+{
+	static u32 cur_idx = MMID_KERNEL_WIRED + 1;
+	u64 mmid, version, mmid_mask;
+
+	mmid = cpu_context(0, mm);
+	version = atomic64_read(&mmid_version);
+	mmid_mask = cpu_asid_mask(&boot_cpu_data);
+
+	if (!asid_versions_eq(0, mmid, 0)) {
+		u64 newmmid = version | (mmid & mmid_mask);
+
+		/*
+		 * If our current MMID was active during a rollover, we
+		 * can continue to use it and this was just a false alarm.
+		 */
+		if (check_update_reserved_mmid(mmid, newmmid)) {
+			mmid = newmmid;
+			goto set_context;
+		}
+
+		/*
+		 * We had a valid MMID in a previous life, so try to re-use
+		 * it if possible.
+		 */
+		if (!__test_and_set_bit(mmid & mmid_mask, mmid_map)) {
+			mmid = newmmid;
+			goto set_context;
+		}
+	}
+
+	/* Allocate a free MMID */
+	mmid = find_next_zero_bit(mmid_map, num_mmids, cur_idx);
+	if (mmid != num_mmids)
+		goto reserve_mmid;
+
+	/* We're out of MMIDs, so increment the global version */
+	version = atomic64_add_return_relaxed(asid_first_version(0),
+					      &mmid_version);
+
+	/* Note currently active MMIDs & mark TLBs as requiring flushes */
+	flush_context();
+
+	/* We have more MMIDs than CPUs, so this will always succeed */
+	mmid = find_first_zero_bit(mmid_map, num_mmids);
+
+reserve_mmid:
+	__set_bit(mmid, mmid_map);
+	cur_idx = mmid;
+	mmid |= version;
+set_context:
+	set_cpu_context(0, mm, mmid);
+	return mmid;
+}
+
 void check_switch_mmu_context(struct mm_struct *mm)
 {
 	unsigned int cpu = smp_processor_id();
+	u64 ctx, old_active_mmid;
+	unsigned long flags;
 
-	check_mmu_context(mm);
-	write_c0_entryhi(cpu_asid(cpu, mm));
+	if (!cpu_has_mmid) {
+		check_mmu_context(mm);
+		write_c0_entryhi(cpu_asid(cpu, mm));
+		goto setup_pgd;
+	}
+
+	/*
+	 * MMID switch fast-path, to avoid acquiring cpu_mmid_lock when it's
+	 * unnecessary.
+	 *
+	 * The memory ordering here is subtle. If our active_mmids is non-zero
+	 * and the MMID matches the current version, then we update the CPU's
+	 * asid_cache with a relaxed cmpxchg. Racing with a concurrent rollover
+	 * means that either:
+	 *
+	 * - We get a zero back from the cmpxchg and end up waiting on
+	 *   cpu_mmid_lock in check_mmu_context(). Taking the lock synchronises
+	 *   with the rollover and so we are forced to see the updated
+	 *   generation.
+	 *
+	 * - We get a valid MMID back from the cmpxchg, which means the
+	 *   relaxed xchg in flush_context will treat us as reserved
+	 *   because atomic RmWs are totally ordered for a given location.
+	 */
+	ctx = cpu_context(cpu, mm);
+	old_active_mmid = READ_ONCE(cpu_data[cpu].asid_cache);
+	if (!old_active_mmid ||
+	    !asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)) ||
+	    !cmpxchg_relaxed(&cpu_data[cpu].asid_cache, old_active_mmid, ctx)) {
+		raw_spin_lock_irqsave(&cpu_mmid_lock, flags);
+
+		ctx = cpu_context(cpu, mm);
+		if (!asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)))
+			ctx = get_new_mmid(mm);
+
+		WRITE_ONCE(cpu_data[cpu].asid_cache, ctx);
+		raw_spin_unlock_irqrestore(&cpu_mmid_lock, flags);
+	}
+
+	/*
+	 * Invalidate the local TLB if needed. Note that we must only clear our
+	 * bit in tlb_flush_pending after this is complete, so that the
+	 * cpu_has_shared_ftlb_entries case below isn't misled.
+	 */
+	if (cpumask_test_cpu(cpu, &tlb_flush_pending)) {
+		if (cpu_has_vtag_icache)
+			flush_icache_all();
+		local_flush_tlb_all();
+		cpumask_clear_cpu(cpu, &tlb_flush_pending);
+	}
+
+	write_c0_memorymapid(ctx & cpu_asid_mask(&boot_cpu_data));
+
+	/*
+	 * If this CPU shares FTLB entries with its siblings and one or more of
+	 * those siblings hasn't yet invalidated its TLB following a version
+	 * increase then we need to invalidate any TLB entries for our MMID
+	 * that we might otherwise pick up from a sibling.
+	 *
+	 * We ifdef on CONFIG_SMP because cpu_sibling_map isn't defined in
+	 * CONFIG_SMP=n kernels.
+	 */
+#ifdef CONFIG_SMP
+	if (cpu_has_shared_ftlb_entries &&
+	    cpumask_intersects(&tlb_flush_pending, &cpu_sibling_map[cpu])) {
+		/* Ensure we operate on the new MMID */
+		mtc0_tlbw_hazard();
+
+		/*
+		 * Invalidate all TLB entries associated with the new
+		 * MMID, and wait for the invalidation to complete.
+		 */
+		ginvt_mmid();
+		sync_ginv();
+	}
+#endif
+
+setup_pgd:
 	TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
 }
+
+static int mmid_init(void)
+{
+	if (!cpu_has_mmid)
+		return 0;
+
+	/*
+	 * Expect allocation after rollover to fail if we don't have at least
+	 * one more MMID than CPUs.
+	 */
+	num_mmids = asid_first_version(0);
+	WARN_ON(num_mmids <= num_possible_cpus());
+
+	atomic64_set(&mmid_version, asid_first_version(0));
+	mmid_map = kcalloc(BITS_TO_LONGS(num_mmids), sizeof(*mmid_map),
+			   GFP_KERNEL);
+	if (!mmid_map)
+		panic("Failed to allocate bitmap for %u MMIDs\n", num_mmids);
+
+	/* Reserve an MMID for kmap/wired entries */
+	__set_bit(MMID_KERNEL_WIRED, mmid_map);
+
+	pr_info("MMID allocator initialised with %u entries\n", num_mmids);
+	return 0;
+}
+early_initcall(mmid_init);
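
The allocator added in context.c follows the same generation-plus-bitmap scheme as the arm64 ASID allocator: the upper bits of the 64-bit context value carry a global version, the low bits index a bitmap, and on rollover the ID currently live on each CPU is re-reserved so running tasks stay valid. A much-simplified, single-threaded toy model of that rollover mechanic (tiny hypothetical sizes, no locking, not the kernel code):

/* Toy model of generation + bitmap ID allocation with per-CPU reservations. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_IDS		8		/* tiny on purpose, to force rollovers */
#define NR_CPUS		2
#define ID_MASK		(NUM_IDS - 1)
#define VERSION_INC	((uint64_t)NUM_IDS)

static uint64_t version = VERSION_INC;
static unsigned char id_map[NUM_IDS];	/* 1 = in use this generation */
static uint64_t reserved[NR_CPUS];	/* ID currently running on each CPU */

static void flush_generation(void)
{
	memset(id_map, 0, sizeof(id_map));
	id_map[0] = 1;				/* keep ID 0 for wired/kernel use */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		id_map[reserved[cpu] & ID_MASK] = 1;	/* keep running tasks valid */
}

static uint64_t get_new_id(uint64_t old, int cpu)
{
	/* Try to re-use the low bits of the old ID in the new generation. */
	if (old && !id_map[old & ID_MASK]) {
		id_map[old & ID_MASK] = 1;
	} else {
		unsigned int i;

		for (i = 1; i < NUM_IDS && id_map[i]; i++)
			;
		if (i == NUM_IDS) {		/* out of IDs: start a new generation */
			version += VERSION_INC;
			flush_generation();
			/* always succeeds: there are more IDs than CPUs */
			for (i = 1; id_map[i]; i++)
				;
		}
		id_map[i] = 1;
		old = i;
	}
	reserved[cpu] = (old & ID_MASK) | version;
	return reserved[cpu];
}

int main(void)
{
	for (int n = 0; n < 20; n++)
		printf("task %2d -> id %#llx\n", n,
		       (unsigned long long)get_new_id(0, n % NR_CPUS));
	return 0;
}
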
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index b521d8e2d359..c3b45e248806 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -84,6 +84,7 @@ void setup_zero_pages(void)
 static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
 {
 	enum fixed_addresses idx;
+	unsigned int uninitialized_var(old_mmid);
 	unsigned long vaddr, flags, entrylo;
 	unsigned long old_ctx;
 	pte_t pte;
@@ -110,6 +111,10 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
 	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
 	write_c0_entrylo0(entrylo);
 	write_c0_entrylo1(entrylo);
+	if (cpu_has_mmid) {
+		old_mmid = read_c0_memorymapid();
+		write_c0_memorymapid(MMID_KERNEL_WIRED);
+	}
 #ifdef CONFIG_XPA
 	if (cpu_has_xpa) {
 		entrylo = (pte.pte_low & _PFNX_MASK);
@@ -124,6 +129,8 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
 	tlb_write_indexed();
 	tlbw_use_hazard();
 	write_c0_entryhi(old_ctx);
+	if (cpu_has_mmid)
+		write_c0_memorymapid(old_mmid);
 	local_irq_restore(flags);
 
 	return (void*) vaddr;
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 0114c43398f3..c13e46ced425 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -120,14 +120,23 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	if (size <= (current_cpu_data.tlbsizeftlbsets ?
 		     current_cpu_data.tlbsize / 8 :
 		     current_cpu_data.tlbsize / 2)) {
-		int oldpid = read_c0_entryhi();
+		unsigned long old_entryhi, uninitialized_var(old_mmid);
 		int newpid = cpu_asid(cpu, mm);
 
+		old_entryhi = read_c0_entryhi();
+		if (cpu_has_mmid) {
+			old_mmid = read_c0_memorymapid();
+			write_c0_memorymapid(newpid);
+		}
+
 		htw_stop();
 		while (start < end) {
 			int idx;
 
-			write_c0_entryhi(start | newpid);
+			if (cpu_has_mmid)
+				write_c0_entryhi(start);
+			else
+				write_c0_entryhi(start | newpid);
 			start += (PAGE_SIZE << 1);
 			mtc0_tlbw_hazard();
 			tlb_probe();
@@ -143,7 +152,9 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			tlb_write_indexed();
 		}
 		tlbw_use_hazard();
-		write_c0_entryhi(oldpid);
+		write_c0_entryhi(old_entryhi);
+		if (cpu_has_mmid)
+			write_c0_memorymapid(old_mmid);
 		htw_start();
 	} else {
 		drop_mmu_context(mm);
@@ -203,15 +214,21 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 	int cpu = smp_processor_id();
 
 	if (cpu_context(cpu, vma->vm_mm) != 0) {
-		unsigned long flags;
-		int oldpid, newpid, idx;
+		unsigned long uninitialized_var(old_mmid);
+		unsigned long flags, old_entryhi;
+		int idx;
 
-		newpid = cpu_asid(cpu, vma->vm_mm);
 		page &= (PAGE_MASK << 1);
 		local_irq_save(flags);
-		oldpid = read_c0_entryhi();
+		old_entryhi = read_c0_entryhi();
 		htw_stop();
-		write_c0_entryhi(page | newpid);
+		if (cpu_has_mmid) {
+			old_mmid = read_c0_memorymapid();
+			write_c0_entryhi(page);
+			write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
+		} else {
+			write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
+		}
 		mtc0_tlbw_hazard();
 		tlb_probe();
 		tlb_probe_hazard();
@@ -227,7 +244,9 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 		tlbw_use_hazard();
 
 	finish:
-		write_c0_entryhi(oldpid);
+		write_c0_entryhi(old_entryhi);
+		if (cpu_has_mmid)
+			write_c0_memorymapid(old_mmid);
 		htw_start();
 		flush_micro_tlb_vm(vma);
 		local_irq_restore(flags);
@@ -290,9 +309,13 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	local_irq_save(flags);
 
 	htw_stop();
-	pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
 	address &= (PAGE_MASK << 1);
-	write_c0_entryhi(address | pid);
+	if (cpu_has_mmid) {
+		write_c0_entryhi(address);
+	} else {
+		pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
+		write_c0_entryhi(address | pid);
+	}
 	pgdp = pgd_offset(vma->vm_mm, address);
 	mtc0_tlbw_hazard();
 	tlb_probe();
@@ -358,12 +381,17 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 #ifdef CONFIG_XPA
 	panic("Broken for XPA kernels");
 #else
+	unsigned int uninitialized_var(old_mmid);
 	unsigned long flags;
 	unsigned long wired;
 	unsigned long old_pagemask;
 	unsigned long old_ctx;
 
 	local_irq_save(flags);
+	if (cpu_has_mmid) {
+		old_mmid = read_c0_memorymapid();
+		write_c0_memorymapid(MMID_KERNEL_WIRED);
+	}
 	/* Save old context and create impossible VPN2 value */
 	old_ctx = read_c0_entryhi();
 	htw_stop();
@@ -381,6 +409,8 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 	tlbw_use_hazard();
 
 	write_c0_entryhi(old_ctx);
+	if (cpu_has_mmid)
+		write_c0_memorymapid(old_mmid);
 	tlbw_use_hazard();	/* What is the hazard here? */
 	htw_start();
 	write_c0_pagemask(old_pagemask);
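
The init.c and tlb-r4k.c hunks share one pattern: save the live MemoryMapID, switch to the reserved MMID_KERNEL_WIRED while writing a wired entry so a later ginvt aimed at a process MMID can never hit it, then restore. A compact sketch of that pattern with the register accessors stubbed (read_c0_memorymapid()/write_c0_memorymapid() in the real code):

/* Illustrative pattern only; the accessors below stand in for the real
 * coprocessor 0 register reads/writes. */
#include <stdint.h>

#define MMID_KERNEL_WIRED	0

static uint32_t fake_memorymapid;
static uint32_t read_memorymapid(void)		{ return fake_memorymapid; }
static void write_memorymapid(uint32_t v)	{ fake_memorymapid = v; }
static void write_wired_tlb_entry(void)	{ /* tlb_write_indexed() etc. */ }

static void add_kernel_wired_entry(int has_mmid)
{
	uint32_t old_mmid = 0;

	if (has_mmid) {
		old_mmid = read_memorymapid();
		write_memorymapid(MMID_KERNEL_WIRED);	/* tag the entry with the reserved MMID */
	}

	write_wired_tlb_entry();

	if (has_mmid)
		write_memorymapid(old_mmid);		/* restore the process MMID */
}

int main(void)
{
	fake_memorymapid = 5;		/* pretend a user MMID is live */
	add_kernel_wired_entry(1);
	return fake_memorymapid == 5 ? 0 : 1;	/* MMID restored around the wired write */
}
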