Diffstat (limited to 'arch/mips/include')

-rw-r--r--  arch/mips/include/asm/kvm_host.h    |  2
-rw-r--r--  arch/mips/include/asm/mmu_context.h | 95
2 files changed, 37 insertions, 60 deletions
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index e68781e18387..143875c6c95a 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -336,7 +336,7 @@ enum emulation_result {
 #define VPN2_MASK           0xffffe000
 #define TLB_IS_GLOBAL(x)    (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G))
 #define TLB_VPN2(x)         ((x).tlb_hi & VPN2_MASK)
-#define TLB_ASID(x)         (ASID_MASK((x).tlb_hi))
+#define TLB_ASID(x)         ((x).tlb_hi & ASID_MASK)
 #define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V))
 
 struct kvm_mips_tlb {
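For illustration, a minimal standalone sketch of what the new TLB_ASID()/TLB_VPN2() forms compute, assuming the common R4000-style layout with an 8-bit ASID in the low bits of EntryHi. The struct access is simplified to a plain value and the EntryHi number is made up:

/* Hypothetical example; not part of the patch. */
#include <assert.h>
#include <stdint.h>

#define ASID_MASK 0xffUL        /* assumed 8-bit hardware ASID field */
#define VPN2_MASK 0xffffe000UL

#define TLB_ASID(hi) ((hi) & ASID_MASK)
#define TLB_VPN2(hi) ((hi) & VPN2_MASK)

int main(void)
{
        uint64_t tlb_hi = 0x400020a5;           /* made-up EntryHi value    */
        assert(TLB_VPN2(tlb_hi) == 0x40002000); /* virtual page-pair number */
        assert(TLB_ASID(tlb_hi) == 0xa5);       /* ASID in the low bits     */
        return 0;
}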
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 1554721e4808..820116067c10 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -67,68 +67,45 @@ extern unsigned long pgd_current[];
 	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
 #endif
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 
-#define ASID_INC(asid)						\
-({								\
-	unsigned long __asid = asid;				\
-	__asm__("1:\taddiu\t%0,1\t\t\t\t# patched\n\t"		\
-	".section\t__asid_inc,\"a\"\n\t"			\
-	".word\t1b\n\t"						\
-	".previous"						\
-	:"=r" (__asid)						\
-	:"0" (__asid));						\
-	__asid;							\
-})
-#define ASID_MASK(asid)						\
-({								\
-	unsigned long __asid = asid;				\
-	__asm__("1:\tandi\t%0,%1,0xfc0\t\t\t# patched\n\t"	\
-	".section\t__asid_mask,\"a\"\n\t"			\
-	".word\t1b\n\t"						\
-	".previous"						\
-	:"=r" (__asid)						\
-	:"r" (__asid));						\
-	__asid;							\
-})
-#define ASID_VERSION_MASK					\
-({								\
-	unsigned long __asid;					\
-	__asm__("1:\taddiu\t%0,$0,0xff00\t\t\t\t# patched\n\t"	\
-	".section\t__asid_version_mask,\"a\"\n\t"		\
-	".word\t1b\n\t"						\
-	".previous"						\
-	:"=r" (__asid));					\
-	__asid;							\
-})
-#define ASID_FIRST_VERSION					\
-({								\
-	unsigned long __asid = asid;				\
-	__asm__("1:\tli\t%0,0x100\t\t\t\t# patched\n\t"		\
-	".section\t__asid_first_version,\"a\"\n\t"		\
-	".word\t1b\n\t"						\
-	".previous"						\
-	:"=r" (__asid));					\
-	__asid;							\
-})
-
-#define ASID_FIRST_VERSION_R3000	0x1000
-#define ASID_FIRST_VERSION_R4000	0x100
-#define ASID_FIRST_VERSION_R8000	0x1000
-#define ASID_FIRST_VERSION_RM9000	0x1000
+#define ASID_INC	0x40
+#define ASID_MASK	0xfc0
+
+#elif defined(CONFIG_CPU_R8000)
+
+#define ASID_INC	0x10
+#define ASID_MASK	0xff0
+
+#elif defined(CONFIG_MIPS_MT_SMTC)
+
+#define ASID_INC	0x1
+extern unsigned long smtc_asid_mask;
+#define ASID_MASK	(smtc_asid_mask)
+#define HW_ASID_MASK	0xff
+/* End SMTC/34K debug hack */
+#else /* FIXME: not correct for R6000 */
+
+#define ASID_INC	0x1
+#define ASID_MASK	0xff
 
-#ifdef CONFIG_MIPS_MT_SMTC
-#define SMTC_HW_ASID_MASK		0xff
-extern unsigned int smtc_asid_mask;
 #endif
 
 #define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
-#define cpu_asid(cpu, mm)	ASID_MASK(cpu_context((cpu), (mm)))
+#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & ASID_MASK)
 #define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
 
+/*
+ *  All unused by hardware upper bits will be considered
+ *  as a software asid extension.
+ */
+#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
+#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
+
 #ifndef CONFIG_MIPS_MT_SMTC
 /* Normal, classic MIPS get_new_mmu_context */
 static inline void
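The restored ASID_VERSION_MASK/ASID_FIRST_VERSION definitions split asid_cache into a hardware ASID in the low bits and a software version counter in all remaining upper bits. A minimal sketch of that arithmetic, assuming the generic 8-bit ASID_MASK of 0xff (the R3000 case with 0xfc0 works the same way and yields 0x1000 for the first version):

/* Worked example only; constants mirror the generic #else branch above. */
#include <assert.h>

#define ASID_MASK          0xffUL
#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)

int main(void)
{
        /* ORing in (ASID_MASK-1) also covers masks such as 0xfc0 whose
         * low bits are not part of the mask itself. */
        assert(ASID_VERSION_MASK == ~0xffUL);
        assert(ASID_FIRST_VERSION == 0x100UL);

        /* Made-up cached value: software version 3, hardware ASID 0x2a. */
        unsigned long cached = 3 * ASID_FIRST_VERSION + 0x2a;
        assert((cached & ASID_MASK) == 0x2a);          /* what reaches EntryHi  */
        assert((cached & ASID_VERSION_MASK) == 0x300); /* software-only extension */
        return 0;
}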
@@ -137,7 +114,7 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 	extern void kvm_local_flush_tlb_all(void);
 	unsigned long asid = asid_cache(cpu);
 
-	if (!ASID_MASK((asid = ASID_INC(asid)))) {
+	if (! ((asid += ASID_INC) & ASID_MASK) ) {
 		if (cpu_has_vtag_icache)
 			flush_icache_all();
 #ifdef CONFIG_VIRTUALIZATION
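A small model of the rollover test in the hunk above, assuming ASID_INC == 1 and ASID_MASK == 0xff. The helper name is made up and the flush work is reduced to a comment; the real get_new_mmu_context() flushes the TLB (and a virtually tagged I-cache) at that point before handing out the new value:

/* Loose model of the rollover logic; illustration only. */
#include <assert.h>

#define ASID_INC            0x1UL
#define ASID_MASK           0xffUL
#define ASID_FIRST_VERSION  0x100UL

static unsigned long new_context(unsigned long asid)
{
        if (!((asid += ASID_INC) & ASID_MASK)) {
                /* hardware field wrapped: every ASID of the old version is
                 * stale, so the real code flushes here and bumps the version */
                if (!asid)                      /* full overflow of the counter */
                        asid = ASID_FIRST_VERSION;
        }
        return asid;
}

int main(void)
{
        assert(new_context(0x1fe) == 0x1ff);    /* normal increment            */
        assert(new_context(0x1ff) == 0x200);    /* wrap: flush, next version   */
        return 0;
}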
@@ -200,7 +177,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	 * free up the ASID value for use and flush any old
 	 * instances of it from the TLB.
 	 */
-	oldasid = ASID_MASK(read_c0_entryhi());
+	oldasid = (read_c0_entryhi() & ASID_MASK);
 	if(smtc_live_asid[mytlb][oldasid]) {
 		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
 		if(smtc_live_asid[mytlb][oldasid] == 0)
@@ -211,7 +188,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	 * having ASID_MASK smaller than the hardware maximum,
 	 * make sure no "soft" bits become "hard"...
 	 */
-	write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
+	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
			 cpu_asid(cpu, next));
 	ehb(); /* Make sure it propagates to TCStatus */
 	evpe(mtflags);
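Under SMTC the software ASID_MASK can be narrower than the 8-bit field the hardware actually decodes, which is why the code above clears ~HW_ASID_MASK rather than ~ASID_MASK before installing the new ASID. A minimal sketch with made-up values (the 3-bit software mask is an assumption for the example):

/* Illustration of the EntryHi update; not kernel code. */
#include <assert.h>

#define HW_ASID_MASK 0xffUL
#define ASID_MASK    0x07UL     /* assumed narrowed SMTC software mask */

static unsigned long update_entryhi(unsigned long entryhi, unsigned long next_asid)
{
        /* Clear the full hardware ASID field, then OR in only the bits the
         * software mask allows, so no stale "soft" bits become "hard". */
        return (entryhi & ~HW_ASID_MASK) | (next_asid & ASID_MASK);
}

int main(void)
{
        /* Old EntryHi carries VPN2 bits plus hardware ASID 0xa5. */
        unsigned long hi = update_entryhi(0x400020a5, 0x133);
        assert(hi == 0x40002003);       /* only the 3 allowed ASID bits survive */
        return 0;
}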
@@ -264,15 +241,15 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
 #ifdef CONFIG_MIPS_MT_SMTC
 	/* See comments for similar code above */
 	mtflags = dvpe();
-	oldasid = ASID_MASK(read_c0_entryhi());
+	oldasid = read_c0_entryhi() & ASID_MASK;
 	if(smtc_live_asid[mytlb][oldasid]) {
 		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
 		if(smtc_live_asid[mytlb][oldasid] == 0)
 			smtc_flush_tlb_asid(oldasid);
 	}
 	/* See comments for similar code above */
-	write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
+	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
			 cpu_asid(cpu, next));
 	ehb(); /* Make sure it propagates to TCStatus */
 	evpe(mtflags);
 #else
@@ -309,14 +286,14 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
 #ifdef CONFIG_MIPS_MT_SMTC
 	/* See comments for similar code above */
 	prevvpe = dvpe();
-	oldasid = ASID_MASK(read_c0_entryhi());
+	oldasid = (read_c0_entryhi() & ASID_MASK);
 	if (smtc_live_asid[mytlb][oldasid]) {
 		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
 		if(smtc_live_asid[mytlb][oldasid] == 0)
 			smtc_flush_tlb_asid(oldasid);
 	}
 	/* See comments for similar code above */
-	write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK)
+	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
			 | cpu_asid(cpu, mm));
 	ehb(); /* Make sure it propagates to TCStatus */
 	evpe(prevvpe);