author		Thiemo Seufer <ths@networkno.de>	2005-04-02 05:21:56 -0500
committer	Ralf Baechle <ralf@linux-mips.org>	2005-10-29 14:31:01 -0400
commit		172546bf601356f94f8018af7908a9b7c1c4915c (patch)
tree		3b0d93bc5caf6014ef5554b05cad8b41b7c83b94 /arch/mips/mm
parent		202d0388e747d7e9b70fc0efc2a5637812b722c1 (diff)
Fix race conditions for read_c0_entryhi. Remove broken ASID masks in
tlb-sb1.c. Make tlb-r4k.c and tlb-sb1.c more similar and more efficient.

Signed-off-by: Thiemo Seufer <ths@networkno.de>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
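The race being fixed is purely one of ordering: the current ASID lives in the
low bits of the c0_entryhi register, and an interrupt taken between sampling it
and disabling interrupts can change it (a concurrent flush may retire the
ASID), so the value later written back is stale. A minimal sketch of the
before/after pattern, condensed from the hunks below (register accessors as in
the patch):

	/* Before: racy. An interrupt can hit between the read and
	 * local_irq_save(), leaving a stale value in oldpid. */
	oldpid = read_c0_entryhi();
	local_irq_save(flags);
	/* ... probe and rewrite TLB entries ... */
	write_c0_entryhi(oldpid);
	local_irq_restore(flags);

	/* After: c0_entryhi is sampled only inside the irq-off section,
	 * so what is restored is exactly what was live on entry. */
	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	/* ... probe and rewrite TLB entries ... */
	write_c0_entryhi(oldpid);
	local_irq_restore(flags);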
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--	arch/mips/mm/tlb-r4k.c	49
-rw-r--r--	arch/mips/mm/tlb-sb1.c	59
2 files changed, 63 insertions(+), 45 deletions(-)
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 08702202758d..316c8a3d6b53 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -21,6 +21,12 @@
 
 extern void build_tlb_refill_handler(void);
 
+/*
+ * Make sure all entries differ. If they're not different
+ * MIPS32 will take revenge ...
+ */
+#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
+
 /* CP0 hazard avoidance. */
 #define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
 				     "nop; nop; nop; nop; nop; nop;\n\t" \
@@ -42,11 +48,8 @@ void local_flush_tlb_all(void)
 
 	/* Blast 'em all away. */
 	while (entry < current_cpu_data.tlbsize) {
-		/*
-		 * Make sure all entries differ. If they're not different
-		 * MIPS32 will take revenge ...
-		 */
-		write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
+		/* Make sure all entries differ. */
+		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
 		write_c0_index(entry);
 		mtc0_tlbw_hazard();
 		tlb_write_indexed();
@@ -57,12 +60,21 @@ void local_flush_tlb_all(void)
 	local_irq_restore(flags);
 }
 
+/* All entries common to a mm share an asid. To effectively flush
+   these entries, we just bump the asid. */
 void local_flush_tlb_mm(struct mm_struct *mm)
 {
-	int cpu = smp_processor_id();
+	int cpu;
+
+	preempt_disable();
 
-	if (cpu_context(cpu, mm) != 0)
-		drop_mmu_context(mm,cpu);
+	cpu = smp_processor_id();
+
+	if (cpu_context(cpu, mm) != 0) {
+		drop_mmu_context(mm, cpu);
+	}
+
+	preempt_enable();
 }
 
 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
@@ -75,9 +87,9 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	unsigned long flags;
 	int size;
 
-	local_irq_save(flags);
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	size = (size + 1) >> 1;
+	local_irq_save(flags);
 	if (size <= current_cpu_data.tlbsize/2) {
 		int oldpid = read_c0_entryhi();
 		int newpid = cpu_asid(cpu, mm);
@@ -99,8 +111,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			if (idx < 0)
 				continue;
 			/* Make sure all entries differ. */
-			write_c0_entryhi(CKSEG0 +
-					 (idx << (PAGE_SHIFT + 1)));
+			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
 			mtc0_tlbw_hazard();
 			tlb_write_indexed();
 		}
@@ -118,9 +129,9 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	unsigned long flags;
 	int size;
 
-	local_irq_save(flags);
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	size = (size + 1) >> 1;
+	local_irq_save(flags);
 	if (size <= current_cpu_data.tlbsize / 2) {
 		int pid = read_c0_entryhi();
 
@@ -142,7 +153,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 		if (idx < 0)
 			continue;
 		/* Make sure all entries differ. */
-		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
+		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
 		mtc0_tlbw_hazard();
 		tlb_write_indexed();
 	}
@@ -176,7 +187,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 		if (idx < 0)
 			goto finish;
 		/* Make sure all entries differ. */
-		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
+		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
 		mtc0_tlbw_hazard();
 		tlb_write_indexed();
 		tlbw_use_hazard();
@@ -197,8 +208,8 @@ void local_flush_tlb_one(unsigned long page)
 	int oldpid, idx;
 
 	local_irq_save(flags);
-	page &= (PAGE_MASK << 1);
 	oldpid = read_c0_entryhi();
+	page &= (PAGE_MASK << 1);
 	write_c0_entryhi(page);
 	mtc0_tlbw_hazard();
 	tlb_probe();
@@ -208,7 +219,7 @@ void local_flush_tlb_one(unsigned long page)
 	write_c0_entrylo1(0);
 	if (idx >= 0) {
 		/* Make sure all entries differ. */
-		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
+		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
 		mtc0_tlbw_hazard();
 		tlb_write_indexed();
 		tlbw_use_hazard();
@@ -238,9 +249,9 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	if (current->active_mm != vma->vm_mm)
 		return;
 
-	pid = read_c0_entryhi() & ASID_MASK;
-
 	local_irq_save(flags);
+
+	pid = read_c0_entryhi() & ASID_MASK;
 	address &= (PAGE_MASK << 1);
 	write_c0_entryhi(address | pid);
 	pgdp = pgd_offset(vma->vm_mm, address);
@@ -260,14 +271,12 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	write_c0_entrylo0(pte_val(*ptep++) >> 6);
 	write_c0_entrylo1(pte_val(*ptep) >> 6);
 #endif
-	write_c0_entryhi(address | pid);
 	mtc0_tlbw_hazard();
 	if (idx < 0)
 		tlb_write_random();
 	else
 		tlb_write_indexed();
 	tlbw_use_hazard();
-	write_c0_entryhi(pid);
 	local_irq_restore(flags);
 }
 
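Before the tlb-sb1.c half of the diff, it is worth spelling out the
UNIQUE_ENTRYHI() macro that tlb-r4k.c gains above (tlb-sb1.c already uses it).
Each MIPS TLB entry maps an even/odd pair of pages through EntryLo0/EntryLo1,
so distinct VPN2 values must be spaced two pages apart, and basing them in the
unmapped CKSEG0 segment guarantees they can never match a real access. A worked
sketch, assuming 4 KB pages (PAGE_SHIFT == 12; the expansions are
illustrative):

	#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))

	/* With PAGE_SHIFT == 12, successive indices land two pages apart:
	 *   UNIQUE_ENTRYHI(0) -> CKSEG0 + 0x0000
	 *   UNIQUE_ENTRYHI(1) -> CKSEG0 + 0x2000
	 *   UNIQUE_ENTRYHI(2) -> CKSEG0 + 0x4000
	 * Every invalidated slot thus carries a VPN2 no other slot shares;
	 * duplicate matches are what the "MIPS32 will take revenge"
	 * comment alludes to (the CPU raises a machine check). */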
diff --git a/arch/mips/mm/tlb-sb1.c b/arch/mips/mm/tlb-sb1.c
index 6256cafcf3a2..bba7130e7547 100644
--- a/arch/mips/mm/tlb-sb1.c
+++ b/arch/mips/mm/tlb-sb1.c
@@ -94,7 +94,7 @@ void local_flush_tlb_all(void)
 
 	local_irq_save(flags);
 	/* Save old context and create impossible VPN2 value */
-	old_ctx = read_c0_entryhi() & ASID_MASK;
+	old_ctx = read_c0_entryhi();
 	write_c0_entrylo0(0);
 	write_c0_entrylo1(0);
 
@@ -144,17 +144,17 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			  unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	unsigned long flags;
-	int cpu;
+	int cpu = smp_processor_id();
 
-	local_irq_save(flags);
-	cpu = smp_processor_id();
 	if (cpu_context(cpu, mm) != 0) {
+		unsigned long flags;
 		int size;
+
 		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 		size = (size + 1) >> 1;
+		local_irq_save(flags);
 		if (size <= (current_cpu_data.tlbsize/2)) {
-			int oldpid = read_c0_entryhi() & ASID_MASK;
+			int oldpid = read_c0_entryhi();
 			int newpid = cpu_asid(cpu, mm);
 
 			start &= (PAGE_MASK << 1);
@@ -169,17 +169,17 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			idx = read_c0_index();
 			write_c0_entrylo0(0);
 			write_c0_entrylo1(0);
-			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
 			if (idx < 0)
 				continue;
+			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
 			tlb_write_indexed();
 		}
 		write_c0_entryhi(oldpid);
 	} else {
 		drop_mmu_context(mm, cpu);
 	}
+		local_irq_restore(flags);
 	}
-	local_irq_restore(flags);
 }
 
 void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -189,7 +189,6 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	size = (size + 1) >> 1;
-
 	local_irq_save(flags);
 	if (size <= (current_cpu_data.tlbsize/2)) {
 		int pid = read_c0_entryhi();
@@ -207,9 +206,9 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 		idx = read_c0_index();
 		write_c0_entrylo0(0);
 		write_c0_entrylo1(0);
-		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
 		if (idx < 0)
 			continue;
+		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
 		tlb_write_indexed();
 	}
 	write_c0_entryhi(pid);
@@ -221,15 +220,16 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 
 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
-	unsigned long flags;
 	int cpu = smp_processor_id();
 
-	local_irq_save(flags);
 	if (cpu_context(cpu, vma->vm_mm) != 0) {
+		unsigned long flags;
 		int oldpid, newpid, idx;
+
 		newpid = cpu_asid(cpu, vma->vm_mm);
 		page &= (PAGE_MASK << 1);
-		oldpid = read_c0_entryhi() & ASID_MASK;
+		local_irq_save(flags);
+		oldpid = read_c0_entryhi();
 		write_c0_entryhi(page | newpid);
 		tlb_probe();
 		idx = read_c0_index();
@@ -240,10 +240,11 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 		/* Make sure all entries differ. */
 		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
 		tlb_write_indexed();
+
 	finish:
 		write_c0_entryhi(oldpid);
+		local_irq_restore(flags);
 	}
-	local_irq_restore(flags);
 }
 
 /*
@@ -255,18 +256,17 @@ void local_flush_tlb_one(unsigned long page)
 	unsigned long flags;
 	int oldpid, idx;
 
-	page &= (PAGE_MASK << 1);
-	oldpid = read_c0_entryhi() & ASID_MASK;
-
 	local_irq_save(flags);
+	oldpid = read_c0_entryhi();
+	page &= (PAGE_MASK << 1);
 	write_c0_entryhi(page);
 	tlb_probe();
 	idx = read_c0_index();
+	write_c0_entrylo0(0);
+	write_c0_entrylo1(0);
 	if (idx >= 0) {
 		/* Make sure all entries differ. */
 		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
-		write_c0_entrylo0(0);
-		write_c0_entrylo1(0);
 		tlb_write_indexed();
 	}
 
@@ -297,6 +297,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
 	unsigned long flags;
 	pgd_t *pgdp;
+	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep;
 	int idx, pid;
@@ -311,19 +312,26 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 
 	pid = read_c0_entryhi() & ASID_MASK;
 	address &= (PAGE_MASK << 1);
-	write_c0_entryhi(address | (pid));
+	write_c0_entryhi(address | pid);
 	pgdp = pgd_offset(vma->vm_mm, address);
 	tlb_probe();
-	pmdp = pmd_offset(pgdp, address);
+	pudp = pud_offset(pgdp, address);
+	pmdp = pmd_offset(pudp, address);
 	idx = read_c0_index();
 	ptep = pte_offset_map(pmdp, address);
+
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+	write_c0_entrylo0(ptep->pte_high);
+	ptep++;
+	write_c0_entrylo1(ptep->pte_high);
+#else
 	write_c0_entrylo0(pte_val(*ptep++) >> 6);
 	write_c0_entrylo1(pte_val(*ptep) >> 6);
-	if (idx < 0) {
+#endif
+	if (idx < 0)
 		tlb_write_random();
-	} else {
+	else
 		tlb_write_indexed();
-	}
 	local_irq_restore(flags);
 }
 
@@ -336,7 +344,8 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 	unsigned long old_ctx;
 
 	local_irq_save(flags);
-	old_ctx = read_c0_entryhi() & 0xff;
+	/* Save old context and create impossible VPN2 value */
+	old_ctx = read_c0_entryhi();
 	old_pagemask = read_c0_pagemask();
 	wired = read_c0_wired();
 	write_c0_wired(wired + 1);
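A closing note on the new local_flush_tlb_mm() comment in tlb-r4k.c above ("to
effectively flush these entries, we just bump the asid"): no TLB walk happens
at all. The sketch below paraphrases what drop_mmu_context() does on MIPS; the
body is an assumption based on the mmu_context.h helpers of the era, not part
of this patch:

	/* Hypothetical paraphrase: handing the mm a fresh ASID makes every
	 * TLB entry still tagged with the old ASID unmatchable, which is
	 * as good as removing it. */
	static void drop_mmu_context_sketch(struct mm_struct *mm, unsigned long cpu)
	{
		unsigned long flags;

		local_irq_save(flags);
		get_new_mmu_context(mm, cpu);	/* bump cpu_context(cpu, mm) */
		if (current->active_mm == mm)
			write_c0_entryhi(cpu_asid(cpu, mm));	/* load the new ASID */
		local_irq_restore(flags);
	}

The preempt_disable()/preempt_enable() pair added around the call in
local_flush_tlb_mm() ensures smp_processor_id() and the context drop happen on
the same CPU.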