Diffstat (limited to 'arch/um/kernel/tlb.c')
-rw-r--r--  arch/um/kernel/tlb.c  132
1 file changed, 68 insertions(+), 64 deletions(-)
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index eda477edfdf5..83ec8d4747fd 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -18,13 +18,15 @@
 #define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
 
 void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
-                      unsigned long end_addr, int force, int data,
-                      void (*do_ops)(int, struct host_vm_op *, int))
+                      unsigned long end_addr, int force,
+                      void (*do_ops)(union mm_context *, struct host_vm_op *,
+                                     int))
 {
         pgd_t *npgd;
         pud_t *npud;
         pmd_t *npmd;
         pte_t *npte;
+        union mm_context *mmu = &mm->context;
         unsigned long addr, end;
         int r, w, x;
         struct host_vm_op ops[16];
@@ -40,7 +42,7 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                                 end = end_addr;
                         if(force || pgd_newpage(*npgd)){
                                 op_index = add_munmap(addr, end - addr, ops,
-                                                      op_index, last_op, data,
+                                                      op_index, last_op, mmu,
                                                       do_ops);
                                 pgd_mkuptodate(*npgd);
                         }
@@ -55,7 +57,7 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                                 end = end_addr;
                         if(force || pud_newpage(*npud)){
                                 op_index = add_munmap(addr, end - addr, ops,
-                                                      op_index, last_op, data,
+                                                      op_index, last_op, mmu,
                                                       do_ops);
                                 pud_mkuptodate(*npud);
                         }
@@ -70,7 +72,7 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                                 end = end_addr;
                         if(force || pmd_newpage(*npmd)){
                                 op_index = add_munmap(addr, end - addr, ops,
-                                                      op_index, last_op, data,
+                                                      op_index, last_op, mmu,
                                                       do_ops);
                                 pmd_mkuptodate(*npmd);
                         }
@@ -93,21 +95,21 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                                 op_index = add_mmap(addr,
                                                     pte_val(*npte) & PAGE_MASK,
                                                     PAGE_SIZE, r, w, x, ops,
-                                                    op_index, last_op, data,
+                                                    op_index, last_op, mmu,
                                                     do_ops);
                         else op_index = add_munmap(addr, PAGE_SIZE, ops,
-                                                   op_index, last_op, data,
+                                                   op_index, last_op, mmu,
                                                    do_ops);
                 }
                 else if(pte_newprot(*npte))
                         op_index = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
-                                                op_index, last_op, data,
+                                                op_index, last_op, mmu,
                                                 do_ops);
 
                 *npte = pte_mkuptodate(*npte);
                 addr += PAGE_SIZE;
         }
-        (*do_ops)(data, ops, op_index);
+        (*do_ops)(mmu, ops, op_index);
 }
 
 int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
@@ -195,51 +197,6 @@ int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
         return(updated);
 }
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
-{
-        address &= PAGE_MASK;
-        flush_tlb_range(vma, address, address + PAGE_SIZE);
-}
-
-void flush_tlb_all(void)
-{
-        flush_tlb_mm(current->mm);
-}
-
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
-        CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
-                         flush_tlb_kernel_range_common, start, end);
-}
-
-void flush_tlb_kernel_vm(void)
-{
-        CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
-                    flush_tlb_kernel_range_common(start_vm, end_vm));
-}
-
-void __flush_tlb_one(unsigned long addr)
-{
-        CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
-}
-
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-                     unsigned long end)
-{
-        CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
-                         end);
-}
-
-void flush_tlb_mm(struct mm_struct *mm)
-{
-        CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
-}
-
-void force_flush_all(void)
-{
-        CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
-}
-
 pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
 {
         return(pgd_offset(mm, address));
@@ -270,9 +227,9 @@ pte_t *addr_pte(struct task_struct *task, unsigned long addr)
 }
 
 int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
              int r, int w, int x, struct host_vm_op *ops, int index,
-             int last_filled, int data,
-             void (*do_ops)(int, struct host_vm_op *, int))
+             int last_filled, union mm_context *mmu,
+             void (*do_ops)(union mm_context *, struct host_vm_op *, int))
 {
         __u64 offset;
         struct host_vm_op *last;
@@ -292,7 +249,7 @@ int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
         }
 
         if(index == last_filled){
-                (*do_ops)(data, ops, last_filled);
+                (*do_ops)(mmu, ops, last_filled);
                 index = -1;
         }
 
@@ -310,8 +267,8 @@ int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
 }
 
 int add_munmap(unsigned long addr, unsigned long len, struct host_vm_op *ops,
-               int index, int last_filled, int data,
-               void (*do_ops)(int, struct host_vm_op *, int))
+               int index, int last_filled, union mm_context *mmu,
+               void (*do_ops)(union mm_context *, struct host_vm_op *, int))
 {
         struct host_vm_op *last;
 
@@ -325,7 +282,7 @@ int add_munmap(unsigned long addr, unsigned long len, struct host_vm_op *ops,
         }
 
         if(index == last_filled){
-                (*do_ops)(data, ops, last_filled);
+                (*do_ops)(mmu, ops, last_filled);
                 index = -1;
         }
 
@@ -337,8 +294,9 @@ int add_munmap(unsigned long addr, unsigned long len, struct host_vm_op *ops,
 }
 
 int add_mprotect(unsigned long addr, unsigned long len, int r, int w, int x,
-                 struct host_vm_op *ops, int index, int last_filled, int data,
-                 void (*do_ops)(int, struct host_vm_op *, int))
+                 struct host_vm_op *ops, int index, int last_filled,
+                 union mm_context *mmu,
+                 void (*do_ops)(union mm_context *, struct host_vm_op *, int))
 {
         struct host_vm_op *last;
 
@@ -354,7 +312,7 @@ int add_mprotect(unsigned long addr, unsigned long len, int r, int w, int x,
         }
 
         if(index == last_filled){
-                (*do_ops)(data, ops, last_filled);
+                (*do_ops)(mmu, ops, last_filled);
                 index = -1;
         }
 
@@ -367,3 +325,49 @@ int add_mprotect(unsigned long addr, unsigned long len, int r, int w, int x,
                                                 .x = x } } });
         return(index);
 }
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
+{
+        address &= PAGE_MASK;
+        flush_tlb_range(vma, address, address + PAGE_SIZE);
+}
+
+void flush_tlb_all(void)
+{
+        flush_tlb_mm(current->mm);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+        CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
+                         flush_tlb_kernel_range_common, start, end);
+}
+
+void flush_tlb_kernel_vm(void)
+{
+        CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
+                    flush_tlb_kernel_range_common(start_vm, end_vm));
+}
+
+void __flush_tlb_one(unsigned long addr)
+{
+        CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+                     unsigned long end)
+{
+        CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
+                         end);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+        CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
+}
+
+void force_flush_all(void)
+{
+        CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
+}
+
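
The change above replaces the opaque "int data" cookie threaded through fix_range_common() and the add_mmap()/add_munmap()/add_mprotect() helpers with a typed union mm_context *, taken from mm->context, so the do_ops callback receives the address space it operates on directly instead of an integer it has to decode. The batching idiom itself is unchanged: operations are queued in a fixed 16-slot array, coalesced with the previous entry when the ranges are contiguous, and flushed through do_ops whenever the array fills and once more at the end of the walk. Below is a minimal, self-contained sketch of that idiom; struct ctx and struct op are simplified stand-ins for union mm_context and struct host_vm_op, not the kernel's definitions, and only the munmap path is shown.

/*
 * Sketch of the host_vm_op batching pattern: queue, coalesce, flush.
 * All names here are illustrative stand-ins, not kernel API.
 */
#include <stdio.h>

#define OPS_MAX 16

struct ctx { int id; };                 /* stand-in for union mm_context */

struct op {                             /* stand-in for struct host_vm_op */
        enum { MUNMAP } type;
        unsigned long addr, len;
};

static void do_ops(struct ctx *ctx, struct op *ops, int last)
{
        /* In UML this would issue the host syscalls for ops[0..last]. */
        for (int i = 0; i <= last; i++)
                printf("ctx %d: munmap(0x%lx, 0x%lx)\n",
                       ctx->id, ops[i].addr, ops[i].len);
}

static int add_munmap(unsigned long addr, unsigned long len,
                      struct op *ops, int index, int last_filled,
                      struct ctx *ctx,
                      void (*do_ops)(struct ctx *, struct op *, int))
{
        /* Coalesce with the previous op when the ranges are contiguous. */
        if (index != -1) {
                struct op *last = &ops[index];
                if (last->type == MUNMAP && last->addr + last->len == addr) {
                        last->len += len;
                        return index;
                }
        }

        /* Array full: flush the batch and start refilling from slot 0. */
        if (index == last_filled) {
                (*do_ops)(ctx, ops, last_filled);
                index = -1;
        }

        ops[++index] = (struct op) { .type = MUNMAP,
                                     .addr = addr, .len = len };
        return index;
}

int main(void)
{
        struct ctx ctx = { .id = 1 };
        struct op ops[OPS_MAX];
        int op_index = -1;

        /* Two contiguous pages coalesce into one op, the third does not. */
        op_index = add_munmap(0x1000, 0x1000, ops, op_index,
                              OPS_MAX - 1, &ctx, do_ops);
        op_index = add_munmap(0x2000, 0x1000, ops, op_index,
                              OPS_MAX - 1, &ctx, do_ops);
        op_index = add_munmap(0x8000, 0x1000, ops, op_index,
                              OPS_MAX - 1, &ctx, do_ops);

        /* Final flush of whatever remains, as in fix_range_common(). */
        (*do_ops)(&ctx, ops, op_index);
        return 0;
}

Batching amortizes the cost of crossing from the UML kernel to the host: contiguous page-sized operations collapse into one host call, and the callback fires at most once per sixteen queued ops plus once at the end.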
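The flush entry points moved to the bottom of the file dispatch between UML's two modes through CHOOSE_MODE/CHOOSE_MODE_PROC. As a hedged sketch of that dispatch (the real macros live in UML's mode headers and vary with which modes are compiled in; mode_tt and the macro bodies below are simplified assumptions, and the variadic "args..." form is GNU C, as in the kernel):

#include <stdio.h>

static int mode_tt;     /* chosen at boot in real UML; a stand-in here */

#define CHOOSE_MODE(tt, skas) (mode_tt ? (tt) : (skas))
#define CHOOSE_MODE_PROC(tt_proc, skas_proc, args...) \
        CHOOSE_MODE(tt_proc(args), skas_proc(args))

static void flush_one_tt(unsigned long addr)
{
        printf("tt: flush 0x%lx\n", addr);
}

static void flush_one_skas(unsigned long addr)
{
        printf("skas: flush 0x%lx\n", addr);
}

static void flush_tlb_one(unsigned long addr)
{
        CHOOSE_MODE_PROC(flush_one_tt, flush_one_skas, addr);
}

int main(void)
{
        mode_tt = 0;              /* pretend we booted in skas mode */
        flush_tlb_one(0x1000);    /* dispatches to the skas variant */
        return 0;
}

The pattern keeps a single generic entry point per operation while the tt and skas backends stay free to manage host address spaces completely differently.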