Diffstat (limited to 'arch/um/kernel/tlb.c')
-rw-r--r--  arch/um/kernel/tlb.c  134
1 file changed, 129 insertions(+), 5 deletions(-)
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 312e8ba30cd3..12b8c637527d 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -8,12 +8,12 @@
8#include "asm/pgalloc.h" 8#include "asm/pgalloc.h"
9#include "asm/pgtable.h" 9#include "asm/pgtable.h"
10#include "asm/tlbflush.h" 10#include "asm/tlbflush.h"
11#include "mode_kern.h"
12#include "as-layout.h" 11#include "as-layout.h"
13#include "tlb.h" 12#include "tlb.h"
14#include "mem.h" 13#include "mem.h"
15#include "mem_user.h" 14#include "mem_user.h"
16#include "os.h" 15#include "os.h"
16#include "skas.h"
17 17
18static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len, 18static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
19 unsigned int prot, struct host_vm_op *ops, int *index, 19 unsigned int prot, struct host_vm_op *ops, int *index,
@@ -341,6 +341,71 @@ int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
         return(updated);
 }
 
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
+{
+        pgd_t *pgd;
+        pud_t *pud;
+        pmd_t *pmd;
+        pte_t *pte;
+        struct mm_struct *mm = vma->vm_mm;
+        void *flush = NULL;
+        int r, w, x, prot, err = 0;
+        struct mm_id *mm_id;
+
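+        /*
+         * Walk the page tables down to the pte for this address - a
+         * level that isn't present means the host mapping can't be
+         * fixed up, and the process is killed below.
+         */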
+        address &= PAGE_MASK;
+        pgd = pgd_offset(mm, address);
+        if(!pgd_present(*pgd))
+                goto kill;
+
+        pud = pud_offset(pgd, address);
+        if(!pud_present(*pud))
+                goto kill;
+
+        pmd = pmd_offset(pud, address);
+        if(!pmd_present(*pmd))
+                goto kill;
+
+        pte = pte_offset_kernel(pmd, address);
+
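+        /*
+         * Accessed and dirty bits are emulated in software - a pte
+         * that isn't young gets no access at all so the first access
+         * faults, and a clean pte isn't writable so the first write
+         * faults.
+         */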
+        r = pte_read(*pte);
+        w = pte_write(*pte);
+        x = pte_exec(*pte);
+        if (!pte_young(*pte)) {
+                r = 0;
+                w = 0;
+        } else if (!pte_dirty(*pte)) {
+                w = 0;
+        }
+
+        mm_id = &mm->context.skas.id;
+        prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
+                (x ? UM_PROT_EXEC : 0));
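+        /*
+         * A "new page" pte needs its host mapping redone from
+         * scratch; a "new protection" pte only needs an mprotect.
+         */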
+        if(pte_newpage(*pte)){
+                if(pte_present(*pte)){
+                        unsigned long long offset;
+                        int fd;
+
+                        fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
+                        err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
+                                  1, &flush);
+                }
+                else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
+        }
+        else if(pte_newprot(*pte))
+                err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);
+
+        if(err)
+                goto kill;
+
+        *pte = pte_mkuptodate(*pte);
+
+        return;
+
+kill:
+        printk("Failed to flush page for address 0x%lx\n", address);
+        force_sig(SIGKILL, current);
+}
+
 pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
 {
         return(pgd_offset(mm, address));
@@ -387,21 +452,80 @@ void flush_tlb_kernel_vm(void)
 
 void __flush_tlb_one(unsigned long addr)
 {
-        __flush_tlb_one_skas(addr);
+        flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
+}
+
+static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
+                  int finished, void **flush)
+{
+        struct host_vm_op *op;
+        int i, ret = 0;
+
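+        /*
+         * Replay the queued operations against the host address
+         * space, stopping at the first one that fails.
+         */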
+        for(i = 0; i <= last && !ret; i++){
+                op = &ops[i];
+                switch(op->type){
+                case MMAP:
+                        ret = map(&mmu->skas.id, op->u.mmap.addr,
+                                  op->u.mmap.len, op->u.mmap.prot,
+                                  op->u.mmap.fd, op->u.mmap.offset, finished,
+                                  flush);
+                        break;
+                case MUNMAP:
+                        ret = unmap(&mmu->skas.id, op->u.munmap.addr,
+                                    op->u.munmap.len, finished, flush);
+                        break;
+                case MPROTECT:
+                        ret = protect(&mmu->skas.id, op->u.mprotect.addr,
+                                      op->u.mprotect.len, op->u.mprotect.prot,
+                                      finished, flush);
+                        break;
+                default:
+                        printk("Unknown op type %d in do_ops\n", op->type);
+                        break;
+                }
+        }
+
+        return ret;
+}
+
+static void fix_range(struct mm_struct *mm, unsigned long start_addr,
+                      unsigned long end_addr, int force)
+{
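+        /*
+         * Without proc_mm, the stub pages live above
+         * CONFIG_STUB_START and must never be unmapped or remapped.
+         */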
+        if(!proc_mm && (end_addr > CONFIG_STUB_START))
+                end_addr = CONFIG_STUB_START;
+
+        fix_range_common(mm, start_addr, end_addr, force, do_ops);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                      unsigned long end)
 {
-        flush_tlb_range_skas(vma, start, end);
+        if(vma->vm_mm == NULL)
+                flush_tlb_kernel_range_common(start, end);
+        else fix_range(vma->vm_mm, start, end, 0);
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-        flush_tlb_mm_skas(mm);
+        unsigned long end;
+
+        /* Don't bother flushing if this address space is about to be
+         * destroyed.
+         */
+        if(atomic_read(&mm->mm_users) == 0)
+                return;
+
+        end = proc_mm ? task_size : CONFIG_STUB_START;
+        fix_range(mm, 0, end, 0);
 }
 
 void force_flush_all(void)
 {
-        force_flush_all_skas();
+        struct mm_struct *mm = current->mm;
+        struct vm_area_struct *vma = mm->mmap;
+
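+        /* force == 1 redoes every mapping, not just the stale ones */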
+        while(vma != NULL) {
+                fix_range(mm, vma->vm_start, vma->vm_end, 1);
+                vma = vma->vm_next;
+        }
 }