aboutsummaryrefslogtreecommitdiffstats
path: root/arch/um/kernel/skas
diff options
context:
space:
mode:
authorJeff Dike <jdike@addtoit.com>2007-05-06 17:51:45 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-05-07 15:13:04 -0400
commit64f60841c096594b8073e408cd9b40d7d08dcfdd (patch)
tree3ad05b0b5f83d174eb2a7efdfd95d09f171aa51d /arch/um/kernel/skas
parent8603ec81487a5fefbc29611ff0d635b33b6da990 (diff)
uml: speed page fault path
Give the page fault code a specialized path. There is only one page to look at, so there's no point in going into the general page table walking code. There's only going to be one host operation, so there are no opportunities for merging. So, we go straight to the pte we want, figure out what needs doing, and do it. While I was in here, I fixed the wart where the address passed to unmap was a void *, but an unsigned long to map and protect. This gives me just under 10% on a kernel build. Signed-off-by: Jeff Dike <jdike@linux.intel.com> Cc: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/um/kernel/skas')
-rw-r--r--arch/um/kernel/skas/tlb.c66
1 file changed, 64 insertions(+), 2 deletions(-)
diff --git a/arch/um/kernel/skas/tlb.c b/arch/um/kernel/skas/tlb.c
index 304a5b0695ab..c43901aa9368 100644
--- a/arch/um/kernel/skas/tlb.c
+++ b/arch/um/kernel/skas/tlb.c
@@ -32,8 +32,7 @@ static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
32 op->u.mmap.offset, finished, flush); 32 op->u.mmap.offset, finished, flush);
33 break; 33 break;
34 case MUNMAP: 34 case MUNMAP:
35 ret = unmap(&mmu->skas.id, 35 ret = unmap(&mmu->skas.id, op->u.munmap.addr,
36 (void *) op->u.munmap.addr,
37 op->u.munmap.len, finished, flush); 36 op->u.munmap.len, finished, flush);
38 break; 37 break;
39 case MPROTECT: 38 case MPROTECT:
@@ -94,3 +93,66 @@ void force_flush_all_skas(void)
94 unsigned long end = proc_mm ? task_size : CONFIG_STUB_START; 93 unsigned long end = proc_mm ? task_size : CONFIG_STUB_START;
95 fix_range(current->mm, 0, end, 1); 94 fix_range(current->mm, 0, end, 1);
96} 95}
96
97void flush_tlb_page_skas(struct vm_area_struct *vma, unsigned long address)
98{
99 pgd_t *pgd;
100 pud_t *pud;
101 pmd_t *pmd;
102 pte_t *pte;
103 struct mm_struct *mm = vma->vm_mm;
104 void *flush = NULL;
105 int r, w, x, err = 0;
106 struct mm_id *mm_id;
107
108 pgd = pgd_offset(vma->vm_mm, address);
109 if(!pgd_present(*pgd))
110 goto kill;
111
112 pud = pud_offset(pgd, address);
113 if(!pud_present(*pud))
114 goto kill;
115
116 pmd = pmd_offset(pud, address);
117 if(!pmd_present(*pmd))
118 goto kill;
119
120 pte = pte_offset_kernel(pmd, address);
121
122 r = pte_read(*pte);
123 w = pte_write(*pte);
124 x = pte_exec(*pte);
125 if (!pte_young(*pte)) {
126 r = 0;
127 w = 0;
128 } else if (!pte_dirty(*pte)) {
129 w = 0;
130 }
131
132 mm_id = &mm->context.skas.id;
133 if(pte_newpage(*pte)){
134 if(pte_present(*pte)){
135 unsigned long long offset;
136 int fd;
137
138 fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
139 err = map(mm_id, address, PAGE_SIZE, r, w, x, fd,
140 offset, 1, &flush);
141 }
142 else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
143 }
144 else if(pte_newprot(*pte))
145 err = protect(mm_id, address, PAGE_SIZE, r, w, x, 1, &flush);
146
147 if(err)
148 goto kill;
149
150 *pte = pte_mkuptodate(*pte);
151
152 return;
153
154kill:
155 printk("Failed to flush page for address 0x%lx\n", address);
156 force_sig(SIGKILL, current);
157}
158