diff options
author | Jeff Dike <jdike@addtoit.com> | 2007-05-06 17:51:45 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-07 15:13:04 -0400 |
commit | 64f60841c096594b8073e408cd9b40d7d08dcfdd (patch) | |
tree | 3ad05b0b5f83d174eb2a7efdfd95d09f171aa51d | |
parent | 8603ec81487a5fefbc29611ff0d635b33b6da990 (diff) |
uml: speed page fault path
Give the page fault code a specialized path. There is only one page to look
at, so there's no point in going into the general page table walking code.
There's only going to be one host operation, so there are no opportunities for
merging. So, we go straight to the pte we want, figure out what needs doing,
and do it.
While I was in here, I fixed the wart where the address passed to unmap was a
void *, while it was an unsigned long for map and protect.
This gives me just under 10% on a kernel build.
Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Cc: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | arch/um/include/os.h | 2 | ||||
-rw-r--r-- | arch/um/include/skas/mode_kern_skas.h | 2 | ||||
-rw-r--r-- | arch/um/kernel/skas/tlb.c | 66 | ||||
-rw-r--r-- | arch/um/kernel/tlb.c | 4 | ||||
-rw-r--r-- | arch/um/os-Linux/skas/mem.c | 4 |
5 files changed, 72 insertions, 6 deletions
diff --git a/arch/um/include/os.h b/arch/um/include/os.h index d66380908703..e11bdcd8afc2 100644 --- a/arch/um/include/os.h +++ b/arch/um/include/os.h | |||
@@ -302,7 +302,7 @@ extern long syscall_stub_data(struct mm_id * mm_idp, | |||
302 | extern int map(struct mm_id * mm_idp, unsigned long virt, | 302 | extern int map(struct mm_id * mm_idp, unsigned long virt, |
303 | unsigned long len, int r, int w, int x, int phys_fd, | 303 | unsigned long len, int r, int w, int x, int phys_fd, |
304 | unsigned long long offset, int done, void **data); | 304 | unsigned long long offset, int done, void **data); |
305 | extern int unmap(struct mm_id * mm_idp, void *addr, unsigned long len, | 305 | extern int unmap(struct mm_id * mm_idp, unsigned long addr, unsigned long len, |
306 | int done, void **data); | 306 | int done, void **data); |
307 | extern int protect(struct mm_id * mm_idp, unsigned long addr, | 307 | extern int protect(struct mm_id * mm_idp, unsigned long addr, |
308 | unsigned long len, int r, int w, int x, int done, | 308 | unsigned long len, int r, int w, int x, int done, |
diff --git a/arch/um/include/skas/mode_kern_skas.h b/arch/um/include/skas/mode_kern_skas.h index 9cd9c6ec9a63..8ee6285dfacc 100644 --- a/arch/um/include/skas/mode_kern_skas.h +++ b/arch/um/include/skas/mode_kern_skas.h | |||
@@ -33,6 +33,8 @@ extern unsigned long set_task_sizes_skas(unsigned long *task_size_out); | |||
33 | extern int start_uml_skas(void); | 33 | extern int start_uml_skas(void); |
34 | extern int external_pid_skas(struct task_struct *task); | 34 | extern int external_pid_skas(struct task_struct *task); |
35 | extern int thread_pid_skas(struct task_struct *task); | 35 | extern int thread_pid_skas(struct task_struct *task); |
36 | extern void flush_tlb_page_skas(struct vm_area_struct *vma, | ||
37 | unsigned long address); | ||
36 | 38 | ||
37 | #define kmem_end_skas (host_task_size - 1024 * 1024) | 39 | #define kmem_end_skas (host_task_size - 1024 * 1024) |
38 | 40 | ||
diff --git a/arch/um/kernel/skas/tlb.c b/arch/um/kernel/skas/tlb.c index 304a5b0695ab..c43901aa9368 100644 --- a/arch/um/kernel/skas/tlb.c +++ b/arch/um/kernel/skas/tlb.c | |||
@@ -32,8 +32,7 @@ static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last, | |||
32 | op->u.mmap.offset, finished, flush); | 32 | op->u.mmap.offset, finished, flush); |
33 | break; | 33 | break; |
34 | case MUNMAP: | 34 | case MUNMAP: |
35 | ret = unmap(&mmu->skas.id, | 35 | ret = unmap(&mmu->skas.id, op->u.munmap.addr, |
36 | (void *) op->u.munmap.addr, | ||
37 | op->u.munmap.len, finished, flush); | 36 | op->u.munmap.len, finished, flush); |
38 | break; | 37 | break; |
39 | case MPROTECT: | 38 | case MPROTECT: |
@@ -94,3 +93,66 @@ void force_flush_all_skas(void) | |||
94 | unsigned long end = proc_mm ? task_size : CONFIG_STUB_START; | 93 | unsigned long end = proc_mm ? task_size : CONFIG_STUB_START; |
95 | fix_range(current->mm, 0, end, 1); | 94 | fix_range(current->mm, 0, end, 1); |
96 | } | 95 | } |
96 | |||
97 | void flush_tlb_page_skas(struct vm_area_struct *vma, unsigned long address) | ||
98 | { | ||
99 | pgd_t *pgd; | ||
100 | pud_t *pud; | ||
101 | pmd_t *pmd; | ||
102 | pte_t *pte; | ||
103 | struct mm_struct *mm = vma->vm_mm; | ||
104 | void *flush = NULL; | ||
105 | int r, w, x, err = 0; | ||
106 | struct mm_id *mm_id; | ||
107 | |||
108 | pgd = pgd_offset(vma->vm_mm, address); | ||
109 | if(!pgd_present(*pgd)) | ||
110 | goto kill; | ||
111 | |||
112 | pud = pud_offset(pgd, address); | ||
113 | if(!pud_present(*pud)) | ||
114 | goto kill; | ||
115 | |||
116 | pmd = pmd_offset(pud, address); | ||
117 | if(!pmd_present(*pmd)) | ||
118 | goto kill; | ||
119 | |||
120 | pte = pte_offset_kernel(pmd, address); | ||
121 | |||
122 | r = pte_read(*pte); | ||
123 | w = pte_write(*pte); | ||
124 | x = pte_exec(*pte); | ||
125 | if (!pte_young(*pte)) { | ||
126 | r = 0; | ||
127 | w = 0; | ||
128 | } else if (!pte_dirty(*pte)) { | ||
129 | w = 0; | ||
130 | } | ||
131 | |||
132 | mm_id = &mm->context.skas.id; | ||
133 | if(pte_newpage(*pte)){ | ||
134 | if(pte_present(*pte)){ | ||
135 | unsigned long long offset; | ||
136 | int fd; | ||
137 | |||
138 | fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset); | ||
139 | err = map(mm_id, address, PAGE_SIZE, r, w, x, fd, | ||
140 | offset, 1, &flush); | ||
141 | } | ||
142 | else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush); | ||
143 | } | ||
144 | else if(pte_newprot(*pte)) | ||
145 | err = protect(mm_id, address, PAGE_SIZE, r, w, x, 1, &flush); | ||
146 | |||
147 | if(err) | ||
148 | goto kill; | ||
149 | |||
150 | *pte = pte_mkuptodate(*pte); | ||
151 | |||
152 | return; | ||
153 | |||
154 | kill: | ||
155 | printk("Failed to flush page for address 0x%lx\n", address); | ||
156 | force_sig(SIGKILL, current); | ||
157 | } | ||
158 | |||
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c index e201ccf0ec89..00de86efccaa 100644 --- a/arch/um/kernel/tlb.c +++ b/arch/um/kernel/tlb.c | |||
@@ -381,7 +381,9 @@ pte_t *addr_pte(struct task_struct *task, unsigned long addr) | |||
381 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long address) | 381 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long address) |
382 | { | 382 | { |
383 | address &= PAGE_MASK; | 383 | address &= PAGE_MASK; |
384 | flush_tlb_range(vma, address, address + PAGE_SIZE); | 384 | |
385 | CHOOSE_MODE(flush_tlb_range(vma, address, address + PAGE_SIZE), | ||
386 | flush_tlb_page_skas(vma, address)); | ||
385 | } | 387 | } |
386 | 388 | ||
387 | void flush_tlb_all(void) | 389 | void flush_tlb_all(void) |
diff --git a/arch/um/os-Linux/skas/mem.c b/arch/um/os-Linux/skas/mem.c index 6cdfda807b65..af0790719b77 100644 --- a/arch/um/os-Linux/skas/mem.c +++ b/arch/um/os-Linux/skas/mem.c | |||
@@ -219,8 +219,8 @@ int map(struct mm_id * mm_idp, unsigned long virt, unsigned long len, | |||
219 | return ret; | 219 | return ret; |
220 | } | 220 | } |
221 | 221 | ||
222 | int unmap(struct mm_id * mm_idp, void *addr, unsigned long len, int done, | 222 | int unmap(struct mm_id * mm_idp, unsigned long addr, unsigned long len, |
223 | void **data) | 223 | int done, void **data) |
224 | { | 224 | { |
225 | int ret; | 225 | int ret; |
226 | 226 | ||