author     Jeff Dike <jdike@addtoit.com>  2007-05-06 17:51:48 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-07 15:13:04 -0400
commit     16dd07bc6404c8da0bdfeb7a5cde4e4a63991c00 (patch)
tree       de8401aeebfe1bbdaecaff3b81d92196c50c85d7 /arch/um/kernel
parent     3ec704e6660aa58505110a50102e57cdb9daa044 (diff)
uml: more page fault path trimming
More trimming of the page fault path.

Permissions are passed around in a single int rather than as one int per permission bit (separate r, w, x values). The permission values are copied from libc so that they can be passed to mmap and mprotect without any further conversion.

The register sets used by do_syscall_stub and copy_context_skas0 are initialized once, at boot time, rather than once per call.

wait_stub_done checks whether it is getting the signals it expects by comparing the wait status to a mask containing bits for the signals of interest, rather than comparing individually to the signal numbers. It also has one check for a wait failure instead of two. The caller is expected to do the initial continue of the stub; this gets rid of an argument and some logic. The fname argument is gone, as that can be had from a stack trace.

user_signal() is collapsed into userspace(), as it amounts to only one or two lines of code afterwards.

The physical memory remapping code is gone, as it is unused.

flush_tlb_page is inlined.

Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Cc: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
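A minimal sketch of the single-int permission scheme described above, assuming the UM_PROT_* constants carry the same values as libc's PROT_* (as the message states); make_prot() and set_page_prot() are hypothetical illustration names, not code from this patch:

#include <sys/mman.h>

/* Assumed definitions; in UML these live in a shared header. */
#define UM_PROT_READ  PROT_READ
#define UM_PROT_WRITE PROT_WRITE
#define UM_PROT_EXEC  PROT_EXEC

static int make_prot(int r, int w, int x)
{
        /* Same composition as update_pte_range() in the diff below. */
        return (r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
               (x ? UM_PROT_EXEC : 0);
}

static int set_page_prot(void *addr, size_t len, int r, int w, int x)
{
        /* One int travels the whole call chain and goes straight to libc. */
        return mprotect(addr, len, make_prot(r, w, x));
}

Because the values already match, no translation table sits between the page fault path and the host mmap()/mprotect() calls.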
Diffstat (limited to 'arch/um/kernel')
-rw-r--r--  arch/um/kernel/physmem.c   228
-rw-r--r--  arch/um/kernel/skas/tlb.c   21
-rw-r--r--  arch/um/kernel/tlb.c        42
3 files changed, 26 insertions(+), 265 deletions(-)
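The wait_stub_done and register-set changes mentioned in the message land outside arch/um/kernel, so they do not appear in this diffstat. A rough sketch of the signal-mask check, under assumed names (STUB_DONE_MASK, stub_wait_ok) and an assumed pair of stop signals:

#include <signal.h>
#include <sys/wait.h>

/* One bit per signal the stub is allowed to stop with (assumed set). */
#define STUB_DONE_MASK ((1U << SIGUSR1) | (1U << SIGTRAP))

static int stub_wait_ok(int n, int status)
{
        /* One failure test and one mask test replace a chain of
         * individual WSTOPSIG(status) == SIGxxx comparisons. */
        if ((n < 0) || !WIFSTOPPED(status) ||
            (((1U << WSTOPSIG(status)) & STUB_DONE_MASK) == 0))
                return 0;
        return 1;
}

Folding the wait result and the stop-signal test into one condition is what gives the "one check for a wait failure instead of two" noted in the message.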
diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
index df1ad3ba130c..3ba6e4c841da 100644
--- a/arch/um/kernel/physmem.c
+++ b/arch/um/kernel/physmem.c
@@ -21,229 +21,8 @@
 #include "kern.h"
 #include "init.h"
 
-struct phys_desc {
-        struct rb_node rb;
-        int fd;
-        __u64 offset;
-        void *virt;
-        unsigned long phys;
-        struct list_head list;
-};
-
-static struct rb_root phys_mappings = RB_ROOT;
-
-static struct rb_node **find_rb(void *virt)
-{
-        struct rb_node **n = &phys_mappings.rb_node;
-        struct phys_desc *d;
-
-        while(*n != NULL){
-                d = rb_entry(*n, struct phys_desc, rb);
-                if(d->virt == virt)
-                        return n;
-
-                if(d->virt > virt)
-                        n = &(*n)->rb_left;
-                else
-                        n = &(*n)->rb_right;
-        }
-
-        return n;
-}
-
-static struct phys_desc *find_phys_mapping(void *virt)
-{
-        struct rb_node **n = find_rb(virt);
-
-        if(*n == NULL)
-                return NULL;
-
-        return rb_entry(*n, struct phys_desc, rb);
-}
-
-static void insert_phys_mapping(struct phys_desc *desc)
-{
-        struct rb_node **n = find_rb(desc->virt);
-
-        if(*n != NULL)
-                panic("Physical remapping for %p already present",
-                      desc->virt);
-
-        rb_link_node(&desc->rb, rb_parent(*n), n);
-        rb_insert_color(&desc->rb, &phys_mappings);
-}
-
-LIST_HEAD(descriptor_mappings);
-
-struct desc_mapping {
-        int fd;
-        struct list_head list;
-        struct list_head pages;
-};
-
-static struct desc_mapping *find_mapping(int fd)
-{
-        struct desc_mapping *desc;
-        struct list_head *ele;
-
-        list_for_each(ele, &descriptor_mappings){
-                desc = list_entry(ele, struct desc_mapping, list);
-                if(desc->fd == fd)
-                        return desc;
-        }
-
-        return NULL;
-}
-
-static struct desc_mapping *descriptor_mapping(int fd)
-{
-        struct desc_mapping *desc;
-
-        desc = find_mapping(fd);
-        if(desc != NULL)
-                return desc;
-
-        desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
-        if(desc == NULL)
-                return NULL;
-
-        *desc = ((struct desc_mapping)
-                { .fd =         fd,
-                  .list =       LIST_HEAD_INIT(desc->list),
-                  .pages =      LIST_HEAD_INIT(desc->pages) });
-        list_add(&desc->list, &descriptor_mappings);
-
-        return desc;
-}
-
-int physmem_subst_mapping(void *virt, int fd, __u64 offset, int w)
-{
-        struct desc_mapping *fd_maps;
-        struct phys_desc *desc;
-        unsigned long phys;
-        int err;
-
-        fd_maps = descriptor_mapping(fd);
-        if(fd_maps == NULL)
-                return -ENOMEM;
-
-        phys = __pa(virt);
-        desc = find_phys_mapping(virt);
-        if(desc != NULL)
-                panic("Address 0x%p is already substituted\n", virt);
-
-        err = -ENOMEM;
-        desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
-        if(desc == NULL)
-                goto out;
-
-        *desc = ((struct phys_desc)
-                { .fd =                 fd,
-                  .offset =             offset,
-                  .virt =               virt,
-                  .phys =               __pa(virt),
-                  .list =               LIST_HEAD_INIT(desc->list) });
-        insert_phys_mapping(desc);
-
-        list_add(&desc->list, &fd_maps->pages);
-
-        virt = (void *) ((unsigned long) virt & PAGE_MASK);
-        err = os_map_memory(virt, fd, offset, PAGE_SIZE, 1, w, 0);
-        if(!err)
-                goto out;
-
-        rb_erase(&desc->rb, &phys_mappings);
-        kfree(desc);
- out:
-        return err;
-}
-
 static int physmem_fd = -1;
 
-static void remove_mapping(struct phys_desc *desc)
-{
-        void *virt = desc->virt;
-        int err;
-
-        rb_erase(&desc->rb, &phys_mappings);
-        list_del(&desc->list);
-        kfree(desc);
-
-        err = os_map_memory(virt, physmem_fd, __pa(virt), PAGE_SIZE, 1, 1, 0);
-        if(err)
-                panic("Failed to unmap block device page from physical memory, "
-                      "errno = %d", -err);
-}
-
-int physmem_remove_mapping(void *virt)
-{
-        struct phys_desc *desc;
-
-        virt = (void *) ((unsigned long) virt & PAGE_MASK);
-        desc = find_phys_mapping(virt);
-        if(desc == NULL)
-                return 0;
-
-        remove_mapping(desc);
-        return 1;
-}
-
-void physmem_forget_descriptor(int fd)
-{
-        struct desc_mapping *desc;
-        struct phys_desc *page;
-        struct list_head *ele, *next;
-        __u64 offset;
-        void *addr;
-        int err;
-
-        desc = find_mapping(fd);
-        if(desc == NULL)
-                return;
-
-        list_for_each_safe(ele, next, &desc->pages){
-                page = list_entry(ele, struct phys_desc, list);
-                offset = page->offset;
-                addr = page->virt;
-                remove_mapping(page);
-                err = os_seek_file(fd, offset);
-                if(err)
-                        panic("physmem_forget_descriptor - failed to seek "
-                              "to %lld in fd %d, error = %d\n",
-                              offset, fd, -err);
-                err = os_read_file(fd, addr, PAGE_SIZE);
-                if(err < 0)
-                        panic("physmem_forget_descriptor - failed to read "
-                              "from fd %d to 0x%p, error = %d\n",
-                              fd, addr, -err);
-        }
-
-        list_del(&desc->list);
-        kfree(desc);
-}
-
-EXPORT_SYMBOL(physmem_forget_descriptor);
-EXPORT_SYMBOL(physmem_remove_mapping);
-EXPORT_SYMBOL(physmem_subst_mapping);
-
-void arch_free_page(struct page *page, int order)
-{
-        void *virt;
-        int i;
-
-        for(i = 0; i < (1 << order); i++){
-                virt = __va(page_to_phys(page + i));
-                physmem_remove_mapping(virt);
-        }
-}
-
-int is_remapped(void *virt)
-{
-        struct phys_desc *desc = find_phys_mapping(virt);
-
-        return desc != NULL;
-}
-
 /* Changed during early boot */
 unsigned long high_physmem;
 
@@ -350,14 +129,9 @@ void setup_physmem(unsigned long start, unsigned long reserve_end,
 
 int phys_mapping(unsigned long phys, __u64 *offset_out)
 {
-        struct phys_desc *desc = find_phys_mapping(__va(phys & PAGE_MASK));
         int fd = -1;
 
-        if(desc != NULL){
-                fd = desc->fd;
-                *offset_out = desc->offset;
-        }
-        else if(phys < physmem_size){
+        if(phys < physmem_size){
                 fd = physmem_fd;
                 *offset_out = phys;
         }
diff --git a/arch/um/kernel/skas/tlb.c b/arch/um/kernel/skas/tlb.c
index c43901aa9368..b3d722ddde31 100644
--- a/arch/um/kernel/skas/tlb.c
+++ b/arch/um/kernel/skas/tlb.c
@@ -27,9 +27,9 @@ static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
                 switch(op->type){
                 case MMAP:
                         ret = map(&mmu->skas.id, op->u.mmap.addr,
-                                  op->u.mmap.len, op->u.mmap.r, op->u.mmap.w,
-                                  op->u.mmap.x, op->u.mmap.fd,
-                                  op->u.mmap.offset, finished, flush);
+                                  op->u.mmap.len, op->u.mmap.prot,
+                                  op->u.mmap.fd, op->u.mmap.offset, finished,
+                                  flush);
                         break;
                 case MUNMAP:
                         ret = unmap(&mmu->skas.id, op->u.munmap.addr,
@@ -37,8 +37,7 @@ static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
                         break;
                 case MPROTECT:
                         ret = protect(&mmu->skas.id, op->u.mprotect.addr,
-                                      op->u.mprotect.len, op->u.mprotect.r,
-                                      op->u.mprotect.w, op->u.mprotect.x,
+                                      op->u.mprotect.len, op->u.mprotect.prot,
                                       finished, flush);
                         break;
                 default:
@@ -102,10 +101,10 @@ void flush_tlb_page_skas(struct vm_area_struct *vma, unsigned long address)
         pte_t *pte;
         struct mm_struct *mm = vma->vm_mm;
         void *flush = NULL;
-        int r, w, x, err = 0;
+        int r, w, x, prot, err = 0;
         struct mm_id *mm_id;
 
-        pgd = pgd_offset(vma->vm_mm, address);
+        pgd = pgd_offset(mm, address);
         if(!pgd_present(*pgd))
                 goto kill;
 
@@ -130,19 +129,21 @@ void flush_tlb_page_skas(struct vm_area_struct *vma, unsigned long address)
         }
 
         mm_id = &mm->context.skas.id;
+        prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
+                (x ? UM_PROT_EXEC : 0));
         if(pte_newpage(*pte)){
                 if(pte_present(*pte)){
                         unsigned long long offset;
                         int fd;
 
                         fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
-                        err = map(mm_id, address, PAGE_SIZE, r, w, x, fd,
-                                  offset, 1, &flush);
+                        err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
+                                  1, &flush);
                 }
                 else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
         }
         else if(pte_newprot(*pte))
-                err = protect(mm_id, address, PAGE_SIZE, r, w, x, 1, &flush);
+                err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);
 
         if(err)
                 goto kill;
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 4a39d50d2d62..8a8d52851443 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -17,7 +17,7 @@
 #include "os.h"
 
 static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
-                    int r, int w, int x, struct host_vm_op *ops, int *index,
+                    unsigned int prot, struct host_vm_op *ops, int *index,
                     int last_filled, union mm_context *mmu, void **flush,
                     int (*do_ops)(union mm_context *, struct host_vm_op *,
                                   int, int, void **))
@@ -31,8 +31,7 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
                 last = &ops[*index];
                 if((last->type == MMAP) &&
                    (last->u.mmap.addr + last->u.mmap.len == virt) &&
-                   (last->u.mmap.r == r) && (last->u.mmap.w == w) &&
-                   (last->u.mmap.x == x) && (last->u.mmap.fd == fd) &&
+                   (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
                    (last->u.mmap.offset + last->u.mmap.len == offset)){
                         last->u.mmap.len += len;
                         return 0;
@@ -48,9 +47,7 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
                   .u = { .mmap = {
                           .addr = virt,
                           .len = len,
-                          .r = r,
-                          .w = w,
-                          .x = x,
+                          .prot = prot,
                           .fd = fd,
                           .offset = offset }
                   } });
@@ -87,8 +84,8 @@ static int add_munmap(unsigned long addr, unsigned long len,
         return ret;
 }
 
-static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
-                        int x, struct host_vm_op *ops, int *index,
+static int add_mprotect(unsigned long addr, unsigned long len,
+                        unsigned int prot, struct host_vm_op *ops, int *index,
                         int last_filled, union mm_context *mmu, void **flush,
                         int (*do_ops)(union mm_context *, struct host_vm_op *,
                                       int, int, void **))
@@ -100,8 +97,7 @@ static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
                 last = &ops[*index];
                 if((last->type == MPROTECT) &&
                    (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
-                   (last->u.mprotect.r == r) && (last->u.mprotect.w == w) &&
-                   (last->u.mprotect.x == x)){
+                   (last->u.mprotect.prot == prot)){
                         last->u.mprotect.len += len;
                         return 0;
                 }
@@ -116,9 +112,7 @@ static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
                   .u = { .mprotect = {
                           .addr = addr,
                           .len = len,
-                          .r = r,
-                          .w = w,
-                          .x = x } } });
+                          .prot = prot } } });
         return ret;
 }
 
@@ -133,7 +127,7 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
                                             void **))
 {
         pte_t *pte;
-        int r, w, x, ret = 0;
+        int r, w, x, prot, ret = 0;
 
         pte = pte_offset_kernel(pmd, addr);
         do {
@@ -146,19 +140,19 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
                 } else if (!pte_dirty(*pte)) {
                         w = 0;
                 }
+                prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
+                        (x ? UM_PROT_EXEC : 0));
                 if(force || pte_newpage(*pte)){
                         if(pte_present(*pte))
                                 ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
-                                               PAGE_SIZE, r, w, x, ops,
-                                               op_index, last_op, mmu, flush,
-                                               do_ops);
+                                               PAGE_SIZE, prot, ops, op_index,
+                                               last_op, mmu, flush, do_ops);
                         else ret = add_munmap(addr, PAGE_SIZE, ops, op_index,
                                               last_op, mmu, flush, do_ops);
                 }
                 else if(pte_newprot(*pte))
-                        ret = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
-                                           op_index, last_op, mmu, flush,
-                                           do_ops);
+                        ret = add_mprotect(addr, PAGE_SIZE, prot, ops, op_index,
+                                           last_op, mmu, flush, do_ops);
                 *pte = pte_mkuptodate(*pte);
         } while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
         return ret;
@@ -377,14 +371,6 @@ pte_t *addr_pte(struct task_struct *task, unsigned long addr)
         return(pte_offset_map(pmd, addr));
 }
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
-{
-        address &= PAGE_MASK;
-
-        CHOOSE_MODE(flush_tlb_range(vma, address, address + PAGE_SIZE),
-                    flush_tlb_page_skas(vma, address));
-}
-
 void flush_tlb_all(void)
 {
         flush_tlb_mm(current->mm);