Diffstat (limited to 'arch/um/kernel/tlb.c')
-rw-r--r--	arch/um/kernel/tlb.c	418
1 file changed, 266 insertions(+), 152 deletions(-)
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 8a8d52851443..f4a0e407eee4 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -1,130 +1,182 @@
 /*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  * Licensed under the GPL
  */
 
 #include "linux/mm.h"
-#include "asm/page.h"
-#include "asm/pgalloc.h"
 #include "asm/pgtable.h"
 #include "asm/tlbflush.h"
-#include "choose-mode.h"
-#include "mode_kern.h"
 #include "as-layout.h"
-#include "tlb.h"
-#include "mem.h"
 #include "mem_user.h"
 #include "os.h"
+#include "skas.h"
+#include "tlb.h"
+
+struct host_vm_change {
+	struct host_vm_op {
+		enum { NONE, MMAP, MUNMAP, MPROTECT } type;
+		union {
+			struct {
+				unsigned long addr;
+				unsigned long len;
+				unsigned int prot;
+				int fd;
+				__u64 offset;
+			} mmap;
+			struct {
+				unsigned long addr;
+				unsigned long len;
+			} munmap;
+			struct {
+				unsigned long addr;
+				unsigned long len;
+				unsigned int prot;
+			} mprotect;
+		} u;
+	} ops[1];
+	int index;
+	struct mm_id *id;
+	void *data;
+	int force;
+};
+
+#define INIT_HVC(mm, force) \
+	((struct host_vm_change) \
+	 { .ops = { { .type = NONE } }, \
+	   .id = &mm->context.id, \
+	   .data = NULL, \
+	   .index = 0, \
+	   .force = force })
+
+static int do_ops(struct host_vm_change *hvc, int end,
+		  int finished)
+{
+	struct host_vm_op *op;
+	int i, ret = 0;
+
+	for (i = 0; i < end && !ret; i++) {
+		op = &hvc->ops[i];
+		switch(op->type) {
+		case MMAP:
+			ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
+				  op->u.mmap.prot, op->u.mmap.fd,
+				  op->u.mmap.offset, finished, &hvc->data);
+			break;
+		case MUNMAP:
+			ret = unmap(hvc->id, op->u.munmap.addr,
+				    op->u.munmap.len, finished, &hvc->data);
+			break;
+		case MPROTECT:
+			ret = protect(hvc->id, op->u.mprotect.addr,
+				      op->u.mprotect.len, op->u.mprotect.prot,
+				      finished, &hvc->data);
+			break;
+		default:
+			printk(KERN_ERR "Unknown op type %d in do_ops\n",
+			       op->type);
+			break;
+		}
+	}
+
+	return ret;
+}
 
 static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
-		    unsigned int prot, struct host_vm_op *ops, int *index,
-		    int last_filled, union mm_context *mmu, void **flush,
-		    int (*do_ops)(union mm_context *, struct host_vm_op *,
-				  int, int, void **))
+		    unsigned int prot, struct host_vm_change *hvc)
 {
 	__u64 offset;
 	struct host_vm_op *last;
 	int fd, ret = 0;
 
 	fd = phys_mapping(phys, &offset);
-	if(*index != -1){
-		last = &ops[*index];
-		if((last->type == MMAP) &&
+	if (hvc->index != 0) {
+		last = &hvc->ops[hvc->index - 1];
+		if ((last->type == MMAP) &&
 		   (last->u.mmap.addr + last->u.mmap.len == virt) &&
 		   (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
-		   (last->u.mmap.offset + last->u.mmap.len == offset)){
+		   (last->u.mmap.offset + last->u.mmap.len == offset)) {
 			last->u.mmap.len += len;
 			return 0;
 		}
 	}
 
-	if(*index == last_filled){
-		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
-		*index = -1;
+	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
+		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
+		hvc->index = 0;
 	}
 
-	ops[++*index] = ((struct host_vm_op) { .type = MMAP,
-					       .u = { .mmap = {
-						       .addr = virt,
-						       .len = len,
-						       .prot = prot,
-						       .fd = fd,
-						       .offset = offset }
-			       } });
+	hvc->ops[hvc->index++] = ((struct host_vm_op)
+				  { .type = MMAP,
+				    .u = { .mmap = { .addr = virt,
+						     .len = len,
+						     .prot = prot,
+						     .fd = fd,
+						     .offset = offset }
+			   } });
 	return ret;
 }
 
 static int add_munmap(unsigned long addr, unsigned long len,
-		      struct host_vm_op *ops, int *index, int last_filled,
-		      union mm_context *mmu, void **flush,
-		      int (*do_ops)(union mm_context *, struct host_vm_op *,
-				    int, int, void **))
+		      struct host_vm_change *hvc)
 {
 	struct host_vm_op *last;
 	int ret = 0;
 
-	if(*index != -1){
-		last = &ops[*index];
-		if((last->type == MUNMAP) &&
-		   (last->u.munmap.addr + last->u.mmap.len == addr)){
+	if (hvc->index != 0) {
+		last = &hvc->ops[hvc->index - 1];
+		if ((last->type == MUNMAP) &&
+		   (last->u.munmap.addr + last->u.mmap.len == addr)) {
 			last->u.munmap.len += len;
 			return 0;
 		}
 	}
 
-	if(*index == last_filled){
-		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
-		*index = -1;
+	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
+		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
+		hvc->index = 0;
 	}
 
-	ops[++*index] = ((struct host_vm_op) { .type = MUNMAP,
-					       .u = { .munmap = {
-						        .addr = addr,
-							.len = len } } });
+	hvc->ops[hvc->index++] = ((struct host_vm_op)
+				  { .type = MUNMAP,
+				    .u = { .munmap = { .addr = addr,
+						       .len = len } } });
 	return ret;
 }
 
 static int add_mprotect(unsigned long addr, unsigned long len,
-			unsigned int prot, struct host_vm_op *ops, int *index,
-			int last_filled, union mm_context *mmu, void **flush,
-			int (*do_ops)(union mm_context *, struct host_vm_op *,
-				      int, int, void **))
+			unsigned int prot, struct host_vm_change *hvc)
 {
 	struct host_vm_op *last;
 	int ret = 0;
 
-	if(*index != -1){
-		last = &ops[*index];
-		if((last->type == MPROTECT) &&
+	if (hvc->index != 0) {
+		last = &hvc->ops[hvc->index - 1];
+		if ((last->type == MPROTECT) &&
 		   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
-		   (last->u.mprotect.prot == prot)){
+		   (last->u.mprotect.prot == prot)) {
 			last->u.mprotect.len += len;
 			return 0;
 		}
 	}
 
-	if(*index == last_filled){
-		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
-		*index = -1;
+	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
+		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
+		hvc->index = 0;
	}
 
-	ops[++*index] = ((struct host_vm_op) { .type = MPROTECT,
-					       .u = { .mprotect = {
-						       .addr = addr,
-						       .len = len,
-						       .prot = prot } } });
+	hvc->ops[hvc->index++] = ((struct host_vm_op)
+				  { .type = MPROTECT,
+				    .u = { .mprotect = { .addr = addr,
+							 .len = len,
+							 .prot = prot } } });
 	return ret;
 }
 
 #define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
 
 static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
-				   unsigned long end, struct host_vm_op *ops,
-				   int last_op, int *op_index, int force,
-				   union mm_context *mmu, void **flush,
-				   int (*do_ops)(union mm_context *,
-						 struct host_vm_op *, int, int,
-						 void **))
+				   unsigned long end,
+				   struct host_vm_change *hvc)
 {
 	pte_t *pte;
 	int r, w, x, prot, ret = 0;
@@ -142,29 +194,22 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
 	}
 	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
 		(x ? UM_PROT_EXEC : 0));
-	if(force || pte_newpage(*pte)){
-		if(pte_present(*pte))
+	if (hvc->force || pte_newpage(*pte)) {
+		if (pte_present(*pte))
 			ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
-				       PAGE_SIZE, prot, ops, op_index,
-				       last_op, mmu, flush, do_ops);
-		else ret = add_munmap(addr, PAGE_SIZE, ops, op_index,
-				      last_op, mmu, flush, do_ops);
+				       PAGE_SIZE, prot, hvc);
+		else ret = add_munmap(addr, PAGE_SIZE, hvc);
 	}
-	else if(pte_newprot(*pte))
-		ret = add_mprotect(addr, PAGE_SIZE, prot, ops, op_index,
-				   last_op, mmu, flush, do_ops);
+	else if (pte_newprot(*pte))
+		ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
 		*pte = pte_mkuptodate(*pte);
 	} while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
 	return ret;
 }
 
 static inline int update_pmd_range(pud_t *pud, unsigned long addr,
-				   unsigned long end, struct host_vm_op *ops,
-				   int last_op, int *op_index, int force,
-				   union mm_context *mmu, void **flush,
-				   int (*do_ops)(union mm_context *,
-						 struct host_vm_op *, int, int,
-						 void **))
+				   unsigned long end,
+				   struct host_vm_change *hvc)
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -173,28 +218,20 @@ static inline int update_pmd_range(pud_t *pud, unsigned long addr,
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
-		if(!pmd_present(*pmd)){
-			if(force || pmd_newpage(*pmd)){
-				ret = add_munmap(addr, next - addr, ops,
-						 op_index, last_op, mmu,
-						 flush, do_ops);
+		if (!pmd_present(*pmd)) {
+			if (hvc->force || pmd_newpage(*pmd)) {
+				ret = add_munmap(addr, next - addr, hvc);
 				pmd_mkuptodate(*pmd);
 			}
 		}
-		else ret = update_pte_range(pmd, addr, next, ops, last_op,
-					    op_index, force, mmu, flush,
-					    do_ops);
+		else ret = update_pte_range(pmd, addr, next, hvc);
 	} while (pmd++, addr = next, ((addr != end) && !ret));
 	return ret;
 }
 
 static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
-				   unsigned long end, struct host_vm_op *ops,
-				   int last_op, int *op_index, int force,
-				   union mm_context *mmu, void **flush,
-				   int (*do_ops)(union mm_context *,
-						 struct host_vm_op *, int, int,
-						 void **))
+				   unsigned long end,
+				   struct host_vm_change *hvc)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -203,56 +240,45 @@ static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
 	pud = pud_offset(pgd, addr);
 	do {
 		next = pud_addr_end(addr, end);
-		if(!pud_present(*pud)){
-			if(force || pud_newpage(*pud)){
-				ret = add_munmap(addr, next - addr, ops,
-						 op_index, last_op, mmu,
-						 flush, do_ops);
+		if (!pud_present(*pud)) {
+			if (hvc->force || pud_newpage(*pud)) {
+				ret = add_munmap(addr, next - addr, hvc);
 				pud_mkuptodate(*pud);
 			}
 		}
-		else ret = update_pmd_range(pud, addr, next, ops, last_op,
-					    op_index, force, mmu, flush,
-					    do_ops);
+		else ret = update_pmd_range(pud, addr, next, hvc);
 	} while (pud++, addr = next, ((addr != end) && !ret));
 	return ret;
 }
 
 void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
-		      unsigned long end_addr, int force,
-		      int (*do_ops)(union mm_context *, struct host_vm_op *,
-				    int, int, void **))
+		      unsigned long end_addr, int force)
 {
 	pgd_t *pgd;
-	union mm_context *mmu = &mm->context;
-	struct host_vm_op ops[1];
+	struct host_vm_change hvc;
 	unsigned long addr = start_addr, next;
-	int ret = 0, last_op = ARRAY_SIZE(ops) - 1, op_index = -1;
-	void *flush = NULL;
+	int ret = 0;
 
-	ops[0].type = NONE;
+	hvc = INIT_HVC(mm, force);
 	pgd = pgd_offset(mm, addr);
 	do {
 		next = pgd_addr_end(addr, end_addr);
-		if(!pgd_present(*pgd)){
-			if (force || pgd_newpage(*pgd)){
-				ret = add_munmap(addr, next - addr, ops,
-						 &op_index, last_op, mmu,
-						 &flush, do_ops);
+		if (!pgd_present(*pgd)) {
+			if (force || pgd_newpage(*pgd)) {
+				ret = add_munmap(addr, next - addr, &hvc);
 				pgd_mkuptodate(*pgd);
 			}
 		}
-		else ret = update_pud_range(pgd, addr, next, ops, last_op,
-					    &op_index, force, mmu, &flush,
-					    do_ops);
+		else ret = update_pud_range(pgd, addr, next, &hvc);
 	} while (pgd++, addr = next, ((addr != end_addr) && !ret));
 
-	if(!ret)
-		ret = (*do_ops)(mmu, ops, op_index, 1, &flush);
+	if (!ret)
+		ret = do_ops(&hvc, hvc.index, 1);
 
 	/* This is not an else because ret is modified above */
-	if(ret) {
-		printk("fix_range_common: failed, killing current process\n");
+	if (ret) {
+		printk(KERN_ERR "fix_range_common: failed, killing current "
+		       "process\n");
 		force_sig(SIGKILL, current);
 	}
 }
@@ -268,17 +294,17 @@ int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
 	int updated = 0, err;
 
 	mm = &init_mm;
-	for(addr = start; addr < end;){
+	for (addr = start; addr < end;) {
 		pgd = pgd_offset(mm, addr);
-		if(!pgd_present(*pgd)){
+		if (!pgd_present(*pgd)) {
 			last = ADD_ROUND(addr, PGDIR_SIZE);
-			if(last > end)
+			if (last > end)
 				last = end;
-			if(pgd_newpage(*pgd)){
+			if (pgd_newpage(*pgd)) {
 				updated = 1;
 				err = os_unmap_memory((void *) addr,
 						      last - addr);
-				if(err < 0)
+				if (err < 0)
 					panic("munmap failed, errno = %d\n",
 					      -err);
 			}
@@ -287,15 +313,15 @@ int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
 		}
 
 		pud = pud_offset(pgd, addr);
-		if(!pud_present(*pud)){
+		if (!pud_present(*pud)) {
 			last = ADD_ROUND(addr, PUD_SIZE);
-			if(last > end)
+			if (last > end)
 				last = end;
-			if(pud_newpage(*pud)){
+			if (pud_newpage(*pud)) {
 				updated = 1;
 				err = os_unmap_memory((void *) addr,
 						      last - addr);
-				if(err < 0)
+				if (err < 0)
 					panic("munmap failed, errno = %d\n",
 					      -err);
 			}
@@ -304,15 +330,15 @@ int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
 		}
 
 		pmd = pmd_offset(pud, addr);
-		if(!pmd_present(*pmd)){
+		if (!pmd_present(*pmd)) {
 			last = ADD_ROUND(addr, PMD_SIZE);
-			if(last > end)
+			if (last > end)
 				last = end;
-			if(pmd_newpage(*pmd)){
+			if (pmd_newpage(*pmd)) {
 				updated = 1;
 				err = os_unmap_memory((void *) addr,
 						      last - addr);
-				if(err < 0)
+				if (err < 0)
 					panic("munmap failed, errno = %d\n",
 					      -err);
 			}
@@ -321,45 +347,110 @@ int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
 		}
 
 		pte = pte_offset_kernel(pmd, addr);
-		if(!pte_present(*pte) || pte_newpage(*pte)){
+		if (!pte_present(*pte) || pte_newpage(*pte)) {
 			updated = 1;
 			err = os_unmap_memory((void *) addr,
 					      PAGE_SIZE);
-			if(err < 0)
+			if (err < 0)
 				panic("munmap failed, errno = %d\n",
 				      -err);
-			if(pte_present(*pte))
+			if (pte_present(*pte))
 				map_memory(addr,
 					   pte_val(*pte) & PAGE_MASK,
 					   PAGE_SIZE, 1, 1, 1);
 		}
-		else if(pte_newprot(*pte)){
+		else if (pte_newprot(*pte)) {
 			updated = 1;
 			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
 		}
 		addr += PAGE_SIZE;
 	}
-	return(updated);
+	return updated;
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	struct mm_struct *mm = vma->vm_mm;
+	void *flush = NULL;
+	int r, w, x, prot, err = 0;
+	struct mm_id *mm_id;
+
+	address &= PAGE_MASK;
+	pgd = pgd_offset(mm, address);
+	if (!pgd_present(*pgd))
+		goto kill;
+
+	pud = pud_offset(pgd, address);
+	if (!pud_present(*pud))
+		goto kill;
+
+	pmd = pmd_offset(pud, address);
+	if (!pmd_present(*pmd))
+		goto kill;
+
+	pte = pte_offset_kernel(pmd, address);
+
+	r = pte_read(*pte);
+	w = pte_write(*pte);
+	x = pte_exec(*pte);
+	if (!pte_young(*pte)) {
+		r = 0;
+		w = 0;
+	} else if (!pte_dirty(*pte)) {
+		w = 0;
+	}
+
+	mm_id = &mm->context.id;
+	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
+		(x ? UM_PROT_EXEC : 0));
+	if (pte_newpage(*pte)) {
+		if (pte_present(*pte)) {
+			unsigned long long offset;
+			int fd;
+
+			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
+			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
+				  1, &flush);
+		}
+		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
+	}
+	else if (pte_newprot(*pte))
+		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);
+
+	if (err)
+		goto kill;
+
+	*pte = pte_mkuptodate(*pte);
+
+	return;
+
+kill:
+	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
+	force_sig(SIGKILL, current);
 }
 
 pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
 {
-	return(pgd_offset(mm, address));
+	return pgd_offset(mm, address);
 }
 
 pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
 {
-	return(pud_offset(pgd, address));
+	return pud_offset(pgd, address);
 }
 
 pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
 {
-	return(pmd_offset(pud, address));
+	return pmd_offset(pud, address);
 }
 
 pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
 {
-	return(pte_offset_kernel(pmd, address));
+	return pte_offset_kernel(pmd, address);
 }
 
 pte_t *addr_pte(struct task_struct *task, unsigned long addr)
@@ -368,7 +459,7 @@ pte_t *addr_pte(struct task_struct *task, unsigned long addr)
 	pud_t *pud = pud_offset(pgd, addr);
 	pmd_t *pmd = pmd_offset(pud, addr);
 
-	return(pte_offset_map(pmd, addr));
+	return pte_offset_map(pmd, addr);
 }
 
 void flush_tlb_all(void)
@@ -378,35 +469,58 @@ void flush_tlb_all(void)
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
-			 flush_tlb_kernel_range_common, start, end);
+	flush_tlb_kernel_range_common(start, end);
 }
 
 void flush_tlb_kernel_vm(void)
 {
-	CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
-		    flush_tlb_kernel_range_common(start_vm, end_vm));
+	flush_tlb_kernel_range_common(start_vm, end_vm);
 }
 
 void __flush_tlb_one(unsigned long addr)
 {
-	CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
+	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
+}
+
+static void fix_range(struct mm_struct *mm, unsigned long start_addr,
+		      unsigned long end_addr, int force)
+{
+	if (!proc_mm && (end_addr > STUB_START))
+		end_addr = STUB_START;
+
+	fix_range_common(mm, start_addr, end_addr, force);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end)
 {
-	CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
-			 end);
+	if (vma->vm_mm == NULL)
+		flush_tlb_kernel_range_common(start, end);
+	else fix_range(vma->vm_mm, start, end, 0);
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
+	unsigned long end;
+
+	/*
+	 * Don't bother flushing if this address space is about to be
+	 * destroyed.
+	 */
+	if (atomic_read(&mm->mm_users) == 0)
+		return;
+
+	end = proc_mm ? task_size : STUB_START;
+	fix_range(mm, 0, end, 0);
 }
 
 void force_flush_all(void)
 {
-	CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
-}
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma = mm->mmap;
 
+	while (vma != NULL) {
+		fix_range(mm, vma->vm_start, vma->vm_end, 1);
+		vma = vma->vm_next;
+	}
+}
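
The new code in this diff replaces the per-call function-pointer plumbing with a single struct host_vm_change that batches host mmap/munmap/mprotect requests, merging an operation into the previous one when the ranges are adjacent and flushing the buffer through do_ops() when it fills (and once more at the end with finished = 1). The stand-alone C program below is only a simplified sketch of that accumulate/merge/flush pattern, not the kernel code: struct op_batch, add_op() and flush_ops() are hypothetical stand-ins for struct host_vm_change, add_mmap()/add_munmap() and do_ops().

/*
 * Illustrative sketch of the batching pattern used by struct host_vm_change.
 * Build with: cc -std=c99 -Wall batch.c
 */
#include <stdio.h>
#include <stddef.h>

enum op_type { OP_NONE, OP_MAP, OP_UNMAP };

struct op {
	enum op_type type;
	unsigned long addr;
	unsigned long len;
};

struct op_batch {
	struct op ops[4];	/* small fixed buffer, like hvc->ops[] */
	size_t index;
};

/* Stand-in for do_ops(): "issue" every queued operation to the host. */
static void flush_ops(struct op_batch *b)
{
	for (size_t i = 0; i < b->index; i++)
		printf("%s addr=0x%lx len=0x%lx\n",
		       b->ops[i].type == OP_MAP ? "map" : "unmap",
		       b->ops[i].addr, b->ops[i].len);
	b->index = 0;
}

/*
 * Mirror of add_mmap()/add_munmap(): merge with the previous entry when the
 * new range extends it, otherwise queue a new entry, flushing first if the
 * buffer is full.
 */
static void add_op(struct op_batch *b, enum op_type type,
		   unsigned long addr, unsigned long len)
{
	if (b->index != 0) {
		struct op *last = &b->ops[b->index - 1];

		if (last->type == type && last->addr + last->len == addr) {
			last->len += len;
			return;
		}
	}

	if (b->index == sizeof(b->ops) / sizeof(b->ops[0]))
		flush_ops(b);

	b->ops[b->index++] = (struct op){ .type = type,
					  .addr = addr,
					  .len = len };
}

int main(void)
{
	struct op_batch b = { .index = 0 };

	/* Two adjacent pages coalesce into a single 0x2000-byte map. */
	add_op(&b, OP_MAP, 0x1000, 0x1000);
	add_op(&b, OP_MAP, 0x2000, 0x1000);
	add_op(&b, OP_UNMAP, 0x8000, 0x1000);
	flush_ops(&b);	/* final flush, like do_ops(&hvc, hvc.index, 1) */
	return 0;
}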