author     Jeff Dike <jdike@addtoit.com>                          2007-10-16 04:27:12 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-10-16 12:43:06 -0400
commit     1466abf2d03207b42980ddf8cde1fba17b7cd301 (patch)
tree       402f8a77f5b08c3a9130e341df402fb6c02b5c02
parent     512b6fb1c14d4c34f23a3419b0789ad01914a899 (diff)
uml: clean up tlb flush path
Tidy the tlb flushing code. With tt mode gone, there is no reason to keep the capability of being called directly from do_mmap, do_mprotect, and do_munmap rather than calling a function pointer that it is given.

There was a large amount of data that was passed from function to function, used only at the lowest level, without being changed. This is now encapsulated in a structure which is initialized at the top layer and passed down. This simplifies the code, reduces the amount of code needed to pass the parameters around, and saves stack space.

A somewhat more subtle change is the meaning of the current operation index. It used to start at -1 and was pre-incremented when adding an operation. It now starts at 0 and is post-incremented, with corresponding +/- 1 adjustments on comparisons.

In addition, tlb.h contained a couple of declarations which had no users outside of tlb.c, so they could be moved or deleted.

Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
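The index convention change described above can be sketched in isolation. This is an illustrative userspace fragment, not code from the patch; struct op, ops[], flush_ops() and the ARRAY_SIZE() definition below are stand-ins for the real host_vm_op machinery:

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct op { int type; };
static struct op ops[1];

static void flush_ops(struct op *queue, int count)
{
        /* apply 'count' queued operations, then the queue may be reused */
}

/* Old convention: index starts at -1 and is pre-incremented, so the last
 * used slot is ops[index] and "full" means index == ARRAY_SIZE(ops) - 1. */
static void add_op_old(int *index, struct op new_op)
{
        if (*index == ARRAY_SIZE(ops) - 1) {
                flush_ops(ops, *index + 1);     /* flush a full queue */
                *index = -1;
        }
        ops[++*index] = new_op;
}

/* New convention: index starts at 0 and is post-incremented, so the last
 * used slot is ops[index - 1] and "full" means index == ARRAY_SIZE(ops). */
static void add_op_new(int *index, struct op new_op)
{
        if (*index == ARRAY_SIZE(ops)) {
                flush_ops(ops, *index);         /* flush a full queue */
                *index = 0;
        }
        ops[(*index)++] = new_op;
}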
-rw-r--r--  arch/um/include/tlb.h  |  27
-rw-r--r--  arch/um/kernel/tlb.c   | 258
2 files changed, 127 insertions(+), 158 deletions(-)
diff --git a/arch/um/include/tlb.h b/arch/um/include/tlb.h
index 46cf0057a1c5..ecd2265b301b 100644
--- a/arch/um/include/tlb.h
+++ b/arch/um/include/tlb.h
@@ -8,34 +8,7 @@
 
 #include "um_mmu.h"
 
-struct host_vm_op {
-        enum { NONE, MMAP, MUNMAP, MPROTECT } type;
-        union {
-                struct {
-                        unsigned long addr;
-                        unsigned long len;
-                        unsigned int prot;
-                        int fd;
-                        __u64 offset;
-                } mmap;
-                struct {
-                        unsigned long addr;
-                        unsigned long len;
-                } munmap;
-                struct {
-                        unsigned long addr;
-                        unsigned long len;
-                        unsigned int prot;
-                } mprotect;
-        } u;
-};
-
 extern void force_flush_all(void);
-extern void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
-                             unsigned long end_addr, int force,
-                             int (*do_ops)(struct mm_context *,
-                                           struct host_vm_op *, int, int,
-                                           void **));
 extern int flush_tlb_kernel_range_common(unsigned long start,
                                          unsigned long end);
 
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 081baefb4c0d..942f20ea888a 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -12,19 +12,85 @@
 #include "skas.h"
 #include "tlb.h"
 
+struct host_vm_change {
+        struct host_vm_op {
+                enum { NONE, MMAP, MUNMAP, MPROTECT } type;
+                union {
+                        struct {
+                                unsigned long addr;
+                                unsigned long len;
+                                unsigned int prot;
+                                int fd;
+                                __u64 offset;
+                        } mmap;
+                        struct {
+                                unsigned long addr;
+                                unsigned long len;
+                        } munmap;
+                        struct {
+                                unsigned long addr;
+                                unsigned long len;
+                                unsigned int prot;
+                        } mprotect;
+                } u;
+        } ops[1];
+        int index;
+        struct mm_id *id;
+        void *data;
+        int force;
+};
+
+#define INIT_HVC(mm, force) \
+        ((struct host_vm_change) \
+         { .ops = { { .type = NONE } }, \
+           .id = &mm->context.id, \
+           .data = NULL, \
+           .index = 0, \
+           .force = force })
+
+static int do_ops(struct host_vm_change *hvc, int end,
+                  int finished)
+{
+        struct host_vm_op *op;
+        int i, ret = 0;
+
+        for (i = 0; i < end && !ret; i++) {
+                op = &hvc->ops[i];
+                switch(op->type) {
+                case MMAP:
+                        ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
+                                  op->u.mmap.prot, op->u.mmap.fd,
+                                  op->u.mmap.offset, finished, &hvc->data);
+                        break;
+                case MUNMAP:
+                        ret = unmap(hvc->id, op->u.munmap.addr,
+                                    op->u.munmap.len, finished, &hvc->data);
+                        break;
+                case MPROTECT:
+                        ret = protect(hvc->id, op->u.mprotect.addr,
+                                      op->u.mprotect.len, op->u.mprotect.prot,
+                                      finished, &hvc->data);
+                        break;
+                default:
+                        printk(KERN_ERR "Unknown op type %d in do_ops\n",
+                               op->type);
+                        break;
+                }
+        }
+
+        return ret;
+}
+
 static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
-                    unsigned int prot, struct host_vm_op *ops, int *index,
-                    int last_filled, struct mm_context *mmu, void **flush,
-                    int (*do_ops)(struct mm_context *, struct host_vm_op *,
-                                  int, int, void **))
+                    unsigned int prot, struct host_vm_change *hvc)
 {
         __u64 offset;
         struct host_vm_op *last;
         int fd, ret = 0;
 
         fd = phys_mapping(phys, &offset);
-        if (*index != -1) {
-                last = &ops[*index];
+        if (hvc->index != 0) {
+                last = &hvc->ops[hvc->index - 1];
                 if ((last->type == MMAP) &&
                     (last->u.mmap.addr + last->u.mmap.len == virt) &&
                     (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
@@ -34,33 +100,30 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
                 }
         }
 
-        if (*index == last_filled) {
-                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
-                *index = -1;
+        if (hvc->index == ARRAY_SIZE(hvc->ops)) {
+                ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
+                hvc->index = 0;
         }
 
-        ops[++*index] = ((struct host_vm_op) { .type = MMAP,
-                                               .u = { .mmap = {
-                                                       .addr = virt,
-                                                       .len = len,
-                                                       .prot = prot,
-                                                       .fd = fd,
-                                                       .offset = offset }
-                           } });
+        hvc->ops[hvc->index++] = ((struct host_vm_op)
+                                  { .type = MMAP,
+                                    .u = { .mmap = { .addr = virt,
+                                                     .len = len,
+                                                     .prot = prot,
+                                                     .fd = fd,
+                                                     .offset = offset }
+                           } });
         return ret;
 }
 
 static int add_munmap(unsigned long addr, unsigned long len,
-                      struct host_vm_op *ops, int *index, int last_filled,
-                      struct mm_context *mmu, void **flush,
-                      int (*do_ops)(struct mm_context *, struct host_vm_op *,
-                                    int, int, void **))
+                      struct host_vm_change *hvc)
 {
         struct host_vm_op *last;
         int ret = 0;
 
-        if (*index != -1) {
-                last = &ops[*index];
+        if (hvc->index != 0) {
+                last = &hvc->ops[hvc->index - 1];
                 if ((last->type == MUNMAP) &&
                     (last->u.munmap.addr + last->u.mmap.len == addr)) {
                         last->u.munmap.len += len;
@@ -68,29 +131,26 @@ static int add_munmap(unsigned long addr, unsigned long len,
                 }
         }
 
-        if (*index == last_filled) {
-                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
-                *index = -1;
+        if (hvc->index == ARRAY_SIZE(hvc->ops)) {
+                ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
+                hvc->index = 0;
         }
 
-        ops[++*index] = ((struct host_vm_op) { .type = MUNMAP,
-                                               .u = { .munmap = {
-                                                       .addr = addr,
-                                                       .len = len } } });
+        hvc->ops[hvc->index++] = ((struct host_vm_op)
+                                  { .type = MUNMAP,
+                                    .u = { .munmap = { .addr = addr,
+                                                       .len = len } } });
         return ret;
 }
 
 static int add_mprotect(unsigned long addr, unsigned long len,
-                        unsigned int prot, struct host_vm_op *ops, int *index,
-                        int last_filled, struct mm_context *mmu, void **flush,
-                        int (*do_ops)(struct mm_context *, struct host_vm_op *,
-                                      int, int, void **))
+                        unsigned int prot, struct host_vm_change *hvc)
 {
         struct host_vm_op *last;
         int ret = 0;
 
-        if (*index != -1) {
-                last = &ops[*index];
+        if (hvc->index != 0) {
+                last = &hvc->ops[hvc->index - 1];
                 if ((last->type == MPROTECT) &&
                     (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
                     (last->u.mprotect.prot == prot)) {
@@ -99,28 +159,24 @@ static int add_mprotect(unsigned long addr, unsigned long len,
                 }
         }
 
-        if (*index == last_filled) {
-                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
-                *index = -1;
+        if (hvc->index == ARRAY_SIZE(hvc->ops)) {
+                ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
+                hvc->index = 0;
         }
 
-        ops[++*index] = ((struct host_vm_op) { .type = MPROTECT,
-                                               .u = { .mprotect = {
-                                                       .addr = addr,
-                                                       .len = len,
-                                                       .prot = prot } } });
+        hvc->ops[hvc->index++] = ((struct host_vm_op)
+                                  { .type = MPROTECT,
+                                    .u = { .mprotect = { .addr = addr,
+                                                         .len = len,
+                                                         .prot = prot } } });
         return ret;
 }
 
 #define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
 
 static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
-                                   unsigned long end, struct host_vm_op *ops,
-                                   int last_op, int *op_index, int force,
-                                   struct mm_context *mmu, void **flush,
-                                   int (*do_ops)(struct mm_context *,
-                                                 struct host_vm_op *, int, int,
-                                                 void **))
+                                   unsigned long end,
+                                   struct host_vm_change *hvc)
 {
         pte_t *pte;
         int r, w, x, prot, ret = 0;
@@ -138,29 +194,22 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
                 }
                 prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
                         (x ? UM_PROT_EXEC : 0));
-                if (force || pte_newpage(*pte)) {
+                if (hvc->force || pte_newpage(*pte)) {
                         if (pte_present(*pte))
                                 ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
-                                               PAGE_SIZE, prot, ops, op_index,
-                                               last_op, mmu, flush, do_ops);
-                        else ret = add_munmap(addr, PAGE_SIZE, ops, op_index,
-                                              last_op, mmu, flush, do_ops);
+                                               PAGE_SIZE, prot, hvc);
+                        else ret = add_munmap(addr, PAGE_SIZE, hvc);
                 }
                 else if (pte_newprot(*pte))
-                        ret = add_mprotect(addr, PAGE_SIZE, prot, ops, op_index,
-                                           last_op, mmu, flush, do_ops);
+                        ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
                 *pte = pte_mkuptodate(*pte);
         } while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
         return ret;
 }
 
 static inline int update_pmd_range(pud_t *pud, unsigned long addr,
-                                   unsigned long end, struct host_vm_op *ops,
-                                   int last_op, int *op_index, int force,
-                                   struct mm_context *mmu, void **flush,
-                                   int (*do_ops)(struct mm_context *,
-                                                 struct host_vm_op *, int, int,
-                                                 void **))
+                                   unsigned long end,
+                                   struct host_vm_change *hvc)
 {
         pmd_t *pmd;
         unsigned long next;
@@ -170,27 +219,19 @@ static inline int update_pmd_range(pud_t *pud, unsigned long addr,
         do {
                 next = pmd_addr_end(addr, end);
                 if (!pmd_present(*pmd)) {
-                        if (force || pmd_newpage(*pmd)) {
-                                ret = add_munmap(addr, next - addr, ops,
-                                                 op_index, last_op, mmu,
-                                                 flush, do_ops);
+                        if (hvc->force || pmd_newpage(*pmd)) {
+                                ret = add_munmap(addr, next - addr, hvc);
                                 pmd_mkuptodate(*pmd);
                         }
                 }
-                else ret = update_pte_range(pmd, addr, next, ops, last_op,
-                                            op_index, force, mmu, flush,
-                                            do_ops);
+                else ret = update_pte_range(pmd, addr, next, hvc);
         } while (pmd++, addr = next, ((addr != end) && !ret));
         return ret;
 }
 
 static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
-                                   unsigned long end, struct host_vm_op *ops,
-                                   int last_op, int *op_index, int force,
-                                   struct mm_context *mmu, void **flush,
-                                   int (*do_ops)(struct mm_context *,
-                                                 struct host_vm_op *, int, int,
-                                                 void **))
+                                   unsigned long end,
+                                   struct host_vm_change *hvc)
 {
         pud_t *pud;
         unsigned long next;
@@ -200,51 +241,39 @@ static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
         do {
                 next = pud_addr_end(addr, end);
                 if (!pud_present(*pud)) {
-                        if (force || pud_newpage(*pud)) {
-                                ret = add_munmap(addr, next - addr, ops,
-                                                 op_index, last_op, mmu,
-                                                 flush, do_ops);
+                        if (hvc->force || pud_newpage(*pud)) {
+                                ret = add_munmap(addr, next - addr, hvc);
                                 pud_mkuptodate(*pud);
                         }
                 }
-                else ret = update_pmd_range(pud, addr, next, ops, last_op,
-                                            op_index, force, mmu, flush,
-                                            do_ops);
+                else ret = update_pmd_range(pud, addr, next, hvc);
         } while (pud++, addr = next, ((addr != end) && !ret));
         return ret;
 }
 
 void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
-                      unsigned long end_addr, int force,
-                      int (*do_ops)(struct mm_context *, struct host_vm_op *,
-                                    int, int, void **))
+                      unsigned long end_addr, int force)
 {
         pgd_t *pgd;
-        struct mm_context *mmu = &mm->context;
-        struct host_vm_op ops[1];
+        struct host_vm_change hvc;
         unsigned long addr = start_addr, next;
-        int ret = 0, last_op = ARRAY_SIZE(ops) - 1, op_index = -1;
-        void *flush = NULL;
+        int ret = 0;
 
-        ops[0].type = NONE;
+        hvc = INIT_HVC(mm, force);
         pgd = pgd_offset(mm, addr);
         do {
                 next = pgd_addr_end(addr, end_addr);
                 if (!pgd_present(*pgd)) {
                         if (force || pgd_newpage(*pgd)) {
-                                ret = add_munmap(addr, next - addr, ops,
-                                                 &op_index, last_op, mmu,
-                                                 &flush, do_ops);
+                                ret = add_munmap(addr, next - addr, &hvc);
                                 pgd_mkuptodate(*pgd);
                         }
                 }
-                else ret = update_pud_range(pgd, addr, next, ops, last_op,
-                                            &op_index, force, mmu, &flush,
-                                            do_ops);
+                else ret = update_pud_range(pgd, addr, next, &hvc);
         } while (pgd++, addr = next, ((addr != end_addr) && !ret));
 
         if (!ret)
-                ret = (*do_ops)(mmu, ops, op_index, 1, &flush);
+                ret = do_ops(&hvc, hvc.index, 1);
 
         /* This is not an else because ret is modified above */
         if (ret) {
@@ -453,46 +482,13 @@ void __flush_tlb_one(unsigned long addr)
         flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
 }
 
-static int do_ops(struct mm_context *mmu, struct host_vm_op *ops, int last,
-                  int finished, void **flush)
-{
-        struct host_vm_op *op;
-        int i, ret = 0;
-
-        for (i = 0; i <= last && !ret; i++) {
-                op = &ops[i];
-                switch(op->type) {
-                case MMAP:
-                        ret = map(&mmu->id, op->u.mmap.addr, op->u.mmap.len,
-                                  op->u.mmap.prot, op->u.mmap.fd,
-                                  op->u.mmap.offset, finished, flush);
-                        break;
-                case MUNMAP:
-                        ret = unmap(&mmu->id, op->u.munmap.addr,
-                                    op->u.munmap.len, finished, flush);
-                        break;
-                case MPROTECT:
-                        ret = protect(&mmu->id, op->u.mprotect.addr,
-                                      op->u.mprotect.len, op->u.mprotect.prot,
-                                      finished, flush);
-                        break;
-                default:
-                        printk(KERN_ERR "Unknown op type %d in do_ops\n",
-                               op->type);
-                        break;
-                }
-        }
-
-        return ret;
-}
-
 static void fix_range(struct mm_struct *mm, unsigned long start_addr,
                       unsigned long end_addr, int force)
 {
         if (!proc_mm && (end_addr > CONFIG_STUB_START))
                 end_addr = CONFIG_STUB_START;
 
-        fix_range_common(mm, start_addr, end_addr, force, do_ops);
+        fix_range_common(mm, start_addr, end_addr, force);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,