author     Anton Ivanov <anton.ivanov@cambridgegreys.com>   2018-12-05 07:37:39 -0500
committer  Richard Weinberger <richard@nod.at>              2018-12-27 16:48:34 -0500
commit     a9c52c2a2881ec69343a49ee32b2f3965e74ca98 (patch)
tree       d3e195b24a2bbeb845609cc8e99282257fcd56eb /arch/um
parent     747b254ca2649d0c206385c7902fb8ac97a2b0b4 (diff)
um: Optimize TLB operations v2
Make the code that merges mmap/munmap/mprotect operations in tlb.c
common to userspace and kernel. Kernel TLB operations can now be
merged as well.
Signed-off-by: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
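
The heart of the change is the coalescing already done by add_mmap()/add_munmap()/add_mprotect(): an operation that continues the previous one of the same type is folded into it, so one host syscall can cover a whole merged range instead of one per page. Below is a minimal standalone sketch of that idea; vm_op, MAX_OPS and n_ops are simplified stand-ins for illustration, not the kernel's types.

/*
 * Standalone sketch of the coalescing done by add_munmap() in tlb.c.
 * Simplified: the real queue also flushes via do_ops() when full.
 */
#include <stdio.h>

enum op_type { NONE, MUNMAP };

struct vm_op {			/* hypothetical stand-in for host_vm_op */
	enum op_type type;
	unsigned long addr;
	unsigned long len;
};

#define MAX_OPS 8

static struct vm_op ops[MAX_OPS];
static int n_ops;

/* Queue a munmap; extend the previous op when the ranges are adjacent. */
static void add_munmap(unsigned long addr, unsigned long len)
{
	if (n_ops != 0) {
		struct vm_op *last = &ops[n_ops - 1];

		if (last->type == MUNMAP && last->addr + last->len == addr) {
			last->len += len;	/* merged: no new entry */
			return;
		}
	}
	ops[n_ops++] = (struct vm_op) { MUNMAP, addr, len };
}

int main(void)
{
	add_munmap(0x1000, 0x1000);
	add_munmap(0x2000, 0x1000);	/* adjacent: folded into the first */
	add_munmap(0x8000, 0x1000);	/* gap: new entry */

	for (int i = 0; i < n_ops; i++)	/* prints two entries, not three */
		printf("munmap(0x%lx, 0x%lx)\n", ops[i].addr, ops[i].len);
	return 0;
}

With adjacent ranges folded this way, the later do_ops() pass issues far fewer host calls on bulk flushes.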
Diffstat (limited to 'arch/um')
 -rw-r--r--  arch/um/kernel/tlb.c | 85
 1 file changed, 57 insertions(+), 28 deletions(-)
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 37508b190106..b7f7a60a0928 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -37,17 +37,19 @@ struct host_vm_change {
 			} mprotect;
 		} u;
 	} ops[1];
+	int userspace;
 	int index;
-	struct mm_id *id;
+	struct mm_struct *mm;
 	void *data;
 	int force;
 };
 
-#define INIT_HVC(mm, force) \
+#define INIT_HVC(mm, force, userspace) \
 	((struct host_vm_change) \
 	 { .ops = { { .type = NONE } }, \
-	   .id = &mm->context.id, \
+	   .mm = mm, \
 	   .data = NULL, \
+	   .userspace = userspace, \
 	   .index = 0, \
 	   .force = force })
 
@@ -68,18 +70,40 @@ static int do_ops(struct host_vm_change *hvc, int end,
 		op = &hvc->ops[i];
 		switch (op->type) {
 		case MMAP:
-			ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
-				  op->u.mmap.prot, op->u.mmap.fd,
-				  op->u.mmap.offset, finished, &hvc->data);
+			if (hvc->userspace)
+				ret = map(&hvc->mm->context.id, op->u.mmap.addr,
+					  op->u.mmap.len, op->u.mmap.prot,
+					  op->u.mmap.fd,
+					  op->u.mmap.offset, finished,
+					  &hvc->data);
+			else
+				map_memory(op->u.mmap.addr, op->u.mmap.offset,
+					   op->u.mmap.len, 1, 1, 1);
 			break;
 		case MUNMAP:
-			ret = unmap(hvc->id, op->u.munmap.addr,
-				    op->u.munmap.len, finished, &hvc->data);
+			if (hvc->userspace)
+				ret = unmap(&hvc->mm->context.id,
+					    op->u.munmap.addr,
+					    op->u.munmap.len, finished,
+					    &hvc->data);
+			else
+				ret = os_unmap_memory(
+					(void *) op->u.munmap.addr,
+					op->u.munmap.len);
+
 			break;
 		case MPROTECT:
-			ret = protect(hvc->id, op->u.mprotect.addr,
-				      op->u.mprotect.len, op->u.mprotect.prot,
-				      finished, &hvc->data);
+			if (hvc->userspace)
+				ret = protect(&hvc->mm->context.id,
+					      op->u.mprotect.addr,
+					      op->u.mprotect.len,
+					      op->u.mprotect.prot,
+					      finished, &hvc->data);
+			else
+				ret = os_protect_memory(
+					(void *) op->u.mprotect.addr,
+					op->u.mprotect.len,
+					1, 1, 1);
 			break;
 		default:
 			printk(KERN_ERR "Unknown op type %d in do_ops\n",
@@ -100,9 +124,12 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
 {
 	__u64 offset;
 	struct host_vm_op *last;
-	int fd, ret = 0;
+	int fd = -1, ret = 0;
 
-	fd = phys_mapping(phys, &offset);
+	if (hvc->userspace)
+		fd = phys_mapping(phys, &offset);
+	else
+		offset = phys;
 	if (hvc->index != 0) {
 		last = &hvc->ops[hvc->index - 1];
 		if ((last->type == MMAP) &&
@@ -277,9 +304,9 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
 	pgd_t *pgd;
 	struct host_vm_change hvc;
 	unsigned long addr = start_addr, next;
-	int ret = 0;
+	int ret = 0, userspace = 1;
 
-	hvc = INIT_HVC(mm, force);
+	hvc = INIT_HVC(mm, force, userspace);
 	pgd = pgd_offset(mm, addr);
 	do {
 		next = pgd_addr_end(addr, end_addr);
@@ -314,9 +341,11 @@ static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
 	pmd_t *pmd;
 	pte_t *pte;
 	unsigned long addr, last;
-	int updated = 0, err;
+	int updated = 0, err = 0, force = 0, userspace = 0;
+	struct host_vm_change hvc;
 
 	mm = &init_mm;
+	hvc = INIT_HVC(mm, force, userspace);
 	for (addr = start; addr < end;) {
 		pgd = pgd_offset(mm, addr);
 		if (!pgd_present(*pgd)) {
@@ -325,8 +354,7 @@ static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
 				last = end;
 			if (pgd_newpage(*pgd)) {
 				updated = 1;
-				err = os_unmap_memory((void *) addr,
-						      last - addr);
+				err = add_munmap(addr, last - addr, &hvc);
 				if (err < 0)
 					panic("munmap failed, errno = %d\n",
 					      -err);
@@ -342,8 +370,7 @@ static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
 				last = end;
 			if (pud_newpage(*pud)) {
 				updated = 1;
-				err = os_unmap_memory((void *) addr,
-						      last - addr);
+				err = add_munmap(addr, last - addr, &hvc);
 				if (err < 0)
 					panic("munmap failed, errno = %d\n",
 					      -err);
@@ -359,8 +386,7 @@ static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
 				last = end;
 			if (pmd_newpage(*pmd)) {
 				updated = 1;
-				err = os_unmap_memory((void *) addr,
-						      last - addr);
+				err = add_munmap(addr, last - addr, &hvc);
 				if (err < 0)
 					panic("munmap failed, errno = %d\n",
 					      -err);
@@ -372,22 +398,25 @@ static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
 		pte = pte_offset_kernel(pmd, addr);
 		if (!pte_present(*pte) || pte_newpage(*pte)) {
 			updated = 1;
-			err = os_unmap_memory((void *) addr,
-					      PAGE_SIZE);
+			err = add_munmap(addr, PAGE_SIZE, &hvc);
 			if (err < 0)
 				panic("munmap failed, errno = %d\n",
 				      -err);
 			if (pte_present(*pte))
-				map_memory(addr,
-					   pte_val(*pte) & PAGE_MASK,
-					   PAGE_SIZE, 1, 1, 1);
+				err = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
+					       PAGE_SIZE, 0, &hvc);
 		}
 		else if (pte_newprot(*pte)) {
 			updated = 1;
-			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
+			err = add_mprotect(addr, PAGE_SIZE, 0, &hvc);
 		}
 		addr += PAGE_SIZE;
 	}
+	if (!err)
+		err = do_ops(&hvc, hvc.index, 1);
+
+	if (err < 0)
+		panic("flush_tlb_kernel failed, errno = %d\n", err);
 	return updated;
 }
 
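
After this patch both flush paths share one shape: walk the page tables, queue operations into a struct host_vm_change, and drain the queue once through do_ops(), whose userspace flag picks the back end (map()/unmap()/protect() on the userspace mm; map_memory()/os_unmap_memory()/os_protect_memory() for the kernel). A standalone sketch of that dispatch, with hypothetical print-only back ends in place of the real host calls:

/*
 * Sketch of the two-backend dispatch added to do_ops().  The back ends
 * here only print; in tlb.c they are the real host mapping calls.
 */
#include <stdio.h>

struct op { unsigned long addr, len; };

static int unmap_userspace(const struct op *op)
{
	printf("userspace unmap: 0x%lx +0x%lx\n", op->addr, op->len);
	return 0;
}

static int unmap_kernel(const struct op *op)
{
	printf("kernel unmap:    0x%lx +0x%lx\n", op->addr, op->len);
	return 0;
}

/*
 * One queue-draining loop serves both address spaces; the userspace
 * flag selects the back end, as hvc->userspace does in do_ops().
 */
static int do_ops(const struct op *ops, int end, int userspace)
{
	int ret = 0;

	for (int i = 0; i < end && !ret; i++)
		ret = userspace ? unmap_userspace(&ops[i])
				: unmap_kernel(&ops[i]);
	return ret;
}

int main(void)
{
	const struct op ops[] = { { 0x1000, 0x2000 }, { 0x8000, 0x1000 } };

	do_ops(ops, 2, 1);	/* fix_range_common(): userspace = 1 */
	do_ops(ops, 2, 0);	/* flush_tlb_kernel_range_common(): userspace = 0 */
	return 0;
}

The page-table walk only records intent; all host interaction is concentrated in do_ops(), which is what lets kernel-range flushes benefit from the same merging that userspace flushes already had.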