Diffstat (limited to 'arch/um/kernel/skas/uaccess.c')
-rw-r--r--  arch/um/kernel/skas/uaccess.c  140
1 file changed, 71 insertions(+), 69 deletions(-)
diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c
index 1d8b119f2d0e..e22c96993db3 100644
--- a/arch/um/kernel/skas/uaccess.c
+++ b/arch/um/kernel/skas/uaccess.c
@@ -3,128 +3,130 @@
  * Licensed under the GPL
  */
 
-#include "linux/err.h"
-#include "linux/highmem.h"
-#include "linux/mm.h"
-#include "asm/current.h"
-#include "asm/page.h"
-#include "asm/pgtable.h"
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <asm/current.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
 #include "kern_util.h"
 #include "os.h"
 
-extern void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
-			     pte_t *pte_out);
-
-static unsigned long maybe_map(unsigned long virt, int is_write)
+pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
 {
-	pte_t pte;
-	int err;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	if (mm == NULL)
+		return NULL;
+
+	pgd = pgd_offset(mm, addr);
+	if (!pgd_present(*pgd))
+		return NULL;
+
+	pud = pud_offset(pgd, addr);
+	if (!pud_present(*pud))
+		return NULL;
 
-	void *phys = um_virt_to_phys(current, virt, &pte);
-	int dummy_code;
+	pmd = pmd_offset(pud, addr);
+	if (!pmd_present(*pmd))
+		return NULL;
+
+	return pte_offset_kernel(pmd, addr);
+}
+
+static pte_t *maybe_map(unsigned long virt, int is_write)
+{
+	pte_t *pte = virt_to_pte(current->mm, virt);
+	int err, dummy_code;
 
-	if (IS_ERR(phys) || (is_write && !pte_write(pte))) {
+	if ((pte == NULL) || !pte_present(*pte) ||
+	    (is_write && !pte_write(*pte))) {
 		err = handle_page_fault(virt, 0, is_write, 1, &dummy_code);
 		if (err)
-			return -1UL;
-		phys = um_virt_to_phys(current, virt, NULL);
+			return NULL;
+		pte = virt_to_pte(current->mm, virt);
 	}
-	if (IS_ERR(phys))
-		phys = (void *) -1;
+	if (!pte_present(*pte))
+		pte = NULL;
 
-	return (unsigned long) phys;
+	return pte;
 }
 
 static int do_op_one_page(unsigned long addr, int len, int is_write,
 		 int (*op)(unsigned long addr, int len, void *arg), void *arg)
 {
+	jmp_buf buf;
 	struct page *page;
-	int n;
+	pte_t *pte;
+	int n, faulted;
 
-	addr = maybe_map(addr, is_write);
-	if (addr == -1UL)
+	pte = maybe_map(addr, is_write);
+	if (pte == NULL)
 		return -1;
 
-	page = phys_to_page(addr);
+	page = pte_page(*pte);
 	addr = (unsigned long) kmap_atomic(page, KM_UML_USERCOPY) +
 		(addr & ~PAGE_MASK);
 
-	n = (*op)(addr, len, arg);
+	current->thread.fault_catcher = &buf;
+
+	faulted = UML_SETJMP(&buf);
+	if (faulted == 0)
+		n = (*op)(addr, len, arg);
+	else
+		n = -1;
+
+	current->thread.fault_catcher = NULL;
 
 	kunmap_atomic(page, KM_UML_USERCOPY);
 
 	return n;
 }
 
-static void do_buffer_op(void *jmpbuf, void *arg_ptr)
+static int buffer_op(unsigned long addr, int len, int is_write,
+		     int (*op)(unsigned long, int, void *), void *arg)
 {
-	va_list args;
-	unsigned long addr;
-	int len, is_write, size, remain, n;
-	int (*op)(unsigned long, int, void *);
-	void *arg;
-	int *res;
-
-	va_copy(args, *(va_list *)arg_ptr);
-	addr = va_arg(args, unsigned long);
-	len = va_arg(args, int);
-	is_write = va_arg(args, int);
-	op = va_arg(args, void *);
-	arg = va_arg(args, void *);
-	res = va_arg(args, int *);
-	va_end(args);
+	int size, remain, n;
+
 	size = min(PAGE_ALIGN(addr) - addr, (unsigned long) len);
 	remain = len;
 
-	current->thread.fault_catcher = jmpbuf;
 	n = do_op_one_page(addr, size, is_write, op, arg);
 	if (n != 0) {
-		*res = (n < 0 ? remain : 0);
+		remain = (n < 0 ? remain : 0);
 		goto out;
 	}
 
 	addr += size;
 	remain -= size;
-	if (remain == 0) {
-		*res = 0;
+	if (remain == 0)
 		goto out;
-	}
 
-	while(addr < ((addr + remain) & PAGE_MASK)) {
+	while (addr < ((addr + remain) & PAGE_MASK)) {
 		n = do_op_one_page(addr, PAGE_SIZE, is_write, op, arg);
 		if (n != 0) {
-			*res = (n < 0 ? remain : 0);
+			remain = (n < 0 ? remain : 0);
 			goto out;
 		}
 
 		addr += PAGE_SIZE;
 		remain -= PAGE_SIZE;
 	}
-	if (remain == 0) {
-		*res = 0;
+	if (remain == 0)
 		goto out;
-	}
 
 	n = do_op_one_page(addr, remain, is_write, op, arg);
-	if (n != 0)
-		*res = (n < 0 ? remain : 0);
-	else *res = 0;
- out:
-	current->thread.fault_catcher = NULL;
-}
-
-static int buffer_op(unsigned long addr, int len, int is_write,
-		     int (*op)(unsigned long addr, int len, void *arg),
-		     void *arg)
-{
-	int faulted, res;
-
-	faulted = setjmp_wrapper(do_buffer_op, addr, len, is_write, op, arg,
-				 &res);
-	if (!faulted)
-		return res;
+	if (n != 0) {
+		remain = (n < 0 ? remain : 0);
+		goto out;
+	}
 
-	return addr + len - (unsigned long) current->thread.fault_addr;
+	return 0;
+ out:
+	return remain;
 }
 
 static int copy_chunk_from_user(unsigned long from, int len, void *arg)
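The hunk continues with copy_chunk_from_user(); for orientation, a chunk callback for the reworked buffer_op() simply copies len bytes at the kmap_atomic()-mapped source address and advances a destination cursor passed through arg. A minimal sketch of that calling pattern, assuming only the buffer_op() signature shown above; the names read_chunk and copy_from_user_sketch are illustrative and not part of the patch:

/*
 * Illustrative only: a chunk callback in the style buffer_op() drives.
 * "from" is the kernel mapping of the current chunk; "arg" points at a
 * cursor into the destination buffer.
 */
static int read_chunk(unsigned long from, int len, void *arg)
{
	unsigned long *to_ptr = arg, to = *to_ptr;

	memcpy((void *) to, (void *) from, len);
	*to_ptr += len;
	return 0;
}

static int copy_from_user_sketch(void *to, const void __user *from, int n)
{
	unsigned long dest = (unsigned long) to;

	/*
	 * is_write == 0: the userspace side is only read.  buffer_op()
	 * returns 0 on success or the number of bytes left undone.
	 */
	return buffer_op((unsigned long) from, n, 0, read_chunk, &dest);
}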