path: root/arch/um/kernel
author     Jeff Dike <jdike@addtoit.com>  2008-02-05 01:31:07 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-05 12:44:29 -0500
commit     8efa3c9d545ab6adc5c5e001cbd7aee60909b3da (patch)
tree       1ec5750233c242168f17a9669ff4ba4aa113a2e6 /arch/um/kernel
parent     0b4e273fb83bce5dd8e166a4defb16ebdd215abf (diff)
uml: eliminate setjmp_wrapper
setjmp_wrapper existed to provide setjmp to kernel code when UML used libc's
setjmp and longjmp.  Now that UML has its own implementation, this isn't
needed and kernel code can invoke setjmp directly.

do_buffer_op is massively cleaned up since it is no longer a callback from
setjmp_wrapper and given a va_list from which it must extract its arguments.

The actual setjmp is moved from buffer_op to do_op_one_page because the copy
operation is inside an atomic section (kmap_atomic to kunmap_atomic) and it
shouldn't be longjmp-ed out of.

Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
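The fault-catcher setup this patch moves into do_op_one_page can be pictured with plain POSIX sigsetjmp/siglongjmp. The sketch below is a standalone userspace analogy, not UML code: segv_handler, do_one_copy, and the test addresses are invented for illustration, with sigsetjmp standing in for UML_SETJMP and the SIGSEGV handler standing in for UML's fault path that long-jumps to current->thread.fault_catcher.

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

static sigjmp_buf fault_catcher;

static void segv_handler(int sig)
{
        (void) sig;
        siglongjmp(fault_catcher, 1);   /* jump back to the armed sigsetjmp() */
}

/* Plays the role of do_op_one_page(): 0 on success, -1 if the copy faulted. */
static int do_one_copy(void *dst, const void *src, size_t len)
{
        int n;

        if (sigsetjmp(fault_catcher, 1) == 0) {
                memcpy(dst, src, len);  /* the "op" that may fault */
                n = 0;
        } else {
                n = -1;                 /* arrived here via segv_handler() */
        }

        /* cleanup shared by both paths (the kunmap_atomic() analogue) */
        return n;
}

int main(void)
{
        char buf[8];
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = segv_handler;
        sigaction(SIGSEGV, &sa, NULL);

        printf("good copy -> %d\n", do_one_copy(buf, "hello", 6));
        printf("bad copy  -> %d\n", do_one_copy(buf, (const void *) 8, 6));
        return 0;
}

The point mirrored here is the one the commit message makes: the jump buffer is armed around only the copy itself, so the cleanup that follows it (kunmap_atomic in the kernel, the common return path in this sketch) still runs on the fault path instead of being longjmp-ed past.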
Diffstat (limited to 'arch/um/kernel')
-rw-r--r--  arch/um/kernel/skas/uaccess.c | 73
1 file changed, 27 insertions(+), 46 deletions(-)
diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c
index 05b41dbc1dd..e22c96993db 100644
--- a/arch/um/kernel/skas/uaccess.c
+++ b/arch/um/kernel/skas/uaccess.c
@@ -58,9 +58,10 @@ static pte_t *maybe_map(unsigned long virt, int is_write)
 static int do_op_one_page(unsigned long addr, int len, int is_write,
                  int (*op)(unsigned long addr, int len, void *arg), void *arg)
 {
+        jmp_buf buf;
         struct page *page;
         pte_t *pte;
-        int n;
+        int n, faulted;
 
         pte = maybe_map(addr, is_write);
         if (pte == NULL)
@@ -70,82 +71,62 @@ static int do_op_one_page(unsigned long addr, int len, int is_write,
         addr = (unsigned long) kmap_atomic(page, KM_UML_USERCOPY) +
                 (addr & ~PAGE_MASK);
 
-        n = (*op)(addr, len, arg);
+        current->thread.fault_catcher = &buf;
+
+        faulted = UML_SETJMP(&buf);
+        if (faulted == 0)
+                n = (*op)(addr, len, arg);
+        else
+                n = -1;
+
+        current->thread.fault_catcher = NULL;
 
         kunmap_atomic(page, KM_UML_USERCOPY);
 
         return n;
 }
 
-static void do_buffer_op(void *jmpbuf, void *arg_ptr)
+static int buffer_op(unsigned long addr, int len, int is_write,
+                     int (*op)(unsigned long, int, void *), void *arg)
 {
-        va_list args;
-        unsigned long addr;
-        int len, is_write, size, remain, n;
-        int (*op)(unsigned long, int, void *);
-        void *arg;
-        int *res;
-
-        va_copy(args, *(va_list *)arg_ptr);
-        addr = va_arg(args, unsigned long);
-        len = va_arg(args, int);
-        is_write = va_arg(args, int);
-        op = va_arg(args, void *);
-        arg = va_arg(args, void *);
-        res = va_arg(args, int *);
-        va_end(args);
+        int size, remain, n;
+
         size = min(PAGE_ALIGN(addr) - addr, (unsigned long) len);
         remain = len;
 
-        current->thread.fault_catcher = jmpbuf;
         n = do_op_one_page(addr, size, is_write, op, arg);
         if (n != 0) {
-                *res = (n < 0 ? remain : 0);
+                remain = (n < 0 ? remain : 0);
                 goto out;
         }
 
         addr += size;
         remain -= size;
-        if (remain == 0) {
-                *res = 0;
+        if (remain == 0)
                 goto out;
-        }
 
-        while(addr < ((addr + remain) & PAGE_MASK)) {
+        while (addr < ((addr + remain) & PAGE_MASK)) {
                 n = do_op_one_page(addr, PAGE_SIZE, is_write, op, arg);
                 if (n != 0) {
-                        *res = (n < 0 ? remain : 0);
+                        remain = (n < 0 ? remain : 0);
                         goto out;
                 }
 
                 addr += PAGE_SIZE;
                 remain -= PAGE_SIZE;
         }
-        if (remain == 0) {
-                *res = 0;
+        if (remain == 0)
                 goto out;
-        }
 
         n = do_op_one_page(addr, remain, is_write, op, arg);
-        if (n != 0)
-                *res = (n < 0 ? remain : 0);
-        else *res = 0;
- out:
-        current->thread.fault_catcher = NULL;
-}
-
-static int buffer_op(unsigned long addr, int len, int is_write,
-                     int (*op)(unsigned long addr, int len, void *arg),
-                     void *arg)
-{
-        int faulted, res;
-
-        faulted = setjmp_wrapper(do_buffer_op, addr, len, is_write, op, arg,
-                                 &res);
-        if (!faulted)
-                return res;
+        if (n != 0) {
+                remain = (n < 0 ? remain : 0);
+                goto out;
+        }
 
-        return addr + len - (unsigned long) current->thread.fault_addr;
+        return 0;
+ out:
+        return remain;
 }
 
 static int copy_chunk_from_user(unsigned long from, int len, void *arg)
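For context beyond this hunk, buffer_op is driven through small per-chunk callbacks such as copy_chunk_from_user, whose declaration appears as trailing context above. A hedged sketch of that calling pattern follows; the callback body and the sketch_copy_from_user wrapper are illustrative reconstructions, not lines quoted from the file.

/*
 * Hedged sketch of the buffer_op() calling convention.  Only the
 * copy_chunk_from_user() signature comes from the hunk above; the bodies
 * and the sketch_copy_from_user() name are illustrative.
 */
static int copy_chunk_from_user(unsigned long from, int len, void *arg)
{
        unsigned long *to_ptr = arg, to = *to_ptr;

        /* Runs under do_op_one_page()'s fault catcher; a bad user page
         * longjmps out of this memcpy() instead of crashing the kernel. */
        memcpy((void *) to, (void *) from, len);
        *to_ptr += len;
        return 0;
}

/* A copy of n bytes becomes one buffer_op() call (is_write == 0 because the
 * user page is only read); the return value is the number of bytes left
 * uncopied, as uaccess helpers are expected to report. */
static int sketch_copy_from_user(void *to, const void __user *from, int n)
{
        return buffer_op((unsigned long) from, n, 0, copy_chunk_from_user, &to);
}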