aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorHeiko Carstens <heiko.carstens@de.ibm.com>2013-02-25 03:10:23 -0500
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2013-02-28 03:37:12 -0500
commit066c4373599211ab0e1425586b8df6f1e932d97e (patch)
treeb1ccc220714494e034092dbe583c0c46cbca3591
parent225cf8d69c768f4472d2fd9f54bba2b69a588193 (diff)
s390/uaccess: fix kernel ds access for page table walk
When the kernel resides in home space and the mvcos instruction is not available uaccesses for kernel ds happen via simple strnlen() or memcpy() calls. This, however, can break badly, since uaccesses in kernel space may fail as well, especially if CONFIG_DEBUG_PAGEALLOC is turned on. To fix this implement strnlen_kernel() and copy_in_kernel() functions which can only be used by the page table uaccess functions. These two functions detect invalid memory accesses and return the correct length of processed data. Both functions are more or less a copy of the std variants without sacf calls. Fixes ipl crashes on 31 bit machines as well on 64 bit machines without mvcos. Caused by changing the default address space of the kernel being home space. Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r--arch/s390/lib/uaccess_pt.c105
1 files changed, 77 insertions, 28 deletions
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 304e07086ab3..dff631d34b45 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -14,6 +14,63 @@
14#include <asm/futex.h> 14#include <asm/futex.h>
15#include "uaccess.h" 15#include "uaccess.h"
16 16
/* Pick 31-bit or 64-bit mnemonics for add-halfword-immediate and
 * subtract-logical, so the same asm templates work on both builds. */
17#ifndef CONFIG_64BIT
18#define AHI "ahi"
19#define SLR "slr"
20#else
21#define AHI "aghi"
22#define SLR "slgr"
23#endif
24
/*
 * strnlen_kernel - bounded string length for KERNEL_DS accesses,
 * for use by the page table walk uaccess functions only.
 *
 * Scans at most @count bytes starting at @src for a terminating NUL
 * using SRST (search string, searching for the byte in register 0,
 * which is preloaded with 0). On success returns the string length
 * INCLUDING the terminating NUL. If the access faults, the EX_TABLE
 * entry resumes at label 1 with %0 already zeroed, so 0 is returned.
 * NOTE(review): if no NUL is found within @count bytes the returned
 * value exceeds @count — confirm callers treat ">count" as "too long".
 */
25static size_t strnlen_kernel(size_t count, const char __user *src)
26{
	/* SRST implicitly takes its search character from register 0 */
27	register unsigned long reg0 asm("0") = 0UL;
28	unsigned long tmp1, tmp2;
29
30	asm volatile(
31		"   la    %2,0(%1)\n"		/* tmp1 = start of string */
32		"   la    %3,0(%0,%1)\n"	/* tmp2 = src + count (scan limit) */
33		"   "SLR" %0,%0\n"		/* %0 = 0: fault fallback result */
34		"0: srst  %3,%2\n"		/* search [tmp1,tmp2) for NUL */
35		"   jo    0b\n"			/* cc=3: partial scan, continue */
36		"   la    %0,1(%3)\n"		/* strnlen_kernel result includes \0 */
37		"   "SLR" %0,%1\n"		/* length = (NUL addr + 1) - src */
38		"1:\n"
39		EX_TABLE(0b,1b)			/* fault in srst -> return 0 */
40		: "+a" (count), "+a" (src), "=a" (tmp1), "=a" (tmp2)
41		: "d" (reg0) : "cc", "memory");
42	return count;
43}
44
/*
 * copy_in_kernel - memcpy for KERNEL_DS accesses that survives faults,
 * for use by the page table walk uaccess functions only.
 *
 * Copies @count bytes from @from to @to with MVC in 256-byte chunks,
 * finishing the remainder via EX (execute) of the 1-byte MVC template.
 * Like the std uaccess variants this returns the number of bytes NOT
 * copied: 0 on full success. On a fault inside a 256-byte MVC the
 * EX_TABLE fixup restarts the transfer byte-by-byte (labels 0/1) so
 * the exact fault boundary is found and the precise residue returned.
 * Mirrors copy_in_user_std() but without the sacf space switches.
 */
45static size_t copy_in_kernel(size_t count, void __user *to,
46			     const void __user *from)
47{
48	unsigned long tmp1;
49
50	asm volatile(
51		"   "AHI" %0,-1\n"		/* count-1; count==0 overflows... */
52		"   jo    5f\n"			/* ...and is done immediately */
53		"   bras  %3,3f\n"		/* tmp1 = addr of label 0 (MVC template base) */
54		"0:"AHI"  %0,257\n"		/* fault fixup: undo chunk bias, go byte-wise */
55		"1: mvc   0(1,%1),0(%2)\n"	/* byte-wise copy (also the EX template) */
56		"   la    %1,1(%1)\n"
57		"   la    %2,1(%2)\n"
58		"   "AHI" %0,-1\n"
59		"   jnz   1b\n"
60		"   j     5f\n"
61		"2: mvc   0(256,%1),0(%2)\n"	/* fast path: 256-byte chunks */
62		"   la    %1,256(%1)\n"
63		"   la    %2,256(%2)\n"
64		"3:"AHI"  %0,-256\n"
65		"   jnm   2b\n"
66		"4: ex    %0,1b-0b(%3)\n"	/* copy remaining 0..255 bytes via EX */
67		"5:"SLR"  %0,%0\n"		/* success: residue = 0 */
68		"6:\n"
		/* 1b: fault byte-wise -> return remaining count;
		 * 2b/4b: fault in chunk/EX -> retry byte-wise from 0b */
69		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
70		: "+a" (count), "+a" (to), "+a" (from), "=a" (tmp1)
71		: : "cc", "memory");
72	return count;
73}
17 74
18/* 75/*
19 * Returns kernel address for user virtual address. If the returned address is 76 * Returns kernel address for user virtual address. If the returned address is
@@ -123,10 +180,8 @@ size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
123{ 180{
124 size_t rc; 181 size_t rc;
125 182
126 if (segment_eq(get_fs(), KERNEL_DS)) { 183 if (segment_eq(get_fs(), KERNEL_DS))
127 memcpy(to, (void __kernel __force *) from, n); 184 return copy_in_kernel(n, (void __user *) to, from);
128 return 0;
129 }
130 rc = __user_copy_pt((unsigned long) from, to, n, 0); 185 rc = __user_copy_pt((unsigned long) from, to, n, 0);
131 if (unlikely(rc)) 186 if (unlikely(rc))
132 memset(to + n - rc, 0, rc); 187 memset(to + n - rc, 0, rc);
@@ -135,30 +190,28 @@ size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
135 190
136size_t copy_to_user_pt(size_t n, void __user *to, const void *from) 191size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
137{ 192{
138 if (segment_eq(get_fs(), KERNEL_DS)) { 193 if (segment_eq(get_fs(), KERNEL_DS))
139 memcpy((void __kernel __force *) to, from, n); 194 return copy_in_kernel(n, to, (void __user *) from);
140 return 0;
141 }
142 return __user_copy_pt((unsigned long) to, (void *) from, n, 1); 195 return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
143} 196}
144 197
145static size_t clear_user_pt(size_t n, void __user *to) 198static size_t clear_user_pt(size_t n, void __user *to)
146{ 199{
200 void *zpage = &empty_zero_page;
147 long done, size, ret; 201 long done, size, ret;
148 202
149 if (segment_eq(get_fs(), KERNEL_DS)) {
150 memset((void __kernel __force *) to, 0, n);
151 return 0;
152 }
153 done = 0; 203 done = 0;
154 do { 204 do {
155 if (n - done > PAGE_SIZE) 205 if (n - done > PAGE_SIZE)
156 size = PAGE_SIZE; 206 size = PAGE_SIZE;
157 else 207 else
158 size = n - done; 208 size = n - done;
159 ret = __user_copy_pt((unsigned long) to + done, 209 if (segment_eq(get_fs(), KERNEL_DS))
160 &empty_zero_page, size, 1); 210 ret = copy_in_kernel(n, to, (void __user *) zpage);
211 else
212 ret = __user_copy_pt((unsigned long) to, zpage, size, 1);
161 done += size; 213 done += size;
214 to += size;
162 if (ret) 215 if (ret)
163 return ret + n - done; 216 return ret + n - done;
164 } while (done < n); 217 } while (done < n);
@@ -175,7 +228,7 @@ static size_t strnlen_user_pt(size_t count, const char __user *src)
175 if (unlikely(!count)) 228 if (unlikely(!count))
176 return 0; 229 return 0;
177 if (segment_eq(get_fs(), KERNEL_DS)) 230 if (segment_eq(get_fs(), KERNEL_DS))
178 return strnlen((const char __kernel __force *) src, count) + 1; 231 return strnlen_kernel(count, src);
179 done = 0; 232 done = 0;
180retry: 233retry:
181 spin_lock(&mm->page_table_lock); 234 spin_lock(&mm->page_table_lock);
@@ -206,19 +259,17 @@ static size_t strncpy_from_user_pt(size_t count, const char __user *src,
206 259
207 if (unlikely(!count)) 260 if (unlikely(!count))
208 return 0; 261 return 0;
209 if (segment_eq(get_fs(), KERNEL_DS)) {
210 len = strnlen((const char __kernel __force *) src, count) + 1;
211 if (len > count)
212 len = count;
213 memcpy(dst, (const char __kernel __force *) src, len);
214 return (dst[len - 1] == '\0') ? len - 1 : len;
215 }
216 done = 0; 262 done = 0;
217 do { 263 do {
218 offset = (size_t)src & ~PAGE_MASK; 264 offset = (size_t)src & ~PAGE_MASK;
219 len = min(count - done, PAGE_SIZE - offset); 265 len = min(count - done, PAGE_SIZE - offset);
220 if (__user_copy_pt((unsigned long) src, dst, len, 0)) 266 if (segment_eq(get_fs(), KERNEL_DS)) {
221 return -EFAULT; 267 if (copy_in_kernel(len, (void __user *) dst, src))
268 return -EFAULT;
269 } else {
270 if (__user_copy_pt((unsigned long) src, dst, len, 0))
271 return -EFAULT;
272 }
222 len_str = strnlen(dst, len); 273 len_str = strnlen(dst, len);
223 done += len_str; 274 done += len_str;
224 src += len_str; 275 src += len_str;
@@ -237,10 +288,8 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
237 unsigned long kaddr_to, kaddr_from; 288 unsigned long kaddr_to, kaddr_from;
238 int write_user; 289 int write_user;
239 290
240 if (segment_eq(get_fs(), KERNEL_DS)) { 291 if (segment_eq(get_fs(), KERNEL_DS))
241 memcpy((void __force *) to, (void __force *) from, n); 292 return copy_in_kernel(n, to, from);
242 return 0;
243 }
244 done = 0; 293 done = 0;
245retry: 294retry:
246 spin_lock(&mm->page_table_lock); 295 spin_lock(&mm->page_table_lock);