author    Heiko Carstens <heiko.carstens@de.ibm.com>  2014-01-17 07:12:34 -0500
committer Martin Schwidefsky <schwidefsky@de.ibm.com>  2014-01-22 08:02:16 -0500
commit    b03b467944b3e88a36a33b5429425c42dbd5b8a0 (patch)
tree      57c5b36c72106e35e9ab9ca929946e5656373296 /arch/s390/lib
parent    05e9181bdba4b1eb8f8eac5fd925df5223d16308 (diff)
s390/uaccess: test if current->mm is set before walking page tables
If get_fs() == USER_DS we had better test whether current->mm is set before walking page tables. The page table walk code tries to lock mm->page_table_lock, which would crash if mm is NULL. It is arguably incorrect to access userspace while current->mm is NULL, however we have seen that happen, and s390 would be the only architecture to crash in such a case. So make the page table walk code a bit more robust and always report a fault instead.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
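The heart of the fix is a guard that bails out before taking mm->page_table_lock when the task has no mm (for example a kernel thread). Below is a minimal, stand-alone user-space sketch of that pattern, not the kernel code itself; the names fake_mm and user_copy_pt_sketch are illustrative only.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's mm_struct. */
struct fake_mm {
	int page_table_lock;	/* placeholder for spinlock_t */
};

/*
 * Sketch of the guarded copy helper: with no mm attached, report a
 * full fault by returning the number of bytes that could not be
 * copied, instead of dereferencing a NULL mm to take its lock.
 */
static size_t user_copy_pt_sketch(struct fake_mm *mm, void *to,
				  const void *from, size_t n)
{
	(void)to;
	(void)from;

	if (!mm)
		return n;	/* everything faults: nothing copied */

	/* ... lock mm->page_table_lock and walk page tables here ... */
	return 0;		/* all bytes copied in this sketch */
}

int main(void)
{
	char buf[16];

	/* With mm == NULL the helper reports 4 uncopied bytes. */
	printf("%zu\n", user_copy_pt_sketch(NULL, buf, "test", 4));
	return 0;
}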
Diffstat (limited to 'arch/s390/lib')
-rw-r--r--  arch/s390/lib/uaccess_pt.c  10
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 0632dc50da78..5b77c341a1e3 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -153,6 +153,8 @@ static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
 	unsigned long offset, done, size, kaddr;
 	void *from, *to;
 
+	if (!mm)
+		return n;
 	done = 0;
 retry:
 	spin_lock(&mm->page_table_lock);
@@ -262,6 +264,8 @@ static size_t strnlen_user_pt(size_t count, const char __user *src)
 		return 0;
 	if (segment_eq(get_fs(), KERNEL_DS))
 		return strnlen_kernel(count, src);
+	if (!mm)
+		return 0;
 	done = 0;
 retry:
 	spin_lock(&mm->page_table_lock);
@@ -323,6 +327,8 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
 
 	if (segment_eq(get_fs(), KERNEL_DS))
 		return copy_in_kernel(n, to, from);
+	if (!mm)
+		return n;
 	done = 0;
 retry:
 	spin_lock(&mm->page_table_lock);
@@ -411,6 +417,8 @@ int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
 
 	if (segment_eq(get_fs(), KERNEL_DS))
 		return __futex_atomic_op_pt(op, uaddr, oparg, old);
+	if (unlikely(!current->mm))
+		return -EFAULT;
 	spin_lock(&current->mm->page_table_lock);
 	uaddr = (u32 __force __user *)
 		__dat_user_addr((__force unsigned long) uaddr, 1);
@@ -448,6 +456,8 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
 
 	if (segment_eq(get_fs(), KERNEL_DS))
 		return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
+	if (unlikely(!current->mm))
+		return -EFAULT;
 	spin_lock(&current->mm->page_table_lock);
 	uaddr = (u32 __force __user *)
 		__dat_user_addr((__force unsigned long) uaddr, 1);