author	Gerald Schaefer <geraldsc@de.ibm.com>	2006-12-04 09:40:45 -0500
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2006-12-04 09:40:45 -0500
commit	59f35d53fde3987d071ea1c9bf1c9ba29fcb69fe (patch)
tree	50853c1bf3ad3af080707fb2e6b3722d3d86db9e /arch
parent	d57de5a36791cb1b7285649c62f183b0d3505f7d (diff)
[S390] Add dynamic size check for usercopy functions.
Use a wrapper for copy_to/from_user to choose the best usercopy method: the mvcos instruction is better for sizes greater than 256 bytes; if mvcos is not available, a page table walk is better for sizes greater than 1024 bytes. Also remove the now redundant copy_to/from_user_std_small functions.

Signed-off-by: Gerald Schaefer <geraldsc@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
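In operational terms, the patch installs small "_check" wrappers in uaccess_ops and leaves the actual copy routines unchanged. The stand-alone C sketch below mirrors only that size-based dispatch; the 256-byte and 1024-byte thresholds are taken from the patch, while the stub copy routines and the main() driver are hypothetical stand-ins, not the real mvcos/mvcp/page-table-walk implementations.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for the real s390 copy primitives.
 * Like the kernel helpers, they return the bytes NOT copied (0 = success). */
static size_t copy_std(void *dst, const void *src, size_t n)   { memcpy(dst, src, n); return 0; }
static size_t copy_mvcos(void *dst, const void *src, size_t n) { memcpy(dst, src, n); return 0; }
static size_t copy_pt(void *dst, const void *src, size_t n)    { memcpy(dst, src, n); return 0; }

/* Wrapper used when mvcos is available: mvcos only wins above 256 bytes. */
static size_t copy_check_mvcos(void *dst, const void *src, size_t n)
{
	if (n <= 256)
		return copy_std(dst, src, n);
	return copy_mvcos(dst, src, n);
}

/* Wrapper used without mvcos: a page table walk only pays off above 1024 bytes. */
static size_t copy_check_std(void *dst, const void *src, size_t n)
{
	if (n <= 1024)
		return copy_std(dst, src, n);
	return copy_pt(dst, src, n);
}

int main(void)
{
	char src[2048] = "example", dst[2048];

	copy_check_mvcos(dst, src, sizeof(src)); /* > 256 bytes: mvcos path */
	copy_check_std(dst, src, 64);            /* <= 1024 bytes: mvcp/mvcs path */
	printf("%s\n", dst);
	return 0;
}

In the patch itself these roles are played by copy_from/to_user_mvcos_check (256-byte split, used by uaccess_mvcos) and copy_from/to_user_std_check (1024-byte split, used by uaccess_std), which are what uaccess_ops now points at.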
Diffstat (limited to 'arch')
-rw-r--r--	arch/s390/lib/Makefile	2
-rw-r--r--	arch/s390/lib/uaccess_mvcos.c	27
-rw-r--r--	arch/s390/lib/uaccess_pt.c	153
-rw-r--r--	arch/s390/lib/uaccess_std.c	67
4 files changed, 190 insertions(+), 59 deletions(-)
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index b0cfa6c4883d..b5f94cf3bde8 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -4,7 +4,7 @@
 
 EXTRA_AFLAGS := -traditional
 
-lib-y += delay.o string.o uaccess_std.o
+lib-y += delay.o string.o uaccess_std.o uaccess_pt.o
 lib-$(CONFIG_32BIT) += div64.o
 lib-$(CONFIG_64BIT) += uaccess_mvcos.o
 lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index 121b2935a422..f9a23d57eb79 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -27,6 +27,9 @@
 #define SLR "slgr"
 #endif
 
+extern size_t copy_from_user_std(size_t, const void __user *, void *);
+extern size_t copy_to_user_std(size_t, void __user *, const void *);
+
 size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
 {
 	register unsigned long reg0 asm("0") = 0x81UL;
@@ -66,6 +69,13 @@ size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
 	return size;
 }
 
+size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x)
+{
+	if (size <= 256)
+		return copy_from_user_std(size, ptr, x);
+	return copy_from_user_mvcos(size, ptr, x);
+}
+
 size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
 {
 	register unsigned long reg0 asm("0") = 0x810000UL;
@@ -95,6 +105,13 @@ size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
 	return size;
 }
 
+size_t copy_to_user_mvcos_check(size_t size, void __user *ptr, const void *x)
+{
+	if (size <= 256)
+		return copy_to_user_std(size, ptr, x);
+	return copy_to_user_mvcos(size, ptr, x);
+}
+
 size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from)
 {
 	register unsigned long reg0 asm("0") = 0x810081UL;
@@ -145,18 +162,16 @@ size_t clear_user_mvcos(size_t size, void __user *to)
 	return size;
 }
 
-extern size_t copy_from_user_std_small(size_t, const void __user *, void *);
-extern size_t copy_to_user_std_small(size_t, void __user *, const void *);
 extern size_t strnlen_user_std(size_t, const char __user *);
 extern size_t strncpy_from_user_std(size_t, const char __user *, char *);
 extern int futex_atomic_op(int, int __user *, int, int *);
 extern int futex_atomic_cmpxchg(int __user *, int, int);
 
 struct uaccess_ops uaccess_mvcos = {
-	.copy_from_user = copy_from_user_mvcos,
-	.copy_from_user_small = copy_from_user_std_small,
-	.copy_to_user = copy_to_user_mvcos,
-	.copy_to_user_small = copy_to_user_std_small,
+	.copy_from_user = copy_from_user_mvcos_check,
+	.copy_from_user_small = copy_from_user_std,
+	.copy_to_user = copy_to_user_mvcos_check,
+	.copy_to_user_small = copy_to_user_std,
 	.copy_in_user = copy_in_user_mvcos,
 	.clear_user = clear_user_mvcos,
 	.strnlen_user = strnlen_user_std,
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
new file mode 100644
index 000000000000..8741bdc09299
--- /dev/null
+++ b/arch/s390/lib/uaccess_pt.c
@@ -0,0 +1,153 @@
+/*
+ * arch/s390/lib/uaccess_pt.c
+ *
+ * User access functions based on page table walks.
+ *
+ * Copyright IBM Corp. 2006
+ * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
+ */
+
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <linux/mm.h>
+#include <asm/futex.h>
+
+static inline int __handle_fault(struct mm_struct *mm, unsigned long address,
+				 int write_access)
+{
+	struct vm_area_struct *vma;
+	int ret = -EFAULT;
+
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, address);
+	if (unlikely(!vma))
+		goto out;
+	if (unlikely(vma->vm_start > address)) {
+		if (!(vma->vm_flags & VM_GROWSDOWN))
+			goto out;
+		if (expand_stack(vma, address))
+			goto out;
+	}
+
+	if (!write_access) {
+		/* page not present, check vm flags */
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+			goto out;
+	} else {
+		if (!(vma->vm_flags & VM_WRITE))
+			goto out;
+	}
+
+survive:
+	switch (handle_mm_fault(mm, vma, address, write_access)) {
+	case VM_FAULT_MINOR:
+		current->min_flt++;
+		break;
+	case VM_FAULT_MAJOR:
+		current->maj_flt++;
+		break;
+	case VM_FAULT_SIGBUS:
+		goto out_sigbus;
+	case VM_FAULT_OOM:
+		goto out_of_memory;
+	default:
+		BUG();
+	}
+	ret = 0;
+out:
+	up_read(&mm->mmap_sem);
+	return ret;
+
+out_of_memory:
+	up_read(&mm->mmap_sem);
+	if (current->pid == 1) {
+		yield();
+		goto survive;
+	}
+	printk("VM: killing process %s\n", current->comm);
+	return ret;
+
+out_sigbus:
+	up_read(&mm->mmap_sem);
+	current->thread.prot_addr = address;
+	current->thread.trap_no = 0x11;
+	force_sig(SIGBUS, current);
+	return ret;
+}
+
+static inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
+				    size_t n, int write_user)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long offset, pfn, done, size;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	void *from, *to;
+
+	done = 0;
+retry:
+	spin_lock(&mm->page_table_lock);
+	do {
+		pgd = pgd_offset(mm, uaddr);
+		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+			goto fault;
+
+		pmd = pmd_offset(pgd, uaddr);
+		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+			goto fault;
+
+		pte = pte_offset_map(pmd, uaddr);
+		if (!pte || !pte_present(*pte) ||
+		    (write_user && !pte_write(*pte)))
+			goto fault;
+
+		pfn = pte_pfn(*pte);
+		if (!pfn_valid(pfn))
+			goto out;
+
+		offset = uaddr & (PAGE_SIZE - 1);
+		size = min(n - done, PAGE_SIZE - offset);
+		if (write_user) {
+			to = (void *)((pfn << PAGE_SHIFT) + offset);
+			from = kptr + done;
+		} else {
+			from = (void *)((pfn << PAGE_SHIFT) + offset);
+			to = kptr + done;
+		}
+		memcpy(to, from, size);
+		done += size;
+		uaddr += size;
+	} while (done < n);
+out:
+	spin_unlock(&mm->page_table_lock);
+	return n - done;
+fault:
+	spin_unlock(&mm->page_table_lock);
+	if (__handle_fault(mm, uaddr, write_user))
+		return n - done;
+	goto retry;
+}
+
+size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
+{
+	size_t rc;
+
+	if (segment_eq(get_fs(), KERNEL_DS)) {
+		memcpy(to, (void __kernel __force *) from, n);
+		return 0;
+	}
+	rc = __user_copy_pt((unsigned long) from, to, n, 0);
+	if (unlikely(rc))
+		memset(to + n - rc, 0, rc);
+	return rc;
+}
+
+size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
+{
+	if (segment_eq(get_fs(), KERNEL_DS)) {
+		memcpy((void __kernel __force *) to, from, n);
+		return 0;
+	}
+	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
+}
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
index f44f0078b354..2d549ed2e113 100644
--- a/arch/s390/lib/uaccess_std.c
+++ b/arch/s390/lib/uaccess_std.c
@@ -28,6 +28,9 @@
 #define SLR "slgr"
 #endif
 
+extern size_t copy_from_user_pt(size_t n, const void __user *from, void *to);
+extern size_t copy_to_user_pt(size_t n, void __user *to, const void *from);
+
 size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
 {
 	unsigned long tmp1, tmp2;
@@ -69,34 +72,11 @@ size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
 	return size;
 }
 
-size_t copy_from_user_std_small(size_t size, const void __user *ptr, void *x)
+size_t copy_from_user_std_check(size_t size, const void __user *ptr, void *x)
 {
-	unsigned long tmp1, tmp2;
-
-	tmp1 = 0UL;
-	asm volatile(
-		"0: mvcp 0(%0,%2),0(%1),%3\n"
-		"   "SLR" %0,%0\n"
-		"   j 5f\n"
-		"1: la %4,255(%1)\n" /* %4 = ptr + 255 */
-		"   "LHI" %3,-4096\n"
-		"   nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
-		"   "SLR" %4,%1\n"
-		"   "CLR" %0,%4\n" /* copy crosses next page boundary? */
-		"   jnh 5f\n"
-		"2: mvcp 0(%4,%2),0(%1),%3\n"
-		"   "SLR" %0,%4\n"
-		"   "ALR" %2,%4\n"
-		"3:"LHI" %4,-1\n"
-		"   "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
-		"   bras %3,4f\n"
-		"   xc 0(1,%2),0(%2)\n"
-		"4: ex %4,0(%3)\n"
-		"5:\n"
-		EX_TABLE(0b,1b) EX_TABLE(2b,3b)
-		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
-		: : "cc", "memory");
-	return size;
+	if (size <= 1024)
+		return copy_from_user_std(size, ptr, x);
+	return copy_from_user_pt(size, ptr, x);
 }
 
 size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
@@ -130,28 +110,11 @@ size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
 	return size;
 }
 
-size_t copy_to_user_std_small(size_t size, void __user *ptr, const void *x)
+size_t copy_to_user_std_check(size_t size, void __user *ptr, const void *x)
 {
-	unsigned long tmp1, tmp2;
-
-	tmp1 = 0UL;
-	asm volatile(
-		"0: mvcs 0(%0,%1),0(%2),%3\n"
-		"   "SLR" %0,%0\n"
-		"   j 3f\n"
-		"1: la %4,255(%1)\n" /* ptr + 255 */
-		"   "LHI" %3,-4096\n"
-		"   nr %4,%3\n" /* (ptr + 255) & -4096UL */
-		"   "SLR" %4,%1\n"
-		"   "CLR" %0,%4\n" /* copy crosses next page boundary? */
-		"   jnh 3f\n"
-		"2: mvcs 0(%4,%1),0(%2),%3\n"
-		"   "SLR" %0,%4\n"
-		"3:\n"
-		EX_TABLE(0b,1b) EX_TABLE(2b,3b)
-		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
-		: : "cc", "memory");
-	return size;
+	if (size <= 1024)
+		return copy_to_user_std(size, ptr, x);
+	return copy_to_user_pt(size, ptr, x);
 }
 
 size_t copy_in_user_std(size_t size, void __user *to, const void __user *from)
@@ -343,10 +306,10 @@ int futex_atomic_cmpxchg(int __user *uaddr, int oldval, int newval)
 }
 
 struct uaccess_ops uaccess_std = {
-	.copy_from_user = copy_from_user_std,
-	.copy_from_user_small = copy_from_user_std_small,
-	.copy_to_user = copy_to_user_std,
-	.copy_to_user_small = copy_to_user_std_small,
+	.copy_from_user = copy_from_user_std_check,
+	.copy_from_user_small = copy_from_user_std,
+	.copy_to_user = copy_to_user_std_check,
+	.copy_to_user_small = copy_to_user_std,
 	.copy_in_user = copy_in_user_std,
 	.clear_user = clear_user_std,
 	.strnlen_user = strnlen_user_std,