path: root/arch/s390/lib
author	Steve French <sfrench@us.ibm.com>	2007-02-06 19:30:38 -0500
committer	Steve French <sfrench@us.ibm.com>	2007-02-06 19:30:38 -0500
commit	1d9564ea0ac72ef7c4068d66fe42ad23af4ff53f (patch)
tree	fa63b4c1bbd775252f71d05fef4de6a904eb4448 /arch/s390/lib
parent	87f440e70e07dace7db130f2f9fcea3f132aad8f (diff)
parent	dda2ac15d23b38e4335e858848aa8c9a6710304f (diff)
Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'arch/s390/lib')
-rw-r--r--  arch/s390/lib/Makefile          2
-rw-r--r--  arch/s390/lib/delay.c          48
-rw-r--r--  arch/s390/lib/qrnnd.S          77
-rw-r--r--  arch/s390/lib/uaccess.h        23
-rw-r--r--  arch/s390/lib/uaccess_mvcos.c  78
-rw-r--r--  arch/s390/lib/uaccess_pt.c    329
-rw-r--r--  arch/s390/lib/uaccess_std.c    23
7 files changed, 534 insertions(+), 46 deletions(-)
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index b5f94cf3bde8..7a44fed21b35 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -4,7 +4,7 @@
 
 EXTRA_AFLAGS := -traditional
 
-lib-y += delay.o string.o uaccess_std.o uaccess_pt.o
+lib-y += delay.o string.o uaccess_std.o uaccess_pt.o qrnnd.o
 lib-$(CONFIG_32BIT) += div64.o
 lib-$(CONFIG_64BIT) += uaccess_mvcos.o
 lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 027c4742a001..02854449b74b 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -1,5 +1,5 @@
 /*
- * arch/s390/kernel/delay.c
+ * arch/s390/lib/delay.c
  * Precise Delay Loops for S390
  *
  * S390 version
@@ -13,10 +13,8 @@
 
 #include <linux/sched.h>
 #include <linux/delay.h>
-
-#ifdef CONFIG_SMP
-#include <asm/smp.h>
-#endif
+#include <linux/timex.h>
+#include <linux/irqflags.h>
 
 void __delay(unsigned long loops)
 {
@@ -31,17 +29,39 @@ void __delay(unsigned long loops)
 }
 
 /*
- * Waits for 'usecs' microseconds using the tod clock, giving up the time slice
- * of the virtual PU inbetween to avoid congestion.
+ * Waits for 'usecs' microseconds using the TOD clock comparator.
  */
 void __udelay(unsigned long usecs)
 {
-	uint64_t start_cc;
+	u64 end, time, jiffy_timer = 0;
+	unsigned long flags, cr0, mask, dummy;
+
+	local_irq_save(flags);
+	if (raw_irqs_disabled_flags(flags)) {
+		jiffy_timer = S390_lowcore.jiffy_timer;
+		S390_lowcore.jiffy_timer = -1ULL - (4096 << 12);
+		__ctl_store(cr0, 0, 0);
+		dummy = (cr0 & 0xffff00e0) | 0x00000800;
+		__ctl_load(dummy , 0, 0);
+		mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
+	} else
+		mask = psw_kernel_bits | PSW_MASK_WAIT |
+			PSW_MASK_EXT | PSW_MASK_IO;
+
+	end = get_clock() + ((u64) usecs << 12);
+	do {
+		time = end < S390_lowcore.jiffy_timer ?
+			end : S390_lowcore.jiffy_timer;
+		set_clock_comparator(time);
+		trace_hardirqs_on();
+		__load_psw_mask(mask);
+		local_irq_disable();
+	} while (get_clock() < end);
 
-	if (usecs == 0)
-		return;
-	start_cc = get_clock();
-	do {
-		cpu_relax();
-	} while (((get_clock() - start_cc)/4096) < usecs);
+	if (raw_irqs_disabled_flags(flags)) {
+		__ctl_load(cr0, 0, 0);
+		S390_lowcore.jiffy_timer = jiffy_timer;
+	}
+	set_clock_comparator(S390_lowcore.jiffy_timer);
+	local_irq_restore(flags);
 }
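
The rewritten __udelay() above no longer busy-waits on cpu_relax(): it computes an end time in TOD clock units, programs the clock comparator, and loads an enabled-wait PSW so the (virtual) CPU actually sleeps until the timer external interrupt fires. Old and new code rely on the same conversion factor of 4096 TOD units per microsecond; a minimal sketch of that arithmetic in plain C, with udelay_end_tod() as an illustrative name rather than kernel code:

#include <stdint.h>

/* End-of-delay TOD value as computed in __udelay() above: the s390 TOD
 * clock ticks 4096 (2^12) times per microsecond, hence
 * "end = get_clock() + ((u64) usecs << 12)". */
static uint64_t udelay_end_tod(uint64_t now_tod, unsigned long usecs)
{
	return now_tod + ((uint64_t) usecs << 12);
}

The same factor explains the exit test of the old polling loop, ((get_clock() - start_cc) / 4096) < usecs.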
diff --git a/arch/s390/lib/qrnnd.S b/arch/s390/lib/qrnnd.S
new file mode 100644
index 000000000000..eb1df632e749
--- /dev/null
+++ b/arch/s390/lib/qrnnd.S
@@ -0,0 +1,77 @@
+# S/390 __udiv_qrnnd
+
+# r2 : &__r
+# r3 : upper half of 64 bit word n
+# r4 : lower half of 64 bit word n
+# r5 : divisor d
+# the reminder r of the division is to be stored to &__r and
+# the quotient q is to be returned
+
+	.text
+	.globl	__udiv_qrnnd
+__udiv_qrnnd:
+	st	%r2,24(%r15)	# store pointer to reminder for later
+	lr	%r0,%r3		# reload n
+	lr	%r1,%r4
+	ltr	%r2,%r5		# reload and test divisor
+	jp	5f
+	# divisor >= 0x80000000
+	srdl	%r0,2		# n/4
+	srl	%r2,1		# d/2
+	slr	%r1,%r2		# special case if last bit of d is set
+	brc	3,0f		#  (n/4) div (n/2) can overflow by 1
+	ahi	%r0,-1		#  trick: subtract n/2, then divide
+0:	dr	%r0,%r2		# signed division
+	ahi	%r1,1		#  trick part 2: add 1 to the quotient
+	# now (n >> 2) = (d >> 1) * %r1 + %r0
+	lhi	%r3,1
+	nr	%r3,%r1		# test last bit of q
+	jz	1f
+	alr	%r0,%r2		# add (d>>1) to r
+1:	srl	%r1,1		# q >>= 1
+	# now (n >> 2) = (d&-2) * %r1 + %r0
+	lhi	%r3,1
+	nr	%r3,%r5		# test last bit of d
+	jz	2f
+	slr	%r0,%r1		# r -= q
+	brc	3,2f		# borrow ?
+	alr	%r0,%r5		# r += d
+	ahi	%r1,-1
+2:	# now (n >> 2) = d * %r1 + %r0
+	alr	%r1,%r1		# q <<= 1
+	alr	%r0,%r0		# r <<= 1
+	brc	12,3f		# overflow on r ?
+	slr	%r0,%r5		# r -= d
+	ahi	%r1,1		# q += 1
+3:	lhi	%r3,2
+	nr	%r3,%r4		# test next to last bit of n
+	jz	4f
+	ahi	%r0,1		# r += 1
+4:	clr	%r0,%r5		# r >= d ?
+	jl	6f
+	slr	%r0,%r5		# r -= d
+	ahi	%r1,1		# q += 1
+	# now (n >> 1) = d * %r1 + %r0
+	j	6f
+5:	# divisor < 0x80000000
+	srdl	%r0,1
+	dr	%r0,%r2		# signed division
+	# now (n >> 1) = d * %r1 + %r0
+6:	alr	%r1,%r1		# q <<= 1
+	alr	%r0,%r0		# r <<= 1
+	brc	12,7f		# overflow on r ?
+	slr	%r0,%r5		# r -= d
+	ahi	%r1,1		# q += 1
+7:	lhi	%r3,1
+	nr	%r3,%r4		# isolate last bit of n
+	alr	%r0,%r3		# r += (n & 1)
+	clr	%r0,%r5		# r >= d ?
+	jl	8f
+	slr	%r0,%r5		# r -= d
+	ahi	%r1,1		# q += 1
+8:	# now n = d * %r1 + %r0
+	l	%r2,24(%r15)
+	st	%r0,0(%r2)
+	lr	%r2,%r1
+	br	%r14
+	.end	__udiv_qrnnd
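
__udiv_qrnnd follows the contract of GMP's udiv_qrnnd primitive: divide a 64-bit numerator, passed as two 32-bit halves, by a 32-bit divisor, returning the quotient (in %r2, the return register) and storing the remainder through the pointer argument. The assembly exists because 31-bit s390 has no unsigned 64/32 divide instruction. As a behavioral reference only (udiv_qrnnd_ref is an illustrative name, and it assumes the quotient fits in 32 bits):

#include <stdint.h>

/* Reference model of __udiv_qrnnd: q = n / d and *r = n % d,
 * where n = ((uint64_t) n1 << 32) | n0. Not kernel code. */
static uint32_t udiv_qrnnd_ref(uint32_t *r, uint32_t n1, uint32_t n0,
			       uint32_t d)
{
	uint64_t n = ((uint64_t) n1 << 32) | n0;

	*r = (uint32_t) (n % d);
	return (uint32_t) (n / d);
}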
diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h
new file mode 100644
index 000000000000..126011df14f1
--- /dev/null
+++ b/arch/s390/lib/uaccess.h
@@ -0,0 +1,23 @@
+/*
+ *  arch/s390/uaccess.h
+ *
+ *  Copyright IBM Corp. 2007
+ *
+ */
+
+#ifndef __ARCH_S390_LIB_UACCESS_H
+#define __ARCH_S390_LIB_UACCESS_H
+
+extern size_t copy_from_user_std(size_t, const void __user *, void *);
+extern size_t copy_to_user_std(size_t, void __user *, const void *);
+extern size_t strnlen_user_std(size_t, const char __user *);
+extern size_t strncpy_from_user_std(size_t, const char __user *, char *);
+extern int futex_atomic_cmpxchg_std(int __user *, int, int);
+extern int futex_atomic_op_std(int, int __user *, int, int *);
+
+extern size_t copy_from_user_pt(size_t, const void __user *, void *);
+extern size_t copy_to_user_pt(size_t, void __user *, const void *);
+extern int futex_atomic_op_pt(int, int __user *, int, int *);
+extern int futex_atomic_cmpxchg_pt(int __user *, int, int);
+
+#endif /* __ARCH_S390_LIB_UACCESS_H */
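
These prototypes back the function-pointer slots of struct uaccess_ops, whose definition lives in include/asm-s390/uaccess.h and is not part of this diff. Reconstructed from the declarations above and the initializers below, the table plausibly has this shape; treat it as a sketch, not the authoritative definition:

struct uaccess_ops {
	size_t (*copy_from_user)(size_t, const void __user *, void *);
	size_t (*copy_from_user_small)(size_t, const void __user *, void *);
	size_t (*copy_to_user)(size_t, void __user *, const void *);
	size_t (*copy_to_user_small)(size_t, void __user *, const void *);
	size_t (*copy_in_user)(size_t, void __user *, const void __user *);
	size_t (*clear_user)(size_t, void __user *);
	size_t (*strnlen_user)(size_t, const char __user *);
	size_t (*strncpy_from_user)(size_t, const char __user *, char *);
	int (*futex_atomic_op)(int, int __user *, int, int *);
	int (*futex_atomic_cmpxchg)(int __user *, int, int);
};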
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index f9a23d57eb79..6d8772339d76 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -12,6 +12,7 @@
 #include <linux/mm.h>
 #include <asm/uaccess.h>
 #include <asm/futex.h>
+#include "uaccess.h"
 
 #ifndef __s390x__
 #define AHI	"ahi"
@@ -27,10 +28,7 @@
 #define SLR	"slgr"
 #endif
 
-extern size_t copy_from_user_std(size_t, const void __user *, void *);
-extern size_t copy_to_user_std(size_t, void __user *, const void *);
-
-size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
+static size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
 {
 	register unsigned long reg0 asm("0") = 0x81UL;
 	unsigned long tmp1, tmp2;
@@ -69,14 +67,14 @@ size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
 	return size;
 }
 
-size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x)
+static size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x)
 {
 	if (size <= 256)
 		return copy_from_user_std(size, ptr, x);
 	return copy_from_user_mvcos(size, ptr, x);
 }
 
-size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
+static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
 {
 	register unsigned long reg0 asm("0") = 0x810000UL;
 	unsigned long tmp1, tmp2;
@@ -105,14 +103,16 @@ size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
 	return size;
 }
 
-size_t copy_to_user_mvcos_check(size_t size, void __user *ptr, const void *x)
+static size_t copy_to_user_mvcos_check(size_t size, void __user *ptr,
+				       const void *x)
 {
 	if (size <= 256)
 		return copy_to_user_std(size, ptr, x);
 	return copy_to_user_mvcos(size, ptr, x);
 }
 
-size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from)
+static size_t copy_in_user_mvcos(size_t size, void __user *to,
+				 const void __user *from)
 {
 	register unsigned long reg0 asm("0") = 0x810081UL;
 	unsigned long tmp1, tmp2;
@@ -134,7 +134,7 @@ size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from)
 	return size;
 }
 
-size_t clear_user_mvcos(size_t size, void __user *to)
+static size_t clear_user_mvcos(size_t size, void __user *to)
 {
 	register unsigned long reg0 asm("0") = 0x810000UL;
 	unsigned long tmp1, tmp2;
@@ -162,10 +162,43 @@ size_t clear_user_mvcos(size_t size, void __user *to)
 	return size;
 }
 
-extern size_t strnlen_user_std(size_t, const char __user *);
-extern size_t strncpy_from_user_std(size_t, const char __user *, char *);
-extern int futex_atomic_op(int, int __user *, int, int *);
-extern int futex_atomic_cmpxchg(int __user *, int, int);
+static size_t strnlen_user_mvcos(size_t count, const char __user *src)
+{
+	char buf[256];
+	int rc;
+	size_t done, len, len_str;
+
+	done = 0;
+	do {
+		len = min(count - done, (size_t) 256);
+		rc = uaccess.copy_from_user(len, src + done, buf);
+		if (unlikely(rc == len))
+			return 0;
+		len -= rc;
+		len_str = strnlen(buf, len);
+		done += len_str;
+	} while ((len_str == len) && (done < count));
+	return done + 1;
+}
+
+static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
+				      char *dst)
+{
+	int rc;
+	size_t done, len, len_str;
+
+	done = 0;
+	do {
+		len = min(count - done, (size_t) 4096);
+		rc = uaccess.copy_from_user(len, src + done, dst);
+		if (unlikely(rc == len))
+			return -EFAULT;
+		len -= rc;
+		len_str = strnlen(dst, len);
+		done += len_str;
+	} while ((len_str == len) && (done < count));
+	return done;
+}
 
 struct uaccess_ops uaccess_mvcos = {
 	.copy_from_user = copy_from_user_mvcos_check,
@@ -176,6 +209,21 @@ struct uaccess_ops uaccess_mvcos = {
 	.clear_user = clear_user_mvcos,
 	.strnlen_user = strnlen_user_std,
 	.strncpy_from_user = strncpy_from_user_std,
-	.futex_atomic_op = futex_atomic_op,
-	.futex_atomic_cmpxchg = futex_atomic_cmpxchg,
+	.futex_atomic_op = futex_atomic_op_std,
+	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
+};
+
+#ifdef CONFIG_S390_SWITCH_AMODE
+struct uaccess_ops uaccess_mvcos_switch = {
+	.copy_from_user = copy_from_user_mvcos,
+	.copy_from_user_small = copy_from_user_mvcos,
+	.copy_to_user = copy_to_user_mvcos,
+	.copy_to_user_small = copy_to_user_mvcos,
+	.copy_in_user = copy_in_user_mvcos,
+	.clear_user = clear_user_mvcos,
+	.strnlen_user = strnlen_user_mvcos,
+	.strncpy_from_user = strncpy_from_user_mvcos,
+	.futex_atomic_op = futex_atomic_op_pt,
+	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
 };
+#endif
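
uaccess_mvcos_switch exists for CONFIG_S390_SWITCH_AMODE kernels, where user space is reached through the secondary address space: it uses the pure MVCOS copies for every size, the chunked strnlen/strncpy helpers above, and the page-table-walk futex operations from uaccess_pt.c. Which table ends up live is decided once at boot; a plausible shape of that wiring (the real code sits in arch/s390 setup code, not in this diff, so take the function name and flags as illustrative):

/* Illustrative boot-time selection of the uaccess backend (sketch). */
extern struct uaccess_ops uaccess;	/* the live ops table */

static void __init setup_uaccess(int has_mvcos, int switch_amode)
{
	if (has_mvcos)
		uaccess = switch_amode ? uaccess_mvcos_switch : uaccess_mvcos;
	else
		uaccess = uaccess_std;	/* page-table paths as fallback */
}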
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 49c3e46b4065..63181671e3e3 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -1,7 +1,8 @@
 /*
  * arch/s390/lib/uaccess_pt.c
  *
- * User access functions based on page table walks.
+ * User access functions based on page table walks for enhanced
+ * system layout without hardware support.
  *
  * Copyright IBM Corp. 2006
  * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
@@ -12,9 +13,10 @@
 #include <linux/mm.h>
 #include <asm/uaccess.h>
 #include <asm/futex.h>
+#include "uaccess.h"
 
-static inline int __handle_fault(struct mm_struct *mm, unsigned long address,
-				 int write_access)
+static int __handle_fault(struct mm_struct *mm, unsigned long address,
+			  int write_access)
 {
 	struct vm_area_struct *vma;
 	int ret = -EFAULT;
@@ -79,8 +81,8 @@ out_sigbus:
 	return ret;
 }
 
-static inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
-				    size_t n, int write_user)
+static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
+			     size_t n, int write_user)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long offset, pfn, done, size;
@@ -133,6 +135,49 @@ fault:
 	goto retry;
 }
 
+/*
+ * Do DAT for user address by page table walk, return kernel address.
+ * This function needs to be called with current->mm->page_table_lock held.
+ */
+static unsigned long __dat_user_addr(unsigned long uaddr)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long pfn, ret;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	int rc;
+
+	ret = 0;
+retry:
+	pgd = pgd_offset(mm, uaddr);
+	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+		goto fault;
+
+	pmd = pmd_offset(pgd, uaddr);
+	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+		goto fault;
+
+	pte = pte_offset_map(pmd, uaddr);
+	if (!pte || !pte_present(*pte))
+		goto fault;
+
+	pfn = pte_pfn(*pte);
+	if (!pfn_valid(pfn))
+		goto out;
+
+	ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
+out:
+	return ret;
+fault:
+	spin_unlock(&mm->page_table_lock);
+	rc = __handle_fault(mm, uaddr, 0);
+	spin_lock(&mm->page_table_lock);
+	if (rc)
+		goto out;
+	goto retry;
+}
+
 size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
 {
 	size_t rc;
@@ -155,3 +200,277 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
 	}
 	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
 }
+
+static size_t clear_user_pt(size_t n, void __user *to)
+{
+	long done, size, ret;
+
+	if (segment_eq(get_fs(), KERNEL_DS)) {
+		memset((void __kernel __force *) to, 0, n);
+		return 0;
+	}
+	done = 0;
+	do {
+		if (n - done > PAGE_SIZE)
+			size = PAGE_SIZE;
+		else
+			size = n - done;
+		ret = __user_copy_pt((unsigned long) to + done,
+				     &empty_zero_page, size, 1);
+		done += size;
+		if (ret)
+			return ret + n - done;
+	} while (done < n);
+	return 0;
+}
+
+static size_t strnlen_user_pt(size_t count, const char __user *src)
+{
+	char *addr;
+	unsigned long uaddr = (unsigned long) src;
+	struct mm_struct *mm = current->mm;
+	unsigned long offset, pfn, done, len;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	size_t len_str;
+
+	if (segment_eq(get_fs(), KERNEL_DS))
+		return strnlen((const char __kernel __force *) src, count) + 1;
+	done = 0;
+retry:
+	spin_lock(&mm->page_table_lock);
+	do {
+		pgd = pgd_offset(mm, uaddr);
+		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+			goto fault;
+
+		pmd = pmd_offset(pgd, uaddr);
+		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+			goto fault;
+
+		pte = pte_offset_map(pmd, uaddr);
+		if (!pte || !pte_present(*pte))
+			goto fault;
+
+		pfn = pte_pfn(*pte);
+		if (!pfn_valid(pfn)) {
+			done = -1;
+			goto out;
+		}
+
+		offset = uaddr & (PAGE_SIZE-1);
+		addr = (char *)(pfn << PAGE_SHIFT) + offset;
+		len = min(count - done, PAGE_SIZE - offset);
+		len_str = strnlen(addr, len);
+		done += len_str;
+		uaddr += len_str;
+	} while ((len_str == len) && (done < count));
+out:
+	spin_unlock(&mm->page_table_lock);
+	return done + 1;
+fault:
+	spin_unlock(&mm->page_table_lock);
+	if (__handle_fault(mm, uaddr, 0)) {
+		return 0;
+	}
+	goto retry;
+}
+
+static size_t strncpy_from_user_pt(size_t count, const char __user *src,
+				   char *dst)
+{
+	size_t n = strnlen_user_pt(count, src);
+
+	if (!n)
+		return -EFAULT;
+	if (n > count)
+		n = count;
+	if (segment_eq(get_fs(), KERNEL_DS)) {
+		memcpy(dst, (const char __kernel __force *) src, n);
+		if (dst[n-1] == '\0')
+			return n-1;
+		else
+			return n;
+	}
+	if (__user_copy_pt((unsigned long) src, dst, n, 0))
+		return -EFAULT;
+	if (dst[n-1] == '\0')
+		return n-1;
+	else
+		return n;
+}
+
+static size_t copy_in_user_pt(size_t n, void __user *to,
+			      const void __user *from)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
+		      uaddr, done, size;
+	unsigned long uaddr_from = (unsigned long) from;
+	unsigned long uaddr_to = (unsigned long) to;
+	pgd_t *pgd_from, *pgd_to;
+	pmd_t *pmd_from, *pmd_to;
+	pte_t *pte_from, *pte_to;
+	int write_user;
+
+	done = 0;
+retry:
+	spin_lock(&mm->page_table_lock);
+	do {
+		pgd_from = pgd_offset(mm, uaddr_from);
+		if (pgd_none(*pgd_from) || unlikely(pgd_bad(*pgd_from))) {
+			uaddr = uaddr_from;
+			write_user = 0;
+			goto fault;
+		}
+		pgd_to = pgd_offset(mm, uaddr_to);
+		if (pgd_none(*pgd_to) || unlikely(pgd_bad(*pgd_to))) {
+			uaddr = uaddr_to;
+			write_user = 1;
+			goto fault;
+		}
+
+		pmd_from = pmd_offset(pgd_from, uaddr_from);
+		if (pmd_none(*pmd_from) || unlikely(pmd_bad(*pmd_from))) {
+			uaddr = uaddr_from;
+			write_user = 0;
+			goto fault;
+		}
+		pmd_to = pmd_offset(pgd_to, uaddr_to);
+		if (pmd_none(*pmd_to) || unlikely(pmd_bad(*pmd_to))) {
+			uaddr = uaddr_to;
+			write_user = 1;
+			goto fault;
+		}
+
+		pte_from = pte_offset_map(pmd_from, uaddr_from);
+		if (!pte_from || !pte_present(*pte_from)) {
+			uaddr = uaddr_from;
+			write_user = 0;
+			goto fault;
+		}
+		pte_to = pte_offset_map(pmd_to, uaddr_to);
+		if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
+			uaddr = uaddr_to;
+			write_user = 1;
+			goto fault;
+		}
+
+		pfn_from = pte_pfn(*pte_from);
+		if (!pfn_valid(pfn_from))
+			goto out;
+		pfn_to = pte_pfn(*pte_to);
+		if (!pfn_valid(pfn_to))
+			goto out;
+
+		offset_from = uaddr_from & (PAGE_SIZE-1);
+		offset_to = uaddr_from & (PAGE_SIZE-1);
+		offset_max = max(offset_from, offset_to);
+		size = min(n - done, PAGE_SIZE - offset_max);
+
+		memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
+		       (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
+		done += size;
+		uaddr_from += size;
+		uaddr_to += size;
+	} while (done < n);
+out:
+	spin_unlock(&mm->page_table_lock);
+	return n - done;
+fault:
+	spin_unlock(&mm->page_table_lock);
+	if (__handle_fault(mm, uaddr, write_user))
+		return n - done;
+	goto retry;
+}
+
+#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
+	asm volatile("0: l   %1,0(%6)\n"				\
+		     "1: " insn						\
+		     "2: cs  %1,%2,0(%6)\n"				\
+		     "3: jl  1b\n"					\
+		     "   lhi %0,0\n"					\
+		     "4:\n"						\
+		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
+		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
+		       "=m" (*uaddr)					\
+		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
+		       "m" (*uaddr) : "cc" );
+
+int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
+{
+	int oldval = 0, newval, ret;
+
+	spin_lock(&current->mm->page_table_lock);
+	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
+	if (!uaddr) {
+		spin_unlock(&current->mm->page_table_lock);
+		return -EFAULT;
+	}
+	get_page(virt_to_page(uaddr));
+	spin_unlock(&current->mm->page_table_lock);
+	switch (op) {
+	case FUTEX_OP_SET:
+		__futex_atomic_op("lr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ADD:
+		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_OR:
+		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ANDN:
+		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_XOR:
+		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+	put_page(virt_to_page(uaddr));
+	*old = oldval;
+	return ret;
+}
+
+int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
+{
+	int ret;
+
+	spin_lock(&current->mm->page_table_lock);
+	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
+	if (!uaddr) {
+		spin_unlock(&current->mm->page_table_lock);
+		return -EFAULT;
+	}
+	get_page(virt_to_page(uaddr));
+	spin_unlock(&current->mm->page_table_lock);
+	asm volatile("   cs   %1,%4,0(%5)\n"
+		     "0: lr   %0,%1\n"
+		     "1:\n"
+		     EX_TABLE(0b,1b)
+		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
+		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
+		     : "cc", "memory" );
+	put_page(virt_to_page(uaddr));
+	return ret;
+}
+
+struct uaccess_ops uaccess_pt = {
+	.copy_from_user = copy_from_user_pt,
+	.copy_from_user_small = copy_from_user_pt,
+	.copy_to_user = copy_to_user_pt,
+	.copy_to_user_small = copy_to_user_pt,
+	.copy_in_user = copy_in_user_pt,
+	.clear_user = clear_user_pt,
+	.strnlen_user = strnlen_user_pt,
+	.strncpy_from_user = strncpy_from_user_pt,
+	.futex_atomic_op = futex_atomic_op_pt,
+	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
+};
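
Every walker in this file shares one locking pattern: resolve the user address under mm->page_table_lock, and whenever a pgd/pmd/pte entry is missing, drop the lock, let __handle_fault() bring the page in, then retry the walk from the top. Distilled into a skeleton (resolve_user_page() is an illustrative stand-in for the open-coded pgd/pmd/pte walk above, not a kernel function):

/* Lock/fault/retry skeleton of the uaccess_pt walkers (sketch). */
static long access_with_retry(struct mm_struct *mm, unsigned long uaddr,
			      int write_user)
{
retry:
	spin_lock(&mm->page_table_lock);
	if (!resolve_user_page(mm, uaddr, write_user)) {
		spin_unlock(&mm->page_table_lock);
		if (__handle_fault(mm, uaddr, write_user))
			return -EFAULT;	/* unresolvable fault: give up */
		goto retry;		/* page is present now, walk again */
	}
	/* ... access the page through its kernel mapping ... */
	spin_unlock(&mm->page_table_lock);
	return 0;
}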
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
index 56a0214e9928..28c4500a58d0 100644
--- a/arch/s390/lib/uaccess_std.c
+++ b/arch/s390/lib/uaccess_std.c
@@ -13,6 +13,7 @@
 #include <linux/mm.h>
 #include <linux/uaccess.h>
 #include <asm/futex.h>
+#include "uaccess.h"
 
 #ifndef __s390x__
 #define AHI	"ahi"
@@ -28,9 +29,6 @@
 #define SLR	"slgr"
 #endif
 
-extern size_t copy_from_user_pt(size_t n, const void __user *from, void *to);
-extern size_t copy_to_user_pt(size_t n, void __user *to, const void *from);
-
 size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
 {
 	unsigned long tmp1, tmp2;
@@ -72,7 +70,8 @@ size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
 	return size;
 }
 
-size_t copy_from_user_std_check(size_t size, const void __user *ptr, void *x)
+static size_t copy_from_user_std_check(size_t size, const void __user *ptr,
+				       void *x)
 {
 	if (size <= 1024)
 		return copy_from_user_std(size, ptr, x);
@@ -110,14 +109,16 @@ size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
 	return size;
 }
 
-size_t copy_to_user_std_check(size_t size, void __user *ptr, const void *x)
+static size_t copy_to_user_std_check(size_t size, void __user *ptr,
+				     const void *x)
 {
 	if (size <= 1024)
 		return copy_to_user_std(size, ptr, x);
 	return copy_to_user_pt(size, ptr, x);
 }
 
-size_t copy_in_user_std(size_t size, void __user *to, const void __user *from)
+static size_t copy_in_user_std(size_t size, void __user *to,
+			       const void __user *from)
 {
 	unsigned long tmp1;
 
@@ -148,7 +149,7 @@ size_t copy_in_user_std(size_t size, void __user *to, const void __user *from)
 	return size;
 }
 
-size_t clear_user_std(size_t size, void __user *to)
+static size_t clear_user_std(size_t size, void __user *to)
 {
 	unsigned long tmp1, tmp2;
 
@@ -254,7 +255,7 @@ size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst)
 		: "0" (-EFAULT), "d" (oparg), "a" (uaddr),	\
 		  "m" (*uaddr) : "cc");
 
-int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old)
+int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old)
 {
 	int oldval = 0, newval, ret;
 
@@ -286,7 +287,7 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old)
 	return ret;
 }
 
-int futex_atomic_cmpxchg(int __user *uaddr, int oldval, int newval)
+int futex_atomic_cmpxchg_std(int __user *uaddr, int oldval, int newval)
 {
 	int ret;
 
@@ -311,6 +312,6 @@ struct uaccess_ops uaccess_std = {
 	.clear_user = clear_user_std,
 	.strnlen_user = strnlen_user_std,
 	.strncpy_from_user = strncpy_from_user_std,
-	.futex_atomic_op = futex_atomic_op,
-	.futex_atomic_cmpxchg = futex_atomic_cmpxchg,
+	.futex_atomic_op = futex_atomic_op_std,
+	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
 };
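
Note the argument order of all these primitives: unlike the generic kernel's copy_from_user(to, from, n), the s390 ops take (size, ptr, x) and return the number of bytes that could not be transferred, with 0 meaning complete success. A caller-side sketch of that contract (fetch_from_user() and its buffers are illustrative; uaccess.copy_from_user is used exactly as in strnlen_user_mvcos() above):

/* Caller-side contract: the return value is the residual byte count. */
static int fetch_from_user(void *kbuf, const void __user *ubuf, size_t len)
{
	size_t left = uaccess.copy_from_user(len, ubuf, kbuf);

	return left ? -EFAULT : 0;	/* any residue means a fault */
}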