author	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
commit	8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree	a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /arch/s390/lib
parent	406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'arch/s390/lib')
-rw-r--r--	arch/s390/lib/Makefile		3
-rw-r--r--	arch/s390/lib/delay.c		32
-rw-r--r--	arch/s390/lib/div64.c		4
-rw-r--r--	arch/s390/lib/mem32.S		92
-rw-r--r--	arch/s390/lib/mem64.S		88
-rw-r--r--	arch/s390/lib/spinlock.c	33
-rw-r--r--	arch/s390/lib/string.c		59
-rw-r--r--	arch/s390/lib/uaccess.h		2
-rw-r--r--	arch/s390/lib/uaccess_mvcos.c	6
-rw-r--r--	arch/s390/lib/uaccess_pt.c	146
-rw-r--r--	arch/s390/lib/uaccess_std.c	6
11 files changed, 189 insertions, 282 deletions
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index 6ab0d0b5cec..761ab8b56af 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -4,7 +4,6 @@
 
 lib-y += delay.o string.o uaccess_std.o uaccess_pt.o
 obj-y += usercopy.o
-obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
-obj-$(CONFIG_64BIT) += mem64.o
+obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o
 lib-$(CONFIG_64BIT) += uaccess_mvcos.o
 lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 42d0cf89121..a65229d91c9 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -1,7 +1,7 @@
 /*
  * Precise Delay Loops for S390
  *
- * Copyright IBM Corp. 1999, 2008
+ * Copyright IBM Corp. 1999,2008
  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
  *	      Heiko Carstens <heiko.carstens@de.ibm.com>,
  */
@@ -12,7 +12,6 @@
 #include <linux/module.h>
 #include <linux/irqflags.h>
 #include <linux/interrupt.h>
-#include <asm/vtimer.h>
 #include <asm/div64.h>
 
 void __delay(unsigned long loops)
@@ -29,33 +28,35 @@ void __delay(unsigned long loops)
 
 static void __udelay_disabled(unsigned long long usecs)
 {
-	unsigned long cr0, cr6, new;
-	u64 clock_saved, end;
+	unsigned long mask, cr0, cr0_saved;
+	u64 clock_saved;
+	u64 end;
 
+	mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
 	end = get_clock() + (usecs << 12);
 	clock_saved = local_tick_disable();
-	__ctl_store(cr0, 0, 0);
-	__ctl_store(cr6, 6, 6);
-	new = (cr0 & 0xffff00e0) | 0x00000800;
-	__ctl_load(new , 0, 0);
-	new = 0;
-	__ctl_load(new, 6, 6);
+	__ctl_store(cr0_saved, 0, 0);
+	cr0 = (cr0_saved & 0xffff00e0) | 0x00000800;
+	__ctl_load(cr0 , 0, 0);
 	lockdep_off();
 	do {
 		set_clock_comparator(end);
-		vtime_stop_cpu();
+		trace_hardirqs_on();
+		__load_psw_mask(mask);
 		local_irq_disable();
 	} while (get_clock() < end);
 	lockdep_on();
-	__ctl_load(cr0, 0, 0);
-	__ctl_load(cr6, 6, 6);
+	__ctl_load(cr0_saved, 0, 0);
 	local_tick_enable(clock_saved);
 }
 
 static void __udelay_enabled(unsigned long long usecs)
 {
-	u64 clock_saved, end;
+	unsigned long mask;
+	u64 clock_saved;
+	u64 end;
 
+	mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT | PSW_MASK_IO;
 	end = get_clock() + (usecs << 12);
 	do {
 		clock_saved = 0;
@@ -63,7 +64,8 @@ static void __udelay_enabled(unsigned long long usecs)
 			clock_saved = local_tick_disable();
 			set_clock_comparator(end);
 		}
-		vtime_stop_cpu();
+		trace_hardirqs_on();
+		__load_psw_mask(mask);
 		local_irq_disable();
 		if (clock_saved)
 			local_tick_enable(clock_saved);
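
A note on the conversion both helpers share: "usecs << 12" turns
microseconds into s390 TOD-clock units, because bit 51 of the TOD clock
advances once per microsecond, i.e. one microsecond equals 2^12 = 4096
clock units. A minimal sketch of the resulting wait shape, assuming the
get_clock() primitive used in this file (illustration only, without the
PSW and clock-comparator handling above):

/* Sketch: microseconds to TOD-clock units (TOD bit 51 = 1 microsecond). */
static inline unsigned long long usecs_to_tod(unsigned long long usecs)
{
	return usecs << 12;			/* usecs * 4096 */
}

static void udelay_sketch(unsigned long long usecs)
{
	unsigned long long end = get_clock() + usecs_to_tod(usecs);

	while (get_clock() < end)
		;				/* spin until TOD passes end */
}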
diff --git a/arch/s390/lib/div64.c b/arch/s390/lib/div64.c
index 261152f8324..d9e62c0b576 100644
--- a/arch/s390/lib/div64.c
+++ b/arch/s390/lib/div64.c
@@ -1,7 +1,9 @@
 /*
+ * arch/s390/lib/div64.c
+ *
  * __div64_32 implementation for 31 bit.
  *
- * Copyright IBM Corp. 2006
+ * Copyright (C) IBM Corp. 2006
  * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  */
 
diff --git a/arch/s390/lib/mem32.S b/arch/s390/lib/mem32.S
deleted file mode 100644
index 14ca9244b61..00000000000
--- a/arch/s390/lib/mem32.S
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * String handling functions.
- *
- * Copyright IBM Corp. 2012
- */
-
-#include <linux/linkage.h>
-
-/*
- * memset implementation
- *
- * This code corresponds to the C construct below. We do distinguish
- * between clearing (c == 0) and setting a memory array (c != 0) simply
- * because nearly all memset invocations in the kernel clear memory and
- * the xc instruction is preferred in such cases.
- *
- * void *memset(void *s, int c, size_t n)
- * {
- *	if (likely(c == 0))
- *		return __builtin_memset(s, 0, n);
- *	return __builtin_memset(s, c, n);
- * }
- */
-ENTRY(memset)
-	basr	%r5,%r0
-.Lmemset_base:
-	ltr	%r4,%r4
-	bzr	%r14
-	ltr	%r3,%r3
-	jnz	.Lmemset_fill
-	ahi	%r4,-1
-	lr	%r3,%r4
-	srl	%r3,8
-	ltr	%r3,%r3
-	lr	%r1,%r2
-	je	.Lmemset_clear_rest
-.Lmemset_clear_loop:
-	xc	0(256,%r1),0(%r1)
-	la	%r1,256(%r1)
-	brct	%r3,.Lmemset_clear_loop
-.Lmemset_clear_rest:
-	ex	%r4,.Lmemset_xc-.Lmemset_base(%r5)
-	br	%r14
-.Lmemset_fill:
-	stc	%r3,0(%r2)
-	chi	%r4,1
-	lr	%r1,%r2
-	ber	%r14
-	ahi	%r4,-2
-	lr	%r3,%r4
-	srl	%r3,8
-	ltr	%r3,%r3
-	je	.Lmemset_fill_rest
-.Lmemset_fill_loop:
-	mvc	1(256,%r1),0(%r1)
-	la	%r1,256(%r1)
-	brct	%r3,.Lmemset_fill_loop
-.Lmemset_fill_rest:
-	ex	%r4,.Lmemset_mvc-.Lmemset_base(%r5)
-	br	%r14
-.Lmemset_xc:
-	xc	0(1,%r1),0(%r1)
-.Lmemset_mvc:
-	mvc	1(1,%r1),0(%r1)
-
-/*
- * memcpy implementation
- *
- * void *memcpy(void *dest, const void *src, size_t n)
- */
-ENTRY(memcpy)
-	basr	%r5,%r0
-.Lmemcpy_base:
-	ltr	%r4,%r4
-	bzr	%r14
-	ahi	%r4,-1
-	lr	%r0,%r4
-	srl	%r0,8
-	ltr	%r0,%r0
-	lr	%r1,%r2
-	jnz	.Lmemcpy_loop
-.Lmemcpy_rest:
-	ex	%r4,.Lmemcpy_mvc-.Lmemcpy_base(%r5)
-	br	%r14
-.Lmemcpy_loop:
-	mvc	0(256,%r1),0(%r3)
-	la	%r1,256(%r1)
-	la	%r3,256(%r3)
-	brct	%r0,.Lmemcpy_loop
-	j	.Lmemcpy_rest
-.Lmemcpy_mvc:
-	mvc	0(1,%r1),0(%r3)
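
For context on the code removed above: xc and mvc carry an 8-bit length
field, so a single instruction handles at most 256 bytes. The loops
therefore clear or copy full 256-byte blocks and finish with EXECUTE
(ex %r4,...), which runs one final xc/mvc with its length taken from
%r4. The overlapping "mvc 1(256,%r1),0(%r1)" in the fill path works
because mvc moves bytes left to right, propagating the byte stored at
offset 0 across the block. A rough C rendering of the chunking idea (a
sketch, not the removed code):

/* Sketch of the 256-byte-chunk plus EXECUTE-remainder pattern. */
static void *memset_zero_sketch(void *s, unsigned long n)
{
	unsigned char *p = s;
	unsigned long i;

	while (n >= 256) {		/* xc 0(256,%r1),0(%r1) loop */
		for (i = 0; i < 256; i++)
			p[i] = 0;
		p += 256;
		n -= 256;
	}
	while (n--)			/* ex %r4,.Lmemset_xc remainder */
		*p++ = 0;
	return s;
}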
diff --git a/arch/s390/lib/mem64.S b/arch/s390/lib/mem64.S
deleted file mode 100644
index c6d553e85ab..00000000000
--- a/arch/s390/lib/mem64.S
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * String handling functions.
- *
- * Copyright IBM Corp. 2012
- */
-
-#include <linux/linkage.h>
-
-/*
- * memset implementation
- *
- * This code corresponds to the C construct below. We do distinguish
- * between clearing (c == 0) and setting a memory array (c != 0) simply
- * because nearly all memset invocations in the kernel clear memory and
- * the xc instruction is preferred in such cases.
- *
- * void *memset(void *s, int c, size_t n)
- * {
- *	if (likely(c == 0))
- *		return __builtin_memset(s, 0, n);
- *	return __builtin_memset(s, c, n);
- * }
- */
-ENTRY(memset)
-	ltgr	%r4,%r4
-	bzr	%r14
-	ltgr	%r3,%r3
-	jnz	.Lmemset_fill
-	aghi	%r4,-1
-	srlg	%r3,%r4,8
-	ltgr	%r3,%r3
-	lgr	%r1,%r2
-	jz	.Lmemset_clear_rest
-.Lmemset_clear_loop:
-	xc	0(256,%r1),0(%r1)
-	la	%r1,256(%r1)
-	brctg	%r3,.Lmemset_clear_loop
-.Lmemset_clear_rest:
-	larl	%r3,.Lmemset_xc
-	ex	%r4,0(%r3)
-	br	%r14
-.Lmemset_fill:
-	stc	%r3,0(%r2)
-	cghi	%r4,1
-	lgr	%r1,%r2
-	ber	%r14
-	aghi	%r4,-2
-	srlg	%r3,%r4,8
-	ltgr	%r3,%r3
-	jz	.Lmemset_fill_rest
-.Lmemset_fill_loop:
-	mvc	1(256,%r1),0(%r1)
-	la	%r1,256(%r1)
-	brctg	%r3,.Lmemset_fill_loop
-.Lmemset_fill_rest:
-	larl	%r3,.Lmemset_mvc
-	ex	%r4,0(%r3)
-	br	%r14
-.Lmemset_xc:
-	xc	0(1,%r1),0(%r1)
-.Lmemset_mvc:
-	mvc	1(1,%r1),0(%r1)
-
-/*
- * memcpy implementation
- *
- * void *memcpy(void *dest, const void *src, size_t n)
- */
-ENTRY(memcpy)
-	ltgr	%r4,%r4
-	bzr	%r14
-	aghi	%r4,-1
-	srlg	%r5,%r4,8
-	ltgr	%r5,%r5
-	lgr	%r1,%r2
-	jnz	.Lmemcpy_loop
-.Lmemcpy_rest:
-	larl	%r5,.Lmemcpy_mvc
-	ex	%r4,0(%r5)
-	br	%r14
-.Lmemcpy_loop:
-	mvc	0(256,%r1),0(%r3)
-	la	%r1,256(%r1)
-	la	%r3,256(%r3)
-	brctg	%r5,.Lmemcpy_loop
-	j	.Lmemcpy_rest
-.Lmemcpy_mvc:
-	mvc	0(1,%r1),0(%r3)
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index f709983f41f..91754ffb920 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -1,7 +1,8 @@
 /*
+ * arch/s390/lib/spinlock.c
  * Out of line spinlock code.
  *
- * Copyright IBM Corp. 2004, 2006
+ * Copyright (C) IBM Corp. 2004, 2006
  * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
  */
 
@@ -9,7 +10,6 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
-#include <linux/smp.h>
 #include <asm/io.h>
 
 int spin_retry = 1000;
@@ -24,6 +24,21 @@ static int __init spin_retry_setup(char *str)
 }
 __setup("spin_retry=", spin_retry_setup);
 
+static inline void _raw_yield(void)
+{
+	if (MACHINE_HAS_DIAG44)
+		asm volatile("diag 0,0,0x44");
+}
+
+static inline void _raw_yield_cpu(int cpu)
+{
+	if (MACHINE_HAS_DIAG9C)
+		asm volatile("diag %0,0,0x9c"
+			     : : "d" (cpu_logical_map(cpu)));
+	else
+		_raw_yield();
+}
+
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
 	int count = spin_retry;
@@ -45,7 +60,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 		}
 		owner = lp->owner_cpu;
 		if (owner)
-			smp_yield_cpu(~owner);
+			_raw_yield_cpu(~owner);
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return;
 	}
@@ -76,7 +91,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 		}
 		owner = lp->owner_cpu;
 		if (owner)
-			smp_yield_cpu(~owner);
+			_raw_yield_cpu(~owner);
 		local_irq_disable();
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return;
@@ -106,7 +121,7 @@ void arch_spin_relax(arch_spinlock_t *lock)
 	if (cpu != 0) {
 		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
 		    !smp_vcpu_scheduled(~cpu))
-			smp_yield_cpu(~cpu);
+			_raw_yield_cpu(~cpu);
 	}
 }
 EXPORT_SYMBOL(arch_spin_relax);
@@ -118,7 +133,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 
 	while (1) {
 		if (count-- <= 0) {
-			smp_yield();
+			_raw_yield();
 			count = spin_retry;
 		}
 		if (!arch_read_can_lock(rw))
@@ -138,7 +153,7 @@ void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 	local_irq_restore(flags);
 	while (1) {
 		if (count-- <= 0) {
-			smp_yield();
+			_raw_yield();
 			count = spin_retry;
 		}
 		if (!arch_read_can_lock(rw))
@@ -173,7 +188,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 
 	while (1) {
 		if (count-- <= 0) {
-			smp_yield();
+			_raw_yield();
 			count = spin_retry;
 		}
 		if (!arch_write_can_lock(rw))
@@ -191,7 +206,7 @@ void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 	local_irq_restore(flags);
 	while (1) {
 		if (count-- <= 0) {
-			smp_yield();
+			_raw_yield();
 			count = spin_retry;
 		}
 		if (!arch_write_can_lock(rw))
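
The _raw_yield helpers restored above wrap s390 hypervisor diagnose
calls: diag 0x44 yields the virtual CPU's remaining timeslice, while
diag 0x9c (where available) directs the yield at a specific virtual
CPU, here the lock holder. owner_cpu stores the bitwise complement of
the holder's CPU number so that 0 can mean "unlocked", which is why
every caller passes ~owner. A simplified sketch of the acquire loop
built from these pieces (helper names as in the diff, details elided):

/* Sketch: spin, then yield to the lock holder when the budget runs out. */
static void spin_lock_sketch(arch_spinlock_t *lp)
{
	int cpu = ~smp_processor_id();	/* complemented: 0 means free */
	int count = spin_retry;
	int owner;

	while (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) != 0) {
		if (count-- <= 0) {
			owner = lp->owner_cpu;
			if (owner)
				_raw_yield_cpu(~owner);	/* diag 0x9c */
			count = spin_retry;
		}
	}
}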
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index b647d5ff0ad..4143b7c1909 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -1,8 +1,9 @@
 /*
+ * arch/s390/lib/string.c
  * Optimized string functions
  *
  * S390 version
- * Copyright IBM Corp. 2004
+ * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
  * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
  */
 
@@ -43,7 +44,11 @@ static inline char *__strnend(const char *s, size_t n)
  */
 size_t strlen(const char *s)
 {
+#if __GNUC__ < 4
 	return __strend(s) - s;
+#else
+	return __builtin_strlen(s);
+#endif
 }
 EXPORT_SYMBOL(strlen);
 
@@ -69,6 +74,7 @@ EXPORT_SYMBOL(strnlen);
  */
 char *strcpy(char *dest, const char *src)
 {
+#if __GNUC__ < 4
 	register int r0 asm("0") = 0;
 	char *ret = dest;
 
@@ -77,6 +83,9 @@ char *strcpy(char *dest, const char *src)
77 : "+&a" (dest), "+&a" (src) : "d" (r0) 83 : "+&a" (dest), "+&a" (src) : "d" (r0)
78 : "cc", "memory" ); 84 : "cc", "memory" );
79 return ret; 85 return ret;
86#else
87 return __builtin_strcpy(dest, src);
88#endif
80} 89}
81EXPORT_SYMBOL(strcpy); 90EXPORT_SYMBOL(strcpy);
82 91
@@ -98,7 +107,7 @@ size_t strlcpy(char *dest, const char *src, size_t size)
 	if (size) {
 		size_t len = (ret >= size) ? size-1 : ret;
 		dest[len] = '\0';
-		memcpy(dest, src, len);
+		__builtin_memcpy(dest, src, len);
 	}
 	return ret;
 }
@@ -116,8 +125,8 @@ EXPORT_SYMBOL(strlcpy);
 char *strncpy(char *dest, const char *src, size_t n)
 {
 	size_t len = __strnend(src, n) - src;
-	memset(dest + len, 0, n - len);
-	memcpy(dest, src, len);
+	__builtin_memset(dest + len, 0, n - len);
+	__builtin_memcpy(dest, src, len);
 	return dest;
 }
 EXPORT_SYMBOL(strncpy);
@@ -163,7 +172,7 @@ size_t strlcat(char *dest, const char *src, size_t n)
 		if (len >= n)
 			len = n - 1;
 		dest[len] = '\0';
-		memcpy(dest, src, len);
+		__builtin_memcpy(dest, src, len);
 	}
 	return res;
 }
@@ -186,7 +195,7 @@ char *strncat(char *dest, const char *src, size_t n)
 	char *p = __strend(dest);
 
 	p[len] = '\0';
-	memcpy(p, src, len);
+	__builtin_memcpy(p, src, len);
 	return dest;
 }
 EXPORT_SYMBOL(strncat);
@@ -340,3 +349,41 @@ void *memscan(void *s, int c, size_t n)
 	return (void *) ret;
 }
 EXPORT_SYMBOL(memscan);
+
+/**
+ * memcpy - Copy one area of memory to another
+ * @dest: Where to copy to
+ * @src: Where to copy from
+ * @n: The size of the area.
+ *
+ * returns a pointer to @dest
+ */
+void *memcpy(void *dest, const void *src, size_t n)
+{
+	return __builtin_memcpy(dest, src, n);
+}
+EXPORT_SYMBOL(memcpy);
+
+/**
+ * memset - Fill a region of memory with the given value
+ * @s: Pointer to the start of the area.
+ * @c: The byte to fill the area with
+ * @n: The size of the area.
+ *
+ * returns a pointer to @s
+ */
+void *memset(void *s, int c, size_t n)
+{
+	char *xs;
+
+	if (c == 0)
+		return __builtin_memset(s, 0, n);
+
+	xs = (char *) s;
+	if (n > 0)
+		do {
+			*xs++ = c;
+		} while (--n > 0);
+	return s;
+}
+EXPORT_SYMBOL(memset);
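
Two details worth noting in the hunks above. First, __strend(), used by
strlen() and strncat(), locates the terminating NUL with the srst
(SEARCH STRING) instruction; behaviorally it is equivalent to this
sketch:

/* Behavioral sketch of __strend(); the real helper uses srst. */
static inline char *strend_sketch(const char *s)
{
	while (*s)
		s++;
	return (char *) s;
}

Second, the switch to __builtin_memcpy()/__builtin_memset() matters
because this file now defines memcpy() and memset() itself: the
builtins let gcc expand the operation inline instead of emitting a
call that could recurse into the very function being defined. The
manual byte loop in memset()'s non-zero path exists for the same
reason.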
diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h
index 315dbe09983..1d2536cb630 100644
--- a/arch/s390/lib/uaccess.h
+++ b/arch/s390/lib/uaccess.h
@@ -1,4 +1,6 @@
 /*
+ * arch/s390/uaccess.h
+ *
  * Copyright IBM Corp. 2007
  *
  */
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index 2443ae476e3..60455f104ea 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -1,7 +1,9 @@
 /*
+ * arch/s390/lib/uaccess_mvcos.c
+ *
  * Optimized user space space access functions based on mvcos.
  *
- * Copyright IBM Corp. 2006
+ * Copyright (C) IBM Corp. 2006
  * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  *	      Gerald Schaefer (gerald.schaefer@de.ibm.com)
  */
@@ -12,7 +14,7 @@
 #include <asm/futex.h>
 #include "uaccess.h"
 
-#ifndef CONFIG_64BIT
+#ifndef __s390x__
 #define AHI	"ahi"
 #define ALR	"alr"
 #define CLR	"clr"
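
The block being retargeted here defines mnemonic macros so that one set
of inline-assembly templates serves both 31-bit and 64-bit kernels;
keying it to the compiler-defined __s390x__ macro instead of
CONFIG_64BIT ties the choice to the ABI gcc is actually targeting. The
shape of the full block, per the s390 instruction set (a sketch; only
the 31-bit mnemonics appear in the hunk above):

#ifndef __s390x__
#define AHI	"ahi"		/* add halfword immediate, 32-bit */
#define ALR	"alr"		/* add logical, 32-bit */
#define CLR	"clr"		/* compare logical, 32-bit */
#else
#define AHI	"aghi"		/* 64-bit forms of the same operations */
#define ALR	"algr"
#define CLR	"clgr"
#endif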
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 9017a63dda3..74833831417 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -1,83 +1,72 @@
 /*
+ * arch/s390/lib/uaccess_pt.c
+ *
  * User access functions based on page table walks for enhanced
  * system layout without hardware support.
  *
- * Copyright IBM Corp. 2006, 2012
+ * Copyright IBM Corp. 2006
  * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
  */
 
 #include <linux/errno.h>
 #include <linux/hardirq.h>
 #include <linux/mm.h>
-#include <linux/hugetlb.h>
 #include <asm/uaccess.h>
 #include <asm/futex.h>
 #include "uaccess.h"
 
-
-/*
- * Returns kernel address for user virtual address. If the returned address is
- * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address
- * contains the (negative) exception code.
- */
-static __always_inline unsigned long follow_table(struct mm_struct *mm,
-						  unsigned long addr, int write)
+static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *ptep;
 
 	pgd = pgd_offset(mm, addr);
 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-		return -0x3aUL;
+		return (pte_t *) 0x3a;
 
 	pud = pud_offset(pgd, addr);
 	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-		return -0x3bUL;
+		return (pte_t *) 0x3b;
 
 	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd))
-		return -0x10UL;
-	if (pmd_large(*pmd)) {
-		if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
-			return -0x04UL;
-		return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
-	}
-	if (unlikely(pmd_bad(*pmd)))
-		return -0x10UL;
+	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+		return (pte_t *) 0x10;
 
-	ptep = pte_offset_map(pmd, addr);
-	if (!pte_present(*ptep))
-		return -0x11UL;
-	if (write && !pte_write(*ptep))
-		return -0x04UL;
-
-	return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
+	return pte_offset_map(pmd, addr);
 }
 
 static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
 					     size_t n, int write_user)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long offset, done, size, kaddr;
+	unsigned long offset, pfn, done, size;
+	pte_t *pte;
 	void *from, *to;
 
 	done = 0;
 retry:
 	spin_lock(&mm->page_table_lock);
 	do {
-		kaddr = follow_table(mm, uaddr, write_user);
-		if (IS_ERR_VALUE(kaddr))
+		pte = follow_table(mm, uaddr);
+		if ((unsigned long) pte < 0x1000)
+			goto fault;
+		if (!pte_present(*pte)) {
+			pte = (pte_t *) 0x11;
 			goto fault;
+		} else if (write_user && !pte_write(*pte)) {
+			pte = (pte_t *) 0x04;
+			goto fault;
+		}
 
-		offset = uaddr & ~PAGE_MASK;
+		pfn = pte_pfn(*pte);
+		offset = uaddr & (PAGE_SIZE - 1);
 		size = min(n - done, PAGE_SIZE - offset);
 		if (write_user) {
-			to = (void *) kaddr;
+			to = (void *)((pfn << PAGE_SHIFT) + offset);
 			from = kptr + done;
 		} else {
-			from = (void *) kaddr;
+			from = (void *)((pfn << PAGE_SHIFT) + offset);
 			to = kptr + done;
 		}
 		memcpy(to, from, size);
@@ -88,7 +77,7 @@ retry:
 	return n - done;
 fault:
 	spin_unlock(&mm->page_table_lock);
-	if (__handle_fault(uaddr, -kaddr, write_user))
+	if (__handle_fault(uaddr, (unsigned long) pte, write_user))
 		return n - done;
 	goto retry;
 }
@@ -97,22 +86,27 @@ fault:
  * Do DAT for user address by page table walk, return kernel address.
  * This function needs to be called with current->mm->page_table_lock held.
  */
-static __always_inline unsigned long __dat_user_addr(unsigned long uaddr,
-						     int write)
+static __always_inline unsigned long __dat_user_addr(unsigned long uaddr)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long kaddr;
+	unsigned long pfn;
+	pte_t *pte;
 	int rc;
 
 retry:
-	kaddr = follow_table(mm, uaddr, write);
-	if (IS_ERR_VALUE(kaddr))
+	pte = follow_table(mm, uaddr);
+	if ((unsigned long) pte < 0x1000)
 		goto fault;
+	if (!pte_present(*pte)) {
+		pte = (pte_t *) 0x11;
+		goto fault;
+	}
 
-	return kaddr;
+	pfn = pte_pfn(*pte);
+	return (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
 fault:
 	spin_unlock(&mm->page_table_lock);
-	rc = __handle_fault(uaddr, -kaddr, write);
+	rc = __handle_fault(uaddr, (unsigned long) pte, 0);
 	spin_lock(&mm->page_table_lock);
 	if (!rc)
 		goto retry;
@@ -167,9 +161,11 @@ static size_t clear_user_pt(size_t n, void __user *to)
 
 static size_t strnlen_user_pt(size_t count, const char __user *src)
 {
+	char *addr;
 	unsigned long uaddr = (unsigned long) src;
 	struct mm_struct *mm = current->mm;
-	unsigned long offset, done, len, kaddr;
+	unsigned long offset, pfn, done, len;
+	pte_t *pte;
 	size_t len_str;
 
 	if (segment_eq(get_fs(), KERNEL_DS))
@@ -178,13 +174,19 @@ static size_t strnlen_user_pt(size_t count, const char __user *src)
 retry:
 	spin_lock(&mm->page_table_lock);
 	do {
-		kaddr = follow_table(mm, uaddr, 0);
-		if (IS_ERR_VALUE(kaddr))
+		pte = follow_table(mm, uaddr);
+		if ((unsigned long) pte < 0x1000)
+			goto fault;
+		if (!pte_present(*pte)) {
+			pte = (pte_t *) 0x11;
 			goto fault;
+		}
 
-		offset = uaddr & ~PAGE_MASK;
+		pfn = pte_pfn(*pte);
+		offset = uaddr & (PAGE_SIZE-1);
+		addr = (char *)(pfn << PAGE_SHIFT) + offset;
 		len = min(count - done, PAGE_SIZE - offset);
-		len_str = strnlen((char *) kaddr, len);
+		len_str = strnlen(addr, len);
 		done += len_str;
 		uaddr += len_str;
 	} while ((len_str == len) && (done < count));
@@ -192,7 +194,7 @@ retry:
 	return done + 1;
 fault:
 	spin_unlock(&mm->page_table_lock);
-	if (__handle_fault(uaddr, -kaddr, 0))
+	if (__handle_fault(uaddr, (unsigned long) pte, 0))
 		return 0;
 	goto retry;
 }
@@ -225,10 +227,11 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
 			      const void __user *from)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long offset_max, uaddr, done, size, error_code;
+	unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
+		      uaddr, done, size, error_code;
 	unsigned long uaddr_from = (unsigned long) from;
 	unsigned long uaddr_to = (unsigned long) to;
-	unsigned long kaddr_to, kaddr_from;
+	pte_t *pte_from, *pte_to;
 	int write_user;
 
 	if (segment_eq(get_fs(), KERNEL_DS)) {
@@ -241,23 +244,38 @@ retry:
 	do {
 		write_user = 0;
 		uaddr = uaddr_from;
-		kaddr_from = follow_table(mm, uaddr_from, 0);
-		error_code = kaddr_from;
-		if (IS_ERR_VALUE(error_code))
+		pte_from = follow_table(mm, uaddr_from);
+		error_code = (unsigned long) pte_from;
+		if (error_code < 0x1000)
+			goto fault;
+		if (!pte_present(*pte_from)) {
+			error_code = 0x11;
 			goto fault;
+		}
 
 		write_user = 1;
 		uaddr = uaddr_to;
-		kaddr_to = follow_table(mm, uaddr_to, 1);
-		error_code = (unsigned long) kaddr_to;
-		if (IS_ERR_VALUE(error_code))
+		pte_to = follow_table(mm, uaddr_to);
+		error_code = (unsigned long) pte_to;
+		if (error_code < 0x1000)
+			goto fault;
+		if (!pte_present(*pte_to)) {
+			error_code = 0x11;
 			goto fault;
+		} else if (!pte_write(*pte_to)) {
+			error_code = 0x04;
+			goto fault;
+		}
 
-		offset_max = max(uaddr_from & ~PAGE_MASK,
-				 uaddr_to & ~PAGE_MASK);
+		pfn_from = pte_pfn(*pte_from);
+		pfn_to = pte_pfn(*pte_to);
+		offset_from = uaddr_from & (PAGE_SIZE-1);
+		offset_to = uaddr_from & (PAGE_SIZE-1);
+		offset_max = max(offset_from, offset_to);
 		size = min(n - done, PAGE_SIZE - offset_max);
 
-		memcpy((void *) kaddr_to, (void *) kaddr_from, size);
+		memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
+		       (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
 		done += size;
 		uaddr_from += size;
 		uaddr_to += size;
@@ -266,7 +284,7 @@ retry:
 	return n - done;
 fault:
 	spin_unlock(&mm->page_table_lock);
-	if (__handle_fault(uaddr, -error_code, write_user))
+	if (__handle_fault(uaddr, error_code, write_user))
 		return n - done;
 	goto retry;
 }
@@ -324,8 +342,7 @@ int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
 	if (segment_eq(get_fs(), KERNEL_DS))
 		return __futex_atomic_op_pt(op, uaddr, oparg, old);
 	spin_lock(&current->mm->page_table_lock);
-	uaddr = (u32 __force __user *)
-		__dat_user_addr((__force unsigned long) uaddr, 1);
+	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
 	if (!uaddr) {
 		spin_unlock(&current->mm->page_table_lock);
 		return -EFAULT;
@@ -361,8 +378,7 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
 	if (segment_eq(get_fs(), KERNEL_DS))
 		return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
 	spin_lock(&current->mm->page_table_lock);
-	uaddr = (u32 __force __user *)
-		__dat_user_addr((__force unsigned long) uaddr, 1);
+	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
 	if (!uaddr) {
 		spin_unlock(&current->mm->page_table_lock);
 		return -EFAULT;
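
A note on the error convention restored in this file: follow_table()
signals failure by returning a small integer cast to pte_t * rather
than a real page-table pointer. The sentinel values mirror s390
program-interruption codes (0x3a and 0x3b for region translation, 0x10
segment translation, 0x11 page translation, 0x04 protection), so they
can be handed straight to __handle_fault() as the fault code. The
"(unsigned long) pte < 0x1000" test is safe because no valid kernel
pointer ever falls within the first 4 KB. As a sketch:

/* Sketch of the sentinel test repeated after each follow_table() call. */
static inline int follow_table_failed(pte_t *pte)
{
	/* sentinels are interruption codes below 0x1000, never pointers */
	return (unsigned long) pte < 0x1000;
}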
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
index 6fbd0633827..bb1a7eed42c 100644
--- a/arch/s390/lib/uaccess_std.c
+++ b/arch/s390/lib/uaccess_std.c
@@ -1,8 +1,10 @@
 /*
+ * arch/s390/lib/uaccess_std.c
+ *
  * Standard user space access functions based on mvcp/mvcs and doing
  * interesting things in the secondary space mode.
  *
- * Copyright IBM Corp. 2006
+ * Copyright (C) IBM Corp. 2006
  * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  *	      Gerald Schaefer (gerald.schaefer@de.ibm.com)
  */
@@ -13,7 +15,7 @@
 #include <asm/futex.h>
 #include "uaccess.h"
 
-#ifndef CONFIG_64BIT
+#ifndef __s390x__
 #define AHI	"ahi"
 #define ALR	"alr"
 #define CLR	"clr"