Diffstat (limited to 'arch/s390/lib')
-rw-r--r--  arch/s390/lib/Makefile          1
-rw-r--r--  arch/s390/lib/spinlock.c       97
-rw-r--r--  arch/s390/lib/uaccess_mvcos.c   4
-rw-r--r--  arch/s390/lib/uaccess_pt.c    147
-rw-r--r--  arch/s390/lib/usercopy.c        8
5 files changed, 119 insertions, 138 deletions
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index 97975ec7a274..761ab8b56afc 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -3,6 +3,7 @@
 #
 
 lib-y += delay.o string.o uaccess_std.o uaccess_pt.o
+obj-y += usercopy.o
 obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o
 lib-$(CONFIG_64BIT) += uaccess_mvcos.o
 lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index f7e0d30250b7..91754ffb9203 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -34,78 +34,99 @@ static inline void _raw_yield_cpu(int cpu)
 {
 	if (MACHINE_HAS_DIAG9C)
 		asm volatile("diag %0,0,0x9c"
-			     : : "d" (__cpu_logical_map[cpu]));
+			     : : "d" (cpu_logical_map(cpu)));
 	else
 		_raw_yield();
 }
 
-void _raw_spin_lock_wait(raw_spinlock_t *lp)
+void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
 	int count = spin_retry;
 	unsigned int cpu = ~smp_processor_id();
+	unsigned int owner;
 
 	while (1) {
-		if (count-- <= 0) {
-			unsigned int owner = lp->owner_cpu;
-			if (owner != 0)
-				_raw_yield_cpu(~owner);
-			count = spin_retry;
+		owner = lp->owner_cpu;
+		if (!owner || smp_vcpu_scheduled(~owner)) {
+			for (count = spin_retry; count > 0; count--) {
+				if (arch_spin_is_locked(lp))
+					continue;
+				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
+							  cpu) == 0)
+					return;
+			}
+			if (MACHINE_IS_LPAR)
+				continue;
 		}
-		if (__raw_spin_is_locked(lp))
-			continue;
+		owner = lp->owner_cpu;
+		if (owner)
+			_raw_yield_cpu(~owner);
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return;
 	}
 }
-EXPORT_SYMBOL(_raw_spin_lock_wait);
+EXPORT_SYMBOL(arch_spin_lock_wait);
 
-void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
+void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 {
 	int count = spin_retry;
 	unsigned int cpu = ~smp_processor_id();
+	unsigned int owner;
 
 	local_irq_restore(flags);
 	while (1) {
-		if (count-- <= 0) {
-			unsigned int owner = lp->owner_cpu;
-			if (owner != 0)
-				_raw_yield_cpu(~owner);
-			count = spin_retry;
+		owner = lp->owner_cpu;
+		if (!owner || smp_vcpu_scheduled(~owner)) {
+			for (count = spin_retry; count > 0; count--) {
+				if (arch_spin_is_locked(lp))
+					continue;
+				local_irq_disable();
+				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
+							  cpu) == 0)
+					return;
+				local_irq_restore(flags);
+			}
+			if (MACHINE_IS_LPAR)
+				continue;
 		}
-		if (__raw_spin_is_locked(lp))
-			continue;
+		owner = lp->owner_cpu;
+		if (owner)
+			_raw_yield_cpu(~owner);
 		local_irq_disable();
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return;
 		local_irq_restore(flags);
 	}
 }
-EXPORT_SYMBOL(_raw_spin_lock_wait_flags);
+EXPORT_SYMBOL(arch_spin_lock_wait_flags);
 
-int _raw_spin_trylock_retry(raw_spinlock_t *lp)
+int arch_spin_trylock_retry(arch_spinlock_t *lp)
 {
 	unsigned int cpu = ~smp_processor_id();
 	int count;
 
 	for (count = spin_retry; count > 0; count--) {
-		if (__raw_spin_is_locked(lp))
+		if (arch_spin_is_locked(lp))
 			continue;
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return 1;
 	}
 	return 0;
 }
-EXPORT_SYMBOL(_raw_spin_trylock_retry);
+EXPORT_SYMBOL(arch_spin_trylock_retry);
 
-void _raw_spin_relax(raw_spinlock_t *lock)
+void arch_spin_relax(arch_spinlock_t *lock)
 {
 	unsigned int cpu = lock->owner_cpu;
-	if (cpu != 0)
-		_raw_yield_cpu(~cpu);
+	if (cpu != 0) {
+		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
+		    !smp_vcpu_scheduled(~cpu))
+			_raw_yield_cpu(~cpu);
+	}
 }
-EXPORT_SYMBOL(_raw_spin_relax);
+EXPORT_SYMBOL(arch_spin_relax);
 
-void _raw_read_lock_wait(raw_rwlock_t *rw)
+void _raw_read_lock_wait(arch_rwlock_t *rw)
 {
 	unsigned int old;
 	int count = spin_retry;
@@ -115,7 +136,7 @@ void _raw_read_lock_wait(raw_rwlock_t *rw)
 			_raw_yield();
 			count = spin_retry;
 		}
-		if (!__raw_read_can_lock(rw))
+		if (!arch_read_can_lock(rw))
 			continue;
 		old = rw->lock & 0x7fffffffU;
 		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
@@ -124,7 +145,7 @@ void _raw_read_lock_wait(raw_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_read_lock_wait);
 
-void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
+void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 {
 	unsigned int old;
 	int count = spin_retry;
@@ -135,7 +156,7 @@ void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
 			_raw_yield();
 			count = spin_retry;
 		}
-		if (!__raw_read_can_lock(rw))
+		if (!arch_read_can_lock(rw))
 			continue;
 		old = rw->lock & 0x7fffffffU;
 		local_irq_disable();
@@ -145,13 +166,13 @@ void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
 }
 EXPORT_SYMBOL(_raw_read_lock_wait_flags);
 
-int _raw_read_trylock_retry(raw_rwlock_t *rw)
+int _raw_read_trylock_retry(arch_rwlock_t *rw)
 {
 	unsigned int old;
 	int count = spin_retry;
 
 	while (count-- > 0) {
-		if (!__raw_read_can_lock(rw))
+		if (!arch_read_can_lock(rw))
 			continue;
 		old = rw->lock & 0x7fffffffU;
 		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
@@ -161,7 +182,7 @@ int _raw_read_trylock_retry(raw_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_read_trylock_retry);
 
-void _raw_write_lock_wait(raw_rwlock_t *rw)
+void _raw_write_lock_wait(arch_rwlock_t *rw)
 {
 	int count = spin_retry;
 
@@ -170,7 +191,7 @@ void _raw_write_lock_wait(raw_rwlock_t *rw)
 			_raw_yield();
 			count = spin_retry;
 		}
-		if (!__raw_write_can_lock(rw))
+		if (!arch_write_can_lock(rw))
 			continue;
 		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
 			return;
@@ -178,7 +199,7 @@ void _raw_write_lock_wait(raw_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_write_lock_wait);
 
-void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
+void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 {
 	int count = spin_retry;
 
@@ -188,7 +209,7 @@ void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
 			_raw_yield();
 			count = spin_retry;
 		}
-		if (!__raw_write_can_lock(rw))
+		if (!arch_write_can_lock(rw))
 			continue;
 		local_irq_disable();
 		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
@@ -197,12 +218,12 @@ void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
 }
 EXPORT_SYMBOL(_raw_write_lock_wait_flags);
 
-int _raw_write_trylock_retry(raw_rwlock_t *rw)
+int _raw_write_trylock_retry(arch_rwlock_t *rw)
 {
 	int count = spin_retry;
 
 	while (count-- > 0) {
-		if (!__raw_write_can_lock(rw))
+		if (!arch_write_can_lock(rw))
 			continue;
 		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
 			return 1;
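
The rework above changes the waiting strategy rather than the data layout: a waiter now spins only while the lock is free or the owning virtual CPU is known to be running (smp_vcpu_scheduled()), and otherwise yields its time slice to the owner via diag 0x9c. The lock word itself keeps the existing encoding, the bitwise complement of the owner's CPU number, so that CPU 0 still produces a nonzero "locked" value. A minimal user-space sketch of just that encoding, not part of the patch:

#include <stdio.h>

/* Sketch of the owner encoding used by the lock word above: owner_cpu
 * holds ~cpu so that 0 can keep meaning "unlocked" even for CPU 0.
 * Complementing again recovers the CPU number, which is why the kernel
 * code yields with _raw_yield_cpu(~owner). */
int main(void)
{
	unsigned int cpu = 0;		/* what smp_processor_id() returns */
	unsigned int token = ~cpu;	/* what gets stored in owner_cpu */

	printf("cpu %u -> owner_cpu 0x%08x -> decoded cpu %u\n",
	       cpu, token, ~token);
	return 0;
}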
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index 58da3f461214..60455f104ea3 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -162,7 +162,6 @@ static size_t clear_user_mvcos(size_t size, void __user *to)
 		return size;
 }
 
-#ifdef CONFIG_S390_SWITCH_AMODE
 static size_t strnlen_user_mvcos(size_t count, const char __user *src)
 {
 	char buf[256];
@@ -200,7 +199,6 @@ static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
 	} while ((len_str == len) && (done < count));
 	return done;
 }
-#endif /* CONFIG_S390_SWITCH_AMODE */
 
 struct uaccess_ops uaccess_mvcos = {
 	.copy_from_user = copy_from_user_mvcos_check,
@@ -215,7 +213,6 @@ struct uaccess_ops uaccess_mvcos = {
 	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
 };
 
-#ifdef CONFIG_S390_SWITCH_AMODE
 struct uaccess_ops uaccess_mvcos_switch = {
 	.copy_from_user = copy_from_user_mvcos,
 	.copy_from_user_small = copy_from_user_mvcos,
@@ -228,4 +225,3 @@ struct uaccess_ops uaccess_mvcos_switch = {
 	.futex_atomic_op = futex_atomic_op_pt,
 	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
 };
-#endif
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index cb5d59eab0ee..404f2de296dc 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -23,86 +23,21 @@ static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
 
 	pgd = pgd_offset(mm, addr);
 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-		return NULL;
+		return (pte_t *) 0x3a;
 
 	pud = pud_offset(pgd, addr);
 	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-		return NULL;
+		return (pte_t *) 0x3b;
 
 	pmd = pmd_offset(pud, addr);
 	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-		return NULL;
+		return (pte_t *) 0x10;
 
 	return pte_offset_map(pmd, addr);
 }
 
-static int __handle_fault(struct mm_struct *mm, unsigned long address,
-			  int write_access)
-{
-	struct vm_area_struct *vma;
-	int ret = -EFAULT;
-	int fault;
-
-	if (in_atomic())
-		return ret;
-	down_read(&mm->mmap_sem);
-	vma = find_vma(mm, address);
-	if (unlikely(!vma))
-		goto out;
-	if (unlikely(vma->vm_start > address)) {
-		if (!(vma->vm_flags & VM_GROWSDOWN))
-			goto out;
-		if (expand_stack(vma, address))
-			goto out;
-	}
-
-	if (!write_access) {
-		/* page not present, check vm flags */
-		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
-			goto out;
-	} else {
-		if (!(vma->vm_flags & VM_WRITE))
-			goto out;
-	}
-
-survive:
-	fault = handle_mm_fault(mm, vma, address, write_access ? FAULT_FLAG_WRITE : 0);
-	if (unlikely(fault & VM_FAULT_ERROR)) {
-		if (fault & VM_FAULT_OOM)
-			goto out_of_memory;
-		else if (fault & VM_FAULT_SIGBUS)
-			goto out_sigbus;
-		BUG();
-	}
-	if (fault & VM_FAULT_MAJOR)
-		current->maj_flt++;
-	else
-		current->min_flt++;
-	ret = 0;
-out:
-	up_read(&mm->mmap_sem);
-	return ret;
-
-out_of_memory:
-	up_read(&mm->mmap_sem);
-	if (is_global_init(current)) {
-		yield();
-		down_read(&mm->mmap_sem);
-		goto survive;
-	}
-	printk("VM: killing process %s\n", current->comm);
-	return ret;
-
-out_sigbus:
-	up_read(&mm->mmap_sem);
-	current->thread.prot_addr = address;
-	current->thread.trap_no = 0x11;
-	force_sig(SIGBUS, current);
-	return ret;
-}
-
-static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
-			     size_t n, int write_user)
+static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
+					     size_t n, int write_user)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long offset, pfn, done, size;
@@ -114,12 +49,17 @@ retry:
 	spin_lock(&mm->page_table_lock);
 	do {
 		pte = follow_table(mm, uaddr);
-		if (!pte || !pte_present(*pte) ||
-		    (write_user && !pte_write(*pte)))
+		if ((unsigned long) pte < 0x1000)
 			goto fault;
+		if (!pte_present(*pte)) {
+			pte = (pte_t *) 0x11;
+			goto fault;
+		} else if (write_user && !pte_write(*pte)) {
+			pte = (pte_t *) 0x04;
+			goto fault;
+		}
 
 		pfn = pte_pfn(*pte);
-
 		offset = uaddr & (PAGE_SIZE - 1);
 		size = min(n - done, PAGE_SIZE - offset);
 		if (write_user) {
@@ -137,7 +77,7 @@ retry:
 	return n - done;
 fault:
 	spin_unlock(&mm->page_table_lock);
-	if (__handle_fault(mm, uaddr, write_user))
+	if (__handle_fault(uaddr, (unsigned long) pte, write_user))
 		return n - done;
 	goto retry;
 }
@@ -146,30 +86,31 @@ fault:
  * Do DAT for user address by page table walk, return kernel address.
  * This function needs to be called with current->mm->page_table_lock held.
  */
-static unsigned long __dat_user_addr(unsigned long uaddr)
+static __always_inline unsigned long __dat_user_addr(unsigned long uaddr)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long pfn, ret;
+	unsigned long pfn;
 	pte_t *pte;
 	int rc;
 
-	ret = 0;
 retry:
 	pte = follow_table(mm, uaddr);
-	if (!pte || !pte_present(*pte))
+	if ((unsigned long) pte < 0x1000)
 		goto fault;
+	if (!pte_present(*pte)) {
+		pte = (pte_t *) 0x11;
+		goto fault;
+	}
 
 	pfn = pte_pfn(*pte);
-	ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
-out:
-	return ret;
+	return (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
 fault:
 	spin_unlock(&mm->page_table_lock);
-	rc = __handle_fault(mm, uaddr, 0);
+	rc = __handle_fault(uaddr, (unsigned long) pte, 0);
 	spin_lock(&mm->page_table_lock);
-	if (rc)
-		goto out;
-	goto retry;
+	if (!rc)
+		goto retry;
+	return 0;
 }
 
 size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
@@ -234,8 +175,12 @@ retry:
 	spin_lock(&mm->page_table_lock);
 	do {
 		pte = follow_table(mm, uaddr);
-		if (!pte || !pte_present(*pte))
+		if ((unsigned long) pte < 0x1000)
+			goto fault;
+		if (!pte_present(*pte)) {
+			pte = (pte_t *) 0x11;
 			goto fault;
+		}
 
 		pfn = pte_pfn(*pte);
 		offset = uaddr & (PAGE_SIZE-1);
@@ -249,9 +194,8 @@ retry:
 	return done + 1;
 fault:
 	spin_unlock(&mm->page_table_lock);
-	if (__handle_fault(mm, uaddr, 0)) {
+	if (__handle_fault(uaddr, (unsigned long) pte, 0))
 		return 0;
-	}
 	goto retry;
 }
 
@@ -284,7 +228,7 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
-		      uaddr, done, size;
+		      uaddr, done, size, error_code;
 	unsigned long uaddr_from = (unsigned long) from;
 	unsigned long uaddr_to = (unsigned long) to;
 	pte_t *pte_from, *pte_to;
@@ -298,17 +242,28 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
 retry:
 	spin_lock(&mm->page_table_lock);
 	do {
+		write_user = 0;
+		uaddr = uaddr_from;
 		pte_from = follow_table(mm, uaddr_from);
-		if (!pte_from || !pte_present(*pte_from)) {
-			uaddr = uaddr_from;
-			write_user = 0;
+		error_code = (unsigned long) pte_from;
+		if (error_code < 0x1000)
+			goto fault;
+		if (!pte_present(*pte_from)) {
+			error_code = 0x11;
 			goto fault;
 		}
 
+		write_user = 1;
+		uaddr = uaddr_to;
 		pte_to = follow_table(mm, uaddr_to);
-		if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
-			uaddr = uaddr_to;
-			write_user = 1;
+		error_code = (unsigned long) pte_to;
+		if (error_code < 0x1000)
+			goto fault;
+		if (!pte_present(*pte_to)) {
+			error_code = 0x11;
+			goto fault;
+		} else if (!pte_write(*pte_to)) {
+			error_code = 0x04;
 			goto fault;
 		}
 
@@ -329,7 +284,7 @@ retry:
 	return n - done;
 fault:
 	spin_unlock(&mm->page_table_lock);
-	if (__handle_fault(mm, uaddr, write_user))
+	if (__handle_fault(uaddr, error_code, write_user))
 		return n - done;
 	goto retry;
 }
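
With __handle_fault() now taking an explicit error code instead of re-walking the page tables, follow_table() reports where the walk failed by returning a small integer through the pte pointer: any value below 0x1000 cannot be the address of a real page-table entry. The specific constants look like the s390 program-interruption codes for the matching DAT exceptions, so the software walk can report failures the same way the hardware would. The decoder below is an illustrative assumption, not part of the patch:

/* Hypothetical helper, for illustration only: the mapping of the
 * constants used above to s390 DAT exception names is an assumption
 * based on the architecture's program-interruption codes. */
static const char *pt_walk_error(unsigned long code)
{
	switch (code) {
	case 0x04: return "protection (write to read-only page)";
	case 0x10: return "segment translation (pmd missing)";
	case 0x11: return "page translation (pte not present)";
	case 0x3a: return "region-second translation (pgd missing)";
	case 0x3b: return "region-third translation (pud missing)";
	default:   return "not an error code";
	}
}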
diff --git a/arch/s390/lib/usercopy.c b/arch/s390/lib/usercopy.c
new file mode 100644
index 000000000000..14b363fec8a2
--- /dev/null
+++ b/arch/s390/lib/usercopy.c
@@ -0,0 +1,8 @@
+#include <linux/module.h>
+#include <linux/bug.h>
+
+void copy_from_user_overflow(void)
+{
+	WARN(1, "Buffer overflow detected!\n");
+}
+EXPORT_SYMBOL(copy_from_user_overflow);
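
copy_from_user_overflow() is the out-of-line handler for the compile-time copy_from_user() overflow check: when the compiler can prove the destination buffer is smaller than the requested length, the inline wrapper calls it instead of copying. The s390 uaccess.h side is not in this diff, so the wrapper below is a sketch modeled on how other architectures wire this up; take its details as assumptions.

/* Sketch, not from this patch: a copy_from_user() wrapper in the style
 * other architectures use.  __compiletime_object_size() evaluates to
 * the destination's size when the compiler can prove it, else to
 * (unsigned int) -1, in which case the check is skipped. */
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int sz = __compiletime_object_size(to);

	might_fault();
	if (unlikely(sz != -1 && sz < n)) {
		copy_from_user_overflow();	/* WARN from usercopy.c */
		return n;			/* nothing was copied */
	}
	return __copy_from_user(to, from, n);
}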