Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/hugetlb.h   |  24
-rw-r--r--  arch/s390/include/asm/tlbflush.h  |   2
-rw-r--r--  arch/s390/kernel/setup.c          |   2
-rw-r--r--  arch/s390/lib/uaccess_pt.c        | 142
-rw-r--r--  arch/s390/oprofile/init.c         |  10
5 files changed, 80 insertions(+), 100 deletions(-)
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 799ed0f1643d..2d6e6e380564 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -66,16 +66,6 @@ static inline pte_t huge_ptep_get(pte_t *ptep)
 	return pte;
 }
 
-static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-					    unsigned long addr, pte_t *ptep)
-{
-	pte_t pte = huge_ptep_get(ptep);
-
-	mm->context.flush_mm = 1;
-	pmd_clear((pmd_t *) ptep);
-	return pte;
-}
-
 static inline void __pmd_csp(pmd_t *pmdp)
 {
 	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
@@ -117,6 +107,15 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
 	__pmd_csp(pmdp);
 }
 
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+					    unsigned long addr, pte_t *ptep)
+{
+	pte_t pte = huge_ptep_get(ptep);
+
+	huge_ptep_invalidate(mm, addr, ptep);
+	return pte;
+}
+
 #define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
 ({									    \
 	int __changed = !pte_same(huge_ptep_get(__ptep), __entry);	    \
@@ -131,10 +130,7 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
 ({									\
 	pte_t __pte = huge_ptep_get(__ptep);				\
 	if (pte_write(__pte)) {						\
-		(__mm)->context.flush_mm = 1;				\
-		if (atomic_read(&(__mm)->context.attach_count) > 1 ||	\
-		    (__mm) != current->active_mm)			\
-			huge_ptep_invalidate(__mm, __addr, __ptep);	\
+		huge_ptep_invalidate(__mm, __addr, __ptep);		\
 		set_huge_pte_at(__mm, __addr, __ptep,			\
 				huge_pte_wrprotect(__pte));		\
 	}								\
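Taken together, the hugetlb.h hunks move huge_ptep_get_and_clear() below huge_ptep_invalidate() so that it can call it, and switch both it and the huge_ptep_set_wrprotect() macro from the lazy protocol (set mm->context.flush_mm, invalidate only when attach_count or active_mm suggests another user) to an unconditional, eager invalidation. Reassembled from the hunks above, with an explanatory comment added:

	static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
						    unsigned long addr, pte_t *ptep)
	{
		pte_t pte = huge_ptep_get(ptep);

		/* Invalidate the entry and flush the TLB eagerly; the old
		 * version only set the lazy flush_mm flag and cleared the
		 * pmd, leaving the flush to a later __tlb_flush_mm_cond(). */
		huge_ptep_invalidate(mm, addr, ptep);
		return pte;
	}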
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 9fde315f3a7c..1d8fe2b17ef6 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -90,12 +90,10 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
 
 static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
 {
-	spin_lock(&mm->page_table_lock);
 	if (mm->context.flush_mm) {
 		__tlb_flush_mm(mm);
 		mm->context.flush_mm = 0;
 	}
-	spin_unlock(&mm->page_table_lock);
 }
 
 /*
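With the page_table_lock round trip removed, the conditional flush reduces to an unlocked test-and-clear of the flush_mm flag. Reassembled from the hunk, with a hedged comment added; presumably the remaining callers serialize page table updates themselves, and dropping the lock here avoids taking page_table_lock from paths that already hold it:

	static inline void __tlb_flush_mm_cond(struct mm_struct *mm)
	{
		/* Callers are assumed to provide the serialization that the
		 * removed page_table_lock used to give this test-and-clear. */
		if (mm->context.flush_mm) {
			__tlb_flush_mm(mm);
			mm->context.flush_mm = 0;
		}
	}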
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index f86c81e13c37..40b57693de38 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -974,11 +974,13 @@ static void __init setup_hwcaps(void)
 	if (MACHINE_HAS_HPAGE)
 		elf_hwcap |= HWCAP_S390_HPAGE;
 
+#if defined(CONFIG_64BIT)
 	/*
 	 * 64-bit register support for 31-bit processes
 	 * HWCAP_S390_HIGH_GPRS is bit 9.
 	 */
 	elf_hwcap |= HWCAP_S390_HIGH_GPRS;
+#endif
 
 	get_cpu_id(&cpu_id);
 	switch (cpu_id.machine) {
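Guarding HWCAP_S390_HIGH_GPRS with CONFIG_64BIT means the bit is only advertised where a 64-bit kernel can actually preserve the high halves of the GPRs for 31-bit processes. A minimal, runnable userspace probe for the bit, assuming glibc 2.16+ for getauxval(); the 1UL << 9 value comes straight from the comment in the hunk:

	#include <stdio.h>
	#include <sys/auxv.h>

	#define HWCAP_S390_HIGH_GPRS	(1UL << 9)	/* bit 9, per the hunk above */

	int main(void)
	{
		unsigned long hwcap = getauxval(AT_HWCAP);

		printf("high-gprs: %s\n",
		       (hwcap & HWCAP_S390_HIGH_GPRS) ? "supported" : "not supported");
		return 0;
	}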
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 60ee2b883797..2d37bb861faf 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -2,69 +2,82 @@
  * User access functions based on page table walks for enhanced
  * system layout without hardware support.
  *
- * Copyright IBM Corp. 2006
+ * Copyright IBM Corp. 2006, 2012
  * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
  */
 
 #include <linux/errno.h>
 #include <linux/hardirq.h>
 #include <linux/mm.h>
+#include <linux/hugetlb.h>
 #include <asm/uaccess.h>
 #include <asm/futex.h>
 #include "uaccess.h"
 
-static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
+
+/*
+ * Returns kernel address for user virtual address. If the returned address is
+ * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the
+ * address contains the (negative) exception code.
+ */
+static __always_inline unsigned long follow_table(struct mm_struct *mm,
+						  unsigned long addr, int write)
 {
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
+	pte_t *ptep;
 
 	pgd = pgd_offset(mm, addr);
 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-		return (pte_t *) 0x3a;
+		return -0x3aUL;
 
 	pud = pud_offset(pgd, addr);
 	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-		return (pte_t *) 0x3b;
+		return -0x3bUL;
 
 	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-		return (pte_t *) 0x10;
+	if (pmd_none(*pmd))
+		return -0x10UL;
+	if (pmd_huge(*pmd)) {
+		if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
+			return -0x04UL;
+		return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
+	}
+	if (unlikely(pmd_bad(*pmd)))
+		return -0x10UL;
+
+	ptep = pte_offset_map(pmd, addr);
+	if (!pte_present(*ptep))
+		return -0x11UL;
+	if (write && !pte_write(*ptep))
+		return -0x04UL;
 
-	return pte_offset_map(pmd, addr);
+	return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
 }
 
 static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
 					     size_t n, int write_user)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long offset, pfn, done, size;
-	pte_t *pte;
+	unsigned long offset, done, size, kaddr;
 	void *from, *to;
 
 	done = 0;
retry:
 	spin_lock(&mm->page_table_lock);
 	do {
-		pte = follow_table(mm, uaddr);
-		if ((unsigned long) pte < 0x1000)
+		kaddr = follow_table(mm, uaddr, write_user);
+		if (IS_ERR_VALUE(kaddr))
 			goto fault;
-		if (!pte_present(*pte)) {
-			pte = (pte_t *) 0x11;
-			goto fault;
-		} else if (write_user && !pte_write(*pte)) {
-			pte = (pte_t *) 0x04;
-			goto fault;
-		}
 
-		pfn = pte_pfn(*pte);
-		offset = uaddr & (PAGE_SIZE - 1);
+		offset = uaddr & ~PAGE_MASK;
 		size = min(n - done, PAGE_SIZE - offset);
 		if (write_user) {
-			to = (void *)((pfn << PAGE_SHIFT) + offset);
+			to = (void *) kaddr;
 			from = kptr + done;
 		} else {
-			from = (void *)((pfn << PAGE_SHIFT) + offset);
+			from = (void *) kaddr;
 			to = kptr + done;
 		}
 		memcpy(to, from, size);
@@ -75,7 +88,7 @@ retry:
 	return n - done;
 fault:
 	spin_unlock(&mm->page_table_lock);
-	if (__handle_fault(uaddr, (unsigned long) pte, write_user))
+	if (__handle_fault(uaddr, -kaddr, write_user))
 		return n - done;
 	goto retry;
 }
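The reworked follow_table() above folds translation and access checking into a single unsigned long: either a kernel address, or the negative of an s390 program-interruption code (0x04 protection, 0x10/0x11 segment/page translation, 0x3a/0x3b region translation), which the fault paths negate back before handing it to __handle_fault(). A standalone, runnable sketch of that encoding, restating the kernel's IS_ERR_VALUE() convention in userspace:

	#include <stdio.h>

	/* The top 4095 values of an unsigned long encode -1 .. -4095. */
	#define MAX_ERRNO	4095UL
	#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

	int main(void)
	{
		unsigned long kaddr = 0x3ffff000UL;	/* a translated address */
		unsigned long fault = -0x11UL;		/* page-translation exception */

		printf("kaddr faulted? %d\n", (int) IS_ERR_VALUE(kaddr));	/* 0 */
		printf("fault faulted? %d (code 0x%lx)\n",
		       (int) IS_ERR_VALUE(fault), -fault);			/* 1, 0x11 */
		return 0;
	}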
@@ -84,27 +97,22 @@ fault:
  * Do DAT for user address by page table walk, return kernel address.
  * This function needs to be called with current->mm->page_table_lock held.
  */
-static __always_inline unsigned long __dat_user_addr(unsigned long uaddr)
+static __always_inline unsigned long __dat_user_addr(unsigned long uaddr,
+						     int write)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long pfn;
-	pte_t *pte;
+	unsigned long kaddr;
 	int rc;
 
retry:
-	pte = follow_table(mm, uaddr);
-	if ((unsigned long) pte < 0x1000)
-		goto fault;
-	if (!pte_present(*pte)) {
-		pte = (pte_t *) 0x11;
+	kaddr = follow_table(mm, uaddr, write);
+	if (IS_ERR_VALUE(kaddr))
 		goto fault;
-	}
 
-	pfn = pte_pfn(*pte);
-	return (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
+	return kaddr;
fault:
 	spin_unlock(&mm->page_table_lock);
-	rc = __handle_fault(uaddr, (unsigned long) pte, 0);
+	rc = __handle_fault(uaddr, -kaddr, write);
 	spin_lock(&mm->page_table_lock);
 	if (!rc)
 		goto retry;
@@ -159,11 +167,9 @@ static size_t clear_user_pt(size_t n, void __user *to)
 
 static size_t strnlen_user_pt(size_t count, const char __user *src)
 {
-	char *addr;
 	unsigned long uaddr = (unsigned long) src;
 	struct mm_struct *mm = current->mm;
-	unsigned long offset, pfn, done, len;
-	pte_t *pte;
+	unsigned long offset, done, len, kaddr;
 	size_t len_str;
 
 	if (segment_eq(get_fs(), KERNEL_DS))
@@ -172,19 +178,13 @@ static size_t strnlen_user_pt(size_t count, const char __user *src)
retry:
 	spin_lock(&mm->page_table_lock);
 	do {
-		pte = follow_table(mm, uaddr);
-		if ((unsigned long) pte < 0x1000)
-			goto fault;
-		if (!pte_present(*pte)) {
-			pte = (pte_t *) 0x11;
+		kaddr = follow_table(mm, uaddr, 0);
+		if (IS_ERR_VALUE(kaddr))
 			goto fault;
-		}
 
-		pfn = pte_pfn(*pte);
-		offset = uaddr & (PAGE_SIZE-1);
-		addr = (char *)(pfn << PAGE_SHIFT) + offset;
+		offset = uaddr & ~PAGE_MASK;
 		len = min(count - done, PAGE_SIZE - offset);
-		len_str = strnlen(addr, len);
+		len_str = strnlen((char *) kaddr, len);
 		done += len_str;
 		uaddr += len_str;
 	} while ((len_str == len) && (done < count));
@@ -192,7 +192,7 @@ retry:
 	return done + 1;
fault:
 	spin_unlock(&mm->page_table_lock);
-	if (__handle_fault(uaddr, (unsigned long) pte, 0))
+	if (__handle_fault(uaddr, -kaddr, 0))
 		return 0;
 	goto retry;
 }
@@ -225,11 +225,10 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
 			      const void __user *from)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
-		      uaddr, done, size, error_code;
+	unsigned long offset_max, uaddr, done, size, error_code;
 	unsigned long uaddr_from = (unsigned long) from;
 	unsigned long uaddr_to = (unsigned long) to;
-	pte_t *pte_from, *pte_to;
+	unsigned long kaddr_to, kaddr_from;
 	int write_user;
 
 	if (segment_eq(get_fs(), KERNEL_DS)) {
@@ -242,38 +241,23 @@ retry:
 	do {
 		write_user = 0;
 		uaddr = uaddr_from;
-		pte_from = follow_table(mm, uaddr_from);
-		error_code = (unsigned long) pte_from;
-		if (error_code < 0x1000)
-			goto fault;
-		if (!pte_present(*pte_from)) {
-			error_code = 0x11;
+		kaddr_from = follow_table(mm, uaddr_from, 0);
+		error_code = kaddr_from;
+		if (IS_ERR_VALUE(error_code))
 			goto fault;
-		}
 
 		write_user = 1;
 		uaddr = uaddr_to;
-		pte_to = follow_table(mm, uaddr_to);
-		error_code = (unsigned long) pte_to;
-		if (error_code < 0x1000)
-			goto fault;
-		if (!pte_present(*pte_to)) {
-			error_code = 0x11;
+		kaddr_to = follow_table(mm, uaddr_to, 1);
+		error_code = (unsigned long) kaddr_to;
+		if (IS_ERR_VALUE(error_code))
 			goto fault;
-		} else if (!pte_write(*pte_to)) {
-			error_code = 0x04;
-			goto fault;
-		}
 
-		pfn_from = pte_pfn(*pte_from);
-		pfn_to = pte_pfn(*pte_to);
-		offset_from = uaddr_from & (PAGE_SIZE-1);
-		offset_to = uaddr_from & (PAGE_SIZE-1);
-		offset_max = max(offset_from, offset_to);
+		offset_max = max(uaddr_from & ~PAGE_MASK,
+				 uaddr_to & ~PAGE_MASK);
 		size = min(n - done, PAGE_SIZE - offset_max);
 
-		memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
-		       (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
+		memcpy((void *) kaddr_to, (void *) kaddr_from, size);
 		done += size;
 		uaddr_from += size;
 		uaddr_to += size;
@@ -282,7 +266,7 @@ retry:
 	return n - done;
fault:
 	spin_unlock(&mm->page_table_lock);
-	if (__handle_fault(uaddr, error_code, write_user))
+	if (__handle_fault(uaddr, -error_code, write_user))
 		return n - done;
 	goto retry;
 }
@@ -341,7 +325,7 @@ int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
 		return __futex_atomic_op_pt(op, uaddr, oparg, old);
 	spin_lock(&current->mm->page_table_lock);
 	uaddr = (u32 __force __user *)
-		__dat_user_addr((__force unsigned long) uaddr);
+		__dat_user_addr((__force unsigned long) uaddr, 1);
 	if (!uaddr) {
 		spin_unlock(&current->mm->page_table_lock);
 		return -EFAULT;
@@ -378,7 +362,7 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
 		return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
 	spin_lock(&current->mm->page_table_lock);
 	uaddr = (u32 __force __user *)
-		__dat_user_addr((__force unsigned long) uaddr);
+		__dat_user_addr((__force unsigned long) uaddr, 1);
 	if (!uaddr) {
 		spin_unlock(&current->mm->page_table_lock);
 		return -EFAULT;
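Both futex hunks make the same adjustment: __dat_user_addr() now takes the write flag, and the futex paths pass 1 because the atomic operations and cmpxchg store to the user word, so a read-only mapping must fault up front rather than midway through the operation. The resulting call pattern, reassembled from the hunks with a comment added:

	spin_lock(&current->mm->page_table_lock);
	uaddr = (u32 __force __user *)
		__dat_user_addr((__force unsigned long) uaddr, 1); /* writable */
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}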
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index a1e9d69a9c90..584b93674ea4 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -169,7 +169,7 @@ static ssize_t hw_interval_write(struct file *file, char const __user *buf,
 	if (*offset)
 		return -EINVAL;
 	retval = oprofilefs_ulong_from_user(&val, buf, count);
-	if (retval)
+	if (retval <= 0)
 		return retval;
 	if (val < oprofile_min_interval)
 		oprofile_hw_interval = oprofile_min_interval;
@@ -212,7 +212,7 @@ static ssize_t hwsampler_zero_write(struct file *file, char const __user *buf,
 		return -EINVAL;
 
 	retval = oprofilefs_ulong_from_user(&val, buf, count);
-	if (retval)
+	if (retval <= 0)
 		return retval;
 	if (val != 0)
 		return -EINVAL;
@@ -243,7 +243,7 @@ static ssize_t hwsampler_kernel_write(struct file *file, char const __user *buf,
 		return -EINVAL;
 
 	retval = oprofilefs_ulong_from_user(&val, buf, count);
-	if (retval)
+	if (retval <= 0)
 		return retval;
 
 	if (val != 0 && val != 1)
@@ -278,7 +278,7 @@ static ssize_t hwsampler_user_write(struct file *file, char const __user *buf,
 		return -EINVAL;
 
 	retval = oprofilefs_ulong_from_user(&val, buf, count);
-	if (retval)
+	if (retval <= 0)
 		return retval;
 
 	if (val != 0 && val != 1)
@@ -317,7 +317,7 @@ static ssize_t timer_enabled_write(struct file *file, char const __user *buf,
 		return -EINVAL;
 
 	retval = oprofilefs_ulong_from_user(&val, buf, count);
-	if (retval)
+	if (retval <= 0)
 		return retval;
 
 	if (val != 0 && val != 1)
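All five oprofile hunks adapt to the same changed contract: oprofilefs_ulong_from_user() is presumably updated outside this diff to return the number of bytes consumed on success instead of 0, so a positive return now means the value was parsed and only retval <= 0 is propagated to the caller. The shared pattern, with the assumed return convention spelled out in a comment:

	retval = oprofilefs_ulong_from_user(&val, buf, count);
	if (retval <= 0)	/* < 0: error, 0: nothing parsed; both returned as-is */
		return retval;
	/* retval > 0: bytes consumed, 'val' holds the parsed value */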