author    Gerald Schaefer <gerald.schaefer@de.ibm.com>    2009-12-07 06:51:47 -0500
committer Martin Schwidefsky <schwidefsky@de.ibm.com>    2009-12-07 06:51:34 -0500
commit 6c1e3e79430615d0472dbf9f8fed89c571e66423 (patch)
tree   ed7b6fde69c9b74cd6be35f82a7a75cc2fc77775 /arch/s390/lib
parent 1ab947de293f43812276b60cf9fa21127e7a5bb2 (diff)
[S390] Use do_exception() in pagetable walk usercopy functions.
The pagetable walk usercopy functions used a modified copy of the
do_exception() function for fault handling. This led to inconsistencies
with recent changes to do_exception(), e.g. performance counters. This
patch changes the pagetable walk usercopy code to call do_exception()
directly, eliminating the redundancy. A new parameter is added to
do_exception() to specify the fault address.

Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
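For orientation, a small stand-alone model of the convention this patch introduces may help (an illustration only, not kernel code): follow_table() now reports a failed walk by returning an s390 program-interruption code, always below 0x1000, in place of a pte pointer, and each call site forwards that code together with the fault address to __handle_fault(). The macro names below are made up for this sketch; the numeric values are the ones visible in the diff.

#include <stdio.h>

/*
 * Stand-alone model of the error-code convention from this patch.
 * Macro names are illustrative; the values appear in the hunks below
 * (s390 program-interruption codes).
 */
#define PGM_PROTECTION		0x04UL	/* write to a read-only page */
#define PGM_SEGMENT_TRANS	0x10UL	/* pmd level miss */
#define PGM_PAGE_TRANS		0x11UL	/* pte not present */

typedef unsigned long pte_t;

/* A walk that fails returns a small code disguised as a pointer. */
static pte_t *follow_table_model(int fail)
{
	static pte_t entry = 0x2000;	/* stand-in for a real pte */

	if (fail)
		return (pte_t *) PGM_SEGMENT_TRANS;
	return &entry;
}

int main(void)
{
	pte_t *pte = follow_table_model(1);

	/* Same test the patch adds at every call site. */
	if ((unsigned long) pte < 0x1000)
		printf("fault, program check 0x%02lx\n", (unsigned long) pte);
	else
		printf("walk ok, pte %#lx\n", *pte);
	return 0;
}

The encoding is unambiguous because no valid page table entry can sit in the first 0x1000 bytes of the kernel address space, so any "pointer" below that limit must be an error code.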
Diffstat (limited to 'arch/s390/lib')
-rw-r--r--  arch/s390/lib/uaccess_pt.c | 147
1 file changed, 51 insertions(+), 96 deletions(-)
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index cb5d59eab0ee..404f2de296dc 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -23,86 +23,21 @@ static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
 
 	pgd = pgd_offset(mm, addr);
 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-		return NULL;
+		return (pte_t *) 0x3a;
 
 	pud = pud_offset(pgd, addr);
 	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-		return NULL;
+		return (pte_t *) 0x3b;
 
 	pmd = pmd_offset(pud, addr);
 	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-		return NULL;
+		return (pte_t *) 0x10;
 
 	return pte_offset_map(pmd, addr);
 }
 
-static int __handle_fault(struct mm_struct *mm, unsigned long address,
-			  int write_access)
-{
-	struct vm_area_struct *vma;
-	int ret = -EFAULT;
-	int fault;
-
-	if (in_atomic())
-		return ret;
-	down_read(&mm->mmap_sem);
-	vma = find_vma(mm, address);
-	if (unlikely(!vma))
-		goto out;
-	if (unlikely(vma->vm_start > address)) {
-		if (!(vma->vm_flags & VM_GROWSDOWN))
-			goto out;
-		if (expand_stack(vma, address))
-			goto out;
-	}
-
-	if (!write_access) {
-		/* page not present, check vm flags */
-		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
-			goto out;
-	} else {
-		if (!(vma->vm_flags & VM_WRITE))
-			goto out;
-	}
-
-survive:
-	fault = handle_mm_fault(mm, vma, address, write_access ? FAULT_FLAG_WRITE : 0);
-	if (unlikely(fault & VM_FAULT_ERROR)) {
-		if (fault & VM_FAULT_OOM)
-			goto out_of_memory;
-		else if (fault & VM_FAULT_SIGBUS)
-			goto out_sigbus;
-		BUG();
-	}
-	if (fault & VM_FAULT_MAJOR)
-		current->maj_flt++;
-	else
-		current->min_flt++;
-	ret = 0;
-out:
-	up_read(&mm->mmap_sem);
-	return ret;
-
-out_of_memory:
-	up_read(&mm->mmap_sem);
-	if (is_global_init(current)) {
-		yield();
-		down_read(&mm->mmap_sem);
-		goto survive;
-	}
-	printk("VM: killing process %s\n", current->comm);
-	return ret;
-
-out_sigbus:
-	up_read(&mm->mmap_sem);
-	current->thread.prot_addr = address;
-	current->thread.trap_no = 0x11;
-	force_sig(SIGBUS, current);
-	return ret;
-}
-
-static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
-			     size_t n, int write_user)
+static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
+					     size_t n, int write_user)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long offset, pfn, done, size;
@@ -114,12 +49,17 @@ retry:
 	spin_lock(&mm->page_table_lock);
 	do {
 		pte = follow_table(mm, uaddr);
-		if (!pte || !pte_present(*pte) ||
-		    (write_user && !pte_write(*pte)))
+		if ((unsigned long) pte < 0x1000)
 			goto fault;
+		if (!pte_present(*pte)) {
+			pte = (pte_t *) 0x11;
+			goto fault;
+		} else if (write_user && !pte_write(*pte)) {
+			pte = (pte_t *) 0x04;
+			goto fault;
+		}
 
 		pfn = pte_pfn(*pte);
-
 		offset = uaddr & (PAGE_SIZE - 1);
 		size = min(n - done, PAGE_SIZE - offset);
 		if (write_user) {
@@ -137,7 +77,7 @@ retry:
 	return n - done;
 fault:
 	spin_unlock(&mm->page_table_lock);
-	if (__handle_fault(mm, uaddr, write_user))
+	if (__handle_fault(uaddr, (unsigned long) pte, write_user))
 		return n - done;
 	goto retry;
 }
@@ -146,30 +86,31 @@ fault:
  * Do DAT for user address by page table walk, return kernel address.
  * This function needs to be called with current->mm->page_table_lock held.
  */
-static unsigned long __dat_user_addr(unsigned long uaddr)
+static __always_inline unsigned long __dat_user_addr(unsigned long uaddr)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long pfn, ret;
+	unsigned long pfn;
 	pte_t *pte;
 	int rc;
 
-	ret = 0;
 retry:
 	pte = follow_table(mm, uaddr);
-	if (!pte || !pte_present(*pte))
+	if ((unsigned long) pte < 0x1000)
 		goto fault;
+	if (!pte_present(*pte)) {
+		pte = (pte_t *) 0x11;
+		goto fault;
+	}
 
 	pfn = pte_pfn(*pte);
-	ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
-out:
-	return ret;
+	return (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
 fault:
 	spin_unlock(&mm->page_table_lock);
-	rc = __handle_fault(mm, uaddr, 0);
+	rc = __handle_fault(uaddr, (unsigned long) pte, 0);
 	spin_lock(&mm->page_table_lock);
-	if (rc)
-		goto out;
-	goto retry;
+	if (!rc)
+		goto retry;
+	return 0;
 }
 
 size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
@@ -234,8 +175,12 @@ retry:
 	spin_lock(&mm->page_table_lock);
 	do {
 		pte = follow_table(mm, uaddr);
-		if (!pte || !pte_present(*pte))
+		if ((unsigned long) pte < 0x1000)
+			goto fault;
+		if (!pte_present(*pte)) {
+			pte = (pte_t *) 0x11;
 			goto fault;
+		}
 
 		pfn = pte_pfn(*pte);
 		offset = uaddr & (PAGE_SIZE-1);
@@ -249,9 +194,8 @@ retry:
 	return done + 1;
 fault:
 	spin_unlock(&mm->page_table_lock);
-	if (__handle_fault(mm, uaddr, 0)) {
+	if (__handle_fault(uaddr, (unsigned long) pte, 0))
 		return 0;
-	}
 	goto retry;
 }
 
@@ -284,7 +228,7 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
-		      uaddr, done, size;
+		      uaddr, done, size, error_code;
 	unsigned long uaddr_from = (unsigned long) from;
 	unsigned long uaddr_to = (unsigned long) to;
 	pte_t *pte_from, *pte_to;
@@ -298,17 +242,28 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
 retry:
 	spin_lock(&mm->page_table_lock);
 	do {
+		write_user = 0;
+		uaddr = uaddr_from;
 		pte_from = follow_table(mm, uaddr_from);
-		if (!pte_from || !pte_present(*pte_from)) {
-			uaddr = uaddr_from;
-			write_user = 0;
+		error_code = (unsigned long) pte_from;
+		if (error_code < 0x1000)
+			goto fault;
+		if (!pte_present(*pte_from)) {
+			error_code = 0x11;
 			goto fault;
 		}
 
+		write_user = 1;
+		uaddr = uaddr_to;
 		pte_to = follow_table(mm, uaddr_to);
-		if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
-			uaddr = uaddr_to;
-			write_user = 1;
+		error_code = (unsigned long) pte_to;
+		if (error_code < 0x1000)
+			goto fault;
+		if (!pte_present(*pte_to)) {
+			error_code = 0x11;
+			goto fault;
+		} else if (!pte_write(*pte_to)) {
+			error_code = 0x04;
 			goto fault;
 		}
 
@@ -329,7 +284,7 @@ retry:
 	return n - done;
 fault:
 	spin_unlock(&mm->page_table_lock);
-	if (__handle_fault(mm, uaddr, write_user))
+	if (__handle_fault(uaddr, error_code, write_user))
 		return n - done;
 	goto retry;
 }