author     Martin Schwidefsky <schwidefsky@de.ibm.com>   2007-10-22 06:52:46 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>   2007-10-22 06:52:49 -0400
commit     e4aa402e7a3b6b87d8df6243a37171cdcd2f01c2 (patch)
tree       7827fd9c19c5d190cd72e649c4a37975a705f2b6 /arch/s390
parent     6f3fa3f0eb8fe4675f8543dd4be3365577e1d487 (diff)
[S390] Introduce follow_table in uaccess_pt.c
Define and use follow_table inline in uaccess_pt.c to simplify
the code.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/lib/uaccess_pt.c  85
1 file changed, 22 insertions(+), 63 deletions(-)
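
For readers skimming the patch, the point of the new helper is that it folds the pgd -> pmd -> pte lookup into one call whose NULL return means "fault", so every caller's per-level checks collapse into a single test. Below is a minimal userspace sketch of that pattern, not part of the patch; the toy types, the two-level split, and the index arithmetic are all hypothetical and only illustrate the shape of the refactoring.

/*
 * Userspace analogy, not kernel code: a toy two-level table walk factored
 * into a single helper that returns NULL when any level is missing. This
 * mirrors the patch below, where follow_table() lets each caller replace
 * several per-level "goto fault" checks with one NULL check.
 */
#include <stdio.h>

#define ENTRIES 4

struct pte {
	unsigned long pfn;	/* toy "page frame number" */
	int present;
};

/* Top level: each slot points to a table of ENTRIES ptes, or is NULL. */
static struct pte *top[ENTRIES];

/* Walk both levels; a NULL return stands in for "goto fault" at the caller. */
static struct pte *follow_table(unsigned long addr)
{
	unsigned long hi = (addr / ENTRIES) % ENTRIES;
	unsigned long lo = addr % ENTRIES;
	struct pte *table = top[hi];

	if (!table)
		return NULL;		/* no second-level table mapped */
	if (!table[lo].present)
		return NULL;		/* leaf entry not present */
	return &table[lo];
}

int main(void)
{
	static struct pte leaf[ENTRIES] = {
		[1] = { .pfn = 0x1234, .present = 1 },
	};
	struct pte *pte;

	top[2] = leaf;				/* "map" the range covered by slot 2 */

	pte = follow_table(2 * ENTRIES + 1);	/* hi = 2, lo = 1: mapped */
	if (pte)
		printf("mapped: pfn=0x%lx\n", pte->pfn);
	else
		printf("fault\n");

	pte = follow_table(0);			/* hi = 0 is empty: fault */
	printf("%s\n", pte ? "mapped" : "fault");
	return 0;
}

In the kernel code the same NULL-means-fault convention is what allows the goto fault sequences in __user_copy_pt(), __dat_user_addr(), strnlen_user_pt() and copy_in_user_pt() to shrink to one check per lookup, as the diff shows.
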
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index b159a9d65680..dc37ea827f4e 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -15,6 +15,22 @@
 #include <asm/futex.h>
 #include "uaccess.h"
 
+static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
+{
+	pgd_t *pgd;
+	pmd_t *pmd;
+
+	pgd = pgd_offset(mm, addr);
+	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+		return NULL;
+
+	pmd = pmd_offset(pgd, addr);
+	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+		return NULL;
+
+	return pte_offset_map(pmd, addr);
+}
+
 static int __handle_fault(struct mm_struct *mm, unsigned long address,
 			  int write_access)
 {
@@ -85,8 +101,6 @@ static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long offset, pfn, done, size;
-	pgd_t *pgd;
-	pmd_t *pmd;
 	pte_t *pte;
 	void *from, *to;
 
@@ -94,15 +108,7 @@ static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
 retry:
 	spin_lock(&mm->page_table_lock);
 	do {
-		pgd = pgd_offset(mm, uaddr);
-		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-			goto fault;
-
-		pmd = pmd_offset(pgd, uaddr);
-		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-			goto fault;
-
-		pte = pte_offset_map(pmd, uaddr);
+		pte = follow_table(mm, uaddr);
 		if (!pte || !pte_present(*pte) ||
 		    (write_user && !pte_write(*pte)))
 			goto fault;
@@ -142,22 +148,12 @@ static unsigned long __dat_user_addr(unsigned long uaddr)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long pfn, ret;
-	pgd_t *pgd;
-	pmd_t *pmd;
 	pte_t *pte;
 	int rc;
 
 	ret = 0;
 retry:
-	pgd = pgd_offset(mm, uaddr);
-	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-		goto fault;
-
-	pmd = pmd_offset(pgd, uaddr);
-	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-		goto fault;
-
-	pte = pte_offset_map(pmd, uaddr);
+	pte = follow_table(mm, uaddr);
 	if (!pte || !pte_present(*pte))
 		goto fault;
 
@@ -229,8 +225,6 @@ static size_t strnlen_user_pt(size_t count, const char __user *src)
 	unsigned long uaddr = (unsigned long) src;
 	struct mm_struct *mm = current->mm;
 	unsigned long offset, pfn, done, len;
-	pgd_t *pgd;
-	pmd_t *pmd;
 	pte_t *pte;
 	size_t len_str;
 
@@ -240,15 +234,7 @@ static size_t strnlen_user_pt(size_t count, const char __user *src)
 retry:
 	spin_lock(&mm->page_table_lock);
 	do {
-		pgd = pgd_offset(mm, uaddr);
-		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-			goto fault;
-
-		pmd = pmd_offset(pgd, uaddr);
-		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-			goto fault;
-
-		pte = pte_offset_map(pmd, uaddr);
+		pte = follow_table(mm, uaddr);
 		if (!pte || !pte_present(*pte))
 			goto fault;
 
@@ -308,8 +294,6 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
 		      uaddr, done, size;
 	unsigned long uaddr_from = (unsigned long) from;
 	unsigned long uaddr_to = (unsigned long) to;
-	pgd_t *pgd_from, *pgd_to;
-	pmd_t *pmd_from, *pmd_to;
 	pte_t *pte_from, *pte_to;
 	int write_user;
 
@@ -317,39 +301,14 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
 retry:
 	spin_lock(&mm->page_table_lock);
 	do {
-		pgd_from = pgd_offset(mm, uaddr_from);
-		if (pgd_none(*pgd_from) || unlikely(pgd_bad(*pgd_from))) {
-			uaddr = uaddr_from;
-			write_user = 0;
-			goto fault;
-		}
-		pgd_to = pgd_offset(mm, uaddr_to);
-		if (pgd_none(*pgd_to) || unlikely(pgd_bad(*pgd_to))) {
-			uaddr = uaddr_to;
-			write_user = 1;
-			goto fault;
-		}
-
-		pmd_from = pmd_offset(pgd_from, uaddr_from);
-		if (pmd_none(*pmd_from) || unlikely(pmd_bad(*pmd_from))) {
-			uaddr = uaddr_from;
-			write_user = 0;
-			goto fault;
-		}
-		pmd_to = pmd_offset(pgd_to, uaddr_to);
-		if (pmd_none(*pmd_to) || unlikely(pmd_bad(*pmd_to))) {
-			uaddr = uaddr_to;
-			write_user = 1;
-			goto fault;
-		}
-
-		pte_from = pte_offset_map(pmd_from, uaddr_from);
+		pte_from = follow_table(mm, uaddr_from);
 		if (!pte_from || !pte_present(*pte_from)) {
 			uaddr = uaddr_from;
 			write_user = 0;
 			goto fault;
 		}
-		pte_to = pte_offset_map(pmd_to, uaddr_to);
+
+		pte_to = follow_table(mm, uaddr_to);
 		if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
 			uaddr = uaddr_to;
 			write_user = 1;