author     Gerald Schaefer <geraldsc@de.ibm.com>        2006-12-04 09:40:45 -0500
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>  2006-12-04 09:40:45 -0500
commit     59f35d53fde3987d071ea1c9bf1c9ba29fcb69fe
tree       50853c1bf3ad3af080707fb2e6b3722d3d86db9e  /arch/s390/lib/uaccess_pt.c
parent     d57de5a36791cb1b7285649c62f183b0d3505f7d
[S390] Add dynamic size check for usercopy functions.
Use a wrapper for copy_to/from_user to choose the best usercopy method.
The mvcos instruction is better for sizes greater than 256 bytes; if
mvcos is not available, a page table walk is better for sizes greater
than 1024 bytes. Also remove the redundant copy_to/from_user_std_small
functions.
Signed-off-by: Gerald Schaefer <geraldsc@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
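
For illustration, here is a minimal sketch of the size check the wrapper
could perform. The helper names copy_from_user_std/mvcos and the
MACHINE_HAS_MVCOS test are assumptions for this sketch; the actual wrapper
lives outside this file and may differ.

/*
 * Hedged sketch of the dynamic size check described above; not the
 * literal wrapper from this patch, which touches other files.
 */
static size_t copy_from_user_checked(size_t n, const void __user *from,
				     void *to)
{
	if (MACHINE_HAS_MVCOS)
		/* mvcos wins for sizes greater than 256 bytes */
		return (n <= 256) ? copy_from_user_std(n, from, to)
				  : copy_from_user_mvcos(n, from, to);
	/* without mvcos, a page table walk wins above 1024 bytes */
	return (n <= 1024) ? copy_from_user_std(n, from, to)
			   : copy_from_user_pt(n, from, to);
}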
Diffstat (limited to 'arch/s390/lib/uaccess_pt.c')
-rw-r--r--  arch/s390/lib/uaccess_pt.c  153
1 file changed, 153 insertions(+), 0 deletions(-)
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
new file mode 100644
index 000000000000..8741bdc09299
--- /dev/null
+++ b/arch/s390/lib/uaccess_pt.c
@@ -0,0 +1,153 @@
/*
 *  arch/s390/lib/uaccess_pt.c
 *
 *  User access functions based on page table walks.
 *
 *    Copyright IBM Corp. 2006
 *    Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <asm/uaccess.h>
#include <linux/mm.h>
#include <asm/futex.h>

static inline int __handle_fault(struct mm_struct *mm, unsigned long address,
				 int write_access)
{
	struct vm_area_struct *vma;
	int ret = -EFAULT;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
		if (expand_stack(vma, address))
			goto out;
	}

	if (!write_access) {
		/* page not present, check vm flags */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto out;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto out;
	}

survive:
	switch (handle_mm_fault(mm, vma, address, write_access)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto out_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}
	ret = 0;
out:
	up_read(&mm->mmap_sem);
	return ret;

out_of_memory:
	up_read(&mm->mmap_sem);
	if (current->pid == 1) {
		yield();
		goto survive;
	}
	printk("VM: killing process %s\n", current->comm);
	return ret;

out_sigbus:
	up_read(&mm->mmap_sem);
	current->thread.prot_addr = address;
	current->thread.trap_no = 0x11;
	force_sig(SIGBUS, current);
	return ret;
}

static inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
				    size_t n, int write_user)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, size;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	void *from, *to;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pgd = pgd_offset(mm, uaddr);
		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
			goto fault;

		pmd = pmd_offset(pgd, uaddr);
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
			goto fault;

		pte = pte_offset_map(pmd, uaddr);
		if (!pte || !pte_present(*pte) ||
		    (write_user && !pte_write(*pte)))
			goto fault;

		pfn = pte_pfn(*pte);
		if (!pfn_valid(pfn))
			goto out;

		offset = uaddr & (PAGE_SIZE - 1);
		size = min(n - done, PAGE_SIZE - offset);
		if (write_user) {
			to = (void *)((pfn << PAGE_SHIFT) + offset);
			from = kptr + done;
		} else {
			from = (void *)((pfn << PAGE_SHIFT) + offset);
			to = kptr + done;
		}
		memcpy(to, from, size);
		done += size;
		uaddr += size;
	} while (done < n);
out:
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(mm, uaddr, write_user))
		return n - done;
	goto retry;
}

size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t rc;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(to, (void __kernel __force *) from, n);
		return 0;
	}
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}

size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __kernel __force *) to, from, n);
		return 0;
	}
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}
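
As a usage note, these functions return the number of bytes that could not
be copied (0 on success), and copy_from_user_pt() zero-fills the uncopied
tail of the kernel buffer, as the memset above shows. A minimal sketch of a
caller; the function name is hypothetical, not from this patch:

/*
 * Hypothetical caller, illustrating the "bytes not copied" return
 * convention of copy_from_user_pt() above.
 */
static int read_user_buffer(const void __user *ubuf, void *kbuf, size_t len)
{
	size_t uncopied = copy_from_user_pt(len, ubuf, kbuf);

	if (uncopied)
		return -EFAULT;	/* tail of kbuf was zero-filled */
	return 0;
}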