path: root/arch/s390
author      Heiko Carstens <heiko.carstens@de.ibm.com>    2013-03-05 07:14:45 -0500
committer   Marcelo Tosatti <mtosatti@redhat.com>         2013-03-07 14:21:21 -0500
commit      f9dc72e82d32cc9fe40d1dea7709d434bba2d4a9 (patch)
tree        61a650e12d2942dc2878dfd303756a8852e64a1e /arch/s390
parent      396083a964aa4e86061d0e3449b1e0548a8197a9 (diff)
s390/kvm,gaccess: shorten copy_to/from_guest code
The code can be significantly shortened. There is no functional change, except
that for large (> PAGE_SIZE) copies the guest translation would be done more
frequently. However, there is not a single user which does this currently. If
one gets added later on, this functionality can be added easily again.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
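For illustration only, below is a minimal, self-contained user-space sketch of the page-wise copy loop this patch introduces; it is not the kernel code. guest_mem and gva_to_hva() are hypothetical stand-ins for the guest address space and for __gptr_to_uptr()/gmap_fault(), and memcpy() stands in for copy_to_user()/copy_from_user(). The point it demonstrates is that each iteration translates at most one guest page and copies only up to the next page boundary, which is why a copy larger than PAGE_SIZE now causes one translation per page.

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL

/* Hypothetical guest address space: a flat buffer, so "translation" is
 * just an offset into it. In the kernel this job is done by gmap_fault(). */
static unsigned char guest_mem[4 * PAGE_SIZE];

static void *gva_to_hva(unsigned long gaddr)
{
        if (gaddr >= sizeof(guest_mem))
                return NULL;
        return guest_mem + gaddr;
}

/* Page-wise copy loop in the spirit of the new __copy_guest(): translate
 * the guest side, copy at most up to the next page boundary, advance. */
static int copy_guest_sketch(unsigned long to, unsigned long from,
                             unsigned long len, int to_guest)
{
        unsigned long gaddr, chunk;
        void *hva;

        while (len) {
                gaddr = to_guest ? to : from;
                hva = gva_to_hva(gaddr);        /* per-page translation */
                if (!hva)
                        return -EFAULT;
                chunk = PAGE_SIZE - (gaddr & (PAGE_SIZE - 1));
                if (chunk > len)
                        chunk = len;
                if (to_guest)
                        memcpy(hva, (void *)from, chunk);
                else
                        memcpy((void *)to, hva, chunk);
                len -= chunk;
                from += chunk;
                to += chunk;
        }
        return 0;
}

int main(void)
{
        char out[32] = { 0 };

        /* A copy that crosses a guest page boundary: two loop iterations,
         * hence two translations. */
        copy_guest_sketch(PAGE_SIZE - 8, (unsigned long)"hello, guest", 13, 1);
        copy_guest_sketch((unsigned long)out, PAGE_SIZE - 8, 13, 0);
        printf("%s\n", out);
        return 0;
}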
Diffstat (limited to 'arch/s390')
-rw-r--r--   arch/s390/kvm/gaccess.h   294
1 file changed, 41 insertions(+), 253 deletions(-)
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 82f450ecb585..8608d7e6a334 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -18,16 +18,19 @@
 #include <asm/uaccess.h>
 #include "kvm-s390.h"
 
-static inline void *__gptr_to_uptr(struct kvm_vcpu *vcpu, void *gptr)
+static inline void *__gptr_to_uptr(struct kvm_vcpu *vcpu, void *gptr,
+                                   int prefixing)
 {
         unsigned long prefix = vcpu->arch.sie_block->prefix;
         unsigned long gaddr = (unsigned long) gptr;
         unsigned long uaddr;
 
-        if (gaddr < 2 * PAGE_SIZE)
-                gaddr += prefix;
-        else if ((gaddr >= prefix) && (gaddr < prefix + 2 * PAGE_SIZE))
-                gaddr -= prefix;
+        if (prefixing) {
+                if (gaddr < 2 * PAGE_SIZE)
+                        gaddr += prefix;
+                else if ((gaddr >= prefix) && (gaddr < prefix + 2 * PAGE_SIZE))
+                        gaddr -= prefix;
+        }
         uaddr = gmap_fault(gaddr, vcpu->arch.gmap);
         if (IS_ERR_VALUE(uaddr))
                 uaddr = -EFAULT;
@@ -36,7 +39,7 @@ static inline void *__gptr_to_uptr(struct kvm_vcpu *vcpu, void *gptr)
 
 #define get_guest(vcpu, x, gptr)                                \
 ({                                                              \
-        __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr);   \
+        __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\
         int __mask = sizeof(__typeof__(*(gptr))) - 1;           \
         int __ret = PTR_RET(__uptr);                            \
                                                                 \
@@ -49,7 +52,7 @@ static inline void *__gptr_to_uptr(struct kvm_vcpu *vcpu, void *gptr)
 
 #define put_guest(vcpu, x, gptr)                                \
 ({                                                              \
-        __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr);   \
+        __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\
         int __mask = sizeof(__typeof__(*(gptr))) - 1;           \
         int __ret = PTR_RET(__uptr);                            \
                                                                 \
@@ -60,255 +63,40 @@ static inline void *__gptr_to_uptr(struct kvm_vcpu *vcpu, void *gptr)
         __ret;                                                  \
 })
 
-static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
-                                       unsigned long guestdest,
-                                       void *from, unsigned long n)
-{
-        int rc;
-        unsigned long i;
-        u8 *data = from;
-
-        for (i = 0; i < n; i++) {
-                rc = put_guest(vcpu, *(data++), (u8 *)guestdest++);
-                if (rc < 0)
-                        return rc;
-        }
-        return 0;
-}
-
-static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
-                                       unsigned long guestdest,
-                                       void *from, unsigned long n)
-{
-        int r;
-        void __user *uptr;
-        unsigned long size;
-
-        if (guestdest + n < guestdest)
-                return -EFAULT;
-
-        /* simple case: all within one segment table entry? */
-        if ((guestdest & PMD_MASK) == ((guestdest+n) & PMD_MASK)) {
-                uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);
-
-                if (IS_ERR((void __force *) uptr))
-                        return PTR_ERR((void __force *) uptr);
-
-                r = copy_to_user(uptr, from, n);
-
-                if (r)
-                        r = -EFAULT;
-
-                goto out;
-        }
-
-        /* copy first segment */
-        uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
-
-        if (IS_ERR((void __force *) uptr))
-                return PTR_ERR((void __force *) uptr);
-
-        size = PMD_SIZE - (guestdest & ~PMD_MASK);
-
-        r = copy_to_user(uptr, from, size);
-
-        if (r) {
-                r = -EFAULT;
-                goto out;
-        }
-        from += size;
-        n -= size;
-        guestdest += size;
-
-        /* copy full segments */
-        while (n >= PMD_SIZE) {
-                uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
-
-                if (IS_ERR((void __force *) uptr))
-                        return PTR_ERR((void __force *) uptr);
-
-                r = copy_to_user(uptr, from, PMD_SIZE);
-
-                if (r) {
-                        r = -EFAULT;
-                        goto out;
-                }
-                from += PMD_SIZE;
-                n -= PMD_SIZE;
-                guestdest += PMD_SIZE;
-        }
-
-        /* copy the tail segment */
-        if (n) {
-                uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
-
-                if (IS_ERR((void __force *) uptr))
-                        return PTR_ERR((void __force *) uptr);
-
-                r = copy_to_user(uptr, from, n);
-
-                if (r)
-                        r = -EFAULT;
-        }
-out:
-        return r;
-}
-
-static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
-                                         unsigned long guestdest,
-                                         void *from, unsigned long n)
-{
-        return __copy_to_guest_fast(vcpu, guestdest, from, n);
-}
-
-static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
-                                void *from, unsigned long n)
-{
-        unsigned long prefix = vcpu->arch.sie_block->prefix;
-
-        if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
-                goto slowpath;
-
-        if ((guestdest < prefix) && (guestdest + n > prefix))
-                goto slowpath;
-
-        if ((guestdest < prefix + 2 * PAGE_SIZE)
-            && (guestdest + n > prefix + 2 * PAGE_SIZE))
-                goto slowpath;
-
-        if (guestdest < 2 * PAGE_SIZE)
-                guestdest += prefix;
-        else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
-                guestdest -= prefix;
-
-        return __copy_to_guest_fast(vcpu, guestdest, from, n);
-slowpath:
-        return __copy_to_guest_slow(vcpu, guestdest, from, n);
-}
-
-static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
-                                         unsigned long guestsrc,
-                                         unsigned long n)
+static inline int __copy_guest(struct kvm_vcpu *vcpu, unsigned long to,
+                               unsigned long from, unsigned long len,
+                               int to_guest, int prefixing)
 {
-        int rc;
-        unsigned long i;
-        u8 *data = to;
-
-        for (i = 0; i < n; i++) {
-                rc = get_guest(vcpu, *(data++), (u8 *)guestsrc++);
-                if (rc < 0)
-                        return rc;
+        unsigned long _len, rc;
+        void *uptr;
+
+        while (len) {
+                uptr = to_guest ? (void *)to : (void *)from;
+                uptr = __gptr_to_uptr(vcpu, uptr, prefixing);
+                if (IS_ERR(uptr))
+                        return -EFAULT;
+                _len = PAGE_SIZE - ((unsigned long)uptr & (PAGE_SIZE - 1));
+                _len = min(_len, len);
+                if (to_guest)
+                        rc = copy_to_user(uptr, (void *)from, _len);
+                else
+                        rc = copy_from_user((void *)to, uptr, _len);
+                if (rc)
+                        return -EFAULT;
+                len -= _len;
+                from += _len;
+                to += _len;
         }
         return 0;
 }
 
-static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
-                                         unsigned long guestsrc,
-                                         unsigned long n)
-{
-        int r;
-        void __user *uptr;
-        unsigned long size;
-
-        if (guestsrc + n < guestsrc)
-                return -EFAULT;
-
-        /* simple case: all within one segment table entry? */
-        if ((guestsrc & PMD_MASK) == ((guestsrc+n) & PMD_MASK)) {
-                uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);
-
-                if (IS_ERR((void __force *) uptr))
-                        return PTR_ERR((void __force *) uptr);
-
-                r = copy_from_user(to, uptr, n);
-
-                if (r)
-                        r = -EFAULT;
-
-                goto out;
-        }
-
-        /* copy first segment */
-        uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
-
-        if (IS_ERR((void __force *) uptr))
-                return PTR_ERR((void __force *) uptr);
-
-        size = PMD_SIZE - (guestsrc & ~PMD_MASK);
-
-        r = copy_from_user(to, uptr, size);
-
-        if (r) {
-                r = -EFAULT;
-                goto out;
-        }
-        to += size;
-        n -= size;
-        guestsrc += size;
-
-        /* copy full segments */
-        while (n >= PMD_SIZE) {
-                uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
-
-                if (IS_ERR((void __force *) uptr))
-                        return PTR_ERR((void __force *) uptr);
+#define copy_to_guest(vcpu, to, from, size) \
+        __copy_guest(vcpu, to, (unsigned long)from, size, 1, 1)
+#define copy_from_guest(vcpu, to, from, size) \
+        __copy_guest(vcpu, (unsigned long)to, from, size, 0, 1)
+#define copy_to_guest_absolute(vcpu, to, from, size) \
+        __copy_guest(vcpu, to, (unsigned long)from, size, 1, 0)
+#define copy_from_guest_absolute(vcpu, to, from, size) \
+        __copy_guest(vcpu, (unsigned long)to, from, size, 0, 0)
 
-                r = copy_from_user(to, uptr, PMD_SIZE);
-
-                if (r) {
-                        r = -EFAULT;
-                        goto out;
-                }
-                to += PMD_SIZE;
-                n -= PMD_SIZE;
-                guestsrc += PMD_SIZE;
-        }
-
-        /* copy the tail segment */
-        if (n) {
-                uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
-
-                if (IS_ERR((void __force *) uptr))
-                        return PTR_ERR((void __force *) uptr);
-
-                r = copy_from_user(to, uptr, n);
-
-                if (r)
-                        r = -EFAULT;
-        }
-out:
-        return r;
-}
-
-static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
-                                           unsigned long guestsrc,
-                                           unsigned long n)
-{
-        return __copy_from_guest_fast(vcpu, to, guestsrc, n);
-}
-
-static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
-                                  unsigned long guestsrc, unsigned long n)
-{
-        unsigned long prefix = vcpu->arch.sie_block->prefix;
-
-        if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
-                goto slowpath;
-
-        if ((guestsrc < prefix) && (guestsrc + n > prefix))
-                goto slowpath;
-
-        if ((guestsrc < prefix + 2 * PAGE_SIZE)
-            && (guestsrc + n > prefix + 2 * PAGE_SIZE))
-                goto slowpath;
-
-        if (guestsrc < 2 * PAGE_SIZE)
-                guestsrc += prefix;
-        else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
-                guestsrc -= prefix;
-
-        return __copy_from_guest_fast(vcpu, to, guestsrc, n);
-slowpath:
-        return __copy_from_guest_slow(vcpu, to, guestsrc, n);
-}
-#endif
+#endif /* __KVM_S390_GACCESS_H */
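As a footnote, the prefixing that __gptr_to_uptr() applies is the usual s390 low-core swap: the first two pages of the guest address space and the two pages at the prefix address trade places. The following tiny stand-alone sketch (hypothetical values, not kernel code) shows just that address rewrite.

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Same address rewrite as in __gptr_to_uptr() when prefixing is enabled. */
static unsigned long apply_prefix(unsigned long gaddr, unsigned long prefix)
{
        if (gaddr < 2 * PAGE_SIZE)
                gaddr += prefix;
        else if ((gaddr >= prefix) && (gaddr < prefix + 2 * PAGE_SIZE))
                gaddr -= prefix;
        return gaddr;
}

int main(void)
{
        unsigned long prefix = 0x10000;                 /* example prefix value */

        printf("%#lx -> %#lx\n", 0x0UL, apply_prefix(0x0UL, prefix));          /* into the prefix area */
        printf("%#lx -> %#lx\n", prefix, apply_prefix(prefix, prefix));        /* back to absolute 0 */
        printf("%#lx -> %#lx\n", 0x20000UL, apply_prefix(0x20000UL, prefix));  /* untouched */
        return 0;
}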