path: root/arch/s390/kvm/gaccess.h
Diffstat (limited to 'arch/s390/kvm/gaccess.h')
-rw-r--r--  arch/s390/kvm/gaccess.h | 379
1 file changed, 300 insertions(+), 79 deletions(-)
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 374a439ccc60..0149cf15058a 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -1,7 +1,7 @@
 /*
  * access guest memory
  *
- * Copyright IBM Corp. 2008, 2009
+ * Copyright IBM Corp. 2008, 2014
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
@@ -15,100 +15,321 @@
 
 #include <linux/compiler.h>
 #include <linux/kvm_host.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
+#include <linux/ptrace.h>
 #include "kvm-s390.h"
 
-/* Convert real to absolute address by applying the prefix of the CPU */
+/**
+ * kvm_s390_real_to_abs - convert guest real address to guest absolute address
+ * @vcpu - guest virtual cpu
+ * @gra - guest real address
+ *
+ * Returns the guest absolute address that corresponds to the passed guest real
+ * address @gra of a virtual guest cpu by applying its prefix.
+ */
 static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
-                                                 unsigned long gaddr)
+                                                 unsigned long gra)
 {
-        unsigned long prefix = vcpu->arch.sie_block->prefix;
-        if (gaddr < 2 * PAGE_SIZE)
-                gaddr += prefix;
-        else if (gaddr >= prefix && gaddr < prefix + 2 * PAGE_SIZE)
-                gaddr -= prefix;
-        return gaddr;
+        unsigned long prefix = kvm_s390_get_prefix(vcpu);
+
+        if (gra < 2 * PAGE_SIZE)
+                gra += prefix;
+        else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
+                gra -= prefix;
+        return gra;
 }
 
-static inline void __user *__gptr_to_uptr(struct kvm_vcpu *vcpu,
-                                          void __user *gptr,
-                                          int prefixing)
-{
-        unsigned long gaddr = (unsigned long) gptr;
-        unsigned long uaddr;
-
-        if (prefixing)
-                gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
-        uaddr = gmap_fault(gaddr, vcpu->arch.gmap);
-        if (IS_ERR_VALUE(uaddr))
-                uaddr = -EFAULT;
-        return (void __user *)uaddr;
-}
+/**
+ * kvm_s390_logical_to_effective - convert guest logical to effective address
+ * @vcpu: guest virtual cpu
+ * @ga: guest logical address
+ *
+ * Convert a guest vcpu logical address to a guest vcpu effective address by
+ * applying the rules of the vcpu's addressing mode defined by PSW bits 31
+ * and 32 (extended/basic addressing mode).
+ *
+ * Depending on the vcpu's addressing mode the upper 40 bits (24 bit addressing
+ * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing mode)
+ * of @ga will be zeroed and the remaining bits will be returned.
+ */
+static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
+                                                          unsigned long ga)
+{
+        psw_t *psw = &vcpu->arch.sie_block->gpsw;
+
+        if (psw_bits(*psw).eaba == PSW_AMODE_64BIT)
+                return ga;
+        if (psw_bits(*psw).eaba == PSW_AMODE_31BIT)
+                return ga & ((1UL << 31) - 1);
+        return ga & ((1UL << 24) - 1);
+}
 
-#define get_guest(vcpu, x, gptr)                                \
-({                                                              \
-        __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\
-        int __mask = sizeof(__typeof__(*(gptr))) - 1;           \
-        int __ret;                                              \
-                                                                \
-        if (IS_ERR((void __force *)__uptr)) {                   \
-                __ret = PTR_ERR((void __force *)__uptr);        \
-        } else {                                                \
-                BUG_ON((unsigned long)__uptr & __mask);         \
-                __ret = get_user(x, __uptr);                    \
-        }                                                       \
-        __ret;                                                  \
-})
-
-#define put_guest(vcpu, x, gptr)                                \
-({                                                              \
-        __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\
-        int __mask = sizeof(__typeof__(*(gptr))) - 1;           \
-        int __ret;                                              \
-                                                                \
-        if (IS_ERR((void __force *)__uptr)) {                   \
-                __ret = PTR_ERR((void __force *)__uptr);        \
-        } else {                                                \
-                BUG_ON((unsigned long)__uptr & __mask);         \
-                __ret = put_user(x, __uptr);                    \
-        }                                                       \
-        __ret;                                                  \
-})
+/*
+ * put_guest_lc, read_guest_lc and write_guest_lc are guest access functions
+ * which shall only be used to access the lowcore of a vcpu.
+ * These functions should be used for e.g. interrupt handlers where no
+ * guest memory access protection facilities, like key or low address
+ * protection, are applicable.
+ * At a later point guest vcpu lowcore access should happen via pinned
+ * prefix pages, so that these pages can be accessed directly via the
+ * kernel mapping. All of these *_lc functions can be removed then.
+ */
+
+/**
+ * put_guest_lc - write a simple variable to a guest vcpu's lowcore
+ * @vcpu: virtual cpu
+ * @x: value to copy to guest
+ * @gra: vcpu's destination guest real address
+ *
+ * Copies a simple value from kernel space to a guest vcpu's lowcore.
+ * The size of the variable may be 1, 2, 4 or 8 bytes. The destination
+ * must be located in the vcpu's lowcore. Otherwise the result is undefined.
+ *
+ * Returns zero on success or -EFAULT on error.
+ *
+ * Note: an error indicates that either the kernel is out of memory or
+ * the guest memory mapping is broken. In any case the best solution
+ * would be to terminate the guest.
+ * It is wrong to inject a guest exception.
+ */
+#define put_guest_lc(vcpu, x, gra)                              \
+({                                                              \
+        struct kvm_vcpu *__vcpu = (vcpu);                       \
+        __typeof__(*(gra)) __x = (x);                           \
+        unsigned long __gpa;                                    \
+                                                                \
+        __gpa = (unsigned long)(gra);                           \
+        __gpa += kvm_s390_get_prefix(__vcpu);                   \
+        kvm_write_guest(__vcpu->kvm, __gpa, &__x, sizeof(__x)); \
+})
 
-static inline int __copy_guest(struct kvm_vcpu *vcpu, unsigned long to,
-                               unsigned long from, unsigned long len,
-                               int to_guest, int prefixing)
+/**
+ * write_guest_lc - copy data from kernel space to guest vcpu's lowcore
+ * @vcpu: virtual cpu
+ * @gra: vcpu's destination guest real address
+ * @data: source address in kernel space
+ * @len: number of bytes to copy
+ *
+ * Copy data from kernel space to guest vcpu's lowcore. The entire range must
+ * be located within the vcpu's lowcore, otherwise the result is undefined.
+ *
+ * Returns zero on success or -EFAULT on error.
+ *
+ * Note: an error indicates that either the kernel is out of memory or
+ * the guest memory mapping is broken. In any case the best solution
+ * would be to terminate the guest.
+ * It is wrong to inject a guest exception.
+ */
+static inline __must_check
+int write_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
+                   unsigned long len)
+{
+        unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);
+
+        return kvm_write_guest(vcpu->kvm, gpa, data, len);
+}
+
+/**
+ * read_guest_lc - copy data from guest vcpu's lowcore to kernel space
+ * @vcpu: virtual cpu
+ * @gra: vcpu's source guest real address
+ * @data: destination address in kernel space
+ * @len: number of bytes to copy
+ *
+ * Copy data from guest vcpu's lowcore to kernel space. The entire range must
+ * be located within the vcpu's lowcore, otherwise the result is undefined.
+ *
+ * Returns zero on success or -EFAULT on error.
+ *
+ * Note: an error indicates that either the kernel is out of memory or
+ * the guest memory mapping is broken. In any case the best solution
+ * would be to terminate the guest.
+ * It is wrong to inject a guest exception.
+ */
+static inline __must_check
+int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
+                  unsigned long len)
+{
+        unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);
+
+        return kvm_read_guest(vcpu->kvm, gpa, data, len);
+}
+
+int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
+                            unsigned long *gpa, int write);
+
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+                 unsigned long len, int write);
+
+int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
+                      void *data, unsigned long len, int write);
+
+/**
+ * write_guest - copy data from kernel space to guest space
+ * @vcpu: virtual cpu
+ * @ga: guest address
+ * @data: source address in kernel space
+ * @len: number of bytes to copy
+ *
+ * Copy @len bytes from @data (kernel space) to @ga (guest address).
+ * In order to copy data to guest space the PSW of the vcpu is inspected:
+ * If DAT is off data will be copied to guest real or absolute memory.
+ * If DAT is on data will be copied to the address space as specified by
+ * the address space bits of the PSW:
+ * Primary, secondary or home space (access register mode is currently not
+ * implemented).
+ * The addressing mode of the PSW is also inspected, so that address wrap
+ * around is taken into account for 24-, 31- and 64-bit addressing mode,
+ * if the data to be copied crosses page boundaries in guest address space.
+ * In addition also low address and DAT protection are inspected before
+ * copying any data (key protection is currently not implemented).
+ *
+ * This function modifies the 'struct kvm_s390_pgm_info pgm' member of @vcpu.
+ * In case of an access exception (e.g. protection exception) pgm will contain
+ * all data necessary so that a subsequent call to 'kvm_s390_inject_prog_vcpu()'
+ * will inject a correct exception into the guest.
+ * If no access exception happened, the contents of pgm are undefined when
+ * this function returns.
+ *
+ * Returns: - zero on success
+ *          - a negative value if e.g. the guest mapping is broken or in
+ *            case of out-of-memory. In this case the contents of pgm are
+ *            undefined. Also parts of @data may have been copied to guest
+ *            space.
+ *          - a positive value if an access exception happened. In this case
+ *            the returned value is the program interruption code and the
+ *            contents of pgm may be used to inject an exception into the
+ *            guest. No data has been copied to guest space.
+ *
+ * Note: in case an access exception is recognized no data has been copied to
+ *       guest space (this is also true if the data to be copied would cross
+ *       one or more page boundaries in guest space).
+ *       Therefore this function may be used for nullifying and suppressing
+ *       instruction emulation.
+ *       It may also be used for terminating instructions, if it is undefined
+ *       whether data has been changed in guest space in case of an exception.
+ */
+static inline __must_check
+int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+                unsigned long len)
+{
+        return access_guest(vcpu, ga, data, len, 1);
+}
+
+/**
+ * read_guest - copy data from guest space to kernel space
+ * @vcpu: virtual cpu
+ * @ga: guest address
+ * @data: destination address in kernel space
+ * @len: number of bytes to copy
+ *
+ * Copy @len bytes from @ga (guest address) to @data (kernel space).
+ *
+ * The behaviour of read_guest is identical to write_guest, except that
+ * data will be copied from guest space to kernel space.
+ */
+static inline __must_check
+int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+               unsigned long len)
+{
+        return access_guest(vcpu, ga, data, len, 0);
+}
+
+/**
+ * write_guest_abs - copy data from kernel space to guest space absolute
+ * @vcpu: virtual cpu
+ * @gpa: guest physical (absolute) address
+ * @data: source address in kernel space
+ * @len: number of bytes to copy
+ *
+ * Copy @len bytes from @data (kernel space) to @gpa (guest absolute address).
+ * It is up to the caller to ensure that the entire guest memory range is
+ * valid memory before calling this function.
+ * Guest low address and key protection are not checked.
+ *
+ * Returns zero on success or -EFAULT on error.
+ *
+ * If an error occurs data may have been copied partially to guest memory.
+ */
+static inline __must_check
+int write_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
+                    unsigned long len)
+{
+        return kvm_write_guest(vcpu->kvm, gpa, data, len);
+}
+
+/**
+ * read_guest_abs - copy data from guest space absolute to kernel space
+ * @vcpu: virtual cpu
+ * @gpa: guest physical (absolute) address
+ * @data: destination address in kernel space
+ * @len: number of bytes to copy
+ *
+ * Copy @len bytes from @gpa (guest absolute address) to @data (kernel space).
+ * It is up to the caller to ensure that the entire guest memory range is
+ * valid memory before calling this function.
+ * Guest key protection is not checked.
+ *
+ * Returns zero on success or -EFAULT on error.
+ *
+ * If an error occurs data may have been copied partially to kernel space.
+ */
+static inline __must_check
+int read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
+                   unsigned long len)
+{
+        return kvm_read_guest(vcpu->kvm, gpa, data, len);
+}
+
+/**
+ * write_guest_real - copy data from kernel space to guest space real
+ * @vcpu: virtual cpu
+ * @gra: guest real address
+ * @data: source address in kernel space
+ * @len: number of bytes to copy
+ *
+ * Copy @len bytes from @data (kernel space) to @gra (guest real address).
+ * It is up to the caller to ensure that the entire guest memory range is
+ * valid memory before calling this function.
+ * Guest low address and key protection are not checked.
+ *
+ * Returns zero on success or -EFAULT on error.
+ *
+ * If an error occurs data may have been copied partially to guest memory.
+ */
+static inline __must_check
+int write_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
+                     unsigned long len)
+{
+        return access_guest_real(vcpu, gra, data, len, 1);
+}
+
+/**
+ * read_guest_real - copy data from guest space real to kernel space
+ * @vcpu: virtual cpu
+ * @gra: guest real address
+ * @data: destination address in kernel space
+ * @len: number of bytes to copy
+ *
+ * Copy @len bytes from @gra (guest real address) to @data (kernel space).
+ * It is up to the caller to ensure that the entire guest memory range is
+ * valid memory before calling this function.
+ * Guest key protection is not checked.
+ *
+ * Returns zero on success or -EFAULT on error.
+ *
+ * If an error occurs data may have been copied partially to kernel space.
+ */
+static inline __must_check
+int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
+                    unsigned long len)
 {
-        unsigned long _len, rc;
-        void __user *uptr;
-
-        while (len) {
-                uptr = to_guest ? (void __user *)to : (void __user *)from;
-                uptr = __gptr_to_uptr(vcpu, uptr, prefixing);
-                if (IS_ERR((void __force *)uptr))
-                        return -EFAULT;
-                _len = PAGE_SIZE - ((unsigned long)uptr & (PAGE_SIZE - 1));
-                _len = min(_len, len);
-                if (to_guest)
-                        rc = copy_to_user((void __user *) uptr, (void *)from, _len);
-                else
-                        rc = copy_from_user((void *)to, (void __user *)uptr, _len);
-                if (rc)
-                        return -EFAULT;
-                len -= _len;
-                from += _len;
-                to += _len;
-        }
-        return 0;
+        return access_guest_real(vcpu, gra, data, len, 0);
 }
 
-#define copy_to_guest(vcpu, to, from, size) \
-        __copy_guest(vcpu, to, (unsigned long)from, size, 1, 1)
-#define copy_from_guest(vcpu, to, from, size) \
-        __copy_guest(vcpu, (unsigned long)to, from, size, 0, 1)
-#define copy_to_guest_absolute(vcpu, to, from, size) \
-        __copy_guest(vcpu, to, (unsigned long)from, size, 1, 0)
-#define copy_from_guest_absolute(vcpu, to, from, size) \
-        __copy_guest(vcpu, (unsigned long)to, from, size, 0, 0)
+void ipte_lock(struct kvm_vcpu *vcpu);
+void ipte_unlock(struct kvm_vcpu *vcpu);
+int ipte_lock_held(struct kvm_vcpu *vcpu);
+int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga);
 
 #endif /* __KVM_S390_GACCESS_H */
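
Usage sketch (not part of the patch): the comments above define a return-value convention for read_guest()/write_guest() where a positive value is a program interruption code and a negative value is a host error. A minimal instruction handler built on that convention could look like the sketch below; the handler name handle_example_insn() and the kvm_s390_inject_prog_cond() helper are assumptions for illustration, not defined by this header.

#include <linux/kvm_host.h>
#include "kvm-s390.h"
#include "gaccess.h"

/*
 * Illustrative only: emulate a hypothetical instruction that reads 8 bytes
 * from guest logical address @ga, sets a bit and writes the result back.
 * A return value > 0 from the gaccess helpers is a program interruption
 * code that should be injected into the guest; a negative value is a
 * host-side error and is passed through unchanged.
 */
static int handle_example_insn(struct kvm_vcpu *vcpu, unsigned long ga)
{
        u64 val;
        int rc;

        rc = read_guest(vcpu, ga, &val, sizeof(val));
        if (rc)         /* inject program exception or report host error */
                return kvm_s390_inject_prog_cond(vcpu, rc);

        val |= 1;       /* the emulated operation itself */

        rc = write_guest(vcpu, ga, &val, sizeof(val));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        return 0;
}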
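
A second sketch (also not part of the patch) for the *_lc lowcore helpers: as the comment block above notes, they are meant for interrupt delivery paths where key and low-address protection do not apply, and errors must be reported as -EFAULT rather than injected. The function below and the __LC_* offsets from asm/lowcore.h are assumptions for illustration.

#include <linux/kvm_host.h>
#include <asm/lowcore.h>
#include "gaccess.h"

/*
 * Illustrative only: deliver a hypothetical external interrupt by writing
 * the interruption code and the old PSW to the guest lowcore and loading
 * the external new PSW, using put_guest_lc/write_guest_lc/read_guest_lc.
 */
static int deliver_example_ext_irq(struct kvm_vcpu *vcpu, u16 ext_int_code)
{
        psw_t new_psw;
        int rc;

        rc = put_guest_lc(vcpu, ext_int_code, (u16 *)__LC_EXT_INT_CODE);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &new_psw, sizeof(psw_t));
        if (rc)
                return -EFAULT; /* do not inject; lowcore access failed */
        vcpu->arch.sie_block->gpsw = new_psw;
        return 0;
}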