author:    Heiko Carstens <heiko.carstens@de.ibm.com>  2008-03-25 13:47:20 -0400
committer: Avi Kivity <avi@qumranet.com>  2008-04-27 05:00:42 -0400
commit:    b0c632db637d68ad39d9f97f452ce176253f5f4e
tree:      770d68c17cbcabc1543d1e9125669130fcf4fee4
parent:    8a88ac6183975c73c65b45f365f6f3b875c1348b
KVM: s390: arch backend for the kvm kernel module
This patch contains the port of Qumranet's kvm kernel module to IBM zSeries
(aka s390x, mainframe) architecture. It uses the mainframe's virtualization
instruction SIE to run virtual machines with up to 64 virtual CPUs each. This
port is only usable on 64bit host kernels, and can only run 64bit guest
kernels. However, running 31bit applications in guest userspace is possible.

The following source files are introduced by this patch:

arch/s390/kvm/kvm-s390.c      similar to arch/x86/kvm/x86.c, this implements
                              all arch callbacks for kvm. __vcpu_run calls
                              back into sie64a to enter the guest machine
                              context
arch/s390/kvm/sie64a.S        assembler function sie64a, which enters guest
                              context via SIE, and switches world before and
                              after that
include/asm-s390/kvm_host.h   contains all vital data structures needed to
                              run virtual machines on the mainframe
include/asm-s390/kvm.h        defines kvm_regs and friends for user access to
                              guest register content
arch/s390/kvm/gaccess.h       functions similar to uaccess to access guest
                              memory
arch/s390/kvm/kvm-s390.h      header file for kvm-s390 internals, extended by
                              later patches

Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
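For orientation, here is a minimal sketch (not part of this patch) of how a
caller might use the gaccess helpers below; the function name and the fixed
guest address are illustrative assumptions only:

	/* Illustrative sketch only: read a 4-byte value from guest real
	 * address 0x2000 (naturally aligned, as get_guest_u32 requires)
	 * and propagate -EFAULT for addresses outside guest memory. */
	static int example_read_guest(struct kvm_vcpu *vcpu)
	{
		u32 val;
		int rc;

		rc = get_guest_u32(vcpu, 0x2000, &val);
		if (rc)
			return rc;
		/* ... use val ... */
		return 0;
	}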
Diffstat (limited to 'arch/s390/kvm/gaccess.h')
 arch/s390/kvm/gaccess.h | 274 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 274 insertions(+), 0 deletions(-)
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
new file mode 100644
index 000000000000..4e0633c413f3
--- /dev/null
+++ b/arch/s390/kvm/gaccess.h
@@ -0,0 +1,274 @@
/*
 * gaccess.h - access guest memory
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>

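/*
 * Translate a guest real address into a host user-space pointer.
 * This applies s390 "prefixing": for each CPU the first two pages
 * (addresses 0 - 2*PAGE_SIZE) are exchanged with the two pages at its
 * prefix register, so low-core accesses are redirected the same way
 * the hardware redirects them.
 */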
static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
					       u64 guestaddr)
{
	u64 prefix  = vcpu->arch.sie_block->prefix;
	u64 origin  = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;

	if (guestaddr < 2 * PAGE_SIZE)
		guestaddr += prefix;
	else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
		guestaddr -= prefix;

	if (guestaddr >= memsize)
		return (void __user __force *) ERR_PTR(-EFAULT);

	guestaddr += origin;

	return (void __user *) guestaddr;
}

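/*
 * get_guest_uXX read a naturally aligned value from guest memory and
 * return 0 or a negative error code. The BUG_ON alignment checks
 * encode a caller contract: guest addresses must be naturally aligned
 * for the access size.
 */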
static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
				u64 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 7);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u64 __user *) uptr);
}

static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
				u32 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 3);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u32 __user *) uptr);
}

static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
				u16 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 1);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u16 __user *) uptr);
}

static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
			       u8 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u8 __user *) uptr);
}

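/* put_guest_uXX mirror the getters above for writes to guest memory. */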
static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
				u64 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 7);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u64 __user *) uptr);
}

static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
				u32 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 3);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u32 __user *) uptr);
}

static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
				u16 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 1);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u16 __user *) uptr);
}

static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
			       u8 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u8 __user *) uptr);
}

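/*
 * Slow path: copy byte by byte so that every single byte gets the full
 * prefix treatment in __guestaddr_to_user. Needed whenever a range
 * crosses one of the prefixing boundaries.
 */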
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
				       const void *from, unsigned long n)
{
	int rc;
	unsigned long i;
	const u8 *data = from;

	for (i = 0; i < n; i++) {
		rc = put_guest_u8(vcpu, guestdest++, *(data++));
		if (rc < 0)
			return rc;
	}
	return 0;
}

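/*
 * Fast path: if the range does not cross the low-core or prefix page
 * boundaries, it can be translated with a single offset and copied in
 * one go; otherwise fall back to the byte-wise slow path.
 */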
static inline int copy_to_guest(struct kvm_vcpu *vcpu, u64 guestdest,
				const void *from, unsigned long n)
{
	u64 prefix  = vcpu->arch.sie_block->prefix;
	u64 origin  = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;

	if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestdest < prefix) && (guestdest + n > prefix))
		goto slowpath;

	if ((guestdest < prefix + 2 * PAGE_SIZE)
	    && (guestdest + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestdest < 2 * PAGE_SIZE)
		guestdest += prefix;
	else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
		guestdest -= prefix;

	if (guestdest + n > memsize)
		return -EFAULT;

	if (guestdest + n < guestdest)
		return -EFAULT;

	guestdest += origin;

	if (copy_to_user((void __user *) guestdest, from, n))
		return -EFAULT;
	return 0;
slowpath:
	return __copy_to_guest_slow(vcpu, guestdest, from, n);
}

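/* Byte-wise slow path for reads, analogous to __copy_to_guest_slow. */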
static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
					 u64 guestsrc, unsigned long n)
{
	int rc;
	unsigned long i;
	u8 *data = to;

	for (i = 0; i < n; i++) {
		rc = get_guest_u8(vcpu, guestsrc++, data++);
		if (rc < 0)
			return rc;
	}
	return 0;
}

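/* Mirror of copy_to_guest for reads from guest memory. */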
static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
				  u64 guestsrc, unsigned long n)
{
	u64 prefix  = vcpu->arch.sie_block->prefix;
	u64 origin  = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;

	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestsrc < prefix) && (guestsrc + n > prefix))
		goto slowpath;

	if ((guestsrc < prefix + 2 * PAGE_SIZE)
	    && (guestsrc + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestsrc < 2 * PAGE_SIZE)
		guestsrc += prefix;
	else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
		guestsrc -= prefix;

	if (guestsrc + n > memsize)
		return -EFAULT;

	if (guestsrc + n < guestsrc)
		return -EFAULT;

	guestsrc += origin;

	if (copy_from_user(to, (void __user *) guestsrc, n))
		return -EFAULT;
	return 0;
slowpath:
	return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}

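/*
 * The _absolute variants bypass prefixing and treat the address as a
 * guest absolute address, i.e. a plain offset into guest memory.
 */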
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
					 const void *from, unsigned long n)
{
	u64 origin  = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;

	if (guestdest + n > memsize)
		return -EFAULT;

	if (guestdest + n < guestdest)
		return -EFAULT;

	guestdest += origin;

	if (copy_to_user((void __user *) guestdest, from, n))
		return -EFAULT;
	return 0;
}

static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
					   u64 guestsrc, unsigned long n)
{
	u64 origin  = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;

	if (guestsrc + n > memsize)
		return -EFAULT;

	if (guestsrc + n < guestsrc)
		return -EFAULT;

	guestsrc += origin;

	if (copy_from_user(to, (void __user *) guestsrc, n))
		return -EFAULT;
	return 0;
}
#endif /* __KVM_S390_GACCESS_H */