Diffstat (limited to 'arch/s390/kvm/gaccess.h')
-rw-r--r--	arch/s390/kvm/gaccess.h	62
1 file changed, 33 insertions(+), 29 deletions(-)
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 4e0633c413f3..ed60f3a74a85 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -18,11 +18,11 @@
 #include <asm/uaccess.h>
 
 static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
-					       u64 guestaddr)
+					       unsigned long guestaddr)
 {
-	u64 prefix  = vcpu->arch.sie_block->prefix;
-	u64 origin  = vcpu->kvm->arch.guest_origin;
-	u64 memsize = vcpu->kvm->arch.guest_memsize;
+	unsigned long prefix  = vcpu->arch.sie_block->prefix;
+	unsigned long origin  = vcpu->kvm->arch.guest_origin;
+	unsigned long memsize = vcpu->kvm->arch.guest_memsize;
 
 	if (guestaddr < 2 * PAGE_SIZE)
 		guestaddr += prefix;
@@ -37,7 +37,7 @@ static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
 	return (void __user *) guestaddr;
 }
 
-static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
 				u64 *result)
 {
 	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -47,10 +47,10 @@ static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
 	if (IS_ERR((void __force *) uptr))
 		return PTR_ERR((void __force *) uptr);
 
-	return get_user(*result, (u64 __user *) uptr);
+	return get_user(*result, (unsigned long __user *) uptr);
 }
 
-static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
 				u32 *result)
 {
 	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -63,7 +63,7 @@ static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
 	return get_user(*result, (u32 __user *) uptr);
 }
 
-static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
 				u16 *result)
 {
 	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -76,7 +76,7 @@ static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
 	return get_user(*result, (u16 __user *) uptr);
 }
 
-static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
 			       u8 *result)
 {
 	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -87,7 +87,7 @@ static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
 	return get_user(*result, (u8 __user *) uptr);
 }
 
-static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
 				u64 value)
 {
 	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -100,7 +100,7 @@ static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
 	return put_user(value, (u64 __user *) uptr);
 }
 
-static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
 				u32 value)
 {
 	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -113,7 +113,7 @@ static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
 	return put_user(value, (u32 __user *) uptr);
 }
 
-static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
 				u16 value)
 {
 	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -126,7 +126,7 @@ static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
 	return put_user(value, (u16 __user *) uptr);
 }
 
-static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
 			       u8 value)
 {
 	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -138,7 +138,8 @@ static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
 }
 
 
-static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
+static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
+				       unsigned long guestdest,
 				       const void *from, unsigned long n)
 {
 	int rc;
@@ -153,12 +154,12 @@ static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
 	return 0;
 }
 
-static inline int copy_to_guest(struct kvm_vcpu *vcpu, u64 guestdest,
+static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
 				const void *from, unsigned long n)
 {
-	u64 prefix  = vcpu->arch.sie_block->prefix;
-	u64 origin  = vcpu->kvm->arch.guest_origin;
-	u64 memsize = vcpu->kvm->arch.guest_memsize;
+	unsigned long prefix  = vcpu->arch.sie_block->prefix;
+	unsigned long origin  = vcpu->kvm->arch.guest_origin;
+	unsigned long memsize = vcpu->kvm->arch.guest_memsize;
 
 	if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
 		goto slowpath;
@@ -189,7 +190,8 @@ slowpath:
 }
 
 static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
-					 u64 guestsrc, unsigned long n)
+					 unsigned long guestsrc,
+					 unsigned long n)
 {
 	int rc;
 	unsigned long i;
@@ -204,11 +206,11 @@ static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
 }
 
 static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
-				  u64 guestsrc, unsigned long n)
+				  unsigned long guestsrc, unsigned long n)
 {
-	u64 prefix  = vcpu->arch.sie_block->prefix;
-	u64 origin  = vcpu->kvm->arch.guest_origin;
-	u64 memsize = vcpu->kvm->arch.guest_memsize;
+	unsigned long prefix  = vcpu->arch.sie_block->prefix;
+	unsigned long origin  = vcpu->kvm->arch.guest_origin;
+	unsigned long memsize = vcpu->kvm->arch.guest_memsize;
 
 	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
 		goto slowpath;
@@ -238,11 +240,12 @@ slowpath:
 	return __copy_from_guest_slow(vcpu, to, guestsrc, n);
 }
 
-static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
+static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
+					 unsigned long guestdest,
 					 const void *from, unsigned long n)
 {
-	u64 origin  = vcpu->kvm->arch.guest_origin;
-	u64 memsize = vcpu->kvm->arch.guest_memsize;
+	unsigned long origin  = vcpu->kvm->arch.guest_origin;
+	unsigned long memsize = vcpu->kvm->arch.guest_memsize;
 
 	if (guestdest + n > memsize)
 		return -EFAULT;
@@ -256,10 +259,11 @@ static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
 }
 
 static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
-					   u64 guestsrc, unsigned long n)
+					   unsigned long guestsrc,
+					   unsigned long n)
 {
-	u64 origin  = vcpu->kvm->arch.guest_origin;
-	u64 memsize = vcpu->kvm->arch.guest_memsize;
+	unsigned long origin  = vcpu->kvm->arch.guest_origin;
+	unsigned long memsize = vcpu->kvm->arch.guest_memsize;
 
 	if (guestsrc + n > memsize)
 		return -EFAULT;