| author | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2008-07-25 09:51:00 -0400 |
|---|---|---|
| committer | Avi Kivity <avi@qumranet.com> | 2008-07-27 04:35:57 -0400 |
| commit | 0096369daa9eaaef1a309e5d8167b023af3f998d (patch) | |
| tree | ccd77da043cbdfa63907f54e10f07ed3452ba338 | |
| parent | 99e65c92f2bbf84f43766a8bf701e36817d62822 (diff) | |
KVM: s390: Change guestaddr type in gaccess
All registers are unsigned long types. This patch changes all occurrences
of guestaddr in gaccess from u64 to unsigned long.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
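
For readers outside the s390 tree, a minimal standalone userspace sketch of the reasoning (an illustrative aside, not part of the patch, and assuming a 64-bit s390x build): `unsigned long` there has the same 8-byte width as `u64`, so the type change does not narrow the guest address range; it only aligns the parameter type with the guest registers.

```c
/*
 * Illustrative aside, not part of the patch: on a 64-bit build such as
 * s390x, unsigned long and u64 have the same width, so switching the
 * guestaddr parameters from u64 to unsigned long keeps the full address
 * range and merely matches the register type.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;   /* kernel-style alias, for illustration only */

int main(void)
{
	/* On a 64-bit build both lines print 8. */
	printf("sizeof(unsigned long) = %zu\n", sizeof(unsigned long));
	printf("sizeof(u64)           = %zu\n", sizeof(u64));
	return 0;
}
```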
| -rw-r--r-- | arch/s390/kvm/gaccess.h | 62 |
| -rw-r--r-- | arch/s390/kvm/sigp.c | 5 |
| -rw-r--r-- | include/asm-s390/kvm_host.h | 2 |

3 files changed, 37 insertions(+), 32 deletions(-)
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 4e0633c413f3..ed60f3a74a85 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -18,11 +18,11 @@
 #include <asm/uaccess.h>

 static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
-                                               u64 guestaddr)
+                                               unsigned long guestaddr)
 {
-        u64 prefix = vcpu->arch.sie_block->prefix;
-        u64 origin = vcpu->kvm->arch.guest_origin;
-        u64 memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long prefix = vcpu->arch.sie_block->prefix;
+        unsigned long origin = vcpu->kvm->arch.guest_origin;
+        unsigned long memsize = vcpu->kvm->arch.guest_memsize;

         if (guestaddr < 2 * PAGE_SIZE)
                 guestaddr += prefix;
@@ -37,7 +37,7 @@ static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
         return (void __user *) guestaddr;
 }

-static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                 u64 *result)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -47,10 +47,10 @@ static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
         if (IS_ERR((void __force *) uptr))
                 return PTR_ERR((void __force *) uptr);

-        return get_user(*result, (u64 __user *) uptr);
+        return get_user(*result, (unsigned long __user *) uptr);
 }

-static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                 u32 *result)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -63,7 +63,7 @@ static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
         return get_user(*result, (u32 __user *) uptr);
 }

-static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                 u16 *result)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -76,7 +76,7 @@ static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
         return get_user(*result, (u16 __user *) uptr);
 }

-static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u8 *result)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -87,7 +87,7 @@ static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
         return get_user(*result, (u8 __user *) uptr);
 }

-static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                 u64 value)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -100,7 +100,7 @@ static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
         return put_user(value, (u64 __user *) uptr);
 }

-static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                 u32 value)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -113,7 +113,7 @@ static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
         return put_user(value, (u32 __user *) uptr);
 }

-static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                 u16 value)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -126,7 +126,7 @@ static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
         return put_user(value, (u16 __user *) uptr);
 }

-static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u8 value)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -138,7 +138,8 @@ static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
 }


-static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
+static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
+                                       unsigned long guestdest,
                                        const void *from, unsigned long n)
 {
         int rc;
@@ -153,12 +154,12 @@ static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
         return 0;
 }

-static inline int copy_to_guest(struct kvm_vcpu *vcpu, u64 guestdest,
+static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
                                 const void *from, unsigned long n)
 {
-        u64 prefix = vcpu->arch.sie_block->prefix;
-        u64 origin = vcpu->kvm->arch.guest_origin;
-        u64 memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long prefix = vcpu->arch.sie_block->prefix;
+        unsigned long origin = vcpu->kvm->arch.guest_origin;
+        unsigned long memsize = vcpu->kvm->arch.guest_memsize;

         if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
                 goto slowpath;
@@ -189,7 +190,8 @@ slowpath:
 }

 static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
-                                         u64 guestsrc, unsigned long n)
+                                         unsigned long guestsrc,
+                                         unsigned long n)
 {
         int rc;
         unsigned long i;
@@ -204,11 +206,11 @@ static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
 }

 static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
-                                  u64 guestsrc, unsigned long n)
+                                  unsigned long guestsrc, unsigned long n)
 {
-        u64 prefix = vcpu->arch.sie_block->prefix;
-        u64 origin = vcpu->kvm->arch.guest_origin;
-        u64 memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long prefix = vcpu->arch.sie_block->prefix;
+        unsigned long origin = vcpu->kvm->arch.guest_origin;
+        unsigned long memsize = vcpu->kvm->arch.guest_memsize;

         if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
                 goto slowpath;
@@ -238,11 +240,12 @@ slowpath:
         return __copy_from_guest_slow(vcpu, to, guestsrc, n);
 }

-static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
+static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
+                                         unsigned long guestdest,
                                          const void *from, unsigned long n)
 {
-        u64 origin = vcpu->kvm->arch.guest_origin;
-        u64 memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long origin = vcpu->kvm->arch.guest_origin;
+        unsigned long memsize = vcpu->kvm->arch.guest_memsize;

         if (guestdest + n > memsize)
                 return -EFAULT;
@@ -256,10 +259,11 @@ static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
 }

 static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
-                                           u64 guestsrc, unsigned long n)
+                                           unsigned long guestsrc,
+                                           unsigned long n)
 {
-        u64 origin = vcpu->kvm->arch.guest_origin;
-        u64 memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long origin = vcpu->kvm->arch.guest_origin;
+        unsigned long memsize = vcpu->kvm->arch.guest_memsize;

         if (guestsrc + n > memsize)
                 return -EFAULT;
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 5a556114eaa5..170392687ce0 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -43,7 +43,8 @@
 #define SIGP_STAT_RECEIVER_CHECK 0x00000001UL


-static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, u64 *reg)
+static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
+                        unsigned long *reg)
 {
         struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
         int rc;
@@ -167,7 +168,7 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
 }

 static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
-                             u64 *reg)
+                             unsigned long *reg)
 {
         struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
         struct kvm_s390_local_interrupt *li;
diff --git a/include/asm-s390/kvm_host.h b/include/asm-s390/kvm_host.h
index 3234dd5b3511..6583c0d67757 100644
--- a/include/asm-s390/kvm_host.h
+++ b/include/asm-s390/kvm_host.h
@@ -231,5 +231,5 @@ struct kvm_arch{
         struct kvm_s390_float_interrupt float_int;
 };

-extern int sie64a(struct kvm_s390_sie_block *, __u64 *);
+extern int sie64a(struct kvm_s390_sie_block *, unsigned long *);
 #endif