path: root/arch/arm64/kvm/guest.c
author		Alex Bennée <alex.bennee@linaro.org>	2014-07-04 10:54:14 -0400
committer	Christoffer Dall <christoffer.dall@linaro.org>	2014-07-11 07:46:55 -0400
commit		1df08ba0aa95f1a8832b7162eec51069bd9be7ae (patch)
tree		17d48cbca3717b162f311bc00d4a6d4c003c432d /arch/arm64/kvm/guest.c
parent		efd48ceacea78e4d4656aa0a6bf4c5b92ed22130 (diff)
arm64: KVM: allow export and import of generic timer regs
For correct guest suspend/resume behaviour we need to ensure we include
the generic timer registers for 64 bit guests. As CONFIG_KVM_ARM_TIMER
is always set for arm64 we don't need to worry about null
implementations. However I have re-jigged the kvm_arm_timer_set/get_reg
declarations to be in the common include/kvm/arm_arch_timer.h headers.

Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
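[Note: the include/kvm/arm_arch_timer.h change mentioned above is outside this file's diff. As a rough sketch only, the relocated declarations would look like the following, with signatures inferred from the call sites added to guest.c below:]

/* include/kvm/arm_arch_timer.h -- sketch, not part of this diff;
 * prototypes inferred from how guest.c calls them below. */
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value);
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid);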
Diffstat (limited to 'arch/arm64/kvm/guest.c')
-rw-r--r--	arch/arm64/kvm/guest.c	68
1 file changed, 67 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 60b5c31f3c10..8d1ec2887a26 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -136,13 +136,67 @@ static unsigned long num_core_regs(void)
 }
 
 /**
+ * ARM64 versions of the TIMER registers, always available on arm64
+ */
+
+#define NUM_TIMER_REGS 3
+
+static bool is_timer_reg(u64 index)
+{
+	switch (index) {
+	case KVM_REG_ARM_TIMER_CTL:
+	case KVM_REG_ARM_TIMER_CNT:
+	case KVM_REG_ARM_TIMER_CVAL:
+		return true;
+	}
+	return false;
+}
+
+static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
+		return -EFAULT;
+	uindices++;
+	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
+		return -EFAULT;
+	uindices++;
+	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+	void __user *uaddr = (void __user *)(long)reg->addr;
+	u64 val;
+	int ret;
+
+	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
+	if (ret != 0)
+		return ret;
+
+	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
+}
+
+static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+	void __user *uaddr = (void __user *)(long)reg->addr;
+	u64 val;
+
+	val = kvm_arm_timer_get_reg(vcpu, reg->id);
+	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
+}
+
+/**
  * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
  *
  * This is for all registers.
  */
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
 {
-	return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu);
+	return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
+		+ NUM_TIMER_REGS;
 }
 
 /**
@@ -154,6 +208,7 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 {
 	unsigned int i;
 	const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
+	int ret;
 
 	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
 		if (put_user(core_reg | i, uindices))
@@ -161,6 +216,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 		uindices++;
 	}
 
+	ret = copy_timer_indices(vcpu, uindices);
+	if (ret)
+		return ret;
+	uindices += NUM_TIMER_REGS;
+
 	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
 }
 
@@ -174,6 +234,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
 		return get_core_reg(vcpu, reg);
 
+	if (is_timer_reg(reg->id))
+		return get_timer_reg(vcpu, reg);
+
 	return kvm_arm_sys_reg_get_reg(vcpu, reg);
 }
 
@@ -187,6 +250,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
 		return set_core_reg(vcpu, reg);
 
+	if (is_timer_reg(reg->id))
+		return set_timer_reg(vcpu, reg);
+
 	return kvm_arm_sys_reg_set_reg(vcpu, reg);
 }
 
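[For illustration only, not part of the patch: a minimal userspace sketch of how a VMM could save and restore the timer registers exposed by this change, using the standard KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. The vcpu_fd descriptor is assumed to come from a prior KVM_CREATE_VCPU call; error handling is reduced to a simple pass/fail.]

/* VMM-side sketch (assumption, not from the patch): save and restore the
 * guest generic timer state around suspend/resume.
 * Assumes vcpu_fd was obtained via KVM_CREATE_VCPU. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int get_one_reg(int vcpu_fd, uint64_t id, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = id,
		.addr = (uintptr_t)val,	/* KVM writes the value to *val */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

static int set_one_reg(int vcpu_fd, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = {
		.id   = id,
		.addr = (uintptr_t)&val,	/* KVM reads the value from val */
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

/* Save CTL, CNT and CVAL before suspend... */
static int save_timer_state(int vcpu_fd, uint64_t state[3])
{
	if (get_one_reg(vcpu_fd, KVM_REG_ARM_TIMER_CTL, &state[0]) ||
	    get_one_reg(vcpu_fd, KVM_REG_ARM_TIMER_CNT, &state[1]) ||
	    get_one_reg(vcpu_fd, KVM_REG_ARM_TIMER_CVAL, &state[2]))
		return -1;
	return 0;
}

/* ...and restore them on resume. */
static int restore_timer_state(int vcpu_fd, const uint64_t state[3])
{
	if (set_one_reg(vcpu_fd, KVM_REG_ARM_TIMER_CTL, state[0]) ||
	    set_one_reg(vcpu_fd, KVM_REG_ARM_TIMER_CNT, state[1]) ||
	    set_one_reg(vcpu_fd, KVM_REG_ARM_TIMER_CVAL, state[2]))
		return -1;
	return 0;
}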