author     Alex Bennée <alex.bennee@linaro.org>              2014-07-04 10:54:14 -0400
committer  Christoffer Dall <christoffer.dall@linaro.org>    2014-07-11 07:46:55 -0400
commit     1df08ba0aa95f1a8832b7162eec51069bd9be7ae (patch)
tree       17d48cbca3717b162f311bc00d4a6d4c003c432d
parent     efd48ceacea78e4d4656aa0a6bf4c5b92ed22130 (diff)
arm64: KVM: allow export and import of generic timer regs
For correct guest suspend/resume behaviour we need to ensure we include
the generic timer registers for 64-bit guests. As CONFIG_KVM_ARM_TIMER is
always set for arm64, we don't need to worry about null implementations.
However, I have re-jigged the kvm_arm_timer_set/get_reg declarations to
live in the common include/kvm/arm_arch_timer.h header.
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
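
For illustration only (not part of the patch): a minimal userspace sketch of
what this interface enables, i.e. a VMM saving and restoring the three generic
timer registers across a guest suspend/resume with the KVM_GET_ONE_REG and
KVM_SET_ONE_REG ioctls. The vcpu_fd descriptor is assumed to be an already
open vcpu file descriptor, and error handling is simplified.

/* Hypothetical VMM-side sketch, not kernel code from this patch. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>   /* pulls in the KVM_REG_ARM_TIMER_* register IDs */

static const uint64_t timer_reg_ids[3] = {
        KVM_REG_ARM_TIMER_CTL,
        KVM_REG_ARM_TIMER_CNT,
        KVM_REG_ARM_TIMER_CVAL,
};

/* Read the three timer registers from the vcpu into vals[]. */
static int save_timer_regs(int vcpu_fd, uint64_t vals[3])
{
        struct kvm_one_reg reg;
        int i;

        for (i = 0; i < 3; i++) {
                reg.id = timer_reg_ids[i];
                reg.addr = (uint64_t)(uintptr_t)&vals[i];
                if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
                        return -1;
        }
        return 0;
}

/* Write previously saved values back into the vcpu on resume. */
static int restore_timer_regs(int vcpu_fd, const uint64_t vals[3])
{
        struct kvm_one_reg reg;
        int i;

        for (i = 0; i < 3; i++) {
                reg.id = timer_reg_ids[i];
                reg.addr = (uint64_t)(uintptr_t)&vals[i];
                if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
                        return -1;
        }
        return 0;
}

Being able to write KVM_REG_ARM_TIMER_CNT back is what lets the resumed guest
see a consistent virtual counter, which is the suspend/resume behaviour the
commit message refers to.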
-rw-r--r--   arch/arm/include/asm/kvm_host.h |  3
-rw-r--r--   arch/arm/kvm/guest.c            | 10
-rw-r--r--   arch/arm64/kvm/guest.c          | 68
-rw-r--r--   include/kvm/arm_arch_timer.h    | 14
4 files changed, 81 insertions, 14 deletions
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 193ceaf01bfd..dc4e3edf39cc 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -228,7 +228,4 @@ static inline int kvm_arch_dev_ioctl_check_extension(long ext)
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
 
-u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
-int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
-
 #endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index b23a59c1c522..986e625b5dbd 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -124,16 +124,6 @@ static bool is_timer_reg(u64 index)
 	return false;
 }
 
-int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
-{
-	return 0;
-}
-
-u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
-{
-	return 0;
-}
-
 #else
 
 #define NUM_TIMER_REGS 3
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 60b5c31f3c10..8d1ec2887a26 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -136,13 +136,67 @@ static unsigned long num_core_regs(void)
 }
 
 /**
+ * ARM64 versions of the TIMER registers, always available on arm64
+ */
+
+#define NUM_TIMER_REGS 3
+
+static bool is_timer_reg(u64 index)
+{
+	switch (index) {
+	case KVM_REG_ARM_TIMER_CTL:
+	case KVM_REG_ARM_TIMER_CNT:
+	case KVM_REG_ARM_TIMER_CVAL:
+		return true;
+	}
+	return false;
+}
+
+static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
+		return -EFAULT;
+	uindices++;
+	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
+		return -EFAULT;
+	uindices++;
+	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+	void __user *uaddr = (void __user *)(long)reg->addr;
+	u64 val;
+	int ret;
+
+	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
+	if (ret != 0)
+		return ret;
+
+	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
+}
+
+static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+	void __user *uaddr = (void __user *)(long)reg->addr;
+	u64 val;
+
+	val = kvm_arm_timer_get_reg(vcpu, reg->id);
+	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
+}
+
+/**
  * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
  *
  * This is for all registers.
  */
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
 {
-	return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu);
+	return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
+		+ NUM_TIMER_REGS;
 }
 
 /**
@@ -154,6 +208,7 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 {
 	unsigned int i;
 	const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
+	int ret;
 
 	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
 		if (put_user(core_reg | i, uindices))
@@ -161,6 +216,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 		uindices++;
 	}
 
+	ret = copy_timer_indices(vcpu, uindices);
+	if (ret)
+		return ret;
+	uindices += NUM_TIMER_REGS;
+
 	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
 }
 
@@ -174,6 +234,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
 		return get_core_reg(vcpu, reg);
 
+	if (is_timer_reg(reg->id))
+		return get_timer_reg(vcpu, reg);
+
 	return kvm_arm_sys_reg_get_reg(vcpu, reg);
 }
 
@@ -187,6 +250,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
 		return set_core_reg(vcpu, reg);
 
+	if (is_timer_reg(reg->id))
+		return set_timer_reg(vcpu, reg);
+
 	return kvm_arm_sys_reg_set_reg(vcpu, reg);
 }
 
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 6d9aeddc09bf..ad9db6045b2f 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -67,6 +67,10 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu);
 void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
 void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
+
+u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
+int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
+
 #else
 static inline int kvm_timer_hyp_init(void)
 {
@@ -84,6 +88,16 @@ static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
 static inline void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) {}
 static inline void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) {}
 static inline void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) {}
+
+static inline int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
+{
+	return 0;
+}
+
+static inline u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
+{
+	return 0;
+}
 #endif
 
 #endif