author     Jintack Lim <jintack@cs.columbia.edu>    2017-02-03 10:20:05 -0500
committer  Marc Zyngier <marc.zyngier@arm.com>      2017-02-08 10:13:35 -0500
commit     fb280e97576a91c01b2a1712dba31024748b3084
tree       d119f2eddb1e504dac7d38ac8ad1d651243324d1
parent     58e0c9732a31afdef488a41fd1edba065124f442
KVM: arm/arm64: Set a background timer to the earliest timer expiration
When scheduling a background timer, consider both the virtual and
physical timers and pick the earliest expiration time.
Signed-off-by: Jintack Lim <jintack@cs.columbia.edu>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
-rw-r--r--   arch/arm/kvm/arm.c         |  3
-rw-r--r--   virt/kvm/arm/arch_timer.c  | 53
2 files changed, 42 insertions(+), 14 deletions(-)
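Before the diff itself, here is a standalone C sketch of the selection logic the patch introduces as kvm_timer_earliest_exp(): each timer contributes a candidate deadline only if it is enabled and unmasked, and the background timer is armed with the minimum of the candidates (0 meaning "nothing to arm"). This is illustrative only; struct fake_timer, timer_can_fire(), earliest_exp() and the expires_ns field are made-up stand-ins rather than kernel definitions, with the control bits assumed to mirror ARCH_TIMER_CTRL_ENABLE (bit 0) and ARCH_TIMER_CTRL_IT_MASK (bit 1).

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CTRL_ENABLE  (1U << 0)   /* assumed to mirror ARCH_TIMER_CTRL_ENABLE */
    #define CTRL_IT_MASK (1U << 1)   /* assumed to mirror ARCH_TIMER_CTRL_IT_MASK */

    /* Illustrative stand-in for a guest timer context (not the kernel struct). */
    struct fake_timer {
            uint32_t cnt_ctl;     /* control register: enable/mask bits */
            uint64_t expires_ns;  /* nanoseconds until this timer would fire */
    };

    /* A timer can raise an interrupt only if it is enabled and not masked. */
    static bool timer_can_fire(const struct fake_timer *t)
    {
            return !(t->cnt_ctl & CTRL_IT_MASK) && (t->cnt_ctl & CTRL_ENABLE);
    }

    /* Earliest expiration among the two timers; 0 if neither can fire. */
    static uint64_t earliest_exp(const struct fake_timer *vtimer,
                                 const struct fake_timer *ptimer)
    {
            uint64_t min_virt = UINT64_MAX, min_phys = UINT64_MAX;

            if (timer_can_fire(vtimer))
                    min_virt = vtimer->expires_ns;
            if (timer_can_fire(ptimer))
                    min_phys = ptimer->expires_ns;

            if (min_virt == UINT64_MAX && min_phys == UINT64_MAX)
                    return 0;

            return min_virt < min_phys ? min_virt : min_phys;
    }

    int main(void)
    {
            struct fake_timer vtimer = { .cnt_ctl = CTRL_ENABLE, .expires_ns = 5000 };
            struct fake_timer ptimer = { .cnt_ctl = CTRL_ENABLE, .expires_ns = 3000 };

            /* Prints 3000: the physical timer expires first. */
            printf("%llu\n", (unsigned long long)earliest_exp(&vtimer, &ptimer));

            /* Masking the physical timer removes it from consideration: prints 5000. */
            ptimer.cnt_ctl |= CTRL_IT_MASK;
            printf("%llu\n", (unsigned long long)earliest_exp(&vtimer, &ptimer));
            return 0;
    }

The diff below applies the same idea in the kernel, using kvm_timer_compute_delta() for the per-timer deadline.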
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 0ecd6cf362fc..21c493a9e5c9 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -300,7 +300,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-        return kvm_timer_should_fire(vcpu_vtimer(vcpu));
+        return kvm_timer_should_fire(vcpu_vtimer(vcpu)) ||
+               kvm_timer_should_fire(vcpu_ptimer(vcpu));
 }
 
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 7f9a66419991..0ea745290871 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -118,6 +118,35 @@ static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
         return 0;
 }
 
+static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
+{
+        return !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
+                (timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
+}
+
+/*
+ * Returns the earliest expiration time in ns among guest timers.
+ * Note that it will return 0 if none of timers can fire.
+ */
+static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
+{
+        u64 min_virt = ULLONG_MAX, min_phys = ULLONG_MAX;
+        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+
+        if (kvm_timer_irq_can_fire(vtimer))
+                min_virt = kvm_timer_compute_delta(vtimer);
+
+        if (kvm_timer_irq_can_fire(ptimer))
+                min_phys = kvm_timer_compute_delta(ptimer);
+
+        /* If none of timers can fire, then return 0 */
+        if ((min_virt == ULLONG_MAX) && (min_phys == ULLONG_MAX))
+                return 0;
+
+        return min(min_virt, min_phys);
+}
+
 static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
 {
         struct arch_timer_cpu *timer;
@@ -132,7 +161,7 @@ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
          * PoV (NTP on the host may have forced it to expire
          * early). If we should have slept longer, restart it.
          */
-        ns = kvm_timer_compute_delta(vcpu_vtimer(vcpu));
+        ns = kvm_timer_earliest_exp(vcpu);
         if (unlikely(ns)) {
                 hrtimer_forward_now(hrt, ns_to_ktime(ns));
                 return HRTIMER_RESTART;
@@ -142,12 +171,6 @@ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
         return HRTIMER_NORESTART;
 }
 
-static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
-{
-        return !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
-                (timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
-}
-
 bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
 {
         u64 cval, now;
@@ -215,26 +238,30 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
 {
         struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
         struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
 
         BUG_ON(timer_is_armed(timer));
 
         /*
-         * No need to schedule a background timer if the guest timer has
+         * No need to schedule a background timer if any guest timer has
          * already expired, because kvm_vcpu_block will return before putting
          * the thread to sleep.
          */
-        if (kvm_timer_should_fire(vtimer))
+        if (kvm_timer_should_fire(vtimer) || kvm_timer_should_fire(ptimer))
                 return;
 
         /*
-         * If the timer is not capable of raising interrupts (disabled or
+         * If both timers are not capable of raising interrupts (disabled or
          * masked), then there's no more work for us to do.
          */
-        if (!kvm_timer_irq_can_fire(vtimer))
+        if (!kvm_timer_irq_can_fire(vtimer) && !kvm_timer_irq_can_fire(ptimer))
                 return;
 
-        /* The timer has not yet expired, schedule a background timer */
-        timer_arm(timer, kvm_timer_compute_delta(vtimer));
+        /*
+         * The guest timers have not yet expired, schedule a background timer.
+         * Set the earliest expiration time among the guest timers.
+         */
+        timer_arm(timer, kvm_timer_earliest_exp(vcpu));
 }
 
 void kvm_timer_unschedule(struct kvm_vcpu *vcpu)