aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorJames Hogan <james.hogan@imgtec.com>2014-05-29 05:16:35 -0400
committerPaolo Bonzini <pbonzini@redhat.com>2014-05-30 07:01:48 -0400
commite30492bbe95a2495930aa7db7eacde5141e45332 (patch)
treeacb0c06fb12ffae1ae9f118080c44a6e84a6b3ef /arch
parent3a0ba77408f824b1cebf5134c710a8455d7bc8f4 (diff)
MIPS: KVM: Rewrite count/compare timer emulation
Previously the emulation of the CPU timer was just enough to get a Linux guest running but some shortcuts were taken:

- The guest timer interrupt was hard coded to always happen every 10 ms rather than being timed to when CP0_Count would match CP0_Compare.

- The guest's CP0_Count register was based on the host's CP0_Count register. This isn't very portable and fails on cores without a CP0_Count register implemented such as Ingenic XBurst. It also meant that the guest's CP0_Cause.DC bit to disable the CP0_Count register took no effect.

- The guest's CP0_Count register was emulated by just dividing the host's CP0_Count register by 4. This resulted in continuity problems when used as a clock source, since when the host CP0_Count overflows from 0x7fffffff to 0x80000000, the guest CP0_Count transitions discontinuously from 0x1fffffff to 0xe0000000.

Therefore rewrite & fix emulation of the guest timer based on the monotonic kernel time (i.e. ktime_get()). Internally a 32-bit count_bias value is added to the frequency scaled nanosecond monotonic time to get the guest's CP0_Count. The frequency of the timer is initialised to 100MHz and cannot yet be changed, but a later patch will allow the frequency to be configured via the KVM_{GET,SET}_ONE_REG ioctl interface.

The timer can now be stopped via the CP0_Cause.DC bit (by the guest or via the KVM_SET_ONE_REG ioctl interface), at which point the current CP0_Count is stored and can be read directly. When it is restarted the bias is recalculated such that the CP0_Count value is continuous.

Due to the nature of hrtimer interrupts any read of the guest's CP0_Count register while it is running triggers a check for whether the hrtimer has expired, so that the guest/userland cannot observe the CP0_Count passing CP0_Compare without queuing a timer interrupt. This is also taken advantage of when stopping the timer to ensure that a pending timer interrupt is queued.
This replaces the implementation of:
- Guest read of CP0_Count
- Guest write of CP0_Count
- Guest write of CP0_Compare
- Guest write of CP0_Cause
- Guest read of HWR 2 (CC) with RDHWR
- Host read of CP0_Count via KVM_GET_ONE_REG ioctl interface
- Host write of CP0_Count via KVM_SET_ONE_REG ioctl interface
- Host write of CP0_Compare via KVM_SET_ONE_REG ioctl interface
- Host write of CP0_Cause via KVM_SET_ONE_REG ioctl interface

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Gleb Natapov <gleb@kernel.org>
Cc: kvm@vger.kernel.org
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: Sanjay Lal <sanjayl@kymasys.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/mips/include/asm/kvm_host.h21
-rw-r--r--arch/mips/kvm/kvm_mips.c10
-rw-r--r--arch/mips/kvm/kvm_mips_emul.c393
-rw-r--r--arch/mips/kvm/kvm_trap_emul.c27
4 files changed, 413 insertions, 38 deletions
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 79410f85a5a7..75ed94aeefe7 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -404,8 +404,15 @@ struct kvm_vcpu_arch {
404 404
405 u32 io_gpr; /* GPR used as IO source/target */ 405 u32 io_gpr; /* GPR used as IO source/target */
406 406
407 /* Used to calibrate the virutal count register for the guest */ 407 struct hrtimer comparecount_timer;
408 int32_t host_cp0_count; 408 /* Count bias from the raw time */
409 uint32_t count_bias;
410 /* Frequency of timer in Hz */
411 uint32_t count_hz;
412 /* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
413 s64 count_dyn_bias;
414 /* Period of timer tick in ns */
415 u64 count_period;
409 416
410 /* Bitmask of exceptions that are pending */ 417 /* Bitmask of exceptions that are pending */
411 unsigned long pending_exceptions; 418 unsigned long pending_exceptions;
@@ -426,8 +433,6 @@ struct kvm_vcpu_arch {
426 uint32_t guest_kernel_asid[NR_CPUS]; 433 uint32_t guest_kernel_asid[NR_CPUS];
427 struct mm_struct guest_kernel_mm, guest_user_mm; 434 struct mm_struct guest_kernel_mm, guest_user_mm;
428 435
429 struct hrtimer comparecount_timer;
430
431 int last_sched_cpu; 436 int last_sched_cpu;
432 437
433 /* WAIT executed */ 438 /* WAIT executed */
@@ -705,7 +710,13 @@ extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
705extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, 710extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
706 struct kvm_run *run); 711 struct kvm_run *run);
707 712
708enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu); 713uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu);
714void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count);
715void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare);
716void kvm_mips_init_count(struct kvm_vcpu *vcpu);
717void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
718void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
719enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);
709 720
710enum emulation_result kvm_mips_check_privilege(unsigned long cause, 721enum emulation_result kvm_mips_check_privilege(unsigned long cause,
711 uint32_t *opc, 722 uint32_t *opc,
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index 0a9c7ab56df1..fc5e44d827fc 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -363,7 +363,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
363 vcpu->arch.last_sched_cpu = -1; 363 vcpu->arch.last_sched_cpu = -1;
364 364
365 /* Start off the timer */ 365 /* Start off the timer */
366 kvm_mips_emulate_count(vcpu); 366 kvm_mips_init_count(vcpu);
367 367
368 return vcpu; 368 return vcpu;
369 369
@@ -707,9 +707,6 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
707 case KVM_REG_MIPS_CP0_STATUS: 707 case KVM_REG_MIPS_CP0_STATUS:
708 kvm_write_c0_guest_status(cop0, v); 708 kvm_write_c0_guest_status(cop0, v);
709 break; 709 break;
710 case KVM_REG_MIPS_CP0_CAUSE:
711 kvm_write_c0_guest_cause(cop0, v);
712 break;
713 case KVM_REG_MIPS_CP0_EPC: 710 case KVM_REG_MIPS_CP0_EPC:
714 kvm_write_c0_guest_epc(cop0, v); 711 kvm_write_c0_guest_epc(cop0, v);
715 break; 712 break;
@@ -719,6 +716,7 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
719 /* registers to be handled specially */ 716 /* registers to be handled specially */
720 case KVM_REG_MIPS_CP0_COUNT: 717 case KVM_REG_MIPS_CP0_COUNT:
721 case KVM_REG_MIPS_CP0_COMPARE: 718 case KVM_REG_MIPS_CP0_COMPARE:
719 case KVM_REG_MIPS_CP0_CAUSE:
722 return kvm_mips_callbacks->set_one_reg(vcpu, reg, v); 720 return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
723 default: 721 default:
724 return -EINVAL; 722 return -EINVAL;
@@ -992,9 +990,7 @@ enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
992 990
993 vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer); 991 vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
994 kvm_mips_comparecount_func((unsigned long) vcpu); 992 kvm_mips_comparecount_func((unsigned long) vcpu);
995 hrtimer_forward_now(&vcpu->arch.comparecount_timer, 993 return kvm_mips_count_timeout(vcpu);
996 ktime_set(0, MS_TO_NS(10)));
997 return HRTIMER_RESTART;
998} 994}
999 995
1000int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) 996int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
index bad31c6235d4..088c25d73a11 100644
--- a/arch/mips/kvm/kvm_mips_emul.c
+++ b/arch/mips/kvm/kvm_mips_emul.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/err.h> 13#include <linux/err.h>
14#include <linux/ktime.h>
14#include <linux/kvm_host.h> 15#include <linux/kvm_host.h>
15#include <linux/module.h> 16#include <linux/module.h>
16#include <linux/vmalloc.h> 17#include <linux/vmalloc.h>
@@ -228,25 +229,364 @@ enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
228 return er; 229 return er;
229} 230}
230 231
231/* Everytime the compare register is written to, we need to decide when to fire 232/**
232 * the timer that represents timer ticks to the GUEST. 233 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
234 * @vcpu: Virtual CPU.
233 * 235 *
236 * Returns: 1 if the CP0_Count timer is disabled by the guest CP0_Cause.DC
237 * bit.
238 * 0 otherwise (in which case CP0_Count timer is running).
234 */ 239 */
235enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu) 240static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
236{ 241{
237 struct mips_coproc *cop0 = vcpu->arch.cop0; 242 struct mips_coproc *cop0 = vcpu->arch.cop0;
238 enum emulation_result er = EMULATE_DONE; 243 return kvm_read_c0_guest_cause(cop0) & CAUSEF_DC;
244}
239 245
240 /* If COUNT is enabled */ 246/**
241 if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) { 247 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
242 hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer); 248 *
243 hrtimer_start(&vcpu->arch.comparecount_timer, 249 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
244 ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL); 250 *
245 } else { 251 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
246 hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer); 252 */
253static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
254{
255 s64 now_ns, periods;
256 u64 delta;
257
258 now_ns = ktime_to_ns(now);
259 delta = now_ns + vcpu->arch.count_dyn_bias;
260
261 if (delta >= vcpu->arch.count_period) {
262 /* If delta is out of safe range the bias needs adjusting */
263 periods = div64_s64(now_ns, vcpu->arch.count_period);
264 vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
265 /* Recalculate delta with new bias */
266 delta = now_ns + vcpu->arch.count_dyn_bias;
247 } 267 }
248 268
249 return er; 269 /*
270 * We've ensured that:
271 * delta < count_period
272 *
273 * Therefore the intermediate delta*count_hz will never overflow since
274 * at the boundary condition:
275 * delta = count_period
276 * delta = NSEC_PER_SEC * 2^32 / count_hz
277 * delta * count_hz = NSEC_PER_SEC * 2^32
278 */
279 return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
280}
281
282/**
283 * kvm_mips_read_count_running() - Read the current count value as if running.
284 * @vcpu: Virtual CPU.
285 * @now: Kernel time to read CP0_Count at.
286 *
287 * Returns the current guest CP0_Count register at time @now and handles if the
288 * timer interrupt is pending and hasn't been handled yet.
289 *
290 * Returns: The current value of the guest CP0_Count register.
291 */
292static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
293{
294 ktime_t expires;
295 int running;
296
297 /* Is the hrtimer pending? */
298 expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
299 if (ktime_compare(now, expires) >= 0) {
300 /*
301 * Cancel it while we handle it so there's no chance of
302 * interference with the timeout handler.
303 */
304 running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
305
306 /* Nothing should be waiting on the timeout */
307 kvm_mips_callbacks->queue_timer_int(vcpu);
308
309 /*
310 * Restart the timer if it was running based on the expiry time
311 * we read, so that we don't push it back 2 periods.
312 */
313 if (running) {
314 expires = ktime_add_ns(expires,
315 vcpu->arch.count_period);
316 hrtimer_start(&vcpu->arch.comparecount_timer, expires,
317 HRTIMER_MODE_ABS);
318 }
319 }
320
321 /* Return the biased and scaled guest CP0_Count */
322 return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
323}
324
325/**
326 * kvm_mips_read_count() - Read the current count value.
327 * @vcpu: Virtual CPU.
328 *
329 * Read the current guest CP0_Count value, taking into account whether the timer
330 * is stopped.
331 *
332 * Returns: The current guest CP0_Count value.
333 */
334uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
335{
336 struct mips_coproc *cop0 = vcpu->arch.cop0;
337
338 /* If count disabled just read static copy of count */
339 if (kvm_mips_count_disabled(vcpu))
340 return kvm_read_c0_guest_count(cop0);
341
342 return kvm_mips_read_count_running(vcpu, ktime_get());
343}
344
345/**
346 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
347 * @vcpu: Virtual CPU.
348 * @count: Output pointer for CP0_Count value at point of freeze.
349 *
350 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
351 * at the point it was frozen. It is guaranteed that any pending interrupts at
352 * the point it was frozen are handled, and none after that point.
353 *
354 * This is useful where the time/CP0_Count is needed in the calculation of the
355 * new parameters.
356 *
357 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
358 *
359 * Returns: The ktime at the point of freeze.
360 */
361static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
362 uint32_t *count)
363{
364 ktime_t now;
365
366 /* stop hrtimer before finding time */
367 hrtimer_cancel(&vcpu->arch.comparecount_timer);
368 now = ktime_get();
369
370 /* find count at this point and handle pending hrtimer */
371 *count = kvm_mips_read_count_running(vcpu, now);
372
373 return now;
374}
375
376
377/**
378 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
379 * @vcpu: Virtual CPU.
380 * @now: ktime at point of resume.
381 * @count: CP0_Count at point of resume.
382 *
383 * Resumes the timer and updates the timer expiry based on @now and @count.
384 * This can be used in conjunction with kvm_mips_freeze_timer() when timer
385 * parameters need to be changed.
386 *
387 * It is guaranteed that a timer interrupt immediately after resume will be
388 * handled, but not if CP_Compare is exactly at @count. That case is already
389 * handled by kvm_mips_freeze_timer().
390 *
391 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
392 */
393static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
394 ktime_t now, uint32_t count)
395{
396 struct mips_coproc *cop0 = vcpu->arch.cop0;
397 uint32_t compare;
398 u64 delta;
399 ktime_t expire;
400
401 /* Calculate timeout (wrap 0 to 2^32) */
402 compare = kvm_read_c0_guest_compare(cop0);
403 delta = (u64)(uint32_t)(compare - count - 1) + 1;
404 delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
405 expire = ktime_add_ns(now, delta);
406
407 /* Update hrtimer to use new timeout */
408 hrtimer_cancel(&vcpu->arch.comparecount_timer);
409 hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
410}
411
412/**
413 * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
414 * @vcpu: Virtual CPU.
415 *
416 * Recalculates and updates the expiry time of the hrtimer. This can be used
417 * after timer parameters have been altered which do not depend on the time that
418 * the change occurs (in those cases kvm_mips_freeze_hrtimer() and
419 * kvm_mips_resume_hrtimer() are used directly).
420 *
421 * It is guaranteed that no timer interrupts will be lost in the process.
422 *
423 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
424 */
425static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
426{
427 ktime_t now;
428 uint32_t count;
429
430 /*
431 * freeze_hrtimer takes care of a timer interrupts <= count, and
432 * resume_hrtimer the hrtimer takes care of a timer interrupts > count.
433 */
434 now = kvm_mips_freeze_hrtimer(vcpu, &count);
435 kvm_mips_resume_hrtimer(vcpu, now, count);
436}
437
438/**
439 * kvm_mips_write_count() - Modify the count and update timer.
440 * @vcpu: Virtual CPU.
441 * @count: Guest CP0_Count value to set.
442 *
443 * Sets the CP0_Count value and updates the timer accordingly.
444 */
445void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
446{
447 struct mips_coproc *cop0 = vcpu->arch.cop0;
448 ktime_t now;
449
450 /* Calculate bias */
451 now = ktime_get();
452 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
453
454 if (kvm_mips_count_disabled(vcpu))
455 /* The timer's disabled, adjust the static count */
456 kvm_write_c0_guest_count(cop0, count);
457 else
458 /* Update timeout */
459 kvm_mips_resume_hrtimer(vcpu, now, count);
460}
461
462/**
463 * kvm_mips_init_count() - Initialise timer.
464 * @vcpu: Virtual CPU.
465 *
466 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set
467 * it going if it's enabled.
468 */
469void kvm_mips_init_count(struct kvm_vcpu *vcpu)
470{
471 /* 100 MHz */
472 vcpu->arch.count_hz = 100*1000*1000;
473 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
474 vcpu->arch.count_hz);
475 vcpu->arch.count_dyn_bias = 0;
476
477 /* Starting at 0 */
478 kvm_mips_write_count(vcpu, 0);
479}
480
481/**
482 * kvm_mips_write_compare() - Modify compare and update timer.
483 * @vcpu: Virtual CPU.
484 * @compare: New CP0_Compare value.
485 *
486 * Update CP0_Compare to a new value and update the timeout.
487 */
488void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
489{
490 struct mips_coproc *cop0 = vcpu->arch.cop0;
491
492 /* if unchanged, must just be an ack */
493 if (kvm_read_c0_guest_compare(cop0) == compare)
494 return;
495
496 /* Update compare */
497 kvm_write_c0_guest_compare(cop0, compare);
498
499 /* Update timeout if count enabled */
500 if (!kvm_mips_count_disabled(vcpu))
501 kvm_mips_update_hrtimer(vcpu);
502}
503
504/**
505 * kvm_mips_count_disable() - Disable count.
506 * @vcpu: Virtual CPU.
507 *
508 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
509 * time will be handled but not after.
510 *
511 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC has been
512 * set (count disabled).
513 *
514 * Returns: The time that the timer was stopped.
515 */
516static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
517{
518 struct mips_coproc *cop0 = vcpu->arch.cop0;
519 uint32_t count;
520 ktime_t now;
521
522 /* Stop hrtimer */
523 hrtimer_cancel(&vcpu->arch.comparecount_timer);
524
525 /* Set the static count from the dynamic count, handling pending TI */
526 now = ktime_get();
527 count = kvm_mips_read_count_running(vcpu, now);
528 kvm_write_c0_guest_count(cop0, count);
529
530 return now;
531}
532
533/**
534 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
535 * @vcpu: Virtual CPU.
536 *
537 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
538 * before the final stop time will be handled, but not after.
539 *
540 * Assumes CP0_Cause.DC is clear (count enabled).
541 */
542void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
543{
544 struct mips_coproc *cop0 = vcpu->arch.cop0;
545
546 kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
547 kvm_mips_count_disable(vcpu);
548}
549
550/**
551 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
552 * @vcpu: Virtual CPU.
553 *
554 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
555 * the start time will be handled, potentially before even returning, so the
556 * caller should be careful with ordering of CP0_Cause modifications so as not
557 * to lose it.
558 *
559 * Assumes CP0_Cause.DC is set (count disabled).
560 */
561void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
562{
563 struct mips_coproc *cop0 = vcpu->arch.cop0;
564 uint32_t count;
565
566 kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
567
568 /*
569 * Set the dynamic count to match the static count.
570 * This starts the hrtimer.
571 */
572 count = kvm_read_c0_guest_count(cop0);
573 kvm_mips_write_count(vcpu, count);
574}
575
576/**
577 * kvm_mips_count_timeout() - Push timer forward on timeout.
578 * @vcpu: Virtual CPU.
579 *
580 * Handle an hrtimer event by push the hrtimer forward a period.
581 *
582 * Returns: The hrtimer_restart value to return to the hrtimer subsystem.
583 */
584enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
585{
586 /* Add the Count period to the current expiry time */
587 hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
588 vcpu->arch.count_period);
589 return HRTIMER_RESTART;
250} 590}
251 591
252enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu) 592enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
@@ -471,8 +811,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
471#endif 811#endif
472 /* Get reg */ 812 /* Get reg */
473 if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { 813 if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
474 /* XXXKYMA: Run the Guest count register @ 1/4 the rate of the host */ 814 vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
475 vcpu->arch.gprs[rt] = (read_c0_count() >> 2);
476 } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) { 815 } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
477 vcpu->arch.gprs[rt] = 0x0; 816 vcpu->arch.gprs[rt] = 0x0;
478#ifdef CONFIG_KVM_MIPS_DYN_TRANS 817#ifdef CONFIG_KVM_MIPS_DYN_TRANS
@@ -539,10 +878,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
539 } 878 }
540 /* Are we writing to COUNT */ 879 /* Are we writing to COUNT */
541 else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { 880 else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
542 /* Linux doesn't seem to write into COUNT, we throw an error 881 kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
543 * if we notice a write to COUNT
544 */
545 /*er = EMULATE_FAIL; */
546 goto done; 882 goto done;
547 } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) { 883 } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
548 kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n", 884 kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
@@ -552,8 +888,8 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
552 /* If we are writing to COMPARE */ 888 /* If we are writing to COMPARE */
553 /* Clear pending timer interrupt, if any */ 889 /* Clear pending timer interrupt, if any */
554 kvm_mips_callbacks->dequeue_timer_int(vcpu); 890 kvm_mips_callbacks->dequeue_timer_int(vcpu);
555 kvm_write_c0_guest_compare(cop0, 891 kvm_mips_write_compare(vcpu,
556 vcpu->arch.gprs[rt]); 892 vcpu->arch.gprs[rt]);
557 } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { 893 } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
558 kvm_write_c0_guest_status(cop0, 894 kvm_write_c0_guest_status(cop0,
559 vcpu->arch.gprs[rt]); 895 vcpu->arch.gprs[rt]);
@@ -564,6 +900,20 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
564#ifdef CONFIG_KVM_MIPS_DYN_TRANS 900#ifdef CONFIG_KVM_MIPS_DYN_TRANS
565 kvm_mips_trans_mtc0(inst, opc, vcpu); 901 kvm_mips_trans_mtc0(inst, opc, vcpu);
566#endif 902#endif
903 } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
904 uint32_t old_cause, new_cause;
905 old_cause = kvm_read_c0_guest_cause(cop0);
906 new_cause = vcpu->arch.gprs[rt];
907 /* Update R/W bits */
908 kvm_change_c0_guest_cause(cop0, 0x08800300,
909 new_cause);
910 /* DC bit enabling/disabling timer? */
911 if ((old_cause ^ new_cause) & CAUSEF_DC) {
912 if (new_cause & CAUSEF_DC)
913 kvm_mips_count_disable_cause(vcpu);
914 else
915 kvm_mips_count_enable_cause(vcpu);
916 }
567 } else { 917 } else {
568 cop0->reg[rd][sel] = vcpu->arch.gprs[rt]; 918 cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
569#ifdef CONFIG_KVM_MIPS_DYN_TRANS 919#ifdef CONFIG_KVM_MIPS_DYN_TRANS
@@ -1553,8 +1903,7 @@ kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
1553 current_cpu_data.icache.linesz); 1903 current_cpu_data.icache.linesz);
1554 break; 1904 break;
1555 case 2: /* Read count register */ 1905 case 2: /* Read count register */
1556 printk("RDHWR: Cont register\n"); 1906 arch->gprs[rt] = kvm_mips_read_count(vcpu);
1557 arch->gprs[rt] = kvm_read_c0_guest_count(cop0);
1558 break; 1907 break;
1559 case 3: /* Count register resolution */ 1908 case 3: /* Count register resolution */
1560 switch (current_cpu_data.cputype) { 1909 switch (current_cpu_data.cputype) {
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c
index f1e8389f8d33..9908f2b0ff46 100644
--- a/arch/mips/kvm/kvm_trap_emul.c
+++ b/arch/mips/kvm/kvm_trap_emul.c
@@ -407,8 +407,7 @@ static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
407{ 407{
408 switch (reg->id) { 408 switch (reg->id) {
409 case KVM_REG_MIPS_CP0_COUNT: 409 case KVM_REG_MIPS_CP0_COUNT:
410 /* XXXKYMA: Run the Guest count register @ 1/4 the rate of the host */ 410 *v = kvm_mips_read_count(vcpu);
411 *v = (read_c0_count() >> 2);
412 break; 411 break;
413 default: 412 default:
414 return -EINVAL; 413 return -EINVAL;
@@ -424,10 +423,30 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
424 423
425 switch (reg->id) { 424 switch (reg->id) {
426 case KVM_REG_MIPS_CP0_COUNT: 425 case KVM_REG_MIPS_CP0_COUNT:
427 /* Not supported yet */ 426 kvm_mips_write_count(vcpu, v);
428 break; 427 break;
429 case KVM_REG_MIPS_CP0_COMPARE: 428 case KVM_REG_MIPS_CP0_COMPARE:
430 kvm_write_c0_guest_compare(cop0, v); 429 kvm_mips_write_compare(vcpu, v);
430 break;
431 case KVM_REG_MIPS_CP0_CAUSE:
432 /*
433 * If the timer is stopped or started (DC bit) it must look
434 * atomic with changes to the interrupt pending bits (TI, IRQ5).
435 * A timer interrupt should not happen in between.
436 */
437 if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
438 if (v & CAUSEF_DC) {
439 /* disable timer first */
440 kvm_mips_count_disable_cause(vcpu);
441 kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
442 } else {
443 /* enable timer last */
444 kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
445 kvm_mips_count_enable_cause(vcpu);
446 }
447 } else {
448 kvm_write_c0_guest_cause(cop0, v);
449 }
431 break; 450 break;
432 default: 451 default:
433 return -EINVAL; 452 return -EINVAL;