Diffstat (limited to 'arch/powerpc/kvm/booke.c')
 -rw-r--r--  arch/powerpc/kvm/booke.c  | 346
 1 files changed, 296 insertions, 50 deletions
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index d25a097c852b..69f114015780 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -36,9 +36,11 @@
 #include <asm/dbell.h>
 #include <asm/hw_irq.h>
 #include <asm/irq.h>
+#include <asm/time.h>
 
 #include "timing.h"
 #include "booke.h"
+#include "trace.h"
 
 unsigned long kvmppc_booke_handlers;
 
@@ -62,6 +64,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
         { "halt_wakeup", VCPU_STAT(halt_wakeup) },
         { "doorbell", VCPU_STAT(dbell_exits) },
         { "guest doorbell", VCPU_STAT(gdbell_exits) },
+        { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
         { NULL }
 };
 
@@ -120,6 +123,16 @@ static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
 }
 #endif
 
+static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
+{
+#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
+        /* We always treat the FP bit as enabled from the host
+           perspective, so only need to adjust the shadow MSR */
+        vcpu->arch.shadow_msr &= ~MSR_FP;
+        vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
+#endif
+}
+
 /*
  * Helper function for "full" MSR writes. No need to call this if only
  * EE/CE/ME/DE/RI are changing.
@@ -136,11 +149,13 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
 
         kvmppc_mmu_msr_notify(vcpu, old_msr);
         kvmppc_vcpu_sync_spe(vcpu);
+        kvmppc_vcpu_sync_fpu(vcpu);
 }
 
 static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
 {
+        trace_kvm_booke_queue_irqprio(vcpu, priority);
         set_bit(priority, &vcpu->arch.pending_exceptions);
 }
 
@@ -206,6 +221,16 @@ void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
         clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
 }
 
+static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
+{
+        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
+}
+
+static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
+{
+        clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
+}
+
 static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
 {
 #ifdef CONFIG_KVM_BOOKE_HV
@@ -287,6 +312,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
         bool crit;
         bool keep_irq = false;
         enum int_class int_class;
+        ulong new_msr = vcpu->arch.shared->msr;
 
         /* Truncate crit indicators in 32 bit mode */
         if (!(vcpu->arch.shared->msr & MSR_SF)) {
@@ -325,6 +351,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                 msr_mask = MSR_CE | MSR_ME | MSR_DE;
                 int_class = INT_CLASS_NONCRIT;
                 break;
+        case BOOKE_IRQPRIO_WATCHDOG:
         case BOOKE_IRQPRIO_CRITICAL:
         case BOOKE_IRQPRIO_DBELL_CRIT:
                 allowed = vcpu->arch.shared->msr & MSR_CE;
@@ -381,7 +408,13 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                         set_guest_esr(vcpu, vcpu->arch.queued_esr);
                 if (update_dear == true)
                         set_guest_dear(vcpu, vcpu->arch.queued_dear);
-                kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);
+
+                new_msr &= msr_mask;
+#if defined(CONFIG_64BIT)
+                if (vcpu->arch.epcr & SPRN_EPCR_ICM)
+                        new_msr |= MSR_CM;
+#endif
+                kvmppc_set_msr(vcpu, new_msr);
 
                 if (!keep_irq)
                         clear_bit(priority, &vcpu->arch.pending_exceptions);
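
A quick standalone illustration of what the rewritten delivery path above computes: the guest MSR is first masked down to the bits the interrupt class keeps, then MSR[CM] is forced on when EPCR[ICM] requests 64-bit interrupt handlers. The bit values in this userspace sketch are illustrative stand-ins, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define MSR_CM   (1ULL << 31)        /* stand-in for the MSR[CM] bit */
#define EPCR_ICM (1U << 1)           /* stand-in for the EPCR[ICM] bit */

/* Mirror of the new_msr computation added in the hunk above. */
static uint64_t delivery_msr(uint64_t guest_msr, uint64_t msr_mask, uint32_t epcr)
{
        uint64_t new_msr = guest_msr & msr_mask; /* keep e.g. CE/ME/DE only */

        if (epcr & EPCR_ICM)                     /* guest wants 64-bit handlers */
                new_msr |= MSR_CM;

        return new_msr;
}

int main(void)
{
        /* Example: mask clears everything, ICM set -> only MSR_CM survives. */
        printf("0x%llx\n", (unsigned long long)delivery_msr(0x1ULL, 0, EPCR_ICM));
        return 0;
}
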
@@ -404,12 +437,121 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
         return allowed;
 }
 
+/*
+ * Return the number of jiffies until the next timeout.  If the timeout is
+ * longer than the NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
+ * because the larger value can break the timer APIs.
+ */
+static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
+{
+        u64 tb, wdt_tb, wdt_ticks = 0;
+        u64 nr_jiffies = 0;
+        u32 period = TCR_GET_WP(vcpu->arch.tcr);
+
+        wdt_tb = 1ULL << (63 - period);
+        tb = get_tb();
+        /*
+         * The watchdog timeout will happen when the TB bit corresponding
+         * to the watchdog next toggles from 0 to 1.
+         */
+        if (tb & wdt_tb)
+                wdt_ticks = wdt_tb;
+
+        wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));
+
+        /* Convert timebase ticks to jiffies */
+        nr_jiffies = wdt_ticks;
+
+        if (do_div(nr_jiffies, tb_ticks_per_jiffy))
+                nr_jiffies++;
+
+        return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
+}
+
+static void arm_next_watchdog(struct kvm_vcpu *vcpu)
+{
+        unsigned long nr_jiffies;
+        unsigned long flags;
+
+        /*
+         * If TSR_ENW and TSR_WIS are not set then no need to exit to
+         * userspace, so clear the KVM_REQ_WATCHDOG request.
+         */
+        if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
+                clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests);
+
+        spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
+        nr_jiffies = watchdog_next_timeout(vcpu);
+        /*
+         * If the number of jiffies of watchdog timer >= NEXT_TIMER_MAX_DELTA
+         * then do not run the watchdog timer as this can break timer APIs.
+         */
+        if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
+                mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
+        else
+                del_timer(&vcpu->arch.wdt_timer);
+        spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
+}
+
+void kvmppc_watchdog_func(unsigned long data)
+{
+        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+        u32 tsr, new_tsr;
+        int final;
+
+        do {
+                new_tsr = tsr = vcpu->arch.tsr;
+                final = 0;
+
+                /* Time out event */
+                if (tsr & TSR_ENW) {
+                        if (tsr & TSR_WIS)
+                                final = 1;
+                        else
+                                new_tsr = tsr | TSR_WIS;
+                } else {
+                        new_tsr = tsr | TSR_ENW;
+                }
+        } while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);
+
+        if (new_tsr & TSR_WIS) {
+                smp_wmb();
+                kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
+                kvm_vcpu_kick(vcpu);
+        }
+
+        /*
+         * If this is the final watchdog expiry and some action is required
+         * then exit to userspace.
+         */
+        if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
+            vcpu->arch.watchdog_enabled) {
+                smp_wmb();
+                kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
+                kvm_vcpu_kick(vcpu);
+        }
+
+        /*
+         * Stop running the watchdog timer after the final expiration to
+         * prevent the host from being flooded with timers if the
+         * guest sets a short period.
+         * Timers will resume when TSR/TCR is updated next time.
+         */
+        if (!final)
+                arm_next_watchdog(vcpu);
+}
+
 static void update_timer_ints(struct kvm_vcpu *vcpu)
 {
         if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
                 kvmppc_core_queue_dec(vcpu);
         else
                 kvmppc_core_dequeue_dec(vcpu);
+
+        if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
+                kvmppc_core_queue_watchdog(vcpu);
+        else
+                kvmppc_core_dequeue_watchdog(vcpu);
 }
 
 static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
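
The watchdog_next_timeout() arithmetic above waits for the TB bit selected by TCR[WP] to next flip from 0 to 1: if that bit is already set, it must first wrap back to 0 and then come around again. A self-contained userspace sketch of the same calculation follows; the timebase frequency and host tick rate are assumed values for illustration, not anything this patch defines.

#include <stdint.h>
#include <stdio.h>

#define TB_FREQ_HZ 500000000ULL      /* assumed 500 MHz timebase */
#define HOST_HZ    250ULL            /* assumed host tick rate */

/* Timebase ticks until bit (63 - period) next toggles from 0 to 1. */
static uint64_t ticks_until_watchdog(uint64_t tb, unsigned int period)
{
        uint64_t wdt_tb = 1ULL << (63 - period);
        uint64_t ticks = 0;

        if (tb & wdt_tb)             /* bit already 1: it must clear first */
                ticks = wdt_tb;      /* and come around again */

        ticks += wdt_tb - (tb & (wdt_tb - 1));
        return ticks;
}

int main(void)
{
        uint64_t tb = 0x123456789abcULL;  /* example current timebase value */
        unsigned int period = 34;         /* example TCR[WP]: watch bit 63-34 */
        uint64_t ticks = ticks_until_watchdog(tb, period);
        uint64_t tb_per_jiffy = TB_FREQ_HZ / HOST_HZ;
        uint64_t nr_jiffies = (ticks + tb_per_jiffy - 1) / tb_per_jiffy;

        printf("%llu ticks, ~%llu jiffies until the watchdog fires\n",
               (unsigned long long)ticks, (unsigned long long)nr_jiffies);
        return 0;
}
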
@@ -417,13 +559,6 @@ static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
         unsigned long *pending = &vcpu->arch.pending_exceptions;
         unsigned int priority;
 
-        if (vcpu->requests) {
-                if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
-                        smp_mb();
-                        update_timer_ints(vcpu);
-                }
-        }
-
         priority = __ffs(*pending);
         while (priority < BOOKE_IRQPRIO_MAX) {
                 if (kvmppc_booke_irqprio_deliver(vcpu, priority))
@@ -459,37 +594,20 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
         return r;
 }
 
-/*
- * Common checks before entering the guest world. Call with interrupts
- * disabled.
- *
- * returns !0 if a signal is pending and check_signal is true
- */
-static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
+int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 {
-        int r = 0;
+        int r = 1; /* Indicate we want to get back into the guest */
 
-        WARN_ON_ONCE(!irqs_disabled());
-        while (true) {
-                if (need_resched()) {
-                        local_irq_enable();
-                        cond_resched();
-                        local_irq_disable();
-                        continue;
-                }
-
-                if (signal_pending(current)) {
-                        r = 1;
-                        break;
-                }
-
-                if (kvmppc_core_prepare_to_enter(vcpu)) {
-                        /* interrupts got enabled in between, so we
-                           are back at square 1 */
-                        continue;
-                }
+        if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
+                update_timer_ints(vcpu);
+#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
+        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
+                kvmppc_core_flush_tlb(vcpu);
+#endif
 
-                break;
+        if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
+                vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
+                r = 0;
         }
 
         return r;
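
kvmppc_core_check_requests() above returns 0 with the exit reason set to KVM_EXIT_WATCHDOG once the final watchdog expiry has been flagged, so the vcpu falls back out to userspace. A hedged sketch of how a VMM run loop might consume that exit is below; treating it as a reset/stop point for the guest is an assumed policy, the kernel only reports the event.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

/* Minimal vcpu loop fragment; error handling and other exits elided. */
static int run_vcpu(int vcpu_fd, struct kvm_run *run)
{
        for (;;) {
                if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                        return -1;

                switch (run->exit_reason) {
                case KVM_EXIT_WATCHDOG:
                        /* Final expiry with TCR[WRC] set: reset or stop the
                         * guest here, whatever the VMM's policy is. */
                        fprintf(stderr, "guest watchdog expired\n");
                        return 0;
                default:
                        /* MMIO, I/O, etc. handled elsewhere */
                        break;
                }
        }
}
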
@@ -497,7 +615,7 @@ static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
-        int ret;
+        int ret, s;
 #ifdef CONFIG_PPC_FPU
         unsigned int fpscr;
         int fpexc_mode;
@@ -510,11 +628,13 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 }
 
         local_irq_disable();
-        if (kvmppc_prepare_to_enter(vcpu)) {
-                kvm_run->exit_reason = KVM_EXIT_INTR;
-                ret = -EINTR;
+        s = kvmppc_prepare_to_enter(vcpu);
+        if (s <= 0) {
+                local_irq_enable();
+                ret = s;
                 goto out;
         }
+        kvmppc_lazy_ee_enable();
 
         kvm_guest_enter();
 
@@ -542,6 +662,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
         ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
+        /* No need for kvm_guest_exit. It's done in handle_exit.
+           We also get here with interrupts enabled. */
+
 #ifdef CONFIG_PPC_FPU
         kvmppc_save_guest_fp(vcpu);
 
@@ -557,10 +680,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
         current->thread.fpexc_mode = fpexc_mode;
 #endif
 
-        kvm_guest_exit();
-
 out:
-        local_irq_enable();
+        vcpu->mode = OUTSIDE_GUEST_MODE;
         return ret;
 }
 
566 687
@@ -668,6 +789,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int exit_nr)
 {
         int r = RESUME_HOST;
+        int s;
 
         /* update before a new last_exit_type is rewritten */
         kvmppc_update_timing_stats(vcpu);
@@ -677,6 +799,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
         local_irq_enable();
 
+        trace_kvm_exit(exit_nr, vcpu);
+        kvm_guest_exit();
+
         run->exit_reason = KVM_EXIT_UNKNOWN;
         run->ready_for_interrupt_injection = 1;
 
@@ -971,10 +1096,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
          */
         if (!(r & RESUME_HOST)) {
                 local_irq_disable();
-                if (kvmppc_prepare_to_enter(vcpu)) {
-                        run->exit_reason = KVM_EXIT_INTR;
-                        r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
-                        kvmppc_account_exit(vcpu, SIGNAL_EXITS);
+                s = kvmppc_prepare_to_enter(vcpu);
+                if (s <= 0) {
+                        local_irq_enable();
+                        r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
+                } else {
+                        kvmppc_lazy_ee_enable();
                 }
         }
 
@@ -1011,6 +1138,21 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
         return r;
 }
 
+int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+        /* setup watchdog timer once */
+        spin_lock_init(&vcpu->arch.wdt_lock);
+        setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
+                    (unsigned long)vcpu);
+
+        return 0;
+}
+
+void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+        del_timer_sync(&vcpu->arch.wdt_timer);
+}
+
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
         int i;
@@ -1106,7 +1248,13 @@ static int set_sregs_base(struct kvm_vcpu *vcpu,
         }
 
         if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
+                u32 old_tsr = vcpu->arch.tsr;
+
                 vcpu->arch.tsr = sregs->u.e.tsr;
+
+                if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
+                        arm_next_watchdog(vcpu);
+
                 update_timer_ints(vcpu);
         }
 
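
Since set_sregs_base() now re-arms the watchdog whenever a TSR write touches ENW/WIS, userspace can service or clear a stuck watchdog through the existing sregs path. A rough sketch under that assumption follows; clearing the whole TSR is purely illustrative, a real VMM would mask only the bits it intends to change.

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Clear all guest timer status bits (including watchdog ENW/WIS) via sregs. */
static int clear_guest_timer_status(int vcpu_fd)
{
        struct kvm_sregs sregs;

        if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
                return -1;

        sregs.u.e.update_special = KVM_SREGS_E_UPDATE_TSR;
        sregs.u.e.tsr = 0;   /* illustrative: wipes decrementer status too */

        return ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
}
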
@@ -1221,12 +1369,70 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 {
-        return -EINVAL;
+        int r = -EINVAL;
+
+        switch (reg->id) {
+        case KVM_REG_PPC_IAC1:
+        case KVM_REG_PPC_IAC2:
+        case KVM_REG_PPC_IAC3:
+        case KVM_REG_PPC_IAC4: {
+                int iac = reg->id - KVM_REG_PPC_IAC1;
+                r = copy_to_user((u64 __user *)(long)reg->addr,
+                                 &vcpu->arch.dbg_reg.iac[iac], sizeof(u64));
+                break;
+        }
+        case KVM_REG_PPC_DAC1:
+        case KVM_REG_PPC_DAC2: {
+                int dac = reg->id - KVM_REG_PPC_DAC1;
+                r = copy_to_user((u64 __user *)(long)reg->addr,
+                                 &vcpu->arch.dbg_reg.dac[dac], sizeof(u64));
+                break;
+        }
+#if defined(CONFIG_64BIT)
+        case KVM_REG_PPC_EPCR:
+                r = put_user(vcpu->arch.epcr, (u32 __user *)(long)reg->addr);
+                break;
+#endif
+        default:
+                break;
+        }
+        return r;
 }
 
 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 {
-        return -EINVAL;
+        int r = -EINVAL;
+
+        switch (reg->id) {
+        case KVM_REG_PPC_IAC1:
+        case KVM_REG_PPC_IAC2:
+        case KVM_REG_PPC_IAC3:
+        case KVM_REG_PPC_IAC4: {
+                int iac = reg->id - KVM_REG_PPC_IAC1;
+                r = copy_from_user(&vcpu->arch.dbg_reg.iac[iac],
+                                   (u64 __user *)(long)reg->addr, sizeof(u64));
+                break;
+        }
+        case KVM_REG_PPC_DAC1:
+        case KVM_REG_PPC_DAC2: {
+                int dac = reg->id - KVM_REG_PPC_DAC1;
+                r = copy_from_user(&vcpu->arch.dbg_reg.dac[dac],
+                                   (u64 __user *)(long)reg->addr, sizeof(u64));
+                break;
+        }
+#if defined(CONFIG_64BIT)
+        case KVM_REG_PPC_EPCR: {
+                u32 new_epcr;
+                r = get_user(new_epcr, (u32 __user *)(long)reg->addr);
+                if (r == 0)
+                        kvmppc_set_epcr(vcpu, new_epcr);
+                break;
+        }
+#endif
+        default:
+                break;
+        }
+        return r;
 }
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
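
The ONE_REG plumbing above exposes the IAC/DAC debug registers and, on 64-bit, EPCR to userspace. A hedged sketch of setting EPCR from a VMM follows; struct kvm_one_reg and KVM_SET_ONE_REG are the standard KVM uapi, KVM_REG_PPC_EPCR comes from this series, and the surrounding file-descriptor handling is illustrative.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

/* Point the kernel at a u32 in our address space and let it copy EPCR in. */
static int set_guest_epcr(int vcpu_fd, uint32_t epcr)
{
        struct kvm_one_reg reg = {
                .id   = KVM_REG_PPC_EPCR,
                .addr = (uintptr_t)&epcr,
        };

        return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}
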
@@ -1253,20 +1459,50 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
         return -ENOTSUPP;
 }
 
+void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+                              struct kvm_memory_slot *dont)
+{
+}
+
+int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+                               unsigned long npages)
+{
+        return 0;
+}
+
 int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+                                      struct kvm_memory_slot *memslot,
                                       struct kvm_userspace_memory_region *mem)
 {
         return 0;
 }
 
 void kvmppc_core_commit_memory_region(struct kvm *kvm,
-                                      struct kvm_userspace_memory_region *mem)
+                                      struct kvm_userspace_memory_region *mem,
+                                      struct kvm_memory_slot old)
+{
+}
+
+void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
+{
+}
+
+void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
 {
+#if defined(CONFIG_64BIT)
+        vcpu->arch.epcr = new_epcr;
+#ifdef CONFIG_KVM_BOOKE_HV
+        vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
+        if (vcpu->arch.epcr & SPRN_EPCR_ICM)
+                vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
+#endif
+#endif
 }
 
 void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
 {
         vcpu->arch.tcr = new_tcr;
+        arm_next_watchdog(vcpu);
         update_timer_ints(vcpu);
 }
 
@@ -1281,6 +1517,14 @@ void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
 void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
 {
         clear_bits(tsr_bits, &vcpu->arch.tsr);
+
+        /*
+         * We may have stopped the watchdog due to
+         * being stuck on final expiration.
+         */
+        if (tsr_bits & (TSR_ENW | TSR_WIS))
+                arm_next_watchdog(vcpu);
+
         update_timer_ints(vcpu);
 }
 
@@ -1298,12 +1542,14 @@ void kvmppc_decrementer_func(unsigned long data)
 
 void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+        vcpu->cpu = smp_processor_id();
         current->thread.kvm_vcpu = vcpu;
 }
 
 void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
 {
         current->thread.kvm_vcpu = NULL;
+        vcpu->cpu = -1;
 }
 
 int __init kvmppc_booke_init(void)