author     Linus Torvalds <torvalds@linux-foundation.org>  2013-11-14 23:51:36 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-11-14 23:51:36 -0500
commit     f080480488028bcc25357f85e8ae54ccc3bb7173 (patch)
tree       8fcc943f16d26c795b3b6324b478af2d5a30285d  /arch/powerpc/kvm/booke.c
parent     eda670c626a4f53eb8ac5f20d8c10d3f0b54c583 (diff)
parent     e504c9098ed6acd9e1079c5e10e4910724ad429f (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM changes from Paolo Bonzini:
 "Here are the 3.13 KVM changes. There was a lot of work on the PPC
  side: the most interesting change from a user point of view is
  probably that the HV and emulation flavors can now coexist in a
  single kernel.

  On the x86 side there are nested virtualization improvements and a
  few bugfixes. ARM got transparent huge page support, improved
  overcommit, and support for big endian guests.

  Finally, there is a new interface to connect KVM with VFIO. This
  helps with devices that use NoSnoop PCI transactions, letting the
  driver in the guest execute WBINVD instructions. This includes some
  nVidia cards on Windows, which fail to start without these patches
  and the corresponding userspace changes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (146 commits)
  kvm, vmx: Fix lazy FPU on nested guest
  arm/arm64: KVM: PSCI: propagate caller endianness to the incoming vcpu
  arm/arm64: KVM: MMIO support for BE guest
  kvm, cpuid: Fix sparse warning
  kvm: Delete prototype for non-existent function kvm_check_iopl
  kvm: Delete prototype for non-existent function complete_pio
  hung_task: add method to reset detector
  pvclock: detect watchdog reset at pvclock read
  kvm: optimize out smp_mb after srcu_read_unlock
  srcu: API for barrier after srcu read unlock
  KVM: remove vm mmap method
  KVM: IOMMU: hva align mapping page size
  KVM: x86: trace cpuid emulation when called from emulator
  KVM: emulator: cleanup decode_register_operand() a bit
  KVM: emulator: check rex prefix inside decode_register()
  KVM: x86: fix emulation of "movzbl %bpl, %eax"
  kvm_host: typo fix
  KVM: x86: emulate SAHF instruction
  MAINTAINERS: add tree for kvm.git
  Documentation/kvm: add a 00-INDEX file
  ...
Diffstat (limited to 'arch/powerpc/kvm/booke.c')
-rw-r--r--  arch/powerpc/kvm/booke.c  337
1 file changed, 299 insertions(+), 38 deletions(-)
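The booke.c portion of this merge adds userspace debug support: kvm_arch_vcpu_ioctl_set_guest_debug() now programs hardware instruction breakpoints (IAC1-IAC4) and data watchpoints (DAC1/DAC2), and the new kvmppc_handle_debug() turns the resulting debug interrupts into KVM_EXIT_DEBUG exits. A minimal, hypothetical userspace sketch of that flow follows; it is not part of the commit, it assumes the uapi layout this series introduces (struct kvm_guest_debug_arch with a bp[] array of addr/type pairs and the KVMPPC_DEBUG_* flags) and an already-created vcpu_fd, so check asm/kvm.h for the authoritative definitions.

/* Hypothetical sketch: arm one hardware instruction breakpoint on a
 * BookE vcpu and interpret the resulting KVM_EXIT_DEBUG exit.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int arm_hw_breakpoint(int vcpu_fd, uint64_t guest_addr)
{
        struct kvm_guest_debug dbg = { 0 };

        dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg.arch.bp[0].addr = guest_addr;              /* lands in IAC1 via kvmppc_booke_add_breakpoint() */
        dbg.arch.bp[0].type = KVMPPC_DEBUG_BREAKPOINT;

        return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);   /* 0 on success */
}

/* After a subsequent KVM_RUN returns with exit_reason == KVM_EXIT_DEBUG,
 * run->debug.arch.address holds the trapping guest address and
 * run->debug.arch.status carries KVMPPC_DEBUG_BREAKPOINT, as filled in
 * by kvmppc_handle_debug() in the diff below.
 */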
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 5133199f6cb7..53e65a210b9a 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -40,7 +40,9 @@
 
 #include "timing.h"
 #include "booke.h"
-#include "trace.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace_booke.h"
 
 unsigned long kvmppc_booke_handlers;
 
@@ -133,6 +135,29 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
 #endif
 }
 
+static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
+{
+        /* Synchronize guest's desire to get debug interrupts into shadow MSR */
+#ifndef CONFIG_KVM_BOOKE_HV
+        vcpu->arch.shadow_msr &= ~MSR_DE;
+        vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
+#endif
+
+        /* Force enable debug interrupts when user space wants to debug */
+        if (vcpu->guest_debug) {
+#ifdef CONFIG_KVM_BOOKE_HV
+                /*
+                 * Since there is no shadow MSR, sync MSR_DE into the guest
+                 * visible MSR.
+                 */
+                vcpu->arch.shared->msr |= MSR_DE;
+#else
+                vcpu->arch.shadow_msr |= MSR_DE;
+                vcpu->arch.shared->msr &= ~MSR_DE;
+#endif
+        }
+}
+
 /*
  * Helper function for "full" MSR writes. No need to call this if only
  * EE/CE/ME/DE/RI are changing.
@@ -150,6 +175,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
         kvmppc_mmu_msr_notify(vcpu, old_msr);
         kvmppc_vcpu_sync_spe(vcpu);
         kvmppc_vcpu_sync_fpu(vcpu);
+        kvmppc_vcpu_sync_debug(vcpu);
 }
 
 static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
@@ -655,6 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
         int ret, s;
+        struct thread_struct thread;
 #ifdef CONFIG_PPC_FPU
         struct thread_fp_state fp;
         int fpexc_mode;
@@ -695,6 +722,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
         kvmppc_load_guest_fp(vcpu);
 #endif
 
+        /* Switch to guest debug context */
+        thread.debug = vcpu->arch.shadow_dbg_reg;
+        switch_booke_debug_regs(&thread);
+        thread.debug = current->thread.debug;
+        current->thread.debug = vcpu->arch.shadow_dbg_reg;
+
         kvmppc_fix_ee_before_entry();
 
         ret = __kvmppc_vcpu_run(kvm_run, vcpu);
@@ -702,6 +735,10 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
         /* No need for kvm_guest_exit. It's done in handle_exit.
            We also get here with interrupts enabled. */
 
+        /* Switch back to user space debug context */
+        switch_booke_debug_regs(&thread);
+        current->thread.debug = thread.debug;
+
 #ifdef CONFIG_PPC_FPU
         kvmppc_save_guest_fp(vcpu);
 
@@ -757,6 +794,30 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
         }
 }
 
+static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+        struct debug_reg *dbg_reg = &(vcpu->arch.shadow_dbg_reg);
+        u32 dbsr = vcpu->arch.dbsr;
+
+        run->debug.arch.status = 0;
+        run->debug.arch.address = vcpu->arch.pc;
+
+        if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
+                run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
+        } else {
+                if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
+                        run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
+                else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
+                        run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
+                if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
+                        run->debug.arch.address = dbg_reg->dac1;
+                else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
+                        run->debug.arch.address = dbg_reg->dac2;
+        }
+
+        return RESUME_HOST;
+}
+
 static void kvmppc_fill_pt_regs(struct pt_regs *regs)
 {
         ulong r1, ip, msr, lr;
@@ -817,6 +878,11 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
         case BOOKE_INTERRUPT_CRITICAL:
                 unknown_exception(&regs);
                 break;
+        case BOOKE_INTERRUPT_DEBUG:
+                /* Save DBSR before preemption is enabled */
+                vcpu->arch.dbsr = mfspr(SPRN_DBSR);
+                kvmppc_clear_dbsr();
+                break;
         }
 }
 
@@ -1134,18 +1200,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
         }
 
         case BOOKE_INTERRUPT_DEBUG: {
-                u32 dbsr;
-
-                vcpu->arch.pc = mfspr(SPRN_CSRR0);
-
-                /* clear IAC events in DBSR register */
-                dbsr = mfspr(SPRN_DBSR);
-                dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
-                mtspr(SPRN_DBSR, dbsr);
-
-                run->exit_reason = KVM_EXIT_DEBUG;
+                r = kvmppc_handle_debug(run, vcpu);
+                if (r == RESUME_HOST)
+                        run->exit_reason = KVM_EXIT_DEBUG;
                 kvmppc_account_exit(vcpu, DEBUG_EXITS);
-                r = RESUME_HOST;
                 break;
         }
 
@@ -1196,7 +1254,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
         kvmppc_set_msr(vcpu, 0);
 
 #ifndef CONFIG_KVM_BOOKE_HV
-        vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
+        vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
         vcpu->arch.shadow_pid = 1;
         vcpu->arch.shared->msr = 0;
 #endif
@@ -1358,7 +1416,7 @@ static int set_sregs_arch206(struct kvm_vcpu *vcpu,
         return 0;
 }
 
-void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
         sregs->u.e.features |= KVM_SREGS_E_IVOR;
 
@@ -1378,6 +1436,7 @@ void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
         sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
         sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
         sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
+        return 0;
 }
 
 int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
@@ -1412,8 +1471,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 
         get_sregs_base(vcpu, sregs);
         get_sregs_arch206(vcpu, sregs);
-        kvmppc_core_get_sregs(vcpu, sregs);
-        return 0;
+        return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
 }
 
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
@@ -1432,7 +1490,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
         if (ret < 0)
                 return ret;
 
-        return kvmppc_core_set_sregs(vcpu, sregs);
+        return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
 }
 
 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
@@ -1440,7 +1498,6 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
         int r = 0;
         union kvmppc_one_reg val;
         int size;
-        long int i;
 
         size = one_reg_size(reg->id);
         if (size > sizeof(val))
@@ -1448,16 +1505,24 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 
         switch (reg->id) {
         case KVM_REG_PPC_IAC1:
+                val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac1);
+                break;
         case KVM_REG_PPC_IAC2:
+                val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac2);
+                break;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
         case KVM_REG_PPC_IAC3:
+                val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac3);
+                break;
         case KVM_REG_PPC_IAC4:
-                i = reg->id - KVM_REG_PPC_IAC1;
-                val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac[i]);
+                val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac4);
                 break;
+#endif
         case KVM_REG_PPC_DAC1:
+                val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac1);
+                break;
         case KVM_REG_PPC_DAC2:
-                i = reg->id - KVM_REG_PPC_DAC1;
-                val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac[i]);
+                val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2);
                 break;
         case KVM_REG_PPC_EPR: {
                 u32 epr = get_guest_epr(vcpu);
@@ -1476,10 +1541,13 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
                 val = get_reg_val(reg->id, vcpu->arch.tsr);
                 break;
         case KVM_REG_PPC_DEBUG_INST:
-                val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV);
+                val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV_DEBUG);
+                break;
+        case KVM_REG_PPC_VRSAVE:
+                val = get_reg_val(reg->id, vcpu->arch.vrsave);
                 break;
         default:
-                r = kvmppc_get_one_reg(vcpu, reg->id, &val);
+                r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
                 break;
         }
 
@@ -1497,7 +1565,6 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
         int r = 0;
         union kvmppc_one_reg val;
         int size;
-        long int i;
 
         size = one_reg_size(reg->id);
         if (size > sizeof(val))
@@ -1508,16 +1575,24 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 
         switch (reg->id) {
         case KVM_REG_PPC_IAC1:
+                vcpu->arch.dbg_reg.iac1 = set_reg_val(reg->id, val);
+                break;
         case KVM_REG_PPC_IAC2:
+                vcpu->arch.dbg_reg.iac2 = set_reg_val(reg->id, val);
+                break;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
         case KVM_REG_PPC_IAC3:
+                vcpu->arch.dbg_reg.iac3 = set_reg_val(reg->id, val);
+                break;
         case KVM_REG_PPC_IAC4:
-                i = reg->id - KVM_REG_PPC_IAC1;
-                vcpu->arch.dbg_reg.iac[i] = set_reg_val(reg->id, val);
+                vcpu->arch.dbg_reg.iac4 = set_reg_val(reg->id, val);
                 break;
+#endif
         case KVM_REG_PPC_DAC1:
+                vcpu->arch.dbg_reg.dac1 = set_reg_val(reg->id, val);
+                break;
         case KVM_REG_PPC_DAC2:
-                i = reg->id - KVM_REG_PPC_DAC1;
-                vcpu->arch.dbg_reg.dac[i] = set_reg_val(reg->id, val);
+                vcpu->arch.dbg_reg.dac2 = set_reg_val(reg->id, val);
                 break;
         case KVM_REG_PPC_EPR: {
                 u32 new_epr = set_reg_val(reg->id, val);
@@ -1551,20 +1626,17 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
                 kvmppc_set_tcr(vcpu, tcr);
                 break;
         }
+        case KVM_REG_PPC_VRSAVE:
+                vcpu->arch.vrsave = set_reg_val(reg->id, val);
+                break;
         default:
-                r = kvmppc_set_one_reg(vcpu, reg->id, &val);
+                r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
                 break;
         }
 
         return r;
 }
 
-int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
-                                        struct kvm_guest_debug *dbg)
-{
-        return -EINVAL;
-}
-
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
         return -ENOTSUPP;
@@ -1589,12 +1661,12 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
         return -ENOTSUPP;
 }
 
-void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                               struct kvm_memory_slot *dont)
 {
 }
 
-int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                                unsigned long npages)
 {
         return 0;
@@ -1670,6 +1742,157 @@ void kvmppc_decrementer_func(unsigned long data)
         kvmppc_set_tsr_bits(vcpu, TSR_DIS);
 }
 
+static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
+                                       uint64_t addr, int index)
+{
+        switch (index) {
+        case 0:
+                dbg_reg->dbcr0 |= DBCR0_IAC1;
+                dbg_reg->iac1 = addr;
+                break;
+        case 1:
+                dbg_reg->dbcr0 |= DBCR0_IAC2;
+                dbg_reg->iac2 = addr;
+                break;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
+        case 2:
+                dbg_reg->dbcr0 |= DBCR0_IAC3;
+                dbg_reg->iac3 = addr;
+                break;
+        case 3:
+                dbg_reg->dbcr0 |= DBCR0_IAC4;
+                dbg_reg->iac4 = addr;
+                break;
+#endif
+        default:
+                return -EINVAL;
+        }
+
+        dbg_reg->dbcr0 |= DBCR0_IDM;
+        return 0;
+}
+
+static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
+                                       int type, int index)
+{
+        switch (index) {
+        case 0:
+                if (type & KVMPPC_DEBUG_WATCH_READ)
+                        dbg_reg->dbcr0 |= DBCR0_DAC1R;
+                if (type & KVMPPC_DEBUG_WATCH_WRITE)
+                        dbg_reg->dbcr0 |= DBCR0_DAC1W;
+                dbg_reg->dac1 = addr;
+                break;
+        case 1:
+                if (type & KVMPPC_DEBUG_WATCH_READ)
+                        dbg_reg->dbcr0 |= DBCR0_DAC2R;
+                if (type & KVMPPC_DEBUG_WATCH_WRITE)
+                        dbg_reg->dbcr0 |= DBCR0_DAC2W;
+                dbg_reg->dac2 = addr;
+                break;
+        default:
+                return -EINVAL;
+        }
+
+        dbg_reg->dbcr0 |= DBCR0_IDM;
+        return 0;
+}
+void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
+{
+        /* XXX: Add similar MSR protection for BookE-PR */
+#ifdef CONFIG_KVM_BOOKE_HV
+        BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
+        if (set) {
+                if (prot_bitmap & MSR_UCLE)
+                        vcpu->arch.shadow_msrp |= MSRP_UCLEP;
+                if (prot_bitmap & MSR_DE)
+                        vcpu->arch.shadow_msrp |= MSRP_DEP;
+                if (prot_bitmap & MSR_PMM)
+                        vcpu->arch.shadow_msrp |= MSRP_PMMP;
+        } else {
+                if (prot_bitmap & MSR_UCLE)
+                        vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
+                if (prot_bitmap & MSR_DE)
+                        vcpu->arch.shadow_msrp &= ~MSRP_DEP;
+                if (prot_bitmap & MSR_PMM)
+                        vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
+        }
+#endif
+}
+
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+                                        struct kvm_guest_debug *dbg)
+{
+        struct debug_reg *dbg_reg;
+        int n, b = 0, w = 0;
+
+        if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
+                vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
+                vcpu->guest_debug = 0;
+                kvm_guest_protect_msr(vcpu, MSR_DE, false);
+                return 0;
+        }
+
+        kvm_guest_protect_msr(vcpu, MSR_DE, true);
+        vcpu->guest_debug = dbg->control;
+        vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
+        /* Set DBCR0_EDM in guest visible DBCR0 register. */
+        vcpu->arch.dbg_reg.dbcr0 = DBCR0_EDM;
+
+        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+                vcpu->arch.shadow_dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;
+
+        /* Code below handles only HW breakpoints */
+        dbg_reg = &(vcpu->arch.shadow_dbg_reg);
+
+#ifdef CONFIG_KVM_BOOKE_HV
+        /*
+         * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1
+         * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0
+         */
+        dbg_reg->dbcr1 = 0;
+        dbg_reg->dbcr2 = 0;
+#else
+        /*
+         * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1
+         * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR
+         * is set.
+         */
+        dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
+                         DBCR1_IAC4US;
+        dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
+#endif
+
+        if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
+                return 0;
+
+        for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
+                uint64_t addr = dbg->arch.bp[n].addr;
+                uint32_t type = dbg->arch.bp[n].type;
+
+                if (type == KVMPPC_DEBUG_NONE)
+                        continue;
+
+                if (type & !(KVMPPC_DEBUG_WATCH_READ |
+                             KVMPPC_DEBUG_WATCH_WRITE |
+                             KVMPPC_DEBUG_BREAKPOINT))
+                        return -EINVAL;
+
+                if (type & KVMPPC_DEBUG_BREAKPOINT) {
+                        /* Setting H/W breakpoint */
+                        if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
+                                return -EINVAL;
+                } else {
+                        /* Setting H/W watchpoint */
+                        if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
+                                                        type, w++))
+                                return -EINVAL;
+                }
+        }
+
+        return 0;
+}
+
 void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
         vcpu->cpu = smp_processor_id();
@@ -1680,6 +1903,44 @@ void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
 {
         current->thread.kvm_vcpu = NULL;
         vcpu->cpu = -1;
+
+        /* Clear pending debug event in DBSR */
+        kvmppc_clear_dbsr();
+}
+
+void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+{
+        vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
+}
+
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+        return kvm->arch.kvm_ops->init_vm(kvm);
+}
+
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+        return kvm->arch.kvm_ops->vcpu_create(kvm, id);
+}
+
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+{
+        vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+        kvm->arch.kvm_ops->destroy_vm(kvm);
+}
+
+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+        vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
+}
+
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+{
+        vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
 }
 
 int __init kvmppc_booke_init(void)