aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/kvm
diff options
context:
space:
mode:
authorBharat Bhushan <r65777@freescale.com>2013-07-04 02:57:47 -0400
committerAlexander Graf <agraf@suse.de>2013-10-17 08:49:40 -0400
commitce11e48b7fdd256ec68b932a89b397a790566031 (patch)
tree7794c296c89e56c098344592df4fcb253c1b1048 /arch/powerpc/kvm
parent547465ef8bcad77a3a73dad5151d9d28a0c1b88d (diff)
KVM: PPC: E500: Add userspace debug stub support
This patch adds debug stub support on booke/bookehv. Now the QEMU debug stub can use hw breakpoints, watchpoints and software breakpoints to debug the guest. This is how we save/restore debug register context when switching between guest, userspace and kernel user-process: When QEMU is running -> thread->debug_reg == QEMU debug register context. -> Kernel will handle switching the debug register on context switch. -> no vcpu_load() called QEMU makes ioctls (except RUN) -> This will call vcpu_load() -> should not change context. -> Some ioctls can change vcpu debug register, context saved in vcpu->debug_regs QEMU Makes RUN ioctl -> Save thread->debug_reg on STACK -> Store thread->debug_reg == vcpu->debug_reg -> load thread->debug_reg -> RUN VCPU ( So thread points to vcpu context ) Context switch happens When VCPU running -> makes vcpu_load() should not load any context -> kernel loads the vcpu context as thread->debug_regs points to vcpu context. On heavyweight_exit -> Load the context saved on stack in thread->debug_reg Currently we do not support debug resource emulation for the guest; on a debug exception, we always exit to user space irrespective of whether user space is expecting the debug exception or not. If this is an unexpected exception (breakpoint/watchpoint event not set by userspace) then the action is left to user space. This is similar to what it was before; the only difference is that now we have a proper exit state available to user space. Signed-off-by: Bharat Bhushan <bharat.bhushan@freescale.com> Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--arch/powerpc/kvm/booke.c240
-rw-r--r--arch/powerpc/kvm/booke.h5
2 files changed, 227 insertions, 18 deletions
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index a972fb600a99..8b6a790c0562 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -133,6 +133,29 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
133#endif 133#endif
134} 134}
135 135
136static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
137{
138 /* Synchronize guest's desire to get debug interrupts into shadow MSR */
139#ifndef CONFIG_KVM_BOOKE_HV
140 vcpu->arch.shadow_msr &= ~MSR_DE;
141 vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
142#endif
143
144 /* Force enable debug interrupts when user space wants to debug */
145 if (vcpu->guest_debug) {
146#ifdef CONFIG_KVM_BOOKE_HV
147 /*
148 * Since there is no shadow MSR, sync MSR_DE into the guest
149 * visible MSR.
150 */
151 vcpu->arch.shared->msr |= MSR_DE;
152#else
153 vcpu->arch.shadow_msr |= MSR_DE;
154 vcpu->arch.shared->msr &= ~MSR_DE;
155#endif
156 }
157}
158
136/* 159/*
137 * Helper function for "full" MSR writes. No need to call this if only 160 * Helper function for "full" MSR writes. No need to call this if only
138 * EE/CE/ME/DE/RI are changing. 161 * EE/CE/ME/DE/RI are changing.
@@ -150,6 +173,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
150 kvmppc_mmu_msr_notify(vcpu, old_msr); 173 kvmppc_mmu_msr_notify(vcpu, old_msr);
151 kvmppc_vcpu_sync_spe(vcpu); 174 kvmppc_vcpu_sync_spe(vcpu);
152 kvmppc_vcpu_sync_fpu(vcpu); 175 kvmppc_vcpu_sync_fpu(vcpu);
176 kvmppc_vcpu_sync_debug(vcpu);
153} 177}
154 178
155static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu, 179static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
@@ -655,6 +679,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
655int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) 679int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
656{ 680{
657 int ret, s; 681 int ret, s;
682 struct thread_struct thread;
658#ifdef CONFIG_PPC_FPU 683#ifdef CONFIG_PPC_FPU
659 unsigned int fpscr; 684 unsigned int fpscr;
660 int fpexc_mode; 685 int fpexc_mode;
@@ -696,6 +721,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
696 kvmppc_load_guest_fp(vcpu); 721 kvmppc_load_guest_fp(vcpu);
697#endif 722#endif
698 723
724 /* Switch to guest debug context */
725 thread.debug = vcpu->arch.shadow_dbg_reg;
726 switch_booke_debug_regs(&thread);
727 thread.debug = current->thread.debug;
728 current->thread.debug = vcpu->arch.shadow_dbg_reg;
729
699 kvmppc_fix_ee_before_entry(); 730 kvmppc_fix_ee_before_entry();
700 731
701 ret = __kvmppc_vcpu_run(kvm_run, vcpu); 732 ret = __kvmppc_vcpu_run(kvm_run, vcpu);
@@ -703,6 +734,10 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
703 /* No need for kvm_guest_exit. It's done in handle_exit. 734 /* No need for kvm_guest_exit. It's done in handle_exit.
704 We also get here with interrupts enabled. */ 735 We also get here with interrupts enabled. */
705 736
737 /* Switch back to user space debug context */
738 switch_booke_debug_regs(&thread);
739 current->thread.debug = thread.debug;
740
706#ifdef CONFIG_PPC_FPU 741#ifdef CONFIG_PPC_FPU
707 kvmppc_save_guest_fp(vcpu); 742 kvmppc_save_guest_fp(vcpu);
708 743
@@ -758,6 +793,30 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
758 } 793 }
759} 794}
760 795
796static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
797{
798 struct debug_reg *dbg_reg = &(vcpu->arch.shadow_dbg_reg);
799 u32 dbsr = vcpu->arch.dbsr;
800
801 run->debug.arch.status = 0;
802 run->debug.arch.address = vcpu->arch.pc;
803
804 if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
805 run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
806 } else {
807 if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
808 run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
809 else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
810 run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
811 if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
812 run->debug.arch.address = dbg_reg->dac1;
813 else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
814 run->debug.arch.address = dbg_reg->dac2;
815 }
816
817 return RESUME_HOST;
818}
819
761static void kvmppc_fill_pt_regs(struct pt_regs *regs) 820static void kvmppc_fill_pt_regs(struct pt_regs *regs)
762{ 821{
763 ulong r1, ip, msr, lr; 822 ulong r1, ip, msr, lr;
@@ -818,6 +877,11 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
818 case BOOKE_INTERRUPT_CRITICAL: 877 case BOOKE_INTERRUPT_CRITICAL:
819 unknown_exception(&regs); 878 unknown_exception(&regs);
820 break; 879 break;
880 case BOOKE_INTERRUPT_DEBUG:
881 /* Save DBSR before preemption is enabled */
882 vcpu->arch.dbsr = mfspr(SPRN_DBSR);
883 kvmppc_clear_dbsr();
884 break;
821 } 885 }
822} 886}
823 887
@@ -1135,18 +1199,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
1135 } 1199 }
1136 1200
1137 case BOOKE_INTERRUPT_DEBUG: { 1201 case BOOKE_INTERRUPT_DEBUG: {
1138 u32 dbsr; 1202 r = kvmppc_handle_debug(run, vcpu);
1139 1203 if (r == RESUME_HOST)
1140 vcpu->arch.pc = mfspr(SPRN_CSRR0); 1204 run->exit_reason = KVM_EXIT_DEBUG;
1141
1142 /* clear IAC events in DBSR register */
1143 dbsr = mfspr(SPRN_DBSR);
1144 dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
1145 mtspr(SPRN_DBSR, dbsr);
1146
1147 run->exit_reason = KVM_EXIT_DEBUG;
1148 kvmppc_account_exit(vcpu, DEBUG_EXITS); 1205 kvmppc_account_exit(vcpu, DEBUG_EXITS);
1149 r = RESUME_HOST;
1150 break; 1206 break;
1151 } 1207 }
1152 1208
@@ -1197,7 +1253,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1197 kvmppc_set_msr(vcpu, 0); 1253 kvmppc_set_msr(vcpu, 0);
1198 1254
1199#ifndef CONFIG_KVM_BOOKE_HV 1255#ifndef CONFIG_KVM_BOOKE_HV
1200 vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS; 1256 vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
1201 vcpu->arch.shadow_pid = 1; 1257 vcpu->arch.shadow_pid = 1;
1202 vcpu->arch.shared->msr = 0; 1258 vcpu->arch.shared->msr = 0;
1203#endif 1259#endif
@@ -1580,12 +1636,6 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1580 return r; 1636 return r;
1581} 1637}
1582 1638
1583int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1584 struct kvm_guest_debug *dbg)
1585{
1586 return -EINVAL;
1587}
1588
1589int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 1639int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1590{ 1640{
1591 return -ENOTSUPP; 1641 return -ENOTSUPP;
@@ -1691,6 +1741,157 @@ void kvmppc_decrementer_func(unsigned long data)
1691 kvmppc_set_tsr_bits(vcpu, TSR_DIS); 1741 kvmppc_set_tsr_bits(vcpu, TSR_DIS);
1692} 1742}
1693 1743
1744static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
1745 uint64_t addr, int index)
1746{
1747 switch (index) {
1748 case 0:
1749 dbg_reg->dbcr0 |= DBCR0_IAC1;
1750 dbg_reg->iac1 = addr;
1751 break;
1752 case 1:
1753 dbg_reg->dbcr0 |= DBCR0_IAC2;
1754 dbg_reg->iac2 = addr;
1755 break;
1756#if CONFIG_PPC_ADV_DEBUG_IACS > 2
1757 case 2:
1758 dbg_reg->dbcr0 |= DBCR0_IAC3;
1759 dbg_reg->iac3 = addr;
1760 break;
1761 case 3:
1762 dbg_reg->dbcr0 |= DBCR0_IAC4;
1763 dbg_reg->iac4 = addr;
1764 break;
1765#endif
1766 default:
1767 return -EINVAL;
1768 }
1769
1770 dbg_reg->dbcr0 |= DBCR0_IDM;
1771 return 0;
1772}
1773
1774static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
1775 int type, int index)
1776{
1777 switch (index) {
1778 case 0:
1779 if (type & KVMPPC_DEBUG_WATCH_READ)
1780 dbg_reg->dbcr0 |= DBCR0_DAC1R;
1781 if (type & KVMPPC_DEBUG_WATCH_WRITE)
1782 dbg_reg->dbcr0 |= DBCR0_DAC1W;
1783 dbg_reg->dac1 = addr;
1784 break;
1785 case 1:
1786 if (type & KVMPPC_DEBUG_WATCH_READ)
1787 dbg_reg->dbcr0 |= DBCR0_DAC2R;
1788 if (type & KVMPPC_DEBUG_WATCH_WRITE)
1789 dbg_reg->dbcr0 |= DBCR0_DAC2W;
1790 dbg_reg->dac2 = addr;
1791 break;
1792 default:
1793 return -EINVAL;
1794 }
1795
1796 dbg_reg->dbcr0 |= DBCR0_IDM;
1797 return 0;
1798}
1799void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
1800{
1801 /* XXX: Add similar MSR protection for BookE-PR */
1802#ifdef CONFIG_KVM_BOOKE_HV
1803 BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
1804 if (set) {
1805 if (prot_bitmap & MSR_UCLE)
1806 vcpu->arch.shadow_msrp |= MSRP_UCLEP;
1807 if (prot_bitmap & MSR_DE)
1808 vcpu->arch.shadow_msrp |= MSRP_DEP;
1809 if (prot_bitmap & MSR_PMM)
1810 vcpu->arch.shadow_msrp |= MSRP_PMMP;
1811 } else {
1812 if (prot_bitmap & MSR_UCLE)
1813 vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
1814 if (prot_bitmap & MSR_DE)
1815 vcpu->arch.shadow_msrp &= ~MSRP_DEP;
1816 if (prot_bitmap & MSR_PMM)
1817 vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
1818 }
1819#endif
1820}
1821
1822int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1823 struct kvm_guest_debug *dbg)
1824{
1825 struct debug_reg *dbg_reg;
1826 int n, b = 0, w = 0;
1827
1828 if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
1829 vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
1830 vcpu->guest_debug = 0;
1831 kvm_guest_protect_msr(vcpu, MSR_DE, false);
1832 return 0;
1833 }
1834
1835 kvm_guest_protect_msr(vcpu, MSR_DE, true);
1836 vcpu->guest_debug = dbg->control;
1837 vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
1838 /* Set DBCR0_EDM in guest visible DBCR0 register. */
1839 vcpu->arch.dbg_reg.dbcr0 = DBCR0_EDM;
1840
1841 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
1842 vcpu->arch.shadow_dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;
1843
1844 /* Code below handles only HW breakpoints */
1845 dbg_reg = &(vcpu->arch.shadow_dbg_reg);
1846
1847#ifdef CONFIG_KVM_BOOKE_HV
1848 /*
1849 * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1
1850 * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0
1851 */
1852 dbg_reg->dbcr1 = 0;
1853 dbg_reg->dbcr2 = 0;
1854#else
1855 /*
1856 * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1
1857 * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR
1858 * is set.
1859 */
1860 dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
1861 DBCR1_IAC4US;
1862 dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
1863#endif
1864
1865 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
1866 return 0;
1867
1868 for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
1869 uint64_t addr = dbg->arch.bp[n].addr;
1870 uint32_t type = dbg->arch.bp[n].type;
1871
1872 if (type == KVMPPC_DEBUG_NONE)
1873 continue;
1874
1875 if (type & !(KVMPPC_DEBUG_WATCH_READ |
1876 KVMPPC_DEBUG_WATCH_WRITE |
1877 KVMPPC_DEBUG_BREAKPOINT))
1878 return -EINVAL;
1879
1880 if (type & KVMPPC_DEBUG_BREAKPOINT) {
1881 /* Setting H/W breakpoint */
1882 if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
1883 return -EINVAL;
1884 } else {
1885 /* Setting H/W watchpoint */
1886 if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
1887 type, w++))
1888 return -EINVAL;
1889 }
1890 }
1891
1892 return 0;
1893}
1894
1694void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 1895void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1695{ 1896{
1696 vcpu->cpu = smp_processor_id(); 1897 vcpu->cpu = smp_processor_id();
@@ -1701,6 +1902,9 @@ void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
1701{ 1902{
1702 current->thread.kvm_vcpu = NULL; 1903 current->thread.kvm_vcpu = NULL;
1703 vcpu->cpu = -1; 1904 vcpu->cpu = -1;
1905
1906 /* Clear pending debug event in DBSR */
1907 kvmppc_clear_dbsr();
1704} 1908}
1705 1909
1706int __init kvmppc_booke_init(void) 1910int __init kvmppc_booke_init(void)
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 5fd1ba693579..a1ff67d04022 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -129,4 +129,9 @@ static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
129 giveup_fpu(current); 129 giveup_fpu(current);
130#endif 130#endif
131} 131}
132
133static inline void kvmppc_clear_dbsr(void)
134{
135 mtspr(SPRN_DBSR, mfspr(SPRN_DBSR));
136}
132#endif /* __KVM_BOOKE_H__ */ 137#endif /* __KVM_BOOKE_H__ */