Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h |  5
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h  |  3
-rw-r--r--  arch/powerpc/kvm/44x.c              | 66
-rw-r--r--  arch/powerpc/kvm/powerpc.c          | 27
4 files changed, 2 insertions, 99 deletions
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 1c6187697520..dfdf13c9fefd 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -111,11 +111,6 @@ struct kvm_arch {
 struct kvm_vcpu_arch {
 	u32 host_stack;
 	u32 host_pid;
-	u32 host_dbcr0;
-	u32 host_dbcr1;
-	u32 host_dbcr2;
-	u32 host_iac[4];
-	u32 host_msr;
 
 	u64 fpr[32];
 	ulong gpr[32];
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 6052779dbb56..2c6ee349df5e 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -77,9 +77,6 @@ extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
 extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
 
-extern void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu);
-
 extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 8383603dd8a4..0cef809cec21 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -28,72 +28,6 @@
 
 #include "44x_tlb.h"
 
-/* Note: clearing MSR[DE] just means that the debug interrupt will not be
- * delivered *immediately*. Instead, it simply sets the appropriate DBSR bits.
- * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt
- * will be delivered as an "imprecise debug event" (which is indicated by
- * DBSR[IDE].
- */
-static void kvm44x_disable_debug_interrupts(void)
-{
-	mtmsr(mfmsr() & ~MSR_DE);
-}
-
-void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
-{
-	kvm44x_disable_debug_interrupts();
-
-	mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]);
-	mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]);
-	mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]);
-	mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]);
-	mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1);
-	mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2);
-	mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0);
-	mtmsr(vcpu->arch.host_msr);
-}
-
-void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
-{
-	struct kvm_guest_debug *dbg = &vcpu->guest_debug;
-	u32 dbcr0 = 0;
-
-	vcpu->arch.host_msr = mfmsr();
-	kvm44x_disable_debug_interrupts();
-
-	/* Save host debug register state. */
-	vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1);
-	vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2);
-	vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3);
-	vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4);
-	vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0);
-	vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1);
-	vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2);
-
-	/* set registers up for guest */
-
-	if (dbg->bp[0]) {
-		mtspr(SPRN_IAC1, dbg->bp[0]);
-		dbcr0 |= DBCR0_IAC1 | DBCR0_IDM;
-	}
-	if (dbg->bp[1]) {
-		mtspr(SPRN_IAC2, dbg->bp[1]);
-		dbcr0 |= DBCR0_IAC2 | DBCR0_IDM;
-	}
-	if (dbg->bp[2]) {
-		mtspr(SPRN_IAC3, dbg->bp[2]);
-		dbcr0 |= DBCR0_IAC3 | DBCR0_IDM;
-	}
-	if (dbg->bp[3]) {
-		mtspr(SPRN_IAC4, dbg->bp[3]);
-		dbcr0 |= DBCR0_IAC4 | DBCR0_IDM;
-	}
-
-	mtspr(SPRN_DBCR0, dbcr0);
-	mtspr(SPRN_DBCR1, 0);
-	mtspr(SPRN_DBCR2, 0);
-}
-
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	kvmppc_44x_tlb_load(vcpu);
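
Note: the comment deleted at the top of this hunk records a Book E detail worth keeping in mind: clearing MSR[DE] only defers delivery, debug events still latch in DBSR, and any bits still set when MSR[DE] is re-enabled surface as an imprecise debug event (DBSR[IDE]). Below is a minimal sketch of that mask-then-acknowledge pattern, assuming kernel context and the accessors from <asm/reg.h>; the helper name and the DBSR acknowledgement are illustrative additions, not part of this patch.

#include <asm/reg.h>

/* Sketch only: mask debug interrupts around register manipulation, then
 * clear whatever latched in DBSR (write-one-to-clear on Book E) before
 * restoring MSR[DE], so no imprecise debug event fires afterwards. */
static void debug_mask_and_ack(void)
{
	unsigned long msr = mfmsr();

	mtmsr(msr & ~MSR_DE);		/* delivery deferred; DBSR still latches */

	/* ... program IAC/DBCR registers here, as the removed code did ... */

	mtspr(SPRN_DBSR, mfspr(SPRN_DBSR));	/* ack any pending events */
	mtmsr(msr);				/* MSR[DE] restored */
}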
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index f30be9ef2314..9057335fdc61 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -221,41 +221,18 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-	if (vcpu->guest_debug.enabled)
-		kvmppc_core_load_guest_debugstate(vcpu);
-
 	kvmppc_core_vcpu_load(vcpu, cpu);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->guest_debug.enabled)
-		kvmppc_core_load_host_debugstate(vcpu);
-
-	/* Don't leave guest TLB entries resident when being de-scheduled. */
-	/* XXX It would be nice to differentiate between heavyweight exit and
-	 * sched_out here, since we could avoid the TLB flush for heavyweight
-	 * exits. */
-	_tlbil_all();
 	kvmppc_core_vcpu_put(vcpu);
 }
 
 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                         struct kvm_guest_debug *dbg)
 {
-	int i;
-
-	vcpu->guest_debug.enabled = dbg->enabled;
-	if (vcpu->guest_debug.enabled) {
-		for (i=0; i < ARRAY_SIZE(vcpu->guest_debug.bp); i++) {
-			if (dbg->breakpoints[i].enabled)
-				vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
-			else
-				vcpu->guest_debug.bp[i] = 0;
-		}
-	}
-
-	return 0;
+	return -EINVAL;
 }
 
 static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
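
Note: with the stub above, kvm_arch_vcpu_ioctl_set_guest_debug() rejects every request, so the generic vcpu ioctl path simply propagates -EINVAL back to userspace. A hedged illustration of what an in-kernel caller now sees; the probe function below is hypothetical, and only the hook's name, the struct field used, and the -EINVAL result come from this diff.

#include <linux/errno.h>
#include <linux/kvm_host.h>

/* Hypothetical probe, not kernel code: after this patch the powerpc hook
 * rejects even a "disable" request, so -EINVAL here effectively means
 * "guest debugging is not supported on this architecture". */
static bool kvm_guest_debug_supported(struct kvm_vcpu *vcpu)
{
	struct kvm_guest_debug dbg = { .enabled = 0 };

	return kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg) != -EINVAL;
}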