Diffstat (limited to 'arch/powerpc')
 arch/powerpc/include/asm/kvm_host.h |   3
 arch/powerpc/include/asm/kvm_ppc.h  |  34
 arch/powerpc/kvm/44x.c              | 123
 arch/powerpc/kvm/44x_tlb.h          |   1
 arch/powerpc/kvm/Kconfig            |  11
 arch/powerpc/kvm/Makefile           |   4
 arch/powerpc/kvm/booke.c            |  61
 arch/powerpc/kvm/emulate.c          |   2
 arch/powerpc/kvm/powerpc.c          |  98
 9 files changed, 207 insertions(+), 130 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index df733511d671..f5850d7d57a5 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -74,6 +74,9 @@ struct kvmppc_44x_tlbe {
 struct kvm_arch {
 };
 
+/* XXX Can't include mmu-44x.h because it redefines struct mm_context. */
+#define PPC44x_TLB_SIZE 64
+
 struct kvm_vcpu_arch {
 	/* Unmodified copy of the guest's TLB. */
 	struct kvmppc_44x_tlbe guest_tlb[PPC44x_TLB_SIZE];
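
The constant is duplicated here, rather than pulled in from mmu-44x.h, purely because of the struct mm_context clash flagged in the XXX comment; the value 64 matches the 440 core's 64-entry software-managed TLB. For orientation, a minimal sketch of the access pattern this array serves -- the real search helpers live in 44x_tlb.c, and everything apart from guest_tlb[], PPC44x_TLB_SIZE, and the word0/word1/word2 fields is illustrative:

	/* Illustrative only: guest_tlb[] is a full snapshot of the guest's
	 * TLB, so a linear scan bounded by PPC44x_TLB_SIZE is the natural
	 * way to walk it. */
	static void example_walk_guest_tlb(struct kvm_vcpu *vcpu)
	{
		int i;

		for (i = 0; i < PPC44x_TLB_SIZE; i++) {
			struct kvmppc_44x_tlbe *tlbe = &vcpu->arch.guest_tlb[i];
			/* Inspect tlbe->word0/word1/word2 here. */
			(void)tlbe;
		}
	}
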
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 39daeaa82b53..96d5de90ac5a 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -61,23 +61,6 @@ extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn,
 extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
 extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
 
-/* XXX Book E specific */
-extern void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i);
-
-extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu);
-
-static inline void kvmppc_queue_exception(struct kvm_vcpu *vcpu, int exception)
-{
-	unsigned int priority = exception_priority[exception];
-	set_bit(priority, &vcpu->arch.pending_exceptions);
-}
-
-static inline void kvmppc_clear_exception(struct kvm_vcpu *vcpu, int exception)
-{
-	unsigned int priority = exception_priority[exception];
-	clear_bit(priority, &vcpu->arch.pending_exceptions);
-}
-
 /* Helper function for "full" MSR writes. No need to call this if only EE is
  * changing. */
 static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
@@ -99,6 +82,23 @@ static inline void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
 	}
 }
 
+/* Core-specific hooks */
+
+extern int kvmppc_core_check_processor_compat(void);
+
+extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
+
+extern void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu);
+
+extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
+                                       struct kvm_interrupt *irq);
+
 extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
 
 #endif /* __POWERPC_KVM_PPC_H__ */
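
These kvmppc_core_* declarations are the new boundary between generic PowerPC KVM code and core-specific backends: powerpc.c (below) calls only these hooks, and 44x.c (also below) supplies the 440 implementations. A hedged sketch of what a second backend would look like -- the platform string and everything else here is hypothetical; nothing in this patch adds such a core:

	/* Hypothetical future backend: each core supplies its own hook
	 * implementations, and the generic code never needs to change. */
	int kvmppc_core_check_processor_compat(void)
	{
		if (strcmp(cur_cpu_spec->platform, "ppc8548") == 0)	/* assumed string */
			return 0;

		return -ENOTSUPP;
	}
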
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
new file mode 100644
index 000000000000..fcf8c7d0af45
--- /dev/null
+++ b/arch/powerpc/kvm/44x.c
@@ -0,0 +1,123 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/reg.h>
+#include <asm/cputable.h>
+#include <asm/tlbflush.h>
+
+#include "44x_tlb.h"
+
+/* Note: clearing MSR[DE] just means that the debug interrupt will not be
+ * delivered *immediately*. Instead, it simply sets the appropriate DBSR bits.
+ * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt
+ * will be delivered as an "imprecise debug event" (which is indicated by
+ * DBSR[IDE]).
+ */
+static void kvm44x_disable_debug_interrupts(void)
+{
+	mtmsr(mfmsr() & ~MSR_DE);
+}
+
+void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
+{
+	kvm44x_disable_debug_interrupts();
+
+	mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]);
+	mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]);
+	mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]);
+	mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]);
+	mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1);
+	mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2);
+	mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0);
+	mtmsr(vcpu->arch.host_msr);
+}
+
+void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
+{
+	struct kvm_guest_debug *dbg = &vcpu->guest_debug;
+	u32 dbcr0 = 0;
+
+	vcpu->arch.host_msr = mfmsr();
+	kvm44x_disable_debug_interrupts();
+
+	/* Save host debug register state. */
+	vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1);
+	vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2);
+	vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3);
+	vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4);
+	vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0);
+	vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1);
+	vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2);
+
+	/* Set registers up for the guest. */
+
+	if (dbg->bp[0]) {
+		mtspr(SPRN_IAC1, dbg->bp[0]);
+		dbcr0 |= DBCR0_IAC1 | DBCR0_IDM;
+	}
+	if (dbg->bp[1]) {
+		mtspr(SPRN_IAC2, dbg->bp[1]);
+		dbcr0 |= DBCR0_IAC2 | DBCR0_IDM;
+	}
+	if (dbg->bp[2]) {
+		mtspr(SPRN_IAC3, dbg->bp[2]);
+		dbcr0 |= DBCR0_IAC3 | DBCR0_IDM;
+	}
+	if (dbg->bp[3]) {
+		mtspr(SPRN_IAC4, dbg->bp[3]);
+		dbcr0 |= DBCR0_IAC4 | DBCR0_IDM;
+	}
+
+	mtspr(SPRN_DBCR0, dbcr0);
+	mtspr(SPRN_DBCR1, 0);
+	mtspr(SPRN_DBCR2, 0);
+}
+
+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+	int i;
+
+	/* Mark every guest entry in the shadow TLB as modified, so that they
+	 * will all be reloaded on the next vcpu run (instead of being
+	 * demand-faulted). */
+	for (i = 0; i <= tlb_44x_hwater; i++)
+		kvmppc_tlbe_set_modified(vcpu, i);
+}
+
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+{
+	/* Don't leave guest TLB entries resident when being de-scheduled. */
+	/* XXX It would be nice to differentiate between heavyweight exit and
+	 * sched_out here, since we could avoid the TLB flush for heavyweight
+	 * exits. */
+	_tlbia();
+}
+
+int kvmppc_core_check_processor_compat(void)
+{
+	int r;
+
+	if (strcmp(cur_cpu_spec->platform, "ppc440") == 0)
+		r = 0;
+	else
+		r = -ENOTSUPP;
+
+	return r;
+}
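
To make the MSR[DE] comment at the top of this file concrete: while MSR[DE] is clear, debug conditions only latch bits in the DBSR; once MSR[DE] is set again, a still-latched condition is delivered with DBSR[IDE] flagged. A minimal sketch, assuming the usual SPRN_DBSR/MSR_DE definitions from reg_booke.h and reg.h:

	/* Sketch only: what "imprecise debug event" means in practice. */
	static void example_reenable_debug_ints(void)
	{
		/* Conditions that occurred while MSR[DE] was clear sit
		 * latched in the DBSR... */
		u32 dbsr = mfspr(SPRN_DBSR);

		/* ...and re-enabling MSR[DE] delivers them as an imprecise
		 * debug event, indicated by DBSR[IDE]. */
		if (dbsr)
			mtmsr(mfmsr() | MSR_DE);
	}
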
diff --git a/arch/powerpc/kvm/44x_tlb.h b/arch/powerpc/kvm/44x_tlb.h
index e5b0a76798bd..357d79ae5493 100644
--- a/arch/powerpc/kvm/44x_tlb.h
+++ b/arch/powerpc/kvm/44x_tlb.h
@@ -29,6 +29,7 @@ extern struct kvmppc_44x_tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu,
                                                       gva_t eaddr);
 extern struct kvmppc_44x_tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu,
                                                       gva_t eaddr);
+extern void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i);
 
 /* TLB helper functions */
 static inline unsigned int get_tlb_size(const struct kvmppc_44x_tlbe *tlbe)
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index ffed96f817f7..37e9b3c52a38 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -16,11 +16,9 @@ if VIRTUALIZATION
 
 config KVM
 	bool "Kernel-based Virtual Machine (KVM) support"
-	depends on 44x && EXPERIMENTAL
+	depends on EXPERIMENTAL
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
-	# We can only run on Book E hosts so far
-	select KVM_BOOKE
 	---help---
 	  Support hosting virtualized guest machines. You will also
 	  need to select one or more of the processor modules below.
@@ -30,12 +28,11 @@ config KVM
 
 	  If unsure, say N.
 
-config KVM_BOOKE
-	bool "KVM support for Book E PowerPC processors"
+config KVM_440
+	bool "KVM support for PowerPC 440 processors"
 	depends on KVM && 44x
 	---help---
-	  Provides host support for KVM on Book E PowerPC processors. Currently
-	  this works on 440 processors only.
+	  KVM can run unmodified 440 guest kernels on 440 host processors.
 
 config KVM_TRACE
 	bool "KVM trace support"
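
With the rename, enabling KVM on a 440 board takes both symbols; an illustrative .config fragment:

	CONFIG_VIRTUALIZATION=y
	CONFIG_KVM=y
	CONFIG_KVM_440=y

Note that KVM itself no longer depends on 44x: the host restriction moves into the per-core option, which is what allows future core options to sit alongside KVM_440 without touching the top-level KVM entry.
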
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index a7f857446c8a..f5e33756f318 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -13,5 +13,5 @@ obj-$(CONFIG_KVM) += kvm.o
 
 AFLAGS_booke_interrupts.o := -I$(obj)
 
-kvm-booke-objs := booke.o booke_interrupts.o 44x_tlb.o
-obj-$(CONFIG_KVM_BOOKE) += kvm-booke.o
+kvm-440-objs := booke.o booke_interrupts.o 44x.o 44x_tlb.o
+obj-$(CONFIG_KVM_440) += kvm-440.o
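
This follows the standard kbuild composite-object convention: "kvm-440-objs" lists the pieces linked into kvm-440.o, and "obj-$(CONFIG_KVM_440)" pulls that composite into the build only when the (bool) option is set. The new 44x.o lands in the per-core object while booke.o and booke_interrupts.o stay in the same list, matching the Kconfig split above.
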
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index b1e90a15155a..138014acf3cf 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -134,6 +134,40 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
 	}
 }
 
+static void kvmppc_booke_queue_exception(struct kvm_vcpu *vcpu, int exception)
+{
+	unsigned int priority = exception_priority[exception];
+	set_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+static void kvmppc_booke_clear_exception(struct kvm_vcpu *vcpu, int exception)
+{
+	unsigned int priority = exception_priority[exception];
+	clear_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+void kvmppc_core_queue_program(struct kvm_vcpu *vcpu)
+{
+	kvmppc_booke_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM);
+}
+
+void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
+{
+	kvmppc_booke_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER);
+}
+
+int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
+{
+	unsigned int priority = exception_priority[BOOKE_INTERRUPT_DECREMENTER];
+	return test_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
+                                struct kvm_interrupt *irq)
+{
+	kvmppc_booke_queue_exception(vcpu, BOOKE_INTERRUPT_EXTERNAL);
+}
+
 /* Check if we are ready to deliver the interrupt */
 static int kvmppc_can_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt)
 {
@@ -168,7 +202,7 @@ static int kvmppc_can_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt)
 	return r;
 }
 
-static void kvmppc_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt)
+static void kvmppc_booke_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt)
 {
 	switch (interrupt) {
 	case BOOKE_INTERRUPT_DECREMENTER:
@@ -183,7 +217,7 @@ static void kvmppc_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt)
 }
 
 /* Check pending exceptions and deliver one, if possible. */
-void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu)
+void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
 {
 	unsigned long *pending = &vcpu->arch.pending_exceptions;
 	unsigned int exception;
@@ -193,8 +227,8 @@ void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu)
 	while (priority <= BOOKE_MAX_INTERRUPT) {
 		exception = priority_exception[priority];
 		if (kvmppc_can_deliver_interrupt(vcpu, exception)) {
-			kvmppc_clear_exception(vcpu, exception);
-			kvmppc_deliver_interrupt(vcpu, exception);
+			kvmppc_booke_clear_exception(vcpu, exception);
+			kvmppc_booke_deliver_interrupt(vcpu, exception);
 			break;
 		}
 
@@ -251,7 +285,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		/* Program traps generated by user-level software must be handled
 		 * by the guest kernel. */
 		vcpu->arch.esr = vcpu->arch.fault_esr;
-		kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM);
+		kvmppc_booke_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM);
 		r = RESUME_GUEST;
 		break;
 	}
@@ -284,27 +318,27 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		break;
 
 	case BOOKE_INTERRUPT_FP_UNAVAIL:
-		kvmppc_queue_exception(vcpu, exit_nr);
+		kvmppc_booke_queue_exception(vcpu, exit_nr);
 		r = RESUME_GUEST;
 		break;
 
 	case BOOKE_INTERRUPT_DATA_STORAGE:
 		vcpu->arch.dear = vcpu->arch.fault_dear;
 		vcpu->arch.esr = vcpu->arch.fault_esr;
-		kvmppc_queue_exception(vcpu, exit_nr);
+		kvmppc_booke_queue_exception(vcpu, exit_nr);
 		vcpu->stat.dsi_exits++;
 		r = RESUME_GUEST;
 		break;
 
 	case BOOKE_INTERRUPT_INST_STORAGE:
 		vcpu->arch.esr = vcpu->arch.fault_esr;
-		kvmppc_queue_exception(vcpu, exit_nr);
+		kvmppc_booke_queue_exception(vcpu, exit_nr);
 		vcpu->stat.isi_exits++;
 		r = RESUME_GUEST;
 		break;
 
 	case BOOKE_INTERRUPT_SYSCALL:
-		kvmppc_queue_exception(vcpu, exit_nr);
+		kvmppc_booke_queue_exception(vcpu, exit_nr);
 		vcpu->stat.syscall_exits++;
 		r = RESUME_GUEST;
 		break;
@@ -318,7 +352,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		gtlbe = kvmppc_44x_dtlb_search(vcpu, eaddr);
 		if (!gtlbe) {
 			/* The guest didn't have a mapping for it. */
-			kvmppc_queue_exception(vcpu, exit_nr);
+			kvmppc_booke_queue_exception(vcpu, exit_nr);
 			vcpu->arch.dear = vcpu->arch.fault_dear;
 			vcpu->arch.esr = vcpu->arch.fault_esr;
 			vcpu->stat.dtlb_real_miss_exits++;
@@ -360,7 +394,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		gtlbe = kvmppc_44x_itlb_search(vcpu, eaddr);
 		if (!gtlbe) {
 			/* The guest didn't have a mapping for it. */
-			kvmppc_queue_exception(vcpu, exit_nr);
+			kvmppc_booke_queue_exception(vcpu, exit_nr);
 			vcpu->stat.itlb_real_miss_exits++;
 			break;
 		}
@@ -380,8 +414,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			                   gtlbe->word2);
 		} else {
 			/* Guest mapped and leaped at non-RAM! */
-			kvmppc_queue_exception(vcpu,
-			                       BOOKE_INTERRUPT_MACHINE_CHECK);
+			kvmppc_booke_queue_exception(vcpu, BOOKE_INTERRUPT_MACHINE_CHECK);
 		}
 
 		break;
@@ -409,7 +442,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	local_irq_disable();
 
-	kvmppc_check_and_deliver_interrupts(vcpu);
+	kvmppc_core_deliver_interrupts(vcpu);
 
 	/* Do some exit accounting. */
 	vcpu->stat.sum_exits++;
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 0ce8ed539bae..c5d2bfcf567a 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -139,7 +139,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	switch (get_op(inst)) {
 	case 3: /* trap */
 		printk("trap!\n");
-		kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM);
+		kvmppc_core_queue_program(vcpu);
 		advance = 0;
 		break;
 
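
For reference, get_op() extracts the primary opcode from the top six bits of the instruction word; primary opcode 3 is twi (trap word immediate), which is why this case reflects a program interrupt into the guest. A sketch of the accessor, assuming the usual definition:

	static inline unsigned int get_op(u32 inst)
	{
		return inst >> 26;	/* bits 0:5 in PowerPC bit numbering */
	}
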
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 8bef0efcdfe1..8d0aaf96d838 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -99,14 +99,7 @@ void kvm_arch_hardware_unsetup(void)
 
 void kvm_arch_check_processor_compat(void *rtn)
 {
-	int r;
-
-	if (strcmp(cur_cpu_spec->platform, "ppc440") == 0)
-		r = 0;
-	else
-		r = -ENOTSUPP;
-
-	*(int *)rtn = r;
+	*(int *)rtn = kvmppc_core_check_processor_compat();
 }
 
 struct kvm *kvm_arch_create_vm(void)
@@ -212,16 +205,14 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-	unsigned int priority = exception_priority[BOOKE_INTERRUPT_DECREMENTER];
-
-	return test_bit(priority, &vcpu->arch.pending_exceptions);
+	return kvmppc_core_pending_dec(vcpu);
 }
 
 static void kvmppc_decrementer_func(unsigned long data)
 {
 	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
 
-	kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER);
+	kvmppc_core_queue_dec(vcpu);
 
 	if (waitqueue_active(&vcpu->wq)) {
 		wake_up_interruptible(&vcpu->wq);
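
kvm_cpu_has_pending_timer() is polled by generic KVM while a halted vcpu sleeps, so routing it through kvmppc_core_pending_dec() keeps the check both cheap and core-agnostic. Roughly, the shape of the generic halt loop that consumes it (a paraphrase of the kvm_vcpu_block() idea in virt/kvm/kvm_main.c, not verbatim code):

	/* Paraphrased sketch: sleep on vcpu->wq until a pending timer or
	 * another wakeup source (e.g. a signal) ends the halt. */
	while (!kvm_cpu_has_pending_timer(vcpu) && !signal_pending(current))
		schedule();
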
@@ -242,96 +233,25 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 	kvmppc_core_destroy_mmu(vcpu);
 }
 
-/* Note: clearing MSR[DE] just means that the debug interrupt will not be
- * delivered *immediately*. Instead, it simply sets the appropriate DBSR bits.
- * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt
- * will be delivered as an "imprecise debug event" (which is indicated by
- * DBSR[IDE].
- */
-static void kvmppc_disable_debug_interrupts(void)
-{
-	mtmsr(mfmsr() & ~MSR_DE);
-}
-
-static void kvmppc_restore_host_debug_state(struct kvm_vcpu *vcpu)
-{
-	kvmppc_disable_debug_interrupts();
-
-	mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]);
-	mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]);
-	mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]);
-	mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]);
-	mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1);
-	mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2);
-	mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0);
-	mtmsr(vcpu->arch.host_msr);
-}
-
-static void kvmppc_load_guest_debug_registers(struct kvm_vcpu *vcpu)
-{
-	struct kvm_guest_debug *dbg = &vcpu->guest_debug;
-	u32 dbcr0 = 0;
-
-	vcpu->arch.host_msr = mfmsr();
-	kvmppc_disable_debug_interrupts();
-
-	/* Save host debug register state. */
-	vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1);
-	vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2);
-	vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3);
-	vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4);
-	vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0);
-	vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1);
-	vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2);
-
-	/* set registers up for guest */
-
-	if (dbg->bp[0]) {
-		mtspr(SPRN_IAC1, dbg->bp[0]);
-		dbcr0 |= DBCR0_IAC1 | DBCR0_IDM;
-	}
-	if (dbg->bp[1]) {
-		mtspr(SPRN_IAC2, dbg->bp[1]);
-		dbcr0 |= DBCR0_IAC2 | DBCR0_IDM;
-	}
-	if (dbg->bp[2]) {
-		mtspr(SPRN_IAC3, dbg->bp[2]);
-		dbcr0 |= DBCR0_IAC3 | DBCR0_IDM;
-	}
-	if (dbg->bp[3]) {
-		mtspr(SPRN_IAC4, dbg->bp[3]);
-		dbcr0 |= DBCR0_IAC4 | DBCR0_IDM;
-	}
-
-	mtspr(SPRN_DBCR0, dbcr0);
-	mtspr(SPRN_DBCR1, 0);
-	mtspr(SPRN_DBCR2, 0);
-}
-
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-	int i;
-
 	if (vcpu->guest_debug.enabled)
-		kvmppc_load_guest_debug_registers(vcpu);
+		kvmppc_core_load_guest_debugstate(vcpu);
 
-	/* Mark every guest entry in the shadow TLB entry modified, so that they
-	 * will all be reloaded on the next vcpu run (instead of being
-	 * demand-faulted). */
-	for (i = 0; i <= tlb_44x_hwater; i++)
-		kvmppc_tlbe_set_modified(vcpu, i);
+	kvmppc_core_vcpu_load(vcpu, cpu);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	if (vcpu->guest_debug.enabled)
-		kvmppc_restore_host_debug_state(vcpu);
+		kvmppc_core_load_host_debugstate(vcpu);
 
 	/* Don't leave guest TLB entries resident when being de-scheduled. */
 	/* XXX It would be nice to differentiate between heavyweight exit and
 	 * sched_out here, since we could avoid the TLB flush for heavyweight
 	 * exits. */
 	_tlbil_all();
+	kvmppc_core_vcpu_put(vcpu);
 }
 
 int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
@@ -460,7 +380,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		vcpu->arch.dcr_needed = 0;
 	}
 
-	kvmppc_check_and_deliver_interrupts(vcpu);
+	kvmppc_core_deliver_interrupts(vcpu);
 
 	local_irq_disable();
 	kvm_guest_enter();
@@ -478,7 +398,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
 {
-	kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_EXTERNAL);
+	kvmppc_core_queue_external(vcpu, irq);
 
 	if (waitqueue_active(&vcpu->wq)) {
 		wake_up_interruptible(&vcpu->wq);
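
kvm_vcpu_ioctl_interrupt() is reached from userspace via the KVM_INTERRUPT vcpu ioctl, and with this change the payload is simply forwarded to the core hook (the 440 backend above ignores irq->irq and always queues BOOKE_INTERRUPT_EXTERNAL). A minimal userspace sketch, with the vcpu fd obtained elsewhere:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static int inject_external_irq(int vcpu_fd)
	{
		struct kvm_interrupt irq = { .irq = 0 };	/* value unused by the 440 backend */

		return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
	}
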