path: root/arch/powerpc/kvm/powerpc.c
author    Hollis Blanchard <hollisb@us.ibm.com>  2008-11-05 10:36:14 -0500
committer Avi Kivity <avi@redhat.com>            2008-12-31 09:52:21 -0500
commit    9dd921cfea734409a931ccc6eafd7f09850311e9 (patch)
tree      84bd4c0fe65cb866dd78882c90e54df5f7d17313 /arch/powerpc/kvm/powerpc.c
parent    d9fbd03d240380826c0ec16f927242be24ff6265 (diff)
KVM: ppc: Refactor powerpc.c to relocate 440-specific code
This introduces a set of core-provided hooks. For 440, some of these are
implemented by booke.c, with the rest in (the new) 44x.c.

Note that these hooks are link-time, not run-time. Since it is not possible
to build a single kernel for both e500 and 440 (for example), using function
pointers would only add overhead.

Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
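For context, here is a minimal sketch of the link-time hook pattern. The
kvmppc_core_* names match the calls introduced by this diff; the header
choice, includes, and the 44x implementation body are illustrative
assumptions, not the literal patch contents:

    /* Sketch of the core-provided hook interface. The declarations are
     * shared by all cores; each core family (44x here, e500 later) links
     * in exactly one set of definitions, so every call resolves at link
     * time with no function-pointer indirection. */
    #include <linux/errno.h>    /* ENOTSUPP */
    #include <linux/string.h>   /* strcmp */
    #include <asm/cputable.h>   /* cur_cpu_spec */

    struct kvm_vcpu;
    struct kvm_interrupt;

    int  kvmppc_core_check_processor_compat(void);
    int  kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
    void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
    void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                    struct kvm_interrupt *irq);
    void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu);
    void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
    void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

    /* A 440 build would then supply the definitions in 44x.c. This body
     * is hypothetical, but mirrors the platform check removed from
     * kvm_arch_check_processor_compat() in the diff below. */
    int kvmppc_core_check_processor_compat(void)
    {
            if (strcmp(cur_cpu_spec->platform, "ppc440") == 0)
                    return 0;
            return -ENOTSUPP;
    }

Because a kernel is configured for exactly one core family, the linker always
finds exactly one definition per hook; an ops table of function pointers would
pay an indirect call on every vcpu load/put and interrupt delivery for
flexibility that could never be exercised.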
Diffstat (limited to 'arch/powerpc/kvm/powerpc.c')
-rw-r--r--  arch/powerpc/kvm/powerpc.c  98
1 file changed, 9 insertions(+), 89 deletions(-)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 8bef0efcdfe1..8d0aaf96d838 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -99,14 +99,7 @@ void kvm_arch_hardware_unsetup(void)
 
 void kvm_arch_check_processor_compat(void *rtn)
 {
-	int r;
-
-	if (strcmp(cur_cpu_spec->platform, "ppc440") == 0)
-		r = 0;
-	else
-		r = -ENOTSUPP;
-
-	*(int *)rtn = r;
+	*(int *)rtn = kvmppc_core_check_processor_compat();
 }
 
 struct kvm *kvm_arch_create_vm(void)
@@ -212,16 +205,14 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-	unsigned int priority = exception_priority[BOOKE_INTERRUPT_DECREMENTER];
-
-	return test_bit(priority, &vcpu->arch.pending_exceptions);
+	return kvmppc_core_pending_dec(vcpu);
 }
 
 static void kvmppc_decrementer_func(unsigned long data)
 {
 	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
 
-	kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER);
+	kvmppc_core_queue_dec(vcpu);
 
 	if (waitqueue_active(&vcpu->wq)) {
 		wake_up_interruptible(&vcpu->wq);
@@ -242,96 +233,25 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 	kvmppc_core_destroy_mmu(vcpu);
 }
 
-/* Note: clearing MSR[DE] just means that the debug interrupt will not be
- * delivered *immediately*. Instead, it simply sets the appropriate DBSR bits.
- * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt
- * will be delivered as an "imprecise debug event" (which is indicated by
- * DBSR[IDE].
- */
-static void kvmppc_disable_debug_interrupts(void)
-{
-	mtmsr(mfmsr() & ~MSR_DE);
-}
-
-static void kvmppc_restore_host_debug_state(struct kvm_vcpu *vcpu)
-{
-	kvmppc_disable_debug_interrupts();
-
-	mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]);
-	mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]);
-	mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]);
-	mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]);
-	mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1);
-	mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2);
-	mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0);
-	mtmsr(vcpu->arch.host_msr);
-}
-
-static void kvmppc_load_guest_debug_registers(struct kvm_vcpu *vcpu)
-{
-	struct kvm_guest_debug *dbg = &vcpu->guest_debug;
-	u32 dbcr0 = 0;
-
-	vcpu->arch.host_msr = mfmsr();
-	kvmppc_disable_debug_interrupts();
-
-	/* Save host debug register state. */
-	vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1);
-	vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2);
-	vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3);
-	vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4);
-	vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0);
-	vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1);
-	vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2);
-
-	/* set registers up for guest */
-
-	if (dbg->bp[0]) {
-		mtspr(SPRN_IAC1, dbg->bp[0]);
-		dbcr0 |= DBCR0_IAC1 | DBCR0_IDM;
-	}
-	if (dbg->bp[1]) {
-		mtspr(SPRN_IAC2, dbg->bp[1]);
-		dbcr0 |= DBCR0_IAC2 | DBCR0_IDM;
-	}
-	if (dbg->bp[2]) {
-		mtspr(SPRN_IAC3, dbg->bp[2]);
-		dbcr0 |= DBCR0_IAC3 | DBCR0_IDM;
-	}
-	if (dbg->bp[3]) {
-		mtspr(SPRN_IAC4, dbg->bp[3]);
-		dbcr0 |= DBCR0_IAC4 | DBCR0_IDM;
-	}
-
-	mtspr(SPRN_DBCR0, dbcr0);
-	mtspr(SPRN_DBCR1, 0);
-	mtspr(SPRN_DBCR2, 0);
-}
-
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-	int i;
-
 	if (vcpu->guest_debug.enabled)
-		kvmppc_load_guest_debug_registers(vcpu);
+		kvmppc_core_load_guest_debugstate(vcpu);
 
-	/* Mark every guest entry in the shadow TLB entry modified, so that they
-	 * will all be reloaded on the next vcpu run (instead of being
-	 * demand-faulted). */
-	for (i = 0; i <= tlb_44x_hwater; i++)
-		kvmppc_tlbe_set_modified(vcpu, i);
+	kvmppc_core_vcpu_load(vcpu, cpu);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	if (vcpu->guest_debug.enabled)
-		kvmppc_restore_host_debug_state(vcpu);
+		kvmppc_core_load_host_debugstate(vcpu);
 
 	/* Don't leave guest TLB entries resident when being de-scheduled. */
 	/* XXX It would be nice to differentiate between heavyweight exit and
 	 * sched_out here, since we could avoid the TLB flush for heavyweight
 	 * exits. */
 	_tlbil_all();
+	kvmppc_core_vcpu_put(vcpu);
 }
 
 int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
@@ -460,7 +380,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		vcpu->arch.dcr_needed = 0;
 	}
 
-	kvmppc_check_and_deliver_interrupts(vcpu);
+	kvmppc_core_deliver_interrupts(vcpu);
 
 	local_irq_disable();
 	kvm_guest_enter();
@@ -478,7 +398,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
 {
-	kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_EXTERNAL);
+	kvmppc_core_queue_external(vcpu, irq);
 
 	if (waitqueue_active(&vcpu->wq)) {
 		wake_up_interruptible(&vcpu->wq);