author		Linus Torvalds <torvalds@linux-foundation.org>	2010-05-21 20:16:21 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-21 20:16:21 -0400
commit		98edb6ca4174f17a64890a02f44c211c8b44fb3c (patch)
tree		033bc5f7da410046d28dd1cefcd2d63cda33d25b /arch/powerpc/kvm/powerpc.c
parent		a8251096b427283c47e7d8f9568be6b388dd68ec (diff)
parent		8fbf065d625617bbbf6b72d5f78f84ad13c8b547 (diff)
Merge branch 'kvm-updates/2.6.35' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/2.6.35' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (269 commits)
  KVM: x86: Add missing locking to arch specific vcpu ioctls
  KVM: PPC: Add missing vcpu_load()/vcpu_put() in vcpu ioctls
  KVM: MMU: Segregate shadow pages with different cr0.wp
  KVM: x86: Check LMA bit before set_efer
  KVM: Don't allow lmsw to clear cr0.pe
  KVM: Add cpuid.txt file
  KVM: x86: Tell the guest we'll warn it about tsc stability
  x86, paravirt: don't compute pvclock adjustments if we trust the tsc
  x86: KVM guest: Try using new kvm clock msrs
  KVM: x86: export paravirtual cpuid flags in KVM_GET_SUPPORTED_CPUID
  KVM: x86: add new KVMCLOCK cpuid feature
  KVM: x86: change msr numbers for kvmclock
  x86, paravirt: Add a global synchronization point for pvclock
  x86, paravirt: Enable pvclock flags in vcpu_time_info structure
  KVM: x86: Inject #GP with the right rip on efer writes
  KVM: SVM: Don't allow nested guest to VMMCALL into host
  KVM: x86: Fix exception reinjection forced to true
  KVM: Fix wallclock version writing race
  KVM: MMU: Don't read pdptrs with mmu spinlock held in mmu_alloc_roots
  KVM: VMX: enable VMXON check with SMX enabled (Intel TXT)
  ...
Diffstat (limited to 'arch/powerpc/kvm/powerpc.c')
-rw-r--r--	arch/powerpc/kvm/powerpc.c	| 110
1 file changed, 105 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 297fcd2ff7d0..9b8683f39e05 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -70,7 +70,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	case EMULATE_FAIL:
 		/* XXX Deliver Program interrupt to guest. */
 		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
-		       vcpu->arch.last_inst);
+		       kvmppc_get_last_inst(vcpu));
 		r = RESUME_HOST;
 		break;
 	default:
@@ -148,6 +148,10 @@ int kvm_dev_ioctl_check_extension(long ext)
 
 	switch (ext) {
 	case KVM_CAP_PPC_SEGSTATE:
+	case KVM_CAP_PPC_PAIRED_SINGLES:
+	case KVM_CAP_PPC_UNSET_IRQ:
+	case KVM_CAP_ENABLE_CAP:
+	case KVM_CAP_PPC_OSI:
 		r = 1;
 		break;
 	case KVM_CAP_COALESCED_MMIO:
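The four capability names added above advertise, respectively, the paired-singles MMIO support, the KVM_INTERRUPT_UNSET extension, the per-vcpu KVM_ENABLE_CAP ioctl, and the OSI hypercall handling introduced by the rest of this diff. A userspace VMM would typically probe for them before relying on the new behavior; a minimal sketch, assuming a 2.6.35-era <linux/kvm.h> and with error handling trimmed:

	/* Sketch: probe the new PPC capabilities on the /dev/kvm fd. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);
		if (kvm < 0)
			return 1;

		/* Each ioctl returns 1 if the running kernel has the feature. */
		printf("PAIRED_SINGLES: %d\n",
		       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_PPC_PAIRED_SINGLES));
		printf("UNSET_IRQ:      %d\n",
		       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_PPC_UNSET_IRQ));
		printf("ENABLE_CAP:     %d\n",
		       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ENABLE_CAP));
		printf("OSI:            %d\n",
		       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_PPC_OSI));
		return 0;
	}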
@@ -193,12 +197,17 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {
 	struct kvm_vcpu *vcpu;
 	vcpu = kvmppc_core_vcpu_create(kvm, id);
-	kvmppc_create_vcpu_debugfs(vcpu, id);
+	if (!IS_ERR(vcpu))
+		kvmppc_create_vcpu_debugfs(vcpu, id);
 	return vcpu;
 }
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
+	/* Make sure we're not using the vcpu anymore */
+	hrtimer_cancel(&vcpu->arch.dec_timer);
+	tasklet_kill(&vcpu->arch.tasklet);
+
 	kvmppc_remove_vcpu_debugfs(vcpu);
 	kvmppc_core_vcpu_free(vcpu);
 }
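Two independent fixes land in this hunk. The IS_ERR() check matters because kvmppc_core_vcpu_create() reports failure as an ERR_PTR-encoded errno rather than NULL, so the old unconditional debugfs call could dereference a poisoned pointer; the hrtimer_cancel()/tasklet_kill() pair ensures the decrementer timer cannot fire into freed vcpu memory. A sketch of the ERR_PTR convention in play (illustrative caller, not part of this patch):

	#include <linux/err.h>

	/* Illustrative only: the error travels inside the pointer itself. */
	static struct kvm_vcpu *example_create(struct kvm *kvm, unsigned int id)
	{
		struct kvm_vcpu *vcpu = kvmppc_core_vcpu_create(kvm, id);

		if (IS_ERR(vcpu))	/* e.g. ERR_PTR(-ENOMEM) */
			pr_err("vcpu create failed: %ld\n", PTR_ERR(vcpu));
		else
			kvmppc_create_vcpu_debugfs(vcpu, id);	/* safe to touch */
		return vcpu;
	}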
@@ -278,7 +287,7 @@ static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                       struct kvm_run *run)
 {
-	ulong gpr;
+	u64 gpr;
 
 	if (run->mmio.len > sizeof(gpr)) {
 		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
@@ -287,6 +296,7 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
 
 	if (vcpu->arch.mmio_is_bigendian) {
 		switch (run->mmio.len) {
+		case 8: gpr = *(u64 *)run->mmio.data; break;
 		case 4: gpr = *(u32 *)run->mmio.data; break;
 		case 2: gpr = *(u16 *)run->mmio.data; break;
 		case 1: gpr = *(u8 *)run->mmio.data; break;
@@ -300,7 +310,43 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
 		}
 	}
 
+	if (vcpu->arch.mmio_sign_extend) {
+		switch (run->mmio.len) {
+#ifdef CONFIG_PPC64
+		case 4:
+			gpr = (s64)(s32)gpr;
+			break;
+#endif
+		case 2:
+			gpr = (s64)(s16)gpr;
+			break;
+		case 1:
+			gpr = (s64)(s8)gpr;
+			break;
+		}
+	}
+
 	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
+
+	switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) {
+	case KVM_REG_GPR:
+		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
+		break;
+	case KVM_REG_FPR:
+		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
+		break;
+#ifdef CONFIG_PPC_BOOK3S
+	case KVM_REG_QPR:
+		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
+		break;
+	case KVM_REG_FQPR:
+		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
+		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
+		break;
+#endif
+	default:
+		BUG();
+	}
 }
 
 int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
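The completion path now widens the loaded value to 64 bits, optionally sign-extends it, and routes it to a GPR, FPR, or (on Book3S) QPR according to the tag bits stored in io_gpr. A standalone demonstration of the sign-extension arithmetic, assuming nothing beyond standard C (the buffer contents are made up for the example): a 2-byte big-endian MMIO read of 0xFF80 must land in a 64-bit register as 0xFFFFFFFFFFFFFF80 when the emulated instruction was algebraic (lha-style).

	#include <stdint.h>
	#include <stdio.h>

	/* Demo of the widening casts used in the 'case 2' branch above. */
	int main(void)
	{
		uint8_t mmio_data[2] = { 0xff, 0x80 };	/* big-endian halfword */
		uint64_t gpr = (uint64_t)mmio_data[0] << 8 | mmio_data[1];

		/* without sign extension (lhz-style): 0x000000000000ff80 */
		printf("zero-extended: 0x%016llx\n", (unsigned long long)gpr);

		/* with sign extension (lha-style):   0xffffffffffffff80 */
		gpr = (int64_t)(int16_t)gpr;
		printf("sign-extended: 0x%016llx\n", (unsigned long long)gpr);
		return 0;
	}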
@@ -319,12 +365,25 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	vcpu->arch.mmio_is_bigendian = is_bigendian;
 	vcpu->mmio_needed = 1;
 	vcpu->mmio_is_write = 0;
+	vcpu->arch.mmio_sign_extend = 0;
 
 	return EMULATE_DO_MMIO;
 }
 
+/* Same as above, but sign extends */
+int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
+                        unsigned int rt, unsigned int bytes, int is_bigendian)
+{
+	int r;
+
+	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
+	vcpu->arch.mmio_sign_extend = 1;
+
+	return r;
+}
+
 int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                        u32 val, unsigned int bytes, int is_bigendian)
+                        u64 val, unsigned int bytes, int is_bigendian)
 {
 	void *data = run->mmio.data;
 
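Note the ordering in kvmppc_handle_loads(): it calls kvmppc_handle_load() first, which clears mmio_sign_extend, and only then sets the flag. A hypothetical dispatch fragment showing the intended division of labor between the two helpers (the real decoding lives in emulate.c; this function is invented for illustration):

	/* Hypothetical helper: pick the right MMIO load variant for a
	 * halfword instruction. */
	static int emulate_halfword_load(struct kvm_run *run,
	                                 struct kvm_vcpu *vcpu,
	                                 unsigned int rt, int algebraic)
	{
		if (algebraic)		/* lha/lhax: sign-extend into the GPR */
			return kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		else			/* lhz/lhzx: zero-extend */
			return kvmppc_handle_load(run, vcpu, rt, 2, 1);
	}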
@@ -342,6 +401,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	/* Store the value at the lowest bytes in 'data'. */
 	if (is_bigendian) {
 		switch (bytes) {
+		case 8: *(u64 *)data = val; break;
 		case 4: *(u32 *)data = val; break;
 		case 2: *(u16 *)data = val; break;
 		case 1: *(u8 *)data = val; break;
@@ -376,6 +436,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		if (!vcpu->arch.dcr_is_write)
 			kvmppc_complete_dcr_load(vcpu, run);
 		vcpu->arch.dcr_needed = 0;
+	} else if (vcpu->arch.osi_needed) {
+		u64 *gprs = run->osi.gprs;
+		int i;
+
+		for (i = 0; i < 32; i++)
+			kvmppc_set_gpr(vcpu, i, gprs[i]);
+		vcpu->arch.osi_needed = 0;
 	}
 
 	kvmppc_core_deliver_interrupts(vcpu);
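When the guest makes an OSI hypercall, KVM exits to userspace with exit_reason KVM_EXIT_OSI; userspace services the call, writes results into run->osi.gprs, and the loop above copies all 32 GPRs back into the vcpu on the next KVM_RUN. A sketch of the userspace side (the handle_osi_call() helper is hypothetical):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Hypothetical hypercall handler supplied by the VMM. */
	extern void handle_osi_call(__u64 gprs[32]);

	void run_vcpu(int vcpu_fd, struct kvm_run *run)	/* run: mmap'ed region */
	{
		for (;;) {
			ioctl(vcpu_fd, KVM_RUN, 0);

			if (run->exit_reason == KVM_EXIT_OSI) {
				/* Service the call, leaving results in the gprs
				 * array; the kernel loop above reloads every GPR
				 * from here before re-entering the guest. */
				handle_osi_call(run->osi.gprs);
			}
		}
	}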
@@ -396,7 +463,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
 {
-	kvmppc_core_queue_external(vcpu, irq);
+	if (irq->irq == KVM_INTERRUPT_UNSET)
+		kvmppc_core_dequeue_external(vcpu, irq);
+	else
+		kvmppc_core_queue_external(vcpu, irq);
 
 	if (waitqueue_active(&vcpu->wq)) {
 		wake_up_interruptible(&vcpu->wq);
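With KVM_CAP_PPC_UNSET_IRQ, userspace can model a level-triggered interrupt line: raise it with an ordinary KVM_INTERRUPT call and lower it again by passing KVM_INTERRUPT_UNSET. A sketch of the userspace side, assuming the KVM_INTERRUPT_SET and KVM_INTERRUPT_UNSET constants from the powerpc <linux/kvm.h>:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Sketch: drive the external interrupt line of a vcpu from userspace. */
	static int set_external_irq(int vcpu_fd, int level)
	{
		struct kvm_interrupt irq = {
			.irq = level ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET,
		};

		return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
	}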
@@ -406,6 +476,27 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
 	return 0;
 }
 
+static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
+                                     struct kvm_enable_cap *cap)
+{
+	int r;
+
+	if (cap->flags)
+		return -EINVAL;
+
+	switch (cap->cap) {
+	case KVM_CAP_PPC_OSI:
+		r = 0;
+		vcpu->arch.osi_enabled = true;
+		break;
+	default:
+		r = -EINVAL;
+		break;
+	}
+
+	return r;
+}
+
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                     struct kvm_mp_state *mp_state)
 {
@@ -434,6 +525,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
 		break;
 	}
+	case KVM_ENABLE_CAP:
+	{
+		struct kvm_enable_cap cap;
+		r = -EFAULT;
+		if (copy_from_user(&cap, argp, sizeof(cap)))
+			goto out;
+		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
+		break;
+	}
 	default:
 		r = -EINVAL;
 	}