author     Linus Torvalds <torvalds@linux-foundation.org>   2012-03-28 17:35:31 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-03-28 17:35:31 -0400
commit     2e7580b0e75d771d93e24e681031a165b1d31071 (patch)
tree       d9449702609eeaab28913a43b5a4434667e09d43 /arch/powerpc/kvm/powerpc.c
parent     d25413efa9536e2f425ea45c7720598035c597bc (diff)
parent     cf9eeac46350b8b43730b7dc5e999757bed089a4 (diff)
Merge branch 'kvm-updates/3.4' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Avi Kivity:
"Changes include timekeeping improvements, support for assigning host
PCI devices that share interrupt lines, s390 user-controlled guests, a
large ppc update, and random fixes."
This is with the sign-offs fixed; hopefully next merge window we won't
have rebased commits.
* 'kvm-updates/3.4' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (130 commits)
KVM: Convert intx_mask_lock to spin lock
KVM: x86: fix kvm_write_tsc() TSC matching thinko
x86: kvmclock: abstract save/restore sched_clock_state
KVM: nVMX: Fix erroneous exception bitmap check
KVM: Ignore the writes to MSR_K7_HWCR(3)
KVM: MMU: make use of ->root_level in reset_rsvds_bits_mask
KVM: PMU: add proper support for fixed counter 2
KVM: PMU: Fix raw event check
KVM: PMU: warn when pin control is set in eventsel msr
KVM: VMX: Fix delayed load of shared MSRs
KVM: use correct tlbs dirty type in cmpxchg
KVM: Allow host IRQ sharing for assigned PCI 2.3 devices
KVM: Ensure all vcpus are consistent with in-kernel irqchip settings
KVM: x86 emulator: Allow PM/VM86 switch during task switch
KVM: SVM: Fix CPL updates
KVM: x86 emulator: VM86 segments must have DPL 3
KVM: x86 emulator: Fix task switch privilege checks
arch/powerpc/kvm/book3s_hv.c: included linux/sched.h twice
KVM: x86 emulator: correctly mask pmc index bits in RDPMC instruction emulation
KVM: mmu_notifier: Flush TLBs before releasing mmu_lock
...
Diffstat (limited to 'arch/powerpc/kvm/powerpc.c')
-rw-r--r--   arch/powerpc/kvm/powerpc.c   148
1 file changed, 113 insertions(+), 35 deletions(-)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 607fbdf24b84..00d7e345b3fe 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -39,7 +39,8 @@
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
         return !(v->arch.shared->msr & MSR_WE) ||
-               !!(v->arch.pending_exceptions);
+               !!(v->arch.pending_exceptions) ||
+               v->requests;
 }
 
 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
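The new v->requests term makes a sleeping vcpu runnable whenever any request
bit is pending, not just when an exception is queued. For context, a hedged
sketch of the generic request API that feeds vcpu->requests (the handler body
is hypothetical; kvm_make_request(), kvm_check_request() and
KVM_REQ_PENDING_TIMER come from include/linux/kvm_host.h):

/* Illustrative use of the vcpu request API behind v->requests. */
#include <linux/kvm_host.h>

static void example_post_request(struct kvm_vcpu *vcpu)
{
        /* set a bit in vcpu->requests; the vcpu is now runnable */
        kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
        kvm_vcpu_kick(vcpu);            /* wake it if it is halted */
}

static void example_handle_requests(struct kvm_vcpu *vcpu)
{
        /* atomically test-and-clear the bit before acting on it */
        if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
                ; /* a real handler would process the pending timer here */
}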
@@ -66,7 +67,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
                 vcpu->arch.magic_page_pa = param1;
                 vcpu->arch.magic_page_ea = param2;
 
-                r2 = KVM_MAGIC_FEAT_SR;
+                r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
 
                 r = HC_EV_SUCCESS;
                 break;
@@ -171,8 +172,11 @@ void kvm_arch_check_processor_compat(void *rtn)
         *(int *)rtn = kvmppc_core_check_processor_compat();
 }
 
-int kvm_arch_init_vm(struct kvm *kvm)
+int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
+        if (type)
+                return -EINVAL;
+
         return kvmppc_core_init_vm(kvm);
 }
 
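The new type parameter is the machine-type argument userspace passes to the
KVM_CREATE_VM ioctl; powerpc now accepts only the default type 0. A minimal
sketch of the calling side (the wrapper name and error handling are
illustrative, not part of this patch):

/* Hedged sketch: KVM_CREATE_VM takes a machine type; ppc only accepts 0. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int create_default_vm(void)
{
        int kvm_fd = open("/dev/kvm", O_RDWR);

        if (kvm_fd < 0)
                return -1;
        /* a nonzero type would now fail with -EINVAL on ppc */
        return ioctl(kvm_fd, KVM_CREATE_VM, 0UL);
}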
@@ -208,17 +212,22 @@ int kvm_dev_ioctl_check_extension(long ext)
         case KVM_CAP_PPC_BOOKE_SREGS:
 #else
         case KVM_CAP_PPC_SEGSTATE:
+        case KVM_CAP_PPC_HIOR:
         case KVM_CAP_PPC_PAPR:
 #endif
         case KVM_CAP_PPC_UNSET_IRQ:
         case KVM_CAP_PPC_IRQ_LEVEL:
         case KVM_CAP_ENABLE_CAP:
+        case KVM_CAP_ONE_REG:
                 r = 1;
                 break;
 #ifndef CONFIG_KVM_BOOK3S_64_HV
         case KVM_CAP_PPC_PAIRED_SINGLES:
         case KVM_CAP_PPC_OSI:
         case KVM_CAP_PPC_GET_PVINFO:
+#ifdef CONFIG_KVM_E500
+        case KVM_CAP_SW_TLB:
+#endif
                 r = 1;
                 break;
         case KVM_CAP_COALESCED_MMIO:
@@ -238,7 +247,26 @@ int kvm_dev_ioctl_check_extension(long ext)
                 if (cpu_has_feature(CPU_FTR_ARCH_201))
                         r = 2;
                 break;
+        case KVM_CAP_SYNC_MMU:
+                r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
+                break;
 #endif
+        case KVM_CAP_NR_VCPUS:
+                /*
+                 * Recommending a number of CPUs is somewhat arbitrary; we
+                 * return the number of present CPUs for -HV (since a host
+                 * will have secondary threads "offline"), and for other KVM
+                 * implementations just count online CPUs.
+                 */
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+                r = num_present_cpus();
+#else
+                r = num_online_cpus();
+#endif
+                break;
+        case KVM_CAP_MAX_VCPUS:
+                r = KVM_MAX_VCPUS;
+                break;
         default:
                 r = 0;
                 break;
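The recommended/maximum split added here is queried from userspace with
KVM_CHECK_EXTENSION on the /dev/kvm fd. A small illustrative query (not part
of this patch):

/* Hedged sketch: reading the new capability values from userspace. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm_fd = open("/dev/kvm", O_RDWR);

        if (kvm_fd < 0)
                return 1;
        /* recommended count vs. hard limit, as implemented above */
        printf("recommended vcpus: %d\n",
               ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS));
        printf("maximum vcpus:     %d\n",
               ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS));
        return 0;
}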
@@ -253,6 +281,16 @@ long kvm_arch_dev_ioctl(struct file *filp,
         return -EINVAL;
 }
 
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+                           struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+        return 0;
+}
+
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                    struct kvm_memory_slot *memslot,
                                    struct kvm_memory_slot old,
@@ -279,9 +317,10 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {
         struct kvm_vcpu *vcpu;
         vcpu = kvmppc_core_vcpu_create(kvm, id);
-        vcpu->arch.wqp = &vcpu->wq;
-        if (!IS_ERR(vcpu))
+        if (!IS_ERR(vcpu)) {
+                vcpu->arch.wqp = &vcpu->wq;
                 kvmppc_create_vcpu_debugfs(vcpu, id);
+        }
         return vcpu;
 }
 
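The reordering fixes a real bug: kvmppc_core_vcpu_create() returns an error
pointer on failure, and the old code wrote vcpu->arch.wqp through that encoded
errno before the IS_ERR() check. For readers unfamiliar with the idiom, a
self-contained sketch (the thing type and functions are hypothetical):

/* Illustrative error-pointer idiom: failures return ERR_PTR(-errno),
 * so callers must check IS_ERR() before any dereference. */
#include <linux/err.h>
#include <linux/slab.h>

struct thing { int x; };

static struct thing *thing_create(void)
{
        struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

        if (!t)
                return ERR_PTR(-ENOMEM);        /* errno encoded in pointer */
        return t;
}

static int thing_use(void)
{
        struct thing *t = thing_create();

        if (IS_ERR(t))
                return PTR_ERR(t);      /* recover errno; never dereference */
        t->x = 1;
        kfree(t);
        return 0;
}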
@@ -305,18 +344,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
         return kvmppc_core_pending_dec(vcpu);
 }
 
-static void kvmppc_decrementer_func(unsigned long data)
-{
-        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
-
-        kvmppc_core_queue_dec(vcpu);
-
-        if (waitqueue_active(vcpu->arch.wqp)) {
-                wake_up_interruptible(vcpu->arch.wqp);
-                vcpu->stat.halt_wakeup++;
-        }
-}
-
 /*
  * low level hrtimer wake routine. Because this runs in hardirq context
  * we schedule a tasklet to do the real work.
@@ -431,20 +458,20 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
 
         kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
 
-        switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) {
-        case KVM_REG_GPR:
+        switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
+        case KVM_MMIO_REG_GPR:
                 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
                 break;
-        case KVM_REG_FPR:
-                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
+        case KVM_MMIO_REG_FPR:
+                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                 break;
 #ifdef CONFIG_PPC_BOOK3S
-        case KVM_REG_QPR:
-                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
+        case KVM_MMIO_REG_QPR:
+                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                 break;
-        case KVM_REG_FQPR:
-                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
-                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
+        case KVM_MMIO_REG_FQPR:
+                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
+                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                 break;
 #endif
         default:
@@ -553,8 +580,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                 vcpu->arch.hcall_needed = 0;
         }
 
-        kvmppc_core_deliver_interrupts(vcpu);
-
         r = kvmppc_vcpu_run(run, vcpu);
 
         if (vcpu->sigset_active)
@@ -563,6 +588,21 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
         return r;
 }
 
+void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+        int me;
+        int cpu = vcpu->cpu;
+
+        me = get_cpu();
+        if (waitqueue_active(vcpu->arch.wqp)) {
+                wake_up_interruptible(vcpu->arch.wqp);
+                vcpu->stat.halt_wakeup++;
+        } else if (cpu != me && cpu != -1) {
+                smp_send_reschedule(vcpu->cpu);
+        }
+        put_cpu();
+}
+
 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
 {
         if (irq->irq == KVM_INTERRUPT_UNSET) {
@@ -571,13 +611,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
         }
 
         kvmppc_core_queue_external(vcpu, irq);
-
-        if (waitqueue_active(vcpu->arch.wqp)) {
-                wake_up_interruptible(vcpu->arch.wqp);
-                vcpu->stat.halt_wakeup++;
-        } else if (vcpu->cpu != -1) {
-                smp_send_reschedule(vcpu->cpu);
-        }
+        kvm_vcpu_kick(vcpu);
 
         return 0;
 }
@@ -599,6 +633,19 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                 r = 0;
                 vcpu->arch.papr_enabled = true;
                 break;
+#ifdef CONFIG_KVM_E500
+        case KVM_CAP_SW_TLB: {
+                struct kvm_config_tlb cfg;
+                void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
+
+                r = -EFAULT;
+                if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
+                        break;
+
+                r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
+                break;
+        }
+#endif
         default:
                 r = -EINVAL;
                 break;
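The kernel side above copies a struct kvm_config_tlb from cap->args[0]. A
hedged sketch of the matching userspace KVM_ENABLE_CAP call (the wrapper is
hypothetical, and filling in the TLB geometry in cfg is left out):

/* Hedged sketch: enabling KVM_CAP_SW_TLB on an e500 vcpu fd; args[0]
 * carries the pointer that copy_from_user() reads above. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int enable_sw_tlb(int vcpu_fd, struct kvm_config_tlb *cfg)
{
        struct kvm_enable_cap cap;

        memset(&cap, 0, sizeof(cap));
        cap.cap = KVM_CAP_SW_TLB;
        cap.args[0] = (uintptr_t)cfg;   /* consumed by copy_from_user() */
        return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}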
@@ -648,6 +695,32 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                 break;
         }
+
+        case KVM_SET_ONE_REG:
+        case KVM_GET_ONE_REG:
+        {
+                struct kvm_one_reg reg;
+                r = -EFAULT;
+                if (copy_from_user(&reg, argp, sizeof(reg)))
+                        goto out;
+                if (ioctl == KVM_SET_ONE_REG)
+                        r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
+                else
+                        r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
+                break;
+        }
+
+#ifdef CONFIG_KVM_E500
+        case KVM_DIRTY_TLB: {
+                struct kvm_dirty_tlb dirty;
+                r = -EFAULT;
+                if (copy_from_user(&dirty, argp, sizeof(dirty)))
+                        goto out;
+                r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
+                break;
+        }
+#endif
+
         default:
                 r = -EINVAL;
         }
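Both ONE_REG ioctls take a struct kvm_one_reg that pairs a register id with
the address of a userspace buffer. A minimal read through the new interface
(the wrapper name is illustrative; KVM_REG_PPC_HIOR is one id this series
exposes):

/* Hedged sketch: reading a register via KVM_GET_ONE_REG. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int get_one_reg(int vcpu_fd, uint64_t id, uint64_t *val)
{
        struct kvm_one_reg reg = {
                .id   = id,                     /* e.g. KVM_REG_PPC_HIOR */
                .addr = (uintptr_t)val,         /* destination buffer */
        };

        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}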
@@ -656,6 +729,11 @@ out:
         return r;
 }
 
+int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+{
+        return VM_FAULT_SIGBUS;
+}
+
 static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
 {
         u32 inst_lis = 0x3c000000;