Diffstat (limited to 'arch/powerpc/kvm/powerpc.c')
-rw-r--r--	arch/powerpc/kvm/powerpc.c	78
1 file changed, 68 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 616dd516ca1f..a107c9be0fb1 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -30,6 +30,7 @@
 #include <asm/uaccess.h>
 #include <asm/kvm_ppc.h>
 #include <asm/tlbflush.h>
+#include <asm/cputhreads.h>
 #include "timing.h"
 #include "../mm/mmu_decl.h"
 
@@ -38,8 +39,12 @@
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
+#ifndef CONFIG_KVM_BOOK3S_64_HV
 	return !(v->arch.shared->msr & MSR_WE) ||
 	       !!(v->arch.pending_exceptions);
+#else
+	return !(v->arch.ceded) || !!(v->arch.pending_exceptions);
+#endif
 }
 
 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
@@ -73,7 +78,8 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 	}
 	case HC_VENDOR_KVM | KVM_HC_FEATURES:
 		r = HC_EV_SUCCESS;
-#if defined(CONFIG_PPC_BOOK3S) /* XXX Missing magic page on BookE */
+#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500)
+		/* XXX Missing magic page on 44x */
 		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
 #endif
 
@@ -147,7 +153,7 @@ void kvm_arch_check_processor_compat(void *rtn)
 
 int kvm_arch_init_vm(struct kvm *kvm)
 {
-	return 0;
+	return kvmppc_core_init_vm(kvm);
 }
 
 void kvm_arch_destroy_vm(struct kvm *kvm)
@@ -163,6 +169,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 		kvm->vcpus[i] = NULL;
 
 	atomic_set(&kvm->online_vcpus, 0);
+
+	kvmppc_core_destroy_vm(kvm);
+
 	mutex_unlock(&kvm->lock);
 }
 
@@ -180,10 +189,13 @@ int kvm_dev_ioctl_check_extension(long ext)
 #else
 	case KVM_CAP_PPC_SEGSTATE:
 #endif
-	case KVM_CAP_PPC_PAIRED_SINGLES:
 	case KVM_CAP_PPC_UNSET_IRQ:
 	case KVM_CAP_PPC_IRQ_LEVEL:
 	case KVM_CAP_ENABLE_CAP:
+		r = 1;
+		break;
+#ifndef CONFIG_KVM_BOOK3S_64_HV
+	case KVM_CAP_PPC_PAIRED_SINGLES:
 	case KVM_CAP_PPC_OSI:
 	case KVM_CAP_PPC_GET_PVINFO:
 		r = 1;
@@ -191,6 +203,21 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_COALESCED_MMIO:
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
 		break;
+#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+	case KVM_CAP_SPAPR_TCE:
+		r = 1;
+		break;
+	case KVM_CAP_PPC_SMT:
+		r = threads_per_core;
+		break;
+	case KVM_CAP_PPC_RMA:
+		r = 1;
+		/* PPC970 requires an RMA */
+		if (cpu_has_feature(CPU_FTR_ARCH_201))
+			r = 2;
+		break;
+#endif
 	default:
 		r = 0;
 		break;
@@ -211,7 +238,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                    struct kvm_userspace_memory_region *mem,
                                    int user_alloc)
 {
-	return 0;
+	return kvmppc_core_prepare_memory_region(kvm, mem);
 }
 
 void kvm_arch_commit_memory_region(struct kvm *kvm,
@@ -219,7 +246,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
                struct kvm_memory_slot old,
                int user_alloc)
 {
-	return;
+	kvmppc_core_commit_memory_region(kvm, mem);
 }
 
 
@@ -287,6 +314,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
 	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
 	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
+	vcpu->arch.dec_expires = ~(u64)0;
 
 #ifdef CONFIG_KVM_EXIT_TIMING
 	mutex_init(&vcpu->arch.exit_timing_lock);
@@ -313,6 +341,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
 #endif
 	kvmppc_core_vcpu_load(vcpu, cpu);
+	vcpu->cpu = smp_processor_id();
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -321,6 +350,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 #ifdef CONFIG_BOOKE
 	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
 #endif
+	vcpu->cpu = -1;
 }
 
 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
@@ -492,15 +522,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		for (i = 0; i < 32; i++)
 			kvmppc_set_gpr(vcpu, i, gprs[i]);
 		vcpu->arch.osi_needed = 0;
+	} else if (vcpu->arch.hcall_needed) {
+		int i;
+
+		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
+		for (i = 0; i < 9; ++i)
+			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
+		vcpu->arch.hcall_needed = 0;
 	}
 
 	kvmppc_core_deliver_interrupts(vcpu);
 
-	local_irq_disable();
-	kvm_guest_enter();
-	r = __kvmppc_vcpu_run(run, vcpu);
-	kvm_guest_exit();
-	local_irq_enable();
+	r = kvmppc_vcpu_run(run, vcpu);
 
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
@@ -518,6 +551,8 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
 	if (waitqueue_active(&vcpu->wq)) {
 		wake_up_interruptible(&vcpu->wq);
 		vcpu->stat.halt_wakeup++;
+	} else if (vcpu->cpu != -1) {
+		smp_send_reschedule(vcpu->cpu);
 	}
 
 	return 0;
@@ -633,6 +668,29 @@ long kvm_arch_vm_ioctl(struct file *filp,
 
 		break;
 	}
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+	case KVM_CREATE_SPAPR_TCE: {
+		struct kvm_create_spapr_tce create_tce;
+		struct kvm *kvm = filp->private_data;
+
+		r = -EFAULT;
+		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
+			goto out;
+		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
+		goto out;
+	}
+
+	case KVM_ALLOCATE_RMA: {
+		struct kvm *kvm = filp->private_data;
+		struct kvm_allocate_rma rma;
+
+		r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
+		if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
+			r = -EFAULT;
+		break;
+	}
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
+
 	default:
 		r = -ENOTTY;
 	}