diff options
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv.c')
-rw-r--r-- | arch/powerpc/kvm/book3s_hv.c | 88 |
1 file changed, 74 insertions, 14 deletions
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 1e521baf9a7d..178521e81ce4 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
@@ -66,6 +66,31 @@ | |||
66 | static void kvmppc_end_cede(struct kvm_vcpu *vcpu); | 66 | static void kvmppc_end_cede(struct kvm_vcpu *vcpu); |
67 | static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); | 67 | static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); |
68 | 68 | ||
69 | void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu) | ||
70 | { | ||
71 | int me; | ||
72 | int cpu = vcpu->cpu; | ||
73 | wait_queue_head_t *wqp; | ||
74 | |||
75 | wqp = kvm_arch_vcpu_wq(vcpu); | ||
76 | if (waitqueue_active(wqp)) { | ||
77 | wake_up_interruptible(wqp); | ||
78 | ++vcpu->stat.halt_wakeup; | ||
79 | } | ||
80 | |||
81 | me = get_cpu(); | ||
82 | |||
83 | /* CPU points to the first thread of the core */ | ||
84 | if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) { | ||
85 | int real_cpu = cpu + vcpu->arch.ptid; | ||
86 | if (paca[real_cpu].kvm_hstate.xics_phys) | ||
87 | xics_wake_cpu(real_cpu); | ||
88 | else if (cpu_online(cpu)) | ||
89 | smp_send_reschedule(cpu); | ||
90 | } | ||
91 | put_cpu(); | ||
92 | } | ||
93 | |||
69 | /* | 94 | /* |
70 | * We use the vcpu_load/put functions to measure stolen time. | 95 | * We use the vcpu_load/put functions to measure stolen time. |
71 | * Stolen time is counted as time when either the vcpu is able to | 96 | * Stolen time is counted as time when either the vcpu is able to |
@@ -259,7 +284,7 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu, | |||
259 | len = ((struct reg_vpa *)va)->length.hword; | 284 | len = ((struct reg_vpa *)va)->length.hword; |
260 | else | 285 | else |
261 | len = ((struct reg_vpa *)va)->length.word; | 286 | len = ((struct reg_vpa *)va)->length.word; |
262 | kvmppc_unpin_guest_page(kvm, va); | 287 | kvmppc_unpin_guest_page(kvm, va, vpa, false); |
263 | 288 | ||
264 | /* Check length */ | 289 | /* Check length */ |
265 | if (len > nb || len < sizeof(struct reg_vpa)) | 290 | if (len > nb || len < sizeof(struct reg_vpa)) |
@@ -359,13 +384,13 @@ static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap) | |||
359 | va = NULL; | 384 | va = NULL; |
360 | nb = 0; | 385 | nb = 0; |
361 | if (gpa) | 386 | if (gpa) |
362 | va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb); | 387 | va = kvmppc_pin_guest_page(kvm, gpa, &nb); |
363 | spin_lock(&vcpu->arch.vpa_update_lock); | 388 | spin_lock(&vcpu->arch.vpa_update_lock); |
364 | if (gpa == vpap->next_gpa) | 389 | if (gpa == vpap->next_gpa) |
365 | break; | 390 | break; |
366 | /* sigh... unpin that one and try again */ | 391 | /* sigh... unpin that one and try again */ |
367 | if (va) | 392 | if (va) |
368 | kvmppc_unpin_guest_page(kvm, va); | 393 | kvmppc_unpin_guest_page(kvm, va, gpa, false); |
369 | } | 394 | } |
370 | 395 | ||
371 | vpap->update_pending = 0; | 396 | vpap->update_pending = 0; |
@@ -375,12 +400,15 @@ static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap) | |||
375 | * has changed the mappings underlying guest memory, | 400 | * has changed the mappings underlying guest memory, |
376 | * so unregister the region. | 401 | * so unregister the region. |
377 | */ | 402 | */ |
378 | kvmppc_unpin_guest_page(kvm, va); | 403 | kvmppc_unpin_guest_page(kvm, va, gpa, false); |
379 | va = NULL; | 404 | va = NULL; |
380 | } | 405 | } |
381 | if (vpap->pinned_addr) | 406 | if (vpap->pinned_addr) |
382 | kvmppc_unpin_guest_page(kvm, vpap->pinned_addr); | 407 | kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa, |
408 | vpap->dirty); | ||
409 | vpap->gpa = gpa; | ||
383 | vpap->pinned_addr = va; | 410 | vpap->pinned_addr = va; |
411 | vpap->dirty = false; | ||
384 | if (va) | 412 | if (va) |
385 | vpap->pinned_end = va + vpap->len; | 413 | vpap->pinned_end = va + vpap->len; |
386 | } | 414 | } |
@@ -472,6 +500,7 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, | |||
472 | /* order writing *dt vs. writing vpa->dtl_idx */ | 500 | /* order writing *dt vs. writing vpa->dtl_idx */ |
473 | smp_wmb(); | 501 | smp_wmb(); |
474 | vpa->dtl_idx = ++vcpu->arch.dtl_index; | 502 | vpa->dtl_idx = ++vcpu->arch.dtl_index; |
503 | vcpu->arch.dtl.dirty = true; | ||
475 | } | 504 | } |
476 | 505 | ||
477 | int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) | 506 | int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) |
@@ -479,7 +508,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) | |||
479 | unsigned long req = kvmppc_get_gpr(vcpu, 3); | 508 | unsigned long req = kvmppc_get_gpr(vcpu, 3); |
480 | unsigned long target, ret = H_SUCCESS; | 509 | unsigned long target, ret = H_SUCCESS; |
481 | struct kvm_vcpu *tvcpu; | 510 | struct kvm_vcpu *tvcpu; |
482 | int idx; | 511 | int idx, rc; |
483 | 512 | ||
484 | switch (req) { | 513 | switch (req) { |
485 | case H_ENTER: | 514 | case H_ENTER: |
@@ -515,6 +544,28 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) | |||
515 | kvmppc_get_gpr(vcpu, 5), | 544 | kvmppc_get_gpr(vcpu, 5), |
516 | kvmppc_get_gpr(vcpu, 6)); | 545 | kvmppc_get_gpr(vcpu, 6)); |
517 | break; | 546 | break; |
547 | case H_RTAS: | ||
548 | if (list_empty(&vcpu->kvm->arch.rtas_tokens)) | ||
549 | return RESUME_HOST; | ||
550 | |||
551 | rc = kvmppc_rtas_hcall(vcpu); | ||
552 | |||
553 | if (rc == -ENOENT) | ||
554 | return RESUME_HOST; | ||
555 | else if (rc == 0) | ||
556 | break; | ||
557 | |||
558 | /* Send the error out to userspace via KVM_RUN */ | ||
559 | return rc; | ||
560 | |||
561 | case H_XIRR: | ||
562 | case H_CPPR: | ||
563 | case H_EOI: | ||
564 | case H_IPI: | ||
565 | if (kvmppc_xics_enabled(vcpu)) { | ||
566 | ret = kvmppc_xics_hcall(vcpu, req); | ||
567 | break; | ||
568 | } /* fallthrough */ | ||
518 | default: | 569 | default: |
519 | return RESUME_HOST; | 570 | return RESUME_HOST; |
520 | } | 571 | } |
@@ -913,15 +964,19 @@ out: | |||
913 | return ERR_PTR(err); | 964 | return ERR_PTR(err); |
914 | } | 965 | } |
915 | 966 | ||
967 | static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa) | ||
968 | { | ||
969 | if (vpa->pinned_addr) | ||
970 | kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa, | ||
971 | vpa->dirty); | ||
972 | } | ||
973 | |||
916 | void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | 974 | void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) |
917 | { | 975 | { |
918 | spin_lock(&vcpu->arch.vpa_update_lock); | 976 | spin_lock(&vcpu->arch.vpa_update_lock); |
919 | if (vcpu->arch.dtl.pinned_addr) | 977 | unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); |
920 | kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl.pinned_addr); | 978 | unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow); |
921 | if (vcpu->arch.slb_shadow.pinned_addr) | 979 | unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); |
922 | kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow.pinned_addr); | ||
923 | if (vcpu->arch.vpa.pinned_addr) | ||
924 | kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa.pinned_addr); | ||
925 | spin_unlock(&vcpu->arch.vpa_update_lock); | 980 | spin_unlock(&vcpu->arch.vpa_update_lock); |
926 | kvm_vcpu_uninit(vcpu); | 981 | kvm_vcpu_uninit(vcpu); |
927 | kmem_cache_free(kvm_vcpu_cache, vcpu); | 982 | kmem_cache_free(kvm_vcpu_cache, vcpu); |
@@ -955,7 +1010,6 @@ static void kvmppc_end_cede(struct kvm_vcpu *vcpu) | |||
955 | } | 1010 | } |
956 | 1011 | ||
957 | extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); | 1012 | extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); |
958 | extern void xics_wake_cpu(int cpu); | ||
959 | 1013 | ||
960 | static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, | 1014 | static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, |
961 | struct kvm_vcpu *vcpu) | 1015 | struct kvm_vcpu *vcpu) |
@@ -1330,9 +1384,12 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
1330 | break; | 1384 | break; |
1331 | vc->runner = vcpu; | 1385 | vc->runner = vcpu; |
1332 | n_ceded = 0; | 1386 | n_ceded = 0; |
1333 | list_for_each_entry(v, &vc->runnable_threads, arch.run_list) | 1387 | list_for_each_entry(v, &vc->runnable_threads, arch.run_list) { |
1334 | if (!v->arch.pending_exceptions) | 1388 | if (!v->arch.pending_exceptions) |
1335 | n_ceded += v->arch.ceded; | 1389 | n_ceded += v->arch.ceded; |
1390 | else | ||
1391 | v->arch.ceded = 0; | ||
1392 | } | ||
1336 | if (n_ceded == vc->n_runnable) | 1393 | if (n_ceded == vc->n_runnable) |
1337 | kvmppc_vcore_blocked(vc); | 1394 | kvmppc_vcore_blocked(vc); |
1338 | else | 1395 | else |
@@ -1821,6 +1878,7 @@ int kvmppc_core_init_vm(struct kvm *kvm) | |||
1821 | cpumask_setall(&kvm->arch.need_tlb_flush); | 1878 | cpumask_setall(&kvm->arch.need_tlb_flush); |
1822 | 1879 | ||
1823 | INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables); | 1880 | INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables); |
1881 | INIT_LIST_HEAD(&kvm->arch.rtas_tokens); | ||
1824 | 1882 | ||
1825 | kvm->arch.rma = NULL; | 1883 | kvm->arch.rma = NULL; |
1826 | 1884 | ||
@@ -1866,6 +1924,8 @@ void kvmppc_core_destroy_vm(struct kvm *kvm) | |||
1866 | kvm->arch.rma = NULL; | 1924 | kvm->arch.rma = NULL; |
1867 | } | 1925 | } |
1868 | 1926 | ||
1927 | kvmppc_rtas_tokens_free(kvm); | ||
1928 | |||
1869 | kvmppc_free_hpt(kvm); | 1929 | kvmppc_free_hpt(kvm); |
1870 | WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); | 1930 | WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); |
1871 | } | 1931 | } |