 arch/powerpc/include/asm/kvm_host.h     |  1 +
 arch/powerpc/kvm/book3s_hv.c            | 41 ++++++++++++++++++++++++++-
 arch/powerpc/kvm/book3s_hv_builtin.c    | 32 +++++++++++++++++++++
 arch/powerpc/kvm/book3s_hv_rmhandlers.S |  2 +-
 4 files changed, 74 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 65441875b025..7efd666a3fa7 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -295,6 +295,7 @@ struct kvmppc_vcore {
         ulong dpdes;            /* doorbell state (POWER8) */
         void *mpp_buffer;       /* Micro Partition Prefetch buffer */
         bool mpp_buffer_is_valid;
+        ulong conferring_threads;
 };
 
 #define VCORE_ENTRY_COUNT(vc)   ((vc)->entry_exit_count & 0xff)
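The new conferring_threads field is a bitmap with one bit per hardware thread of the virtual core, indexed by vcpu->arch.ptid (set and cleared in kvmppc_rm_h_confer() below). The VCORE_ENTRY_COUNT macro visible in context unpacks the low byte of entry_exit_count. A minimal user-space sketch of this bookkeeping follows, assuming, for illustration only, that the exit count occupies the byte above the entry count:

#include <stdio.h>

/* Toy stand-in for struct kvmppc_vcore, reduced to the two fields used here. */
struct vcore_model {
        unsigned int entry_exit_count;    /* entries in low byte; exits above (assumed) */
        unsigned long conferring_threads; /* one bit per thread, indexed by ptid */
};

#define ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
#define EXIT_COUNT(vc)  ((vc)->entry_exit_count >> 8)

int main(void)
{
        struct vcore_model vc = { .entry_exit_count = 4 }; /* 4 threads in the guest */

        vc.conferring_threads |= 1UL << 2;                 /* ptid 2 enters H_CONFER */
        printf("in guest: %u, exited: %u, conferring: %#lx\n",
               ENTRY_COUNT(&vc), EXIT_COUNT(&vc), vc.conferring_threads);
        vc.conferring_threads &= ~(1UL << 2);              /* ptid 2 done conferring */
        return 0;
}

Packing both counters into one word presumably lets real-mode code observe entries and exits together with a single load.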
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 299351e77eb9..de4018a1bc4b 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -607,10 +607,45 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
         }
 }
 
+static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
+{
+        struct kvmppc_vcore *vcore = target->arch.vcore;
+
+        /*
+         * We expect to have been called by the real mode handler
+         * (kvmppc_rm_h_confer()) which would have directly returned
+         * H_SUCCESS if the source vcore wasn't idle (e.g. if it may
+         * have useful work to do and should not confer) so we don't
+         * recheck that here.
+         */
+
+        spin_lock(&vcore->lock);
+        if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
+            vcore->vcore_state != VCORE_INACTIVE)
+                target = vcore->runner;
+        spin_unlock(&vcore->lock);
+
+        return kvm_vcpu_yield_to(target);
+}
+
+static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
+{
+        int yield_count = 0;
+        struct lppaca *lppaca;
+
+        spin_lock(&vcpu->arch.vpa_update_lock);
+        lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
+        if (lppaca)
+                yield_count = lppaca->yield_count;
+        spin_unlock(&vcpu->arch.vpa_update_lock);
+        return yield_count;
+}
+
 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 {
         unsigned long req = kvmppc_get_gpr(vcpu, 3);
         unsigned long target, ret = H_SUCCESS;
+        int yield_count;
         struct kvm_vcpu *tvcpu;
         int idx, rc;
 
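Two helpers are added above. kvm_arch_vcpu_yield_to() picks the host task that can actually make progress: in HV mode a single runner task drives the whole virtual core, so if the target vcpu is runnable inside an active vcore, the yield should go to the runner rather than to the target itself. kvmppc_get_yield_count() samples the target's yield count from its pinned VPA (lppaca) under vpa_update_lock, returning 0 when no VPA is registered. Below is a self-contained sketch of the redirection decision, with simplified stand-in types and no locking (a single-threaded model, not the kernel's API):

#include <stdio.h>

enum vcpu_state  { VCPU_RUNNABLE, VCPU_NOT_READY };
enum vcore_state { VCORE_INACTIVE, VCORE_RUNNING };

struct vcpu {
        const char *name;
        enum vcpu_state state;
        struct vcore *core;
};

struct vcore {
        enum vcore_state state;
        struct vcpu *runner;    /* host task driving the whole core */
};

/* Mirrors the decision in kvm_arch_vcpu_yield_to(): a runnable vcpu in an
 * active vcore is being run by the runner task, so yield to the runner. */
static struct vcpu *pick_yield_target(struct vcpu *target)
{
        if (target->state == VCPU_RUNNABLE &&
            target->core->state != VCORE_INACTIVE)
                return target->core->runner;
        return target;
}

int main(void)
{
        struct vcpu runner = { "runner", VCPU_RUNNABLE, 0 };
        struct vcore core = { VCORE_RUNNING, &runner };
        struct vcpu target = { "target", VCPU_RUNNABLE, &core };

        runner.core = &core;
        printf("yield to: %s\n", pick_yield_target(&target)->name); /* runner */
        return 0;
}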
@@ -646,7 +681,10 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                         ret = H_PARAMETER;
                         break;
                 }
-                kvm_vcpu_yield_to(tvcpu);
+                yield_count = kvmppc_get_gpr(vcpu, 5);
+                if (kvmppc_get_yield_count(tvcpu) != yield_count)
+                        break;
+                kvm_arch_vcpu_yield_to(tvcpu);
                 break;
         case H_REGISTER_VPA:
                 ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
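In the H_CONFER case, r4 carries the target vcpu number and r5 the yield count the guest read from the target's VPA before making the hcall. If the count has since moved on, the target has already been dispatched, so yielding would be pointless and the handler falls through with ret left at H_SUCCESS. A compact model of that staleness check (all names here are hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model: the hypervisor bumps a vcpu's yield count on each
 * dispatch, as reflected in the lppaca read by kvmppc_get_yield_count(). */
struct vcpu_model { unsigned int yield_count; };

static bool confer_is_stale(const struct vcpu_model *target,
                            unsigned int sampled_yield_count)
{
        return target->yield_count != sampled_yield_count;
}

int main(void)
{
        struct vcpu_model target = { .yield_count = 42 };
        unsigned int sampled = 42;      /* guest read this before the hcall */

        target.yield_count++;           /* target got dispatched meanwhile */
        printf("stale: %d\n", confer_is_stale(&target, sampled)); /* 1 */
        return 0;
}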
@@ -1697,6 +1735,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
         vc->vcore_state = VCORE_STARTING;
         vc->in_guest = 0;
         vc->napping_threads = 0;
+        vc->conferring_threads = 0;
 
         /*
          * Updating any of the vpas requires calling kvmppc_pin_guest_page,
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 1786bf80bf00..3e43f815ac5d 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -17,6 +17,7 @@
 #include <linux/memblock.h>
 #include <linux/sizes.h>
 #include <linux/cma.h>
+#include <linux/bitops.h>
 
 #include <asm/cputable.h>
 #include <asm/kvm_ppc.h>
@@ -97,6 +98,37 @@ void __init kvm_cma_reserve(void)
 }
 
 /*
+ * Real-mode H_CONFER implementation.
+ * We check if we are the only vcpu out of this virtual core
+ * still running in the guest and not ceded. If so, we pop up
+ * to the virtual-mode implementation; if not, just return to
+ * the guest.
+ */
+long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
+                            unsigned int yield_count)
+{
+        struct kvmppc_vcore *vc = vcpu->arch.vcore;
+        int threads_running;
+        int threads_ceded;
+        int threads_conferring;
+        u64 stop = get_tb() + 10 * tb_ticks_per_usec;
+        int rv = H_SUCCESS; /* => don't yield */
+
+        set_bit(vcpu->arch.ptid, &vc->conferring_threads);
+        while ((get_tb() < stop) && (VCORE_EXIT_COUNT(vc) == 0)) {
+                threads_running = VCORE_ENTRY_COUNT(vc);
+                threads_ceded = hweight32(vc->napping_threads);
+                threads_conferring = hweight32(vc->conferring_threads);
+                if (threads_ceded + threads_conferring >= threads_running) {
+                        rv = H_TOO_HARD; /* => do yield */
+                        break;
+                }
+        }
+        clear_bit(vcpu->arch.ptid, &vc->conferring_threads);
+        return rv;
+}
+
+/*
  * When running HV mode KVM we need to block certain operations while KVM VMs
  * exist in the system. We use a counter of VMs to track this.
  *
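The real-mode handler spins for at most ten microseconds of timebase while no thread has exited the guest, re-evaluating one predicate: are all threads still in the guest either ceded (napping) or themselves stuck in H_CONFER? Only then is there genuinely nothing left to run on the core, and H_TOO_HARD punts the hcall up to the virtual-mode path, which ends in kvm_arch_vcpu_yield_to() above. A user-space sketch of just that predicate, with __builtin_popcountl standing in for the kernel's hweight32():

#include <stdbool.h>
#include <stdio.h>

/* Should this thread give up the physical core? True only when every
 * thread still in the guest is either napping (ceded) or also conferring. */
static bool core_has_nothing_to_run(int threads_in_guest,
                                    unsigned long napping_mask,
                                    unsigned long conferring_mask)
{
        int ceded = __builtin_popcountl(napping_mask);
        int conferring = __builtin_popcountl(conferring_mask);

        return ceded + conferring >= threads_in_guest;
}

int main(void)
{
        /* 4 threads entered the guest; ptids 0 and 1 ceded, ptid 2 confers. */
        printf("%d\n", core_has_nothing_to_run(4, 0x3UL, 0x4UL)); /* 0: ptid 3 busy */
        printf("%d\n", core_has_nothing_to_run(4, 0xbUL, 0x4UL)); /* 1: yield */
        return 0;
}

Counting conferring threads alongside napping ones is the point of the new bitmap: without it, several threads all spinning in H_CONFER would each see the others as "running" and none would ever yield.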
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 26a5b8d4dd3c..0a2d64fb5de8 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1798,7 +1798,7 @@ hcall_real_table:
         .long   0               /* 0xd8 */
         .long   0               /* 0xdc */
         .long   DOTSYM(kvmppc_h_cede) - hcall_real_table
-        .long   0               /* 0xe4 */
+        .long   DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
         .long   0               /* 0xe8 */
         .long   0               /* 0xec */
         .long   0               /* 0xf0 */
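hcall_real_table holds one 4-byte entry per hcall number, and hcall numbers advance in steps of 4 (the comments give the numbers), so an hcall's entry sits at a byte offset equal to its number; a zero entry means "no real-mode handler, exit to virtual mode". H_CONFER is 0xe4, directly after H_CEDE at 0xe0, which is why this patch fills the 0xe4 slot. A sketch of the lookup using plain function pointers instead of the assembly's base-plus-offset entries (names and structure simplified for illustration):

#include <stdio.h>

#define H_CEDE   0xe0
#define H_CONFER 0xe4

typedef long (*rm_handler_t)(void);

static long rm_h_cede(void)   { return 0; }
static long rm_h_confer(void) { return 0; }

/* Model of hcall_real_table: the kernel stores 32-bit offsets from the
 * table base; plain pointers are used here for simplicity. */
static rm_handler_t hcall_real_table[0x100 / 4];

int main(void)
{
        hcall_real_table[H_CEDE / 4] = rm_h_cede;
        hcall_real_table[H_CONFER / 4] = rm_h_confer; /* what this patch adds */

        rm_handler_t h = hcall_real_table[H_CONFER / 4];
        printf("H_CONFER index: %d, handled in real mode: %s\n",
               H_CONFER / 4, h ? "yes" : "no");
        return h ? h() : -1;
}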
