author      Jes Sorensen <jes@sgi.com>          2009-04-16 04:43:48 -0400
committer   Avi Kivity <avi@redhat.com>         2009-06-10 04:48:43 -0400
commit      c6b60c6921381130e5288b19f5fdf81152230b37 (patch)
tree        d2d69c726c2acd4ef83dc463adc2379ef896aac4 /arch/ia64
parent      463656c0007ddccee78db383eeb9e6eac75ccb7f (diff)
KVM: ia64: Don't hold slots_lock in guest mode
Reorder locking to avoid holding the slots_lock when entering
the guest.
Signed-off-by: Jes Sorensen <jes@sgi.com>
Acked-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/ia64')
-rw-r--r--    arch/ia64/kvm/kvm-ia64.c    64
1 file changed, 33 insertions(+), 31 deletions(-)
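Before the diff itself, a sketch of the locking shape the patch moves to. This is a minimal userspace illustration, not KVM code: POSIX rwlocks stand in for the kernel's slots_lock rw_semaphore, sched_yield() for kvm_resched(), and prepare_entry(), run_guest(), and handle_exit() are hypothetical stand-ins for kvm_vcpu_pre_transition(), the tramp_entry() transition to the guest, and kvm_handle_exit(). The point is the shape: the read lock is dropped across the long guest section and across any reschedule, and re-taken only where the protected state is actually consulted.

/* Build with: gcc -pthread -o vcpu_run_sketch vcpu_run_sketch.c */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

/* Stands in for the kernel's vcpu->kvm->slots_lock rw_semaphore. */
static pthread_rwlock_t slots_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Hypothetical stand-ins for kvm_vcpu_pre_transition(),
 * kvm_vmm_info->tramp_entry(), and kvm_handle_exit(). */
static int prepare_entry(void) { return 0; }    /* needs slots_lock held */
static void run_guest(void) { }                 /* long guest section    */
static int handle_exit(int round)               /* needs slots_lock held */
{
        return round < 3;       /* > 0 means "enter the guest again" */
}

static int vcpu_run(void)
{
        int r, round = 0;

        pthread_rwlock_rdlock(&slots_lock);     /* like down_read() */
again:
        r = prepare_entry();                    /* still under the lock */
        if (r < 0)
                goto out;

        /*
         * Drop the lock before the long guest section (the up_read()
         * the patch adds before kvm_guest_enter()), and re-take it
         * for exit handling (the down_read() after preempt_enable()).
         */
        pthread_rwlock_unlock(&slots_lock);
        run_guest();
        pthread_rwlock_rdlock(&slots_lock);

        r = handle_exit(round++);
out:
        /* Never hold the lock across a reschedule or a return. */
        pthread_rwlock_unlock(&slots_lock);
        if (r > 0) {
                sched_yield();                  /* like kvm_resched() */
                pthread_rwlock_rdlock(&slots_lock);
                goto again;
        }
        return r;
}

int main(void)
{
        printf("vcpu_run() -> %d\n", vcpu_run());
        return 0;
}

The failure branch in prepare_entry() mirrors the patch's vcpu_run_fail label: it funnels into out: with the lock still held, so the single unlock there balances every path.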
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 3bf0a345224a..f127fb723f2f 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -632,34 +632,22 @@ static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
 	vti_set_rr6(vcpu->arch.vmm_rr);
 	return kvm_insert_vmm_mapping(vcpu);
 }
+
 static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
 {
 	kvm_purge_vmm_mapping(vcpu);
 	vti_set_rr6(vcpu->arch.host_rr6);
 }
 
-static int vti_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	union context *host_ctx, *guest_ctx;
 	int r;
 
-	/*Get host and guest context with guest address space.*/
-	host_ctx = kvm_get_host_context(vcpu);
-	guest_ctx = kvm_get_guest_context(vcpu);
-
-	r = kvm_vcpu_pre_transition(vcpu);
-	if (r < 0)
-		goto out;
-	kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
-	kvm_vcpu_post_transition(vcpu);
-	r = 0;
-out:
-	return r;
-}
-
-static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	int r;
+	/*
+	 * down_read() may sleep and return with interrupts enabled
+	 */
+	down_read(&vcpu->kvm->slots_lock);
 
 again:
 	if (signal_pending(current)) {
@@ -668,23 +656,28 @@ again:
 		goto out;
 	}
 
-	/*
-	 * down_read() may sleep and return with interrupts enabled
-	 */
-	down_read(&vcpu->kvm->slots_lock);
-
 	preempt_disable();
 	local_irq_disable();
 
+	/*Get host and guest context with guest address space.*/
+	host_ctx = kvm_get_host_context(vcpu);
+	guest_ctx = kvm_get_guest_context(vcpu);
+
 	vcpu->guest_mode = 1;
+
+	r = kvm_vcpu_pre_transition(vcpu);
+	if (r < 0)
+		goto vcpu_run_fail;
+
+	up_read(&vcpu->kvm->slots_lock);
 	kvm_guest_enter();
-	r = vti_vcpu_run(vcpu, kvm_run);
-	if (r < 0) {
-		local_irq_enable();
-		preempt_enable();
-		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
-		goto out;
-	}
+
+	/*
+	 * Transition to the guest
+	 */
+	kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
+
+	kvm_vcpu_post_transition(vcpu);
 
 	vcpu->arch.launched = 1;
 	vcpu->guest_mode = 0;
@@ -698,9 +691,10 @@ again:
 	 */
 	barrier();
 	kvm_guest_exit();
-	up_read(&vcpu->kvm->slots_lock);
 	preempt_enable();
 
+	down_read(&vcpu->kvm->slots_lock);
+
 	r = kvm_handle_exit(kvm_run, vcpu);
 
 	if (r > 0) {
@@ -709,12 +703,20 @@ again:
 	}
 
 out:
+	up_read(&vcpu->kvm->slots_lock);
 	if (r > 0) {
 		kvm_resched(vcpu);
+		down_read(&vcpu->kvm->slots_lock);
 		goto again;
 	}
 
 	return r;
+
+vcpu_run_fail:
+	local_irq_enable();
+	preempt_enable();
+	kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+	goto out;
 }
 
 static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)