author     Anthony Liguori <aliguori@us.ibm.com>    2007-10-29 16:15:20 -0400
committer  Avi Kivity <avi@qumranet.com>            2008-01-30 10:52:58 -0500
commit     aab61cc0d28f6fab0c2c9137d95dea54c7dbcf46
tree       66352cc8d0791b83f9b1435465bc35c3126c4dad
parent     f78e0e2ee498e8f847500b565792c7d7634dcf54
KVM: Fix gfn_to_page() acquiring mmap_sem twice
KVM's nopage handler calls gfn_to_page(), which acquires mmap_sem when
calling out to get_user_pages(). However, nopage handlers are already
invoked with mmap_sem held. Introduce __gfn_to_page(), which requires the
caller to already hold the lock, for use by the nopage handler.
This was noticed by tglx.
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
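
The fix follows a common kernel convention: the public entry point takes the
lock itself, while a double-underscore variant assumes the caller already
holds it, so the fault path never re-acquires mmap_sem (a second down_read()
can block behind a queued writer and deadlock). The sketch below is only an
illustration of that locked-wrapper / __unlocked-helper split, written as
self-contained userspace C with a pthread rwlock standing in for mmap_sem;
lookup_page(), __lookup_page(), and fault_handler() are hypothetical names,
not KVM code.

    /* Illustrative sketch only (userspace, not kernel code). */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
    static int pages[16];

    /* Helper: caller must already hold map_lock for reading. */
    static int *__lookup_page(unsigned int idx)
    {
            return (idx < 16) ? &pages[idx] : NULL;
    }

    /* Public entry point: takes the read lock, then delegates to the helper. */
    static int *lookup_page(unsigned int idx)
    {
            int *p;

            pthread_rwlock_rdlock(&map_lock);
            p = __lookup_page(idx);
            pthread_rwlock_unlock(&map_lock);
            return p;
    }

    /* Fault-handler-like path: invoked with map_lock already held, so it
     * must use the __ variant instead of re-acquiring the lock. */
    static int *fault_handler(unsigned int idx)
    {
            return __lookup_page(idx);
    }

    int main(void)
    {
            printf("direct lookup: %p\n", (void *)lookup_page(3));

            pthread_rwlock_rdlock(&map_lock);   /* lock already held, as in nopage */
            printf("fault path:    %p\n", (void *)fault_handler(3));
            pthread_rwlock_unlock(&map_lock);
            return 0;
    }

Compile with -pthread; as in the patch, the caller on the fault-like path is
responsible for holding the lock around fault_handler(), while ordinary
callers keep using the locked wrapper unchanged.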
-rw-r--r--  drivers/kvm/kvm_main.c | 22
1 file changed, 18 insertions(+), 4 deletions(-)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index ac5ed00e9065..f439e45233f2 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -633,7 +633,10 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
 
-struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
+/*
+ * Requires current->mm->mmap_sem to be held
+ */
+static struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_memory_slot *slot;
 	struct page *page[1];
@@ -648,12 +651,10 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 		return bad_page;
 	}
 
-	down_read(&current->mm->mmap_sem);
 	npages = get_user_pages(current, current->mm,
 				slot->userspace_addr
 				+ (gfn - slot->base_gfn) * PAGE_SIZE, 1,
 				1, 1, page, NULL);
-	up_read(&current->mm->mmap_sem);
 	if (npages != 1) {
 		get_page(bad_page);
 		return bad_page;
@@ -661,6 +662,18 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 
 	return page[0];
 }
+
+struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
+{
+	struct page *page;
+
+	down_read(&current->mm->mmap_sem);
+	page = __gfn_to_page(kvm, gfn);
+	up_read(&current->mm->mmap_sem);
+
+	return page;
+}
+
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
 void kvm_release_page(struct page *page)
@@ -2621,7 +2634,8 @@ static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 	if (!kvm_is_visible_gfn(kvm, pgoff))
 		return NOPAGE_SIGBUS;
-	page = gfn_to_page(kvm, pgoff);
+	/* current->mm->mmap_sem is already held so call lockless version */
+	page = __gfn_to_page(kvm, pgoff);
 	if (is_error_page(page)) {
 		kvm_release_page(page);
 		return NOPAGE_SIGBUS;