author	Anthony Liguori <aliguori@us.ibm.com>	2008-04-30 16:37:07 -0400
committer	Avi Kivity <avi@qumranet.com>	2008-07-20 05:40:49 -0400
commit	2e2e3738af33575cba59597acd5e80cdd5ec11ee (patch)
tree	8ba77f83b781bf219a28986b31802b48858cd869 /virt
parent	d2ebb4103ff349af6dac14955bf93e57487a6694 (diff)
KVM: Handle vma regions with no backing page
This patch allows VMAs that contain no backing page to be used for guest
memory. This is useful for assigning mmio regions to a guest.

Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
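The key case is a vma with VM_PFNMAP set: there is no struct page behind the
mapping, so get_user_pages() cannot resolve it and the pfn is computed
directly from the vma layout, using vm_pgoff as the base pfn of the region.
A minimal userspace sketch of that arithmetic follows; it is not part of the
patch itself, the addresses and base pfn are hypothetical, and PAGE_SHIFT is
assumed to be 12 (4 KiB pages):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed: 4 KiB pages */

/*
 * Mirrors the patch's VM_PFNMAP computation:
 *   pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff
 * For such vmas, vm_pgoff holds the first pfn of the mapped region.
 */
static unsigned long long pfnmap_pfn(unsigned long long addr,
				     unsigned long long vm_start,
				     unsigned long long vm_pgoff)
{
	return ((addr - vm_start) >> PAGE_SHIFT) + vm_pgoff;
}

int main(void)
{
	/* hypothetical vma mapping device mmio that begins at pfn 0xfd000 */
	unsigned long long vm_start = 0x700000000000ULL;
	unsigned long long vm_pgoff = 0xfd000ULL;
	unsigned long long addr = vm_start + (3ULL << PAGE_SHIFT);

	/* the fourth page of the vma maps the fourth page of the region */
	printf("pfn = 0x%llx\n", pfnmap_pfn(addr, vm_start, vm_pgoff));
	/* prints: pfn = 0xfd003 */
	return 0;
}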
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/kvm_main.c	49
1 file changed, 37 insertions(+), 12 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b6a59498b5a7..f9dd20606c40 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -532,6 +532,7 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 	struct page *page[1];
 	unsigned long addr;
 	int npages;
+	pfn_t pfn;
 
 	might_sleep();
 
@@ -544,19 +545,38 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 	npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
 				NULL);
 
-	if (npages != 1) {
-		get_page(bad_page);
-		return page_to_pfn(bad_page);
-	}
+	if (unlikely(npages != 1)) {
+		struct vm_area_struct *vma;
 
-	return page_to_pfn(page[0]);
+		vma = find_vma(current->mm, addr);
+		if (vma == NULL || addr < vma->vm_start ||
+		    !(vma->vm_flags & VM_PFNMAP)) {
+			get_page(bad_page);
+			return page_to_pfn(bad_page);
+		}
+
+		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+		BUG_ON(pfn_valid(pfn));
+	} else
+		pfn = page_to_pfn(page[0]);
+
+	return pfn;
 }
 
 EXPORT_SYMBOL_GPL(gfn_to_pfn);
 
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
-	return pfn_to_page(gfn_to_pfn(kvm, gfn));
+	pfn_t pfn;
+
+	pfn = gfn_to_pfn(kvm, gfn);
+	if (pfn_valid(pfn))
+		return pfn_to_page(pfn);
+
+	WARN_ON(!pfn_valid(pfn));
+
+	get_page(bad_page);
+	return bad_page;
 }
 
 EXPORT_SYMBOL_GPL(gfn_to_page);
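Note the asymmetry between the two functions in the hunk above. gfn_to_pfn()
may now return a pfn with no struct page behind it, and BUG_ON(pfn_valid(pfn))
asserts exactly that on the PFNMAP path; gfn_to_page() keeps its old contract,
so it falls back to bad_page when the pfn is not covered by the memmap (by the
time its WARN_ON runs, pfn_valid(pfn) is already known to be false, so it
fires whenever a caller asks for a struct page behind an mmio pfn). A small
sketch of the invariant, not from the patch, with pfn_valid() stubbed out
under the assumption that ram ends at pfn 0x100000:

#include <assert.h>
#include <stdbool.h>

/* stand-in for the kernel's pfn_valid(): assume the memmap covers pfns below 0x100000 */
static bool pfn_valid_sim(unsigned long long pfn)
{
	return pfn < 0x100000ULL;
}

int main(void)
{
	/* a pfn computed from a hypothetical VM_PFNMAP vma mapping device mmio */
	unsigned long long mmio_pfn = 0x1fd003ULL;

	/*
	 * The patch's BUG_ON(pfn_valid(pfn)) encodes this: a pfn taken from
	 * the PFNMAP path is expected to have no struct page, so the
	 * refcounting helpers can safely skip it.
	 */
	assert(!pfn_valid_sim(mmio_pfn));
	return 0;
}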
@@ -569,7 +589,8 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-	put_page(pfn_to_page(pfn));
+	if (pfn_valid(pfn))
+		put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
 
@@ -594,21 +615,25 @@ EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
 
 void kvm_set_pfn_dirty(pfn_t pfn)
 {
-	struct page *page = pfn_to_page(pfn);
-	if (!PageReserved(page))
-		SetPageDirty(page);
+	if (pfn_valid(pfn)) {
+		struct page *page = pfn_to_page(pfn);
+		if (!PageReserved(page))
+			SetPageDirty(page);
+	}
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
 void kvm_set_pfn_accessed(pfn_t pfn)
 {
-	mark_page_accessed(pfn_to_page(pfn));
+	if (pfn_valid(pfn))
+		mark_page_accessed(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
 
 void kvm_get_pfn(pfn_t pfn)
 {
-	get_page(pfn_to_page(pfn));
+	if (pfn_valid(pfn))
+		get_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_get_pfn);
 
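The remaining hunks all apply the same guard: every helper that touches the
struct page behind a pfn (reference counting, dirty and accessed bits) is
gated on pfn_valid(), so mmio pfns pass through as no-ops. A rough userspace
rendering of kvm_release_pfn_clean()'s new behaviour, with pfn_valid() again
stubbed out and all values hypothetical:

#include <stdbool.h>
#include <stdio.h>

/* stand-in for pfn_valid(): assume ram occupies pfns below 0x100000 */
static bool pfn_valid_sim(unsigned long long pfn)
{
	return pfn < 0x100000ULL;
}

/* mirrors kvm_release_pfn_clean() after the patch */
static void release_pfn_clean_sim(unsigned long long pfn)
{
	if (pfn_valid_sim(pfn))
		printf("put_page() on pfn 0x%llx\n", pfn);	/* ram: drop the reference */
	else
		printf("pfn 0x%llx has no struct page, nothing to release\n", pfn);
}

int main(void)
{
	release_pfn_clean_sim(0x1234ULL);	/* ram-backed pfn */
	release_pfn_clean_sim(0x1fd003ULL);	/* mmio pfn from a PFNMAP vma */
	return 0;
}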