author		Anthony Liguori <aliguori@us.ibm.com>	2008-04-02 15:46:56 -0400
committer	Avi Kivity <avi@qumranet.com>	2008-04-27 05:01:15 -0400
commit		35149e2129fe34fc8cb5917e1ecf5156b0fa3415 (patch)
tree		b67cb16fa6054769ee476fce99a32601b126af10 /virt
parent		fdae862f91728aec6dd8fd62cd2398868c906b6b (diff)
KVM: MMU: Don't assume struct page for x86
This patch introduces a gfn_to_pfn() function and corresponding functions like kvm_release_pfn_dirty(). Using these new functions, we can modify the x86 MMU to no longer assume that it can always get a struct page for any given gfn.

We don't want to eliminate gfn_to_page() entirely because a number of places assume they can do gfn_to_page() and then kmap() the results. When we support IO memory, gfn_to_page() will fail for IO pages although gfn_to_pfn() will succeed.

This does not implement support for avoiding reference counting for reserved RAM or for IO memory. However, it should make those things pretty straightforward.

Since we're only introducing new common symbols, I don't think it will break the non-x86 architectures, but I haven't tested those. I've tested Intel, AMD, NPT, and hugetlbfs with Windows and Linux guests.

[avi: fix overflow when shifting left pfns by adding casts]

Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
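For illustration only, a minimal caller-side sketch (not part of this patch; the function name, error handling, and locking shown are assumptions) of how the new symbols introduced below -- gfn_to_pfn(), is_error_pfn(), kvm_release_pfn_clean() and kvm_release_pfn_dirty() -- are meant to be used in place of gfn_to_page():

/* Hypothetical example only -- not part of this patch. */
static int example_map_gfn(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	/* gfn_to_pfn() requires current->mm->mmap_sem to be held */
	down_read(&current->mm->mmap_sem);
	pfn = gfn_to_pfn(kvm, gfn);
	up_read(&current->mm->mmap_sem);

	if (is_error_pfn(pfn)) {
		/* the error path still took a reference on bad_page */
		kvm_release_pfn_clean(pfn);
		return -EFAULT;
	}

	/*
	 * Use the pfn directly, e.g. build a shadow pte from
	 * ((u64)pfn << PAGE_SHIFT); the cast avoids 32-bit overflow.
	 */

	kvm_release_pfn_dirty(pfn);	/* mark dirty, then drop the reference */
	return 0;
}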
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/kvm_main.c	68
1 file changed, 61 insertions, 7 deletions
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 93ed78b015c0..6a52c084e068 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -40,6 +40,7 @@
 #include <linux/kvm_para.h>
 #include <linux/pagemap.h>
 #include <linux/mman.h>
+#include <linux/swap.h>
 
 #include <asm/processor.h>
 #include <asm/io.h>
@@ -458,6 +459,12 @@ int is_error_page(struct page *page)
 }
 EXPORT_SYMBOL_GPL(is_error_page);
 
+int is_error_pfn(pfn_t pfn)
+{
+	return pfn == bad_pfn;
+}
+EXPORT_SYMBOL_GPL(is_error_pfn);
+
 static inline unsigned long bad_hva(void)
 {
 	return PAGE_OFFSET;
@@ -519,7 +526,7 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 /*
  * Requires current->mm->mmap_sem to be held
  */
-struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
+pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 {
 	struct page *page[1];
 	unsigned long addr;
@@ -530,7 +537,7 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 	addr = gfn_to_hva(kvm, gfn);
 	if (kvm_is_error_hva(addr)) {
 		get_page(bad_page);
-		return bad_page;
+		return page_to_pfn(bad_page);
 	}
 
 	npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
@@ -538,27 +545,71 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 
 	if (npages != 1) {
 		get_page(bad_page);
-		return bad_page;
+		return page_to_pfn(bad_page);
 	}
 
-	return page[0];
+	return page_to_pfn(page[0]);
+}
+
+EXPORT_SYMBOL_GPL(gfn_to_pfn);
+
+struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
+{
+	return pfn_to_page(gfn_to_pfn(kvm, gfn));
 }
 
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
 void kvm_release_page_clean(struct page *page)
 {
-	put_page(page);
+	kvm_release_pfn_clean(page_to_pfn(page));
 }
 EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
+void kvm_release_pfn_clean(pfn_t pfn)
+{
+	put_page(pfn_to_page(pfn));
+}
+EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
+
 void kvm_release_page_dirty(struct page *page)
 {
+	kvm_release_pfn_dirty(page_to_pfn(page));
+}
+EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
+
+void kvm_release_pfn_dirty(pfn_t pfn)
+{
+	kvm_set_pfn_dirty(pfn);
+	kvm_release_pfn_clean(pfn);
+}
+EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
+
+void kvm_set_page_dirty(struct page *page)
+{
+	kvm_set_pfn_dirty(page_to_pfn(page));
+}
+EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
+
+void kvm_set_pfn_dirty(pfn_t pfn)
+{
+	struct page *page = pfn_to_page(pfn);
 	if (!PageReserved(page))
 		SetPageDirty(page);
-	put_page(page);
 }
-EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
+EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
+
+void kvm_set_pfn_accessed(pfn_t pfn)
+{
+	mark_page_accessed(pfn_to_page(pfn));
+}
+EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
+
+void kvm_get_pfn(pfn_t pfn)
+{
+	get_page(pfn_to_page(pfn));
+}
+EXPORT_SYMBOL_GPL(kvm_get_pfn);
 
 static int next_segment(unsigned long len, int offset)
 {
@@ -1351,6 +1402,7 @@ static struct sys_device kvm_sysdev = {
 };
 
 struct page *bad_page;
+pfn_t bad_pfn;
 
 static inline
 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
@@ -1392,6 +1444,8 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
 		goto out;
 	}
 
+	bad_pfn = page_to_pfn(bad_page);
+
 	r = kvm_arch_hardware_setup();
 	if (r < 0)
 		goto out_free_0;