Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/kvm_main.c  68
1 file changed, 61 insertions(+), 7 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 93ed78b015c0..6a52c084e068 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -40,6 +40,7 @@
 #include <linux/kvm_para.h>
 #include <linux/pagemap.h>
 #include <linux/mman.h>
+#include <linux/swap.h>
 
 #include <asm/processor.h>
 #include <asm/io.h>
@@ -458,6 +459,12 @@ int is_error_page(struct page *page)
 }
 EXPORT_SYMBOL_GPL(is_error_page);
 
+int is_error_pfn(pfn_t pfn)
+{
+	return pfn == bad_pfn;
+}
+EXPORT_SYMBOL_GPL(is_error_pfn);
+
 static inline unsigned long bad_hva(void)
 {
 	return PAGE_OFFSET;
@@ -519,7 +526,7 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 /*
  * Requires current->mm->mmap_sem to be held
  */
-struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
+pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 {
 	struct page *page[1];
 	unsigned long addr;
@@ -530,7 +537,7 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 	addr = gfn_to_hva(kvm, gfn);
 	if (kvm_is_error_hva(addr)) {
 		get_page(bad_page);
-		return bad_page;
+		return page_to_pfn(bad_page);
 	}
 
 	npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
@@ -538,27 +545,71 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 
 	if (npages != 1) {
 		get_page(bad_page);
-		return bad_page;
+		return page_to_pfn(bad_page);
 	}
 
-	return page[0];
+	return page_to_pfn(page[0]);
+}
+
+EXPORT_SYMBOL_GPL(gfn_to_pfn);
+
+struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
+{
+	return pfn_to_page(gfn_to_pfn(kvm, gfn));
 }
 
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
 void kvm_release_page_clean(struct page *page)
 {
-	put_page(page);
+	kvm_release_pfn_clean(page_to_pfn(page));
 }
 EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
+void kvm_release_pfn_clean(pfn_t pfn)
+{
+	put_page(pfn_to_page(pfn));
+}
+EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
+
 void kvm_release_page_dirty(struct page *page)
 {
+	kvm_release_pfn_dirty(page_to_pfn(page));
+}
+EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
+
+void kvm_release_pfn_dirty(pfn_t pfn)
+{
+	kvm_set_pfn_dirty(pfn);
+	kvm_release_pfn_clean(pfn);
+}
+EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
+
+void kvm_set_page_dirty(struct page *page)
+{
+	kvm_set_pfn_dirty(page_to_pfn(page));
+}
+EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
+
+void kvm_set_pfn_dirty(pfn_t pfn)
+{
+	struct page *page = pfn_to_page(pfn);
 	if (!PageReserved(page))
 		SetPageDirty(page);
-	put_page(page);
 }
-EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
+EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
+
+void kvm_set_pfn_accessed(pfn_t pfn)
+{
+	mark_page_accessed(pfn_to_page(pfn));
+}
+EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
+
+void kvm_get_pfn(pfn_t pfn)
+{
+	get_page(pfn_to_page(pfn));
+}
+EXPORT_SYMBOL_GPL(kvm_get_pfn);
 
 static int next_segment(unsigned long len, int offset)
 {
@@ -1351,6 +1402,7 @@ static struct sys_device kvm_sysdev = {
 };
 
 struct page *bad_page;
+pfn_t bad_pfn;
 
 static inline
 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
@@ -1392,6 +1444,8 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
 		goto out;
 	}
 
+	bad_pfn = page_to_pfn(bad_page);
+
 	r = kvm_arch_hardware_setup();
 	if (r < 0)
 		goto out_free_0;
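
For context, not part of the patch: a minimal sketch of how a caller might use the pfn-based helpers introduced above, pairing gfn_to_pfn() with the matching release functions and the new is_error_pfn() check. The function name, the -EFAULT return value, and the mmap_sem locking around the lookup are illustrative assumptions, not code from this commit.

/*
 * Hypothetical caller sketch -- illustrative only, not from this commit.
 * gfn_to_pfn() requires current->mm->mmap_sem to be held and always
 * returns with a page reference taken (on bad_page if the lookup fails),
 * so every path must drop that reference via a kvm_release_pfn_*() helper.
 */
static int example_touch_gfn(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	down_read(&current->mm->mmap_sem);
	pfn = gfn_to_pfn(kvm, gfn);
	up_read(&current->mm->mmap_sem);

	if (is_error_pfn(pfn)) {
		/* the error path still holds a reference on bad_page */
		kvm_release_pfn_clean(pfn);
		return -EFAULT;
	}

	/* ... access the frame, e.g. via pfn_to_page(pfn) ... */

	/* mark the page accessed and dirty, then drop the reference */
	kvm_set_pfn_accessed(pfn);
	kvm_release_pfn_dirty(pfn);
	return 0;
}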