author     Izik Eidus <izike@qumranet.com>  2007-10-17 13:17:48 -0400
committer  Avi Kivity <avi@qumranet.com>    2008-01-30 10:52:54 -0500
commit     cea7bb21280e3a825e64b54740edc5d3e6e4193c (patch)
tree       d9714d8c13491a433951b83a176f2a04f6521009 /drivers
parent     9647c14c98687d0abf5197e74b9d1448ab6ebb95 (diff)
KVM: MMU: Make gfn_to_page() always safe
In case the page is not present in the guest memory map, return a dummy
page the guest can scribble on.
This simplifies error checking in its users.
Signed-off-by: Izik Eidus <izike@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
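For illustration, a minimal sketch of the caller pattern the change enables (the helper name copy_from_guest_gfn is hypothetical and not part of the patch; it assumes the declarations in drivers/kvm/kvm.h): gfn_to_page() now always returns a usable struct page, so callers map it unconditionally and only need the single is_error_page() test instead of a NULL check.

/* Hypothetical caller sketch, not from the patch: mirrors the pattern
 * now used by kvm_read_guest_page() and friends in kvm_main.c.
 */
static int copy_from_guest_gfn(struct kvm *kvm, gfn_t gfn, void *dst, int len)
{
	struct page *page;
	void *page_virt;

	page = gfn_to_page(kvm, gfn);	/* never returns NULL any more */
	if (is_error_page(page))	/* gfn not in the guest memory map */
		return -EFAULT;

	page_virt = kmap_atomic(page, KM_USER0);
	memcpy(dst, page_virt, len);
	kunmap_atomic(page_virt, KM_USER0);
	return 0;
}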
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/kvm/kvm.h          |  3
-rw-r--r--  drivers/kvm/kvm_main.c     | 26
-rw-r--r--  drivers/kvm/mmu.c          | 16
-rw-r--r--  drivers/kvm/paging_tmpl.h  |  7
4 files changed, 23 insertions(+), 29 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 6ae7b6332e32..0c17c76d030f 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -565,8 +565,9 @@ static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
 hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva);
 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);
 
-extern hpa_t bad_page_address;
+extern struct page *bad_page;
 
+int is_error_page(struct page *page);
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 1879b409bed2..47000be25479 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -993,6 +993,12 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
 	return r;
 }
 
+int is_error_page(struct page *page)
+{
+	return page == bad_page;
+}
+EXPORT_SYMBOL_GPL(is_error_page);
+
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
 {
 	int i;
@@ -1034,7 +1040,7 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 	gfn = unalias_gfn(kvm, gfn);
 	slot = __gfn_to_memslot(kvm, gfn);
 	if (!slot)
-		return NULL;
+		return bad_page;
 	return slot->phys_mem[gfn - slot->base_gfn];
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
@@ -1054,7 +1060,7 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 	struct page *page;
 
 	page = gfn_to_page(kvm, gfn);
-	if (!page)
+	if (is_error_page(page))
 		return -EFAULT;
 	page_virt = kmap_atomic(page, KM_USER0);
 
@@ -1092,7 +1098,7 @@ int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
 	struct page *page;
 
 	page = gfn_to_page(kvm, gfn);
-	if (!page)
+	if (is_error_page(page))
 		return -EFAULT;
 	page_virt = kmap_atomic(page, KM_USER0);
 
@@ -1130,7 +1136,7 @@ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
 	struct page *page;
 
 	page = gfn_to_page(kvm, gfn);
-	if (!page)
+	if (is_error_page(page))
 		return -EFAULT;
 	page_virt = kmap_atomic(page, KM_USER0);
 
@@ -3068,7 +3074,7 @@ static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
 
 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 	page = gfn_to_page(kvm, pgoff);
-	if (!page)
+	if (is_error_page(page))
 		return NOPAGE_SIGBUS;
 	get_page(page);
 	if (type != NULL)
@@ -3383,7 +3389,7 @@ static struct sys_device kvm_sysdev = {
 	.cls = &kvm_sysdev_class,
 };
 
-hpa_t bad_page_address;
+struct page *bad_page;
 
 static inline
 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
@@ -3512,7 +3518,6 @@ EXPORT_SYMBOL_GPL(kvm_exit_x86);
 
 static __init int kvm_init(void)
 {
-	static struct page *bad_page;
 	int r;
 
 	r = kvm_mmu_module_init();
@@ -3523,16 +3528,13 @@ static __init int kvm_init(void)
 
 	kvm_arch_init();
 
-	bad_page = alloc_page(GFP_KERNEL);
+	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 
 	if (bad_page == NULL) {
 		r = -ENOMEM;
 		goto out;
 	}
 
-	bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT;
-	memset(__va(bad_page_address), 0, PAGE_SIZE);
-
 	return 0;
 
 out:
@@ -3545,7 +3547,7 @@ out4:
 static __exit void kvm_exit(void)
 {
 	kvm_exit_debug();
-	__free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
+	__free_page(bad_page);
 	kvm_mmu_module_exit();
 }
 
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index bbf5eb427dc6..2ad14fbdcfa0 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -850,23 +850,17 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
 	__set_bit(slot, &page_head->slot_bitmap);
 }
 
-hpa_t safe_gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
-{
-	hpa_t hpa = gpa_to_hpa(kvm, gpa);
-
-	return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK): hpa;
-}
-
 hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
 {
 	struct page *page;
+	hpa_t hpa;
 
 	ASSERT((gpa & HPA_ERR_MASK) == 0);
 	page = gfn_to_page(kvm, gpa >> PAGE_SHIFT);
-	if (!page)
-		return gpa | HPA_ERR_MASK;
-	return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
-		| (gpa & (PAGE_SIZE-1));
+	hpa = ((hpa_t)page_to_pfn(page) << PAGE_SHIFT) | (gpa & (PAGE_SIZE-1));
+	if (is_error_page(page))
+		return hpa | HPA_ERR_MASK;
+	return hpa;
 }
 
 hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index bab1b7f8d705..572e5b6d9a7a 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -72,8 +72,6 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 			    struct kvm_vcpu *vcpu, gva_t addr,
 			    int write_fault, int user_fault, int fetch_fault)
 {
-	hpa_t hpa;
-	struct kvm_memory_slot *slot;
 	struct page *page;
 	pt_element_t *table;
 	pt_element_t pte;
@@ -105,9 +103,8 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 		pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
 			 walker->level - 1, table_gfn);
 
-		slot = gfn_to_memslot(vcpu->kvm, table_gfn);
-		hpa = safe_gpa_to_hpa(vcpu->kvm, pte & PT64_BASE_ADDR_MASK);
-		page = pfn_to_page(hpa >> PAGE_SHIFT);
+		page = gfn_to_page(vcpu->kvm, (pte & PT64_BASE_ADDR_MASK)
+				   >> PAGE_SHIFT);
 
 		table = kmap_atomic(page, KM_USER0);
 		pte = table[index];