author	Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>	2011-07-11 15:28:54 -0400
committer	Avi Kivity <avi@redhat.com>	2011-07-24 04:50:34 -0400
commit	fce92dce79dbf5fff39c7ac2fb149729d79b7a39 (patch)
tree	455461b843f5f94356786ea0e21132740458588a
parent	c37079586f317d7e7f1a70d36f0e5177691c89c2 (diff)
KVM: MMU: filter out the mmio pfn from the fault pfn
If the page fault is caused by mmio, the gfn cannot be found in the memslots, and 'bad_pfn' is returned on the gfn_to_hva path, so we can use 'bad_pfn' to identify the mmio page fault. And, to clarify the meaning of the mmio pfn, we return fault_page instead of bad_page when the gfn is not allowed to be prefetched.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--	arch/x86/kvm/mmu.c	4
-rw-r--r--	include/linux/kvm_host.h	5
-rw-r--r--	virt/kvm/kvm_main.c	16
3 files changed, 21 insertions, 4 deletions
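
As a brief illustration of how the two helpers added by this patch are intended to be used, below is a minimal caller sketch in C. Only is_noslot_pfn() and is_invalid_pfn() come from this commit; classify_pfn() and the pfn_class values are hypothetical names made up for the example, not kernel APIs.

/*
 * Hypothetical caller sketch (not part of this patch): with the new
 * helpers a fault path can separate "gfn has no memslot, so treat the
 * access as mmio" from a genuinely bad pfn.
 */
enum pfn_class { PFN_RAM, PFN_MMIO, PFN_ERROR };

static enum pfn_class classify_pfn(pfn_t pfn)
{
	if (is_noslot_pfn(pfn))		/* pfn == bad_pfn: no memslot covers the gfn */
		return PFN_MMIO;
	if (is_invalid_pfn(pfn))	/* hwpoison_pfn or fault_pfn: a real error */
		return PFN_ERROR;
	return PFN_RAM;			/* ordinary RAM-backed pfn */
}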
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5334b4e9ecc7..96a7ed4e6837 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2085,8 +2085,8 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
 
 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
 	if (!slot) {
-		get_page(bad_page);
-		return page_to_pfn(bad_page);
+		get_page(fault_page);
+		return page_to_pfn(fault_page);
 	}
 
 	hva = gfn_to_hva_memslot(slot, gfn);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c8e023902f79..eabb21a30c34 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -327,12 +327,17 @@ static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
 static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
 
 extern struct page *bad_page;
+extern struct page *fault_page;
+
 extern pfn_t bad_pfn;
+extern pfn_t fault_pfn;
 
 int is_error_page(struct page *page);
 int is_error_pfn(pfn_t pfn);
 int is_hwpoison_pfn(pfn_t pfn);
 int is_fault_pfn(pfn_t pfn);
+int is_noslot_pfn(pfn_t pfn);
+int is_invalid_pfn(pfn_t pfn);
 int kvm_is_error_hva(unsigned long addr);
 int kvm_set_memory_region(struct kvm *kvm,
 			struct kvm_userspace_memory_region *mem,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d5ef9ebcaff7..56f3c704fd74 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -101,8 +101,8 @@ static bool largepages_enabled = true;
 static struct page *hwpoison_page;
 static pfn_t hwpoison_pfn;
 
-static struct page *fault_page;
-static pfn_t fault_pfn;
+struct page *fault_page;
+pfn_t fault_pfn;
 
 inline int kvm_is_mmio_pfn(pfn_t pfn)
 {
@@ -931,6 +931,18 @@ int is_fault_pfn(pfn_t pfn)
 }
 EXPORT_SYMBOL_GPL(is_fault_pfn);
 
+int is_noslot_pfn(pfn_t pfn)
+{
+	return pfn == bad_pfn;
+}
+EXPORT_SYMBOL_GPL(is_noslot_pfn);
+
+int is_invalid_pfn(pfn_t pfn)
+{
+	return pfn == hwpoison_pfn || pfn == fault_pfn;
+}
+EXPORT_SYMBOL_GPL(is_invalid_pfn);
+
 static inline unsigned long bad_hva(void)
 {
 	return PAGE_OFFSET;