author	Huang Ying <ying.huang@intel.com>	2011-01-29 22:15:49 -0500
committer	Marcelo Tosatti <mtosatti@redhat.com>	2011-03-17 12:08:27 -0400
commit	fafc3dbaac6447ab8f78d2f7f32a521b24fc6b36 (patch)
tree	3934fab022773bc03c8af20021be79d112f584fa /virt/kvm
parent	69ebb83e13e514222b0ae4f8bd813a17679ed876 (diff)
KVM: Replace is_hwpoison_address with __get_user_pages
is_hwpoison_address only checks whether the page table entry is hwpoisoned, regardless of the memory page it maps, while __get_user_pages checks both.

QEMU clears a poisoned page table entry (via unmap/map) so that a new memory page can be allocated for the virtual address across a guest reboot. But it is also possible that the underlying memory page stays poisoned even after the corresponding page table entry has been cleared, i.e. a new memory page cannot be allocated. __get_user_pages catches both situations.

Signed-off-by: Huang Ying <ying.huang@intel.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
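For reference, the helper this patch introduces (taken from the hunk below, with the __get_user_pages argument list as it exists in this tree), annotated to show how the hwpoison check works:

/*
 * Mirrors the new check_user_page_hwpoison() added to virt/kvm/kvm_main.c.
 * The caller (hva_to_pfn) holds current->mm->mmap_sem for read.
 */
static inline int check_user_page_hwpoison(unsigned long addr)
{
	/*
	 * FOLL_TOUCH | FOLL_WRITE force the page to actually be faulted
	 * in, so the backing page itself is examined, not just the PTE.
	 * FOLL_HWPOISON makes __get_user_pages report a poisoned page as
	 * -EHWPOISON instead of a plain fault.
	 */
	int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;

	rc = __get_user_pages(current, current->mm, addr, 1,
			      flags, NULL, NULL, NULL);

	/*
	 * Only the hwpoison case is treated as poisoned; any other error
	 * leaves the normal fault handling in hva_to_pfn() to run.
	 */
	return rc == -EHWPOISON;
}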
Diffstat (limited to 'virt/kvm')
-rw-r--r--	virt/kvm/kvm_main.c	11
1 file changed, 10 insertions, 1 deletion
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4856a7dcbd7f..002fe0b12c9f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1028,6 +1028,15 @@ static pfn_t get_fault_pfn(void)
 	return fault_pfn;
 }
 
+static inline int check_user_page_hwpoison(unsigned long addr)
+{
+	int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;
+
+	rc = __get_user_pages(current, current->mm, addr, 1,
+			      flags, NULL, NULL, NULL);
+	return rc == -EHWPOISON;
+}
+
 static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
 			bool *async, bool write_fault, bool *writable)
 {
@@ -1075,7 +1084,7 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
 		return get_fault_pfn();
 
 	down_read(&current->mm->mmap_sem);
-	if (is_hwpoison_address(addr)) {
+	if (check_user_page_hwpoison(addr)) {
 		up_read(&current->mm->mmap_sem);
 		get_page(hwpoison_page);
 		return page_to_pfn(hwpoison_page);