author	Gleb Natapov <gleb@redhat.com>	2011-02-01 06:21:47 -0500
committer	Avi Kivity <avi@redhat.com>	2011-04-06 06:15:55 -0400
commit	0857b9e95c1af8bfe84630ef6747b9d4d61de4c6 (patch)
tree	df3892f624910d2a6b210e30549a6a82a79e5474 /virt/kvm/kvm_main.c
parent	9e02fb963352c5ad075d80dd3e852fbee9585575 (diff)
KVM: Enable async page fault processing
If an asynchronous hva_to_pfn() is requested, call GUP with FOLL_NOWAIT to avoid sleeping on IO. The hwpoison check is done at the same time; otherwise check_user_page_hwpoison() would call GUP again and put the vcpu to sleep.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
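For readers unfamiliar with FOLL_NOWAIT: with that flag set, __get_user_pages() returns immediately instead of sleeping on I/O, so hva_to_pfn() can tell "page is already resident" apart from "bringing it in would block" and hand the latter case to the async page fault machinery. The sketch below is illustrative only: it folds the new helper and its caller from the diff into one function, the name nowait_lookup_one() is not in the patch, and it assumes kvm_main.c's existing includes.

/* Illustrative sketch only: the new non-blocking lookup and its
 * locking, collapsed into a single function (hypothetical name). */
static int nowait_lookup_one(unsigned long addr, int write,
			     struct page **page)
{
	/*
	 * FOLL_NOWAIT: return instead of sleeping on I/O.
	 * FOLL_HWPOISON: report a poisoned page as -EHWPOISON, so no
	 * second (blocking) GUP is needed to detect it.
	 */
	int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;
	int npages;

	if (write)
		flags |= FOLL_WRITE;

	down_read(&current->mm->mmap_sem);
	npages = __get_user_pages(current, current->mm, addr, 1, flags,
				  page, NULL, NULL);
	up_read(&current->mm->mmap_sem);

	/* 1: page resident; -EHWPOISON: poisoned; anything else: the
	 * fault could not be resolved without blocking, go async. */
	return npages;
}

On the synchronous path hva_to_pfn() keeps using get_user_pages_fast(), which may sleep but needs no mmap_sem handling by the caller.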
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--	virt/kvm/kvm_main.c	23
1 file changed, 21 insertions(+), 2 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 556e3efe5325..6330653480e4 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1037,6 +1037,17 @@ static pfn_t get_fault_pfn(void)
 	return fault_pfn;
 }
 
+int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
+	unsigned long start, int write, struct page **page)
+{
+	int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;
+
+	if (write)
+		flags |= FOLL_WRITE;
+
+	return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
+}
+
 static inline int check_user_page_hwpoison(unsigned long addr)
 {
 	int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;
@@ -1070,7 +1081,14 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
 	if (writable)
 		*writable = write_fault;
 
-	npages = get_user_pages_fast(addr, 1, write_fault, page);
+	if (async) {
+		down_read(&current->mm->mmap_sem);
+		npages = get_user_page_nowait(current, current->mm,
+					addr, write_fault, page);
+		up_read(&current->mm->mmap_sem);
+	} else
+		npages = get_user_pages_fast(addr, 1, write_fault,
+						page);
 
 	/* map read fault as writable if possible */
 	if (unlikely(!write_fault) && npages == 1) {
@@ -1093,7 +1111,8 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
 		return get_fault_pfn();
 
 	down_read(&current->mm->mmap_sem);
-	if (check_user_page_hwpoison(addr)) {
+	if (npages == -EHWPOISON ||
+		(!async && check_user_page_hwpoison(addr))) {
 		up_read(&current->mm->mmap_sem);
 		get_page(hwpoison_page);
 		return page_to_pfn(hwpoison_page);
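The reworked hwpoison test in the last hunk can be read as the following decision; this is an illustrative restatement only, and the helper name fault_is_hwpoison() is not part of the patch.

/* Illustrative restatement of the new hwpoison test in hva_to_pfn(). */
static bool fault_is_hwpoison(int npages, bool async, unsigned long addr)
{
	/* Async path: get_user_page_nowait() already passed
	 * FOLL_HWPOISON, so poison shows up directly as -EHWPOISON. */
	if (npages == -EHWPOISON)
		return true;

	/* Sync path: get_user_pages_fast() does not report poison, so
	 * the separate check_user_page_hwpoison() call is still needed;
	 * skipping it on the async path avoids a second GUP that could
	 * put the vcpu to sleep. */
	return !async && check_user_page_hwpoison(addr);
}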