about summary refs log tree commit diff stats
path: root/virt
diff options
context:
space:
mode:
authorAndrea Arcangeli <aarcange@redhat.com>2015-02-11 18:27:28 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-02-11 20:06:05 -0500
commit0664e57ff0c68cbca012a45a38288fa277eb6795 (patch)
tree6f660902ba6ae3c834182e15165143b9a57b2477 /virt
parent7e339128496284cc21977fba5416166ee81f5172 (diff)
mm: gup: kvm use get_user_pages_unlocked
Use the more generic get_user_pages_unlocked which has the additional benefit of passing FAULT_FLAG_ALLOW_RETRY at the very first page fault (which allows the first page fault in an unmapped area to be always able to block indefinitely by being allowed to release the mmap_sem).

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Andres Lagar-Cavilla <andreslc@google.com>
Reviewed-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Peter Feiner <pfeiner@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'virt')
-rw-r--r--virt/kvm/async_pf.c2
-rw-r--r--virt/kvm/kvm_main.c50
2 files changed, 5 insertions, 47 deletions
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 5ff7f7f2689a..44660aee335f 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -80,7 +80,7 @@ static void async_pf_execute(struct work_struct *work)
80 80
81 might_sleep(); 81 might_sleep();
82 82
83 kvm_get_user_page_io(NULL, mm, addr, 1, NULL); 83 get_user_pages_unlocked(NULL, mm, addr, 1, 1, 0, NULL);
84 kvm_async_page_present_sync(vcpu, apf); 84 kvm_async_page_present_sync(vcpu, apf);
85 85
86 spin_lock(&vcpu->async_pf.lock); 86 spin_lock(&vcpu->async_pf.lock);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1cc6e2e19982..458b9b14b15c 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1128,43 +1128,6 @@ static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
1128 return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL); 1128 return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
1129} 1129}
1130 1130
1131int kvm_get_user_page_io(struct task_struct *tsk, struct mm_struct *mm,
1132 unsigned long addr, bool write_fault,
1133 struct page **pagep)
1134{
1135 int npages;
1136 int locked = 1;
1137 int flags = FOLL_TOUCH | FOLL_HWPOISON |
1138 (pagep ? FOLL_GET : 0) |
1139 (write_fault ? FOLL_WRITE : 0);
1140
1141 /*
1142 * If retrying the fault, we get here *not* having allowed the filemap
1143 * to wait on the page lock. We should now allow waiting on the IO with
1144 * the mmap semaphore released.
1145 */
1146 down_read(&mm->mmap_sem);
1147 npages = __get_user_pages(tsk, mm, addr, 1, flags, pagep, NULL,
1148 &locked);
1149 if (!locked) {
1150 VM_BUG_ON(npages);
1151
1152 if (!pagep)
1153 return 0;
1154
1155 /*
1156 * The previous call has now waited on the IO. Now we can
1157 * retry and complete. Pass TRIED to ensure we do not re
1158 * schedule async IO (see e.g. filemap_fault).
1159 */
1160 down_read(&mm->mmap_sem);
1161 npages = __get_user_pages(tsk, mm, addr, 1, flags | FOLL_TRIED,
1162 pagep, NULL, NULL);
1163 }
1164 up_read(&mm->mmap_sem);
1165 return npages;
1166}
1167
1168static inline int check_user_page_hwpoison(unsigned long addr) 1131static inline int check_user_page_hwpoison(unsigned long addr)
1169{ 1132{
1170 int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE; 1133 int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;
@@ -1227,15 +1190,10 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
1227 npages = get_user_page_nowait(current, current->mm, 1190 npages = get_user_page_nowait(current, current->mm,
1228 addr, write_fault, page); 1191 addr, write_fault, page);
1229 up_read(&current->mm->mmap_sem); 1192 up_read(&current->mm->mmap_sem);
1230 } else { 1193 } else
1231 /* 1194 npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
1232 * By now we have tried gup_fast, and possibly async_pf, and we 1195 write_fault, 0, page,
1233 * are certainly not atomic. Time to retry the gup, allowing 1196 FOLL_TOUCH|FOLL_HWPOISON);
1234 * mmap semaphore to be relinquished in the case of IO.
1235 */
1236 npages = kvm_get_user_page_io(current, current->mm, addr,
1237 write_fault, page);
1238 }
1239 if (npages != 1) 1197 if (npages != 1)
1240 return npages; 1198 return npages;
1241 1199