author     Al Viro <viro@zeniv.linux.org.uk>    2017-11-19 17:47:33 -0500
committer  Al Viro <viro@zeniv.linux.org.uk>    2017-12-04 13:35:21 -0500
commit     ce53053ce378c21e7ffc45241fd67d6ee79daa2b
tree       635bbf710c9add8e1740bf96e3cb99c6f6ace7fd
parent     e716712f83b635e62d5fb66c1375524ef2152cc0
kvm: switch get_user_page_nowait() to get_user_pages_unlocked()
... and fold into the sole caller, unifying async and non-async cases
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
-rw-r--r--  virt/kvm/kvm_main.c | 43
1 file changed, 12 insertions(+), 31 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f169ecc4f2e8..ae4985bc8a8a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1314,17 +1314,6 @@ unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *w
 	return gfn_to_hva_memslot_prot(slot, gfn, writable);
 }
 
-static int get_user_page_nowait(unsigned long start, int write,
-		struct page **page)
-{
-	int flags = FOLL_NOWAIT | FOLL_HWPOISON;
-
-	if (write)
-		flags |= FOLL_WRITE;
-
-	return get_user_pages(start, 1, flags, page, NULL);
-}
-
 static inline int check_user_page_hwpoison(unsigned long addr)
 {
 	int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
@@ -1373,7 +1362,8 @@ static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
 			   bool *writable, kvm_pfn_t *pfn)
 {
-	struct page *page[1];
+	unsigned int flags = FOLL_HWPOISON;
+	struct page *page;
 	int npages = 0;
 
 	might_sleep();
@@ -1381,35 +1371,26 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
 	if (writable)
 		*writable = write_fault;
 
-	if (async) {
-		down_read(&current->mm->mmap_sem);
-		npages = get_user_page_nowait(addr, write_fault, page);
-		up_read(&current->mm->mmap_sem);
-	} else {
-		unsigned int flags = FOLL_HWPOISON;
-
-		if (write_fault)
-			flags |= FOLL_WRITE;
+	if (write_fault)
+		flags |= FOLL_WRITE;
+	if (async)
+		flags |= FOLL_NOWAIT;
 
-		npages = get_user_pages_unlocked(addr, 1, page, flags);
-	}
+	npages = get_user_pages_unlocked(addr, 1, &page, flags);
 	if (npages != 1)
 		return npages;
 
 	/* map read fault as writable if possible */
 	if (unlikely(!write_fault) && writable) {
-		struct page *wpage[1];
+		struct page *wpage;
 
-		npages = __get_user_pages_fast(addr, 1, 1, wpage);
-		if (npages == 1) {
+		if (__get_user_pages_fast(addr, 1, 1, &wpage) == 1) {
 			*writable = true;
-			put_page(page[0]);
-			page[0] = wpage[0];
+			put_page(page);
+			page = wpage;
 		}
-
-		npages = 1;
 	}
-	*pfn = page_to_pfn(page[0]);
+	*pfn = page_to_pfn(page);
 	return npages;
 }
 
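For readability, this is roughly how hva_to_pfn_slow() reads with the patch applied, pieced together from the new-side lines of the hunks above. The comments are editorial, and the one context line that falls between the second and third hunks is assumed to be blank; treat this as a sketch rather than verbatim file contents.

static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
			   bool *writable, kvm_pfn_t *pfn)
{
	unsigned int flags = FOLL_HWPOISON;
	struct page *page;
	int npages = 0;

	might_sleep();

	if (writable)
		*writable = write_fault;

	/*
	 * Async and non-async cases now share one path: the async caller
	 * just adds FOLL_NOWAIT instead of going through the removed
	 * get_user_page_nowait() helper and its explicit mmap_sem dance.
	 */
	if (write_fault)
		flags |= FOLL_WRITE;
	if (async)
		flags |= FOLL_NOWAIT;

	npages = get_user_pages_unlocked(addr, 1, &page, flags);
	if (npages != 1)
		return npages;

	/* map read fault as writable if possible */
	if (unlikely(!write_fault) && writable) {
		struct page *wpage;

		if (__get_user_pages_fast(addr, 1, 1, &wpage) == 1) {
			*writable = true;
			put_page(page);
			page = wpage;
		}
	}
	*pfn = page_to_pfn(page);
	return npages;
}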