 -rw-r--r--  arch/cris/arch-v32/drivers/cryptocop.c | 23
 -rw-r--r--  mm/gup.c                               | 46
 -rw-r--r--  virt/kvm/kvm_main.c                    | 43
 3 files changed, 34 insertions, 78 deletions
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index d688fe117dca..a3c353472a8c 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -2717,37 +2717,28 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
 		}
 	}
 
-	/* Acquire the mm page semaphore. */
-	down_read(&current->mm->mmap_sem);
-
-	err = get_user_pages((unsigned long int)(oper.indata + prev_ix),
+	err = get_user_pages_fast((unsigned long)(oper.indata + prev_ix),
 			     noinpages,
-			     0,  /* read access only for in data */
-			     inpages,
-			     NULL);
+			     false,  /* read access only for in data */
+			     inpages);
 
 	if (err < 0) {
-		up_read(&current->mm->mmap_sem);
 		nooutpages = noinpages = 0;
 		DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages indata\n"));
 		goto error_cleanup;
 	}
 	noinpages = err;
-	if (oper.do_cipher){
-		err = get_user_pages((unsigned long int)oper.cipher_outdata,
+	if (oper.do_cipher) {
+		err = get_user_pages_fast((unsigned long)oper.cipher_outdata,
 				     nooutpages,
-				     FOLL_WRITE, /* write access for out data */
-				     outpages,
-				     NULL);
-		up_read(&current->mm->mmap_sem);
+				     true, /* write access for out data */
+				     outpages);
 		if (err < 0) {
 			nooutpages = 0;
 			DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages outdata\n"));
 			goto error_cleanup;
 		}
 		nooutpages = err;
-	} else {
-		up_read(&current->mm->mmap_sem);
 	}
 
 	/* Add 6 to nooutpages to make room for possibly inserted buffers for storing digest and
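
For context, the cryptocop hunk drops the explicit down_read()/up_read() on current->mm->mmap_sem because get_user_pages_fast() manages that lock internally (it only takes mmap_sem when it has to fall back to the slow path). Below is a minimal sketch of this calling convention, assuming the 4-argument get_user_pages_fast(start, nr_pages, write, pages) form visible in this tree; the helper name and error handling are hypothetical, not part of the patch.

#include <linux/mm.h>

/* Hypothetical helper: pin nr_pages of a user buffer, read-only or writable. */
static int pin_user_buffer(unsigned long uaddr, int nr_pages,
			   bool write, struct page **pages)
{
	int pinned;

	/*
	 * No down_read(&current->mm->mmap_sem)/up_read() pair is needed:
	 * get_user_pages_fast() takes and releases mmap_sem itself when it
	 * cannot pin the pages locklessly.
	 */
	pinned = get_user_pages_fast(uaddr, nr_pages, write, pages);
	if (pinned < 0)
		return pinned;		/* e.g. -EFAULT on a bad address */

	if (pinned < nr_pages) {
		/* Partial pin: drop the references we did get and bail out. */
		while (pinned--)
			put_page(pages[pinned]);
		return -EFAULT;
	}
	return 0;
}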
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -848,7 +848,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 						unsigned long nr_pages,
 						struct page **pages,
 						struct vm_area_struct **vmas,
-						int *locked, bool notify_drop,
+						int *locked,
 						unsigned int flags)
 {
 	long ret, pages_done;
@@ -922,7 +922,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 		pages++;
 		start += PAGE_SIZE;
 	}
-	if (notify_drop && lock_dropped && *locked) {
+	if (lock_dropped && *locked) {
 		/*
 		 * We must let the caller know we temporarily dropped the lock
 		 * and so the critical section protected by it was lost.
@@ -959,36 +959,12 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 			   int *locked)
 {
 	return __get_user_pages_locked(current, current->mm, start, nr_pages,
-				       pages, NULL, locked, true,
+				       pages, NULL, locked,
 				       gup_flags | FOLL_TOUCH);
 }
 EXPORT_SYMBOL(get_user_pages_locked);
 
 /*
- * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows for
- * tsk, mm to be specified.
- *
- * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the
- * caller if required (just like with __get_user_pages). "FOLL_GET"
- * is set implicitly if "pages" is non-NULL.
- */
-static __always_inline long __get_user_pages_unlocked(struct task_struct *tsk,
-		struct mm_struct *mm, unsigned long start,
-		unsigned long nr_pages, struct page **pages,
-		unsigned int gup_flags)
-{
-	long ret;
-	int locked = 1;
-
-	down_read(&mm->mmap_sem);
-	ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
-				      &locked, false, gup_flags);
-	if (locked)
-		up_read(&mm->mmap_sem);
-	return ret;
-}
-
-/*
  * get_user_pages_unlocked() is suitable to replace the form:
  *
  *      down_read(&mm->mmap_sem);
@@ -1006,8 +982,16 @@ static __always_inline long __get_user_pages_unlocked(struct task_struct *tsk,
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 			     struct page **pages, unsigned int gup_flags)
 {
-	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
-					 pages, gup_flags | FOLL_TOUCH);
+	struct mm_struct *mm = current->mm;
+	int locked = 1;
+	long ret;
+
+	down_read(&mm->mmap_sem);
+	ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
+				      &locked, gup_flags | FOLL_TOUCH);
+	if (locked)
+		up_read(&mm->mmap_sem);
+	return ret;
 }
 EXPORT_SYMBOL(get_user_pages_unlocked);
 
@@ -1073,7 +1057,7 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
 		struct vm_area_struct **vmas, int *locked)
 {
 	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
-				       locked, true,
+				       locked,
 				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
 }
 EXPORT_SYMBOL(get_user_pages_remote);
@@ -1090,7 +1074,7 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
 		struct vm_area_struct **vmas)
 {
 	return __get_user_pages_locked(current, current->mm, start, nr_pages,
-				       pages, vmas, NULL, false,
+				       pages, vmas, NULL,
 				       gup_flags | FOLL_TOUCH);
 }
 EXPORT_SYMBOL(get_user_pages);
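
The mm/gup.c changes fold __get_user_pages_unlocked() into get_user_pages_unlocked() and drop the notify_drop parameter from __get_user_pages_locked(). The comment kept in gup.c describes get_user_pages_unlocked() as a replacement for the open-coded down_read()/get_user_pages()/up_read() sequence. A rough before/after sketch of a caller follows, with hypothetical helper names and the signatures visible in this diff (5-argument get_user_pages(), 4-argument get_user_pages_unlocked()); the unlocked variant may drop mmap_sem internally while it sleeps on a fault, which the open-coded form cannot do.

#include <linux/mm.h>
#include <linux/sched.h>

/* Before: the caller handles mmap_sem around get_user_pages(). */
static long pin_page_locked(unsigned long addr, struct page **page)
{
	struct mm_struct *mm = current->mm;
	long ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(addr, 1, 0 /* gup_flags */, page, NULL);
	up_read(&mm->mmap_sem);
	return ret;
}

/*
 * After: one call; mmap_sem is taken, and possibly dropped across a
 * sleeping fault (VM_FAULT_RETRY), inside get_user_pages_unlocked().
 */
static long pin_page_unlocked(unsigned long addr, struct page **page)
{
	return get_user_pages_unlocked(addr, 1, page, 0 /* gup_flags */);
}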
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 210bf820385a..d6b9370806f8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1322,17 +1322,6 @@ unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *w
 	return gfn_to_hva_memslot_prot(slot, gfn, writable);
 }
 
-static int get_user_page_nowait(unsigned long start, int write,
-		struct page **page)
-{
-	int flags = FOLL_NOWAIT | FOLL_HWPOISON;
-
-	if (write)
-		flags |= FOLL_WRITE;
-
-	return get_user_pages(start, 1, flags, page, NULL);
-}
-
 static inline int check_user_page_hwpoison(unsigned long addr)
 {
 	int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
@@ -1381,7 +1370,8 @@ static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
 			   bool *writable, kvm_pfn_t *pfn)
 {
-	struct page *page[1];
+	unsigned int flags = FOLL_HWPOISON;
+	struct page *page;
 	int npages = 0;
 
 	might_sleep();
@@ -1389,35 +1379,26 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
 	if (writable)
 		*writable = write_fault;
 
-	if (async) {
-		down_read(&current->mm->mmap_sem);
-		npages = get_user_page_nowait(addr, write_fault, page);
-		up_read(&current->mm->mmap_sem);
-	} else {
-		unsigned int flags = FOLL_HWPOISON;
-
-		if (write_fault)
-			flags |= FOLL_WRITE;
+	if (write_fault)
+		flags |= FOLL_WRITE;
+	if (async)
+		flags |= FOLL_NOWAIT;
 
-		npages = get_user_pages_unlocked(addr, 1, page, flags);
-	}
+	npages = get_user_pages_unlocked(addr, 1, &page, flags);
 	if (npages != 1)
 		return npages;
 
 	/* map read fault as writable if possible */
 	if (unlikely(!write_fault) && writable) {
-		struct page *wpage[1];
+		struct page *wpage;
 
-		npages = __get_user_pages_fast(addr, 1, 1, wpage);
-		if (npages == 1) {
+		if (__get_user_pages_fast(addr, 1, 1, &wpage) == 1) {
 			*writable = true;
-			put_page(page[0]);
-			page[0] = wpage[0];
+			put_page(page);
+			page = wpage;
 		}
-
-		npages = 1;
 	}
-	*pfn = page_to_pfn(page[0]);
+	*pfn = page_to_pfn(page);
 	return npages;
 }
 
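
In kvm_main.c the get_user_page_nowait() wrapper goes away: hva_to_pfn_slow() now builds a single gup_flags word and lets get_user_pages_unlocked() handle mmap_sem for both the async and synchronous cases. A small sketch of that flag composition, using a hypothetical helper name; the comments describe the general gup flag semantics, not KVM-specific behaviour.

#include <linux/mm.h>

/* Hypothetical helper mirroring the rewritten hva_to_pfn_slow() flag setup. */
static long pin_hva(unsigned long addr, bool write_fault, bool async,
		    struct page **page)
{
	unsigned int flags = FOLL_HWPOISON;	/* fail on hwpoisoned pages */

	if (write_fault)
		flags |= FOLL_WRITE;		/* need a writable mapping */
	if (async)
		flags |= FOLL_NOWAIT;		/* don't wait for page I/O */

	/* mmap_sem is taken and released inside get_user_pages_unlocked(). */
	return get_user_pages_unlocked(addr, 1, page, flags);
}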
