diff options
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r-- | mm/hugetlb.c | 35 |
1 file changed, 16 insertions(+), 19 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b94752ae791b..e84d22ce5de8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2808,7 +2808,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2808 | { | 2808 | { |
2809 | struct hstate *h = hstate_vma(vma); | 2809 | struct hstate *h = hstate_vma(vma); |
2810 | struct page *old_page, *new_page; | 2810 | struct page *old_page, *new_page; |
2811 | int outside_reserve = 0; | 2811 | int ret = 0, outside_reserve = 0; |
2812 | unsigned long mmun_start; /* For mmu_notifiers */ | 2812 | unsigned long mmun_start; /* For mmu_notifiers */ |
2813 | unsigned long mmun_end; /* For mmu_notifiers */ | 2813 | unsigned long mmun_end; /* For mmu_notifiers */ |
2814 | 2814 | ||
@@ -2838,14 +2838,14 @@ retry_avoidcopy:
2838 | 2838 | ||
2839 | page_cache_get(old_page); | 2839 | page_cache_get(old_page); |
2840 | 2840 | ||
2841 | /* Drop page table lock as buddy allocator may be called */ | 2841 | /* |
2842 | * Drop page table lock as buddy allocator may be called. It will | ||
2843 | * be acquired again before returning to the caller, as expected. | ||
2844 | */ | ||
2842 | spin_unlock(ptl); | 2845 | spin_unlock(ptl); |
2843 | new_page = alloc_huge_page(vma, address, outside_reserve); | 2846 | new_page = alloc_huge_page(vma, address, outside_reserve); |
2844 | 2847 | ||
2845 | if (IS_ERR(new_page)) { | 2848 | if (IS_ERR(new_page)) { |
2846 | long err = PTR_ERR(new_page); | ||
2847 | page_cache_release(old_page); | ||
2848 | |||
2849 | /* | 2849 | /* |
2850 | * If a process owning a MAP_PRIVATE mapping fails to COW, | 2850 | * If a process owning a MAP_PRIVATE mapping fails to COW, |
2851 | * it is due to references held by a child and an insufficient | 2851 | * it is due to references held by a child and an insufficient |
@@ -2854,6 +2854,7 @@ retry_avoidcopy:
2854 | * may get SIGKILLed if it later faults. | 2854 | * may get SIGKILLed if it later faults. |
2855 | */ | 2855 | */ |
2856 | if (outside_reserve) { | 2856 | if (outside_reserve) { |
2857 | page_cache_release(old_page); | ||
2857 | BUG_ON(huge_pte_none(pte)); | 2858 | BUG_ON(huge_pte_none(pte)); |
2858 | unmap_ref_private(mm, vma, old_page, address); | 2859 | unmap_ref_private(mm, vma, old_page, address); |
2859 | BUG_ON(huge_pte_none(pte)); | 2860 | BUG_ON(huge_pte_none(pte)); |
@@ -2869,12 +2870,9 @@ retry_avoidcopy:
2869 | return 0; | 2870 | return 0; |
2870 | } | 2871 | } |
2871 | 2872 | ||
2872 | /* Caller expects lock to be held */ | 2873 | ret = (PTR_ERR(new_page) == -ENOMEM) ? |
2873 | spin_lock(ptl); | 2874 | VM_FAULT_OOM : VM_FAULT_SIGBUS; |
2874 | if (err == -ENOMEM) | 2875 | goto out_release_old; |
2875 | return VM_FAULT_OOM; | ||
2876 | else | ||
2877 | return VM_FAULT_SIGBUS; | ||
2878 | } | 2876 | } |
2879 | 2877 | ||
2880 | /* | 2878 | /* |
@@ -2882,11 +2880,8 @@ retry_avoidcopy:
2882 | * anon_vma prepared. | 2880 | * anon_vma prepared. |
2883 | */ | 2881 | */ |
2884 | if (unlikely(anon_vma_prepare(vma))) { | 2882 | if (unlikely(anon_vma_prepare(vma))) { |
2885 | page_cache_release(new_page); | 2883 | ret = VM_FAULT_OOM; |
2886 | page_cache_release(old_page); | 2884 | goto out_release_all; |
2887 | /* Caller expects lock to be held */ | ||
2888 | spin_lock(ptl); | ||
2889 | return VM_FAULT_OOM; | ||
2890 | } | 2885 | } |
2891 | 2886 | ||
2892 | copy_user_huge_page(new_page, old_page, address, vma, | 2887 | copy_user_huge_page(new_page, old_page, address, vma, |
@@ -2896,6 +2891,7 @@ retry_avoidcopy:
2896 | mmun_start = address & huge_page_mask(h); | 2891 | mmun_start = address & huge_page_mask(h); |
2897 | mmun_end = mmun_start + huge_page_size(h); | 2892 | mmun_end = mmun_start + huge_page_size(h); |
2898 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); | 2893 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); |
2894 | |||
2899 | /* | 2895 | /* |
2900 | * Retake the page table lock to check for racing updates | 2896 | * Retake the page table lock to check for racing updates |
2901 | * before the page tables are altered | 2897 | * before the page tables are altered |
@@ -2916,12 +2912,13 @@ retry_avoidcopy:
2916 | } | 2912 | } |
2917 | spin_unlock(ptl); | 2913 | spin_unlock(ptl); |
2918 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); | 2914 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); |
2915 | out_release_all: | ||
2919 | page_cache_release(new_page); | 2916 | page_cache_release(new_page); |
2917 | out_release_old: | ||
2920 | page_cache_release(old_page); | 2918 | page_cache_release(old_page); |
2921 | 2919 | ||
2922 | /* Caller expects lock to be held */ | 2920 | spin_lock(ptl); /* Caller expects lock to be held */ |
2923 | spin_lock(ptl); | 2921 | return ret; |
2924 | return 0; | ||
2925 | } | 2922 | } |
2926 | 2923 | ||
2927 | /* Return the pagecache page at a given address within a VMA */ | 2924 | /* Return the pagecache page at a given address within a VMA */ |