author     Davidlohr Bueso <davidlohr@hp.com>             2014-08-06 19:06:47 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org> 2014-08-06 21:01:19 -0400
commit     ad4404a226ea92f2966f0e5378614e15ff4a7c76 (patch)
tree       9c57a0c3429a45c881c10c7948bbd2daefba015c /mm
parent     2f4612af43d4854c892f5ef8ed7a98b6492aee44 (diff)
mm,hugetlb: simplify error handling in hugetlb_cow()
When returning from hugetlb_cow(), we always (1) put back the refcount
for each referenced page -- always 'old', and 'new' if the allocation was
successful -- and (2) retake the page table lock right before returning,
as the caller expects. This logic can be simplified and encapsulated,
as proposed in this patch. In addition to cleaner code, we also shave a
few bytes off the instruction text:
   text    data     bss     dec     hex filename
  28399     462   41328   70189   1122d mm/hugetlb.o-baseline
  28367     462   41328   70157   1120d mm/hugetlb.o-patched
Passes libhugetlbfs testcases.
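
The change boils down to the kernel's usual goto-based unwind: every
failure path sets 'ret', jumps to a label that drops exactly the
references taken so far, and the page table lock the caller expects to
hold is retaken in a single place. Below is a minimal userspace sketch
of that idiom -- hypothetical names, plain C, with a pthread mutex
standing in for the page table lock and malloc/free standing in for page
references; it is not the hugetlb code itself.

/* cow_unwind_sketch.c -- build with: cc -pthread cow_unwind_sketch.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER; /* stand-in for the page table lock */

struct ref { int payload; };                    /* hypothetical refcounted object */

static struct ref *take_ref(void) { return calloc(1, sizeof(struct ref)); }
static void put_ref(struct ref *r) { free(r); } /* stand-in for page_cache_release() */
static int prepare(struct ref *r) { (void)r; return 0; } /* stand-in for anon_vma_prepare() */

/* Called with 'ptl' held; must return with it held, just as hugetlb_cow() must. */
static int cow_like(struct ref *old_ref)
{
        struct ref *new_ref;
        int ret = 0;

        pthread_mutex_unlock(&ptl);             /* allocation may sleep */

        new_ref = take_ref();
        if (!new_ref) {
                ret = -1;                       /* VM_FAULT_OOM/SIGBUS in the real code */
                goto out_release_old;
        }

        if (prepare(new_ref) < 0) {             /* a later failure drops both references */
                ret = -1;
                goto out_release_all;
        }

        /* ... copy old into new; the success path also falls through the unwind ... */

out_release_all:
        put_ref(new_ref);
out_release_old:
        put_ref(old_ref);

        pthread_mutex_lock(&ptl);               /* caller expects the lock to be held */
        return ret;
}

int main(void)
{
        pthread_mutex_lock(&ptl);
        int ret = cow_like(take_ref());
        pthread_mutex_unlock(&ptl);
        printf("cow_like() returned %d\n", ret);
        return 0;
}

With the unwind labels in place, a new failure case is a one-line goto
rather than another copy of the release-and-relock sequence, which is
where the 16-insertion/19-deletion diffstat below comes from.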
Signed-off-by: Davidlohr Bueso <davidlohr@hp.com>
Cc: Aswin Chandramouleeswaran <aswin@hp.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c | 35
1 file changed, 16 insertions(+), 19 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b94752ae791b..e84d22ce5de8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2808,7 +2808,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 {
         struct hstate *h = hstate_vma(vma);
         struct page *old_page, *new_page;
-        int outside_reserve = 0;
+        int ret = 0, outside_reserve = 0;
         unsigned long mmun_start;       /* For mmu_notifiers */
         unsigned long mmun_end;         /* For mmu_notifiers */

@@ -2838,14 +2838,14 @@ retry_avoidcopy:

         page_cache_get(old_page);

-        /* Drop page table lock as buddy allocator may be called */
+        /*
+         * Drop page table lock as buddy allocator may be called. It will
+         * be acquired again before returning to the caller, as expected.
+         */
         spin_unlock(ptl);
         new_page = alloc_huge_page(vma, address, outside_reserve);

         if (IS_ERR(new_page)) {
-                long err = PTR_ERR(new_page);
-                page_cache_release(old_page);
-
                 /*
                  * If a process owning a MAP_PRIVATE mapping fails to COW,
                  * it is due to references held by a child and an insufficient
@@ -2854,6 +2854,7 @@ retry_avoidcopy:
                  * may get SIGKILLed if it later faults.
                  */
                 if (outside_reserve) {
+                        page_cache_release(old_page);
                         BUG_ON(huge_pte_none(pte));
                         unmap_ref_private(mm, vma, old_page, address);
                         BUG_ON(huge_pte_none(pte));
@@ -2869,12 +2870,9 @@ retry_avoidcopy:
                         return 0;
                 }

-                /* Caller expects lock to be held */
-                spin_lock(ptl);
-                if (err == -ENOMEM)
-                        return VM_FAULT_OOM;
-                else
-                        return VM_FAULT_SIGBUS;
+                ret = (PTR_ERR(new_page) == -ENOMEM) ?
+                        VM_FAULT_OOM : VM_FAULT_SIGBUS;
+                goto out_release_old;
         }

         /*
@@ -2882,11 +2880,8 @@ retry_avoidcopy:
          * anon_vma prepared.
          */
         if (unlikely(anon_vma_prepare(vma))) {
-                page_cache_release(new_page);
-                page_cache_release(old_page);
-                /* Caller expects lock to be held */
-                spin_lock(ptl);
-                return VM_FAULT_OOM;
+                ret = VM_FAULT_OOM;
+                goto out_release_all;
         }

         copy_user_huge_page(new_page, old_page, address, vma,
@@ -2896,6 +2891,7 @@ retry_avoidcopy:
         mmun_start = address & huge_page_mask(h);
         mmun_end = mmun_start + huge_page_size(h);
         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+
         /*
          * Retake the page table lock to check for racing updates
          * before the page tables are altered
@@ -2916,12 +2912,13 @@ retry_avoidcopy:
         }
         spin_unlock(ptl);
         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+out_release_all:
         page_cache_release(new_page);
+out_release_old:
         page_cache_release(old_page);

-        /* Caller expects lock to be held */
-        spin_lock(ptl);
-        return 0;
+        spin_lock(ptl); /* Caller expects lock to be held */
+        return ret;
 }

 /* Return the pagecache page at a given address within a VMA */