author    | Davidlohr Bueso <davidlohr@hp.com>             | 2014-08-06 19:06:45 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-08-06 21:01:19 -0400
commit    | 2f4612af43d4854c892f5ef8ed7a98b6492aee44 (patch)
tree      | 4c8534fb31083bb59f225124f252abd7fa8a3486 /mm
parent    | eb39d618f9e80f81cfc5788cf1b252d141c2f0c3 (diff)
mm,hugetlb: make unmap_ref_private() return void
This function always returns 1, so there is no need to check its return value
in hugetlb_cow(). Dropping that check also lets us get rid of the unnecessary
WARN_ON call. While this logic perhaps existed as a way of identifying future
unmap_ref_private() mishandling, in reality it serves no apparent purpose.
Signed-off-by: Davidlohr Bueso <davidlohr@hp.com>
Cc: Aswin Chandramouleeswaran <aswin@hp.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
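For illustration only (not part of the commit): a minimal, self-contained C sketch of the same refactoring pattern, using hypothetical stand-in names (do_work_old, do_work) rather than kernel APIs. It shows why a caller-side check of a function that can only return 1 is dead code, and why dropping the return value also removes the unreachable warning path, mirroring what the diff below does to unmap_ref_private() and hugetlb_cow().

```c
/*
 * Illustration only: stand-alone sketch of the refactoring pattern.
 * do_work_old()/do_work() play the role of unmap_ref_private(); the
 * names and types are simplified stand-ins, not kernel APIs.
 */
#include <stdio.h>

/* Before: the function reports "success", but it can only ever return 1. */
static int do_work_old(int arg)
{
	printf("working on %d\n", arg);
	return 1;		/* the only possible return value */
}

/* After: a constant return carries no information, so return void. */
static void do_work(int arg)
{
	printf("working on %d\n", arg);
}

int main(void)
{
	/* Before: the branch is always taken, so the else path is dead. */
	if (do_work_old(42)) {
		/* retry/recovery logic lives here */
	} else {
		/* unreachable; equivalent to the removed WARN_ON_ONCE(1) */
	}

	/* After: call unconditionally and keep only the live path. */
	do_work(42);
	return 0;
}
```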
Diffstat (limited to 'mm')
-rw-r--r-- | mm/hugetlb.c | 32
1 file changed, 14 insertions(+), 18 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7a0a73d2fcff..b94752ae791b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2754,8 +2754,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
  * from other VMAs and let the children be SIGKILLed if they are faulting the
  * same region.
  */
-static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
-			     struct page *page, unsigned long address)
+static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+			      struct page *page, unsigned long address)
 {
 	struct hstate *h = hstate_vma(vma);
 	struct vm_area_struct *iter_vma;
@@ -2794,8 +2794,6 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 			      address + huge_page_size(h), page);
 	}
 	mutex_unlock(&mapping->i_mmap_mutex);
-
-	return 1;
 }
 
 /*
@@ -2857,20 +2855,18 @@ retry_avoidcopy:
 		 */
 		if (outside_reserve) {
 			BUG_ON(huge_pte_none(pte));
-			if (unmap_ref_private(mm, vma, old_page, address)) {
-				BUG_ON(huge_pte_none(pte));
-				spin_lock(ptl);
-				ptep = huge_pte_offset(mm, address & huge_page_mask(h));
-				if (likely(ptep &&
-					   pte_same(huge_ptep_get(ptep), pte)))
-					goto retry_avoidcopy;
-				/*
-				 * race occurs while re-acquiring page table
-				 * lock, and our job is done.
-				 */
-				return 0;
-			}
-			WARN_ON_ONCE(1);
+			unmap_ref_private(mm, vma, old_page, address);
+			BUG_ON(huge_pte_none(pte));
+			spin_lock(ptl);
+			ptep = huge_pte_offset(mm, address & huge_page_mask(h));
+			if (likely(ptep &&
+				   pte_same(huge_ptep_get(ptep), pte)))
+				goto retry_avoidcopy;
+			/*
+			 * race occurs while re-acquiring page table
+			 * lock, and our job is done.
+			 */
+			return 0;
 		}
 
 		/* Caller expects lock to be held */