author    Andy Whitcroft <apw@shadowen.org>    2008-08-12 18:08:49 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2008-08-12 19:07:28 -0400
commit    2b26736c88db85c038e04c2306d0745553e69602 (patch)
tree      3914f58d18e93e33eee1256027551954ff24a432
parent    57303d80175e10056bf51206f9961d586f02f967 (diff)
allocate structures for reservation tracking in hugetlbfs outside of spinlocks v2
[Andrew, this should replace the previous version, which did not check the
returns from the region prepare for errors.  This has been tested by us and
Gerald and it looks good.

Bah, while reviewing the locking based on your previous email I spotted that
we need to check the return from the vma_needs_reservation call for
allocation errors.  Here is an updated patch to correct this.  This passes
testing here.]

Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Tested-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
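The fix follows a standard kernel error-handling idiom: check the helper's
return value and, on failure, unwind through a goto label that releases
exactly what has been acquired so far. Below is a minimal user-space sketch
of that idiom, not the kernel code itself; reserve_tracking() and FAULT_OOM
are hypothetical stand-ins for vma_needs_reservation() and VM_FAULT_OOM.

#include <stdio.h>
#include <stdlib.h>

#define FAULT_OOM 1	/* stand-in for the kernel's VM_FAULT_OOM */

/*
 * Hypothetical stand-in for vma_needs_reservation(): allocates the
 * reservation-tracking structure and returns < 0 on failure, the same
 * return convention the patch starts checking.
 */
static int reserve_tracking(void **trackingp)
{
	*trackingp = malloc(64);
	return *trackingp ? 0 : -1;
}

static int fault_path(void)
{
	void *tracking = NULL;
	int ret = 0;

	/* allocate before any lock is taken, and check the result */
	if (reserve_tracking(&tracking) < 0) {
		ret = FAULT_OOM;
		goto backout_unlocked;	/* nothing is locked yet */
	}

	/* ... take the lock, install the entry, drop the lock ... */

	free(tracking);
backout_unlocked:
	return ret;
}

int main(void)
{
	printf("fault_path() = %d\n", fault_path());
	return 0;
}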
 mm/hugetlb.c | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4c97c174e2e1..67a71191136e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1949,7 +1949,10 @@ retry:
 	 * the spinlock.
 	 */
 	if (write_access && !(vma->vm_flags & VM_SHARED))
-		vma_needs_reservation(h, vma, address);
+		if (vma_needs_reservation(h, vma, address) < 0) {
+			ret = VM_FAULT_OOM;
+			goto backout_unlocked;
+		}
 
 	spin_lock(&mm->page_table_lock);
 	size = i_size_read(mapping->host) >> huge_page_shift(h);
@@ -1976,6 +1979,7 @@ out:
 
 backout:
 	spin_unlock(&mm->page_table_lock);
+backout_unlocked:
 	unlock_page(page);
 	put_page(page);
 	goto out;
@@ -2004,8 +2008,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	entry = huge_ptep_get(ptep);
 	if (huge_pte_none(entry)) {
 		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
-		mutex_unlock(&hugetlb_instantiation_mutex);
-		return ret;
+		goto out_unlock;
 	}
 
 	ret = 0;
@@ -2019,7 +2022,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * consumed.
 	 */
 	if (write_access && !pte_write(entry)) {
-		vma_needs_reservation(h, vma, address);
+		if (vma_needs_reservation(h, vma, address) < 0) {
+			ret = VM_FAULT_OOM;
+			goto out_unlock;
+		}
 
 		if (!(vma->vm_flags & VM_SHARED))
 			pagecache_page = hugetlbfs_pagecache_page(h,
@@ -2039,6 +2045,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		put_page(pagecache_page);
 	}
 
+out_unlock:
 	mutex_unlock(&hugetlb_instantiation_mutex);
 
 	return ret;
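The hunks above also preserve the ordering the patch title describes: the
reservation-tracking structures are allocated before page_table_lock is
taken, because the allocation may sleep and sleeping under a spinlock is
forbidden. A rough user-space sketch of that ordering rule, using a pthread
spinlock in place of the kernel's; do_fault() and the 64-byte allocation are
illustrative, not kernel API.

#include <pthread.h>
#include <stdlib.h>

static pthread_spinlock_t lock;

static int do_fault(void)
{
	/* anything that can fail or sleep happens before the spinlock */
	void *tracking = malloc(64);
	if (!tracking)
		return -1;	/* unlocked failure path: plain return */

	pthread_spin_lock(&lock);
	/* critical section: no allocation, no sleeping */
	pthread_spin_unlock(&lock);

	free(tracking);
	return 0;
}

int main(void)
{
	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	return do_fault() ? EXIT_FAILURE : EXIT_SUCCESS;
}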