Diffstat (limited to 'mm/hugetlb.c')
 mm/hugetlb.c | 44 +++++++++++++++++++++++++++++++++++---------
 1 file changed, 35 insertions(+), 9 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 92155db888b9..4c97c174e2e1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1942,6 +1942,15 @@ retry:
 		lock_page(page);
 	}
 
+	/*
+	 * If we are going to COW a private mapping later, we examine the
+	 * pending reservations for this page now. This will ensure that
+	 * any allocations necessary to record that reservation occur outside
+	 * the spinlock.
+	 */
+	if (write_access && !(vma->vm_flags & VM_SHARED))
+		vma_needs_reservation(h, vma, address);
+
 	spin_lock(&mm->page_table_lock);
 	size = i_size_read(mapping->host) >> huge_page_shift(h);
 	if (idx >= size)
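The rationale in the new comment is the classic pre-allocation pattern: any allocation that vma_needs_reservation() may perform to record the reservation can sleep, and sleeping is forbidden while holding a spinlock such as mm->page_table_lock, so the call is pulled in front of spin_lock(). A minimal userspace sketch of the same pattern, using pthread spinlocks rather than the kernel's locking API (the names pending_node, pending_list and record_pending are invented for illustration):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical bookkeeping record, standing in for reservation state. */
	struct pending_node {
		long address;
		struct pending_node *next;
	};

	static pthread_spinlock_t table_lock;     /* ~ mm->page_table_lock */
	static struct pending_node *pending_list; /* ~ the reservation map  */

	static void record_pending(long address)
	{
		/*
		 * Step 1: allocate while no spinlock is held.  malloc() may
		 * block, just as the allocation behind vma_needs_reservation()
		 * may sleep, so it must not run inside the locked region.
		 */
		struct pending_node *node = malloc(sizeof(*node));
		if (!node)
			return;
		node->address = address;

		/* Step 2: only quick pointer updates happen under the lock. */
		pthread_spin_lock(&table_lock);
		node->next = pending_list;
		pending_list = node;
		pthread_spin_unlock(&table_lock);
	}

	int main(void)
	{
		pthread_spin_init(&table_lock, PTHREAD_PROCESS_PRIVATE);
		record_pending(0x1000);
		printf("recorded %lx\n", pending_list->address);
		pthread_spin_destroy(&table_lock);
		return 0;
	}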
@@ -1978,6 +1987,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte_t *ptep;
 	pte_t entry;
 	int ret;
+	struct page *pagecache_page = NULL;
 	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
 	struct hstate *h = hstate_vma(vma);
 
@@ -2000,19 +2010,35 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	ret = 0;
 
+	/*
+	 * If we are going to COW the mapping later, we examine the pending
+	 * reservations for this page now. This will ensure that any
+	 * allocations necessary to record that reservation occur outside the
+	 * spinlock. For private mappings, we also lookup the pagecache
+	 * page now as it is used to determine if a reservation has been
+	 * consumed.
+	 */
+	if (write_access && !pte_write(entry)) {
+		vma_needs_reservation(h, vma, address);
+
+		if (!(vma->vm_flags & VM_SHARED))
+			pagecache_page = hugetlbfs_pagecache_page(h,
+								vma, address);
+	}
+
 	spin_lock(&mm->page_table_lock);
 	/* Check for a racing update before calling hugetlb_cow */
 	if (likely(pte_same(entry, huge_ptep_get(ptep))))
-		if (write_access && !pte_write(entry)) {
-			struct page *page;
-			page = hugetlbfs_pagecache_page(h, vma, address);
-			ret = hugetlb_cow(mm, vma, address, ptep, entry, page);
-			if (page) {
-				unlock_page(page);
-				put_page(page);
-			}
-		}
+		if (write_access && !pte_write(entry))
+			ret = hugetlb_cow(mm, vma, address, ptep, entry,
+							pagecache_page);
 	spin_unlock(&mm->page_table_lock);
+
+	if (pagecache_page) {
+		unlock_page(pagecache_page);
+		put_page(pagecache_page);
+	}
+
 	mutex_unlock(&hugetlb_instantiation_mutex);
 
 	return ret;
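This hunk restructures hugetlb_fault() around the same constraint and tightens the lock ordering: pagecache_page is looked up before page_table_lock is taken (and, per the unlock_page()/put_page() pair in the patch itself, comes back locked and referenced), and it is unlocked and released only after the spinlock is dropped. A rough userspace analogue of that ordering discipline, with a pthread mutex standing in for the sleeping page lock and invented names (pt_lock, pte_val, fault_write):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ page lock */
	static pthread_spinlock_t pt_lock;  /* ~ mm->page_table_lock */
	static int pte_val;                 /* ~ the pte being updated */

	static void fault_write(int new_val)
	{
		/* Take the sleeping lock first, outside the spinlock ... */
		pthread_mutex_lock(&page_lock);

		/* ... keep the spinlocked section short and non-sleeping ... */
		pthread_spin_lock(&pt_lock);
		pte_val = new_val;
		pthread_spin_unlock(&pt_lock);

		/* ... and release the sleeping lock only after the spinlock. */
		pthread_mutex_unlock(&page_lock);
	}

	int main(void)
	{
		pthread_spin_init(&pt_lock, PTHREAD_PROCESS_PRIVATE);
		fault_write(42);
		printf("pte_val = %d\n", pte_val);
		pthread_spin_destroy(&pt_lock);
		return 0;
	}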