author     Andy Whitcroft <apw@shadowen.org>               2008-08-12 18:08:47 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-08-12 19:07:28 -0400
commit     57303d80175e10056bf51206f9961d586f02f967
tree       3979c1d3e6bf154227ef94245c5a7b6141512211 /mm/hugetlb.c
parent     ff1a4a7b14ae146142b1c93a001304caf662ae13
hugetlbfs: allocate structures for reservation tracking outside of spinlocks
In the normal case, hugetlbfs reserves hugepages at map time so that the
pages exist for future faults. A struct file_region is used to track when
and where reservations have been consumed. These file_regions are
allocated as necessary with kmalloc(), which can sleep while the
mm->page_table_lock is held. This is wrong and triggers a may-sleep
warning when PREEMPT is enabled.
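Schematically, the broken ordering looks like the sketch below. This is an
illustrative fragment, not the kernel's actual code: record_reservation_locked()
is a hypothetical helper, and struct file_region here simply mirrors the
reservation-tracking structure in mm/hugetlb.c. The real allocation happens
deeper in the region-tracking code, but the effect is the same: a GFP_KERNEL
allocation, which may sleep, runs while a spinlock is held.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Mirrors the reservation-tracking structure in mm/hugetlb.c. */
struct file_region {
	struct list_head link;
	long from;
	long to;
};

/* Hypothetical helper showing the buggy ordering. */
static int record_reservation_locked(spinlock_t *lock, struct list_head *head,
				     long from, long to)
{
	struct file_region *nrg;

	spin_lock(lock);
	nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);	/* BUG: may sleep here */
	if (!nrg) {
		spin_unlock(lock);
		return -ENOMEM;
	}
	nrg->from = from;
	nrg->to = to;
	list_add(&nrg->link, head);
	spin_unlock(lock);
	return 0;
}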
Updates to the underlying file_region are done in two phases. The first
phase prepares the region for the change, allocating any necessary memory,
without actually making the change. The second phase actually commits the
change. This patch makes use of this by checking the reservations before
the page_table_lock is taken, triggering any necessary allocations. The
check may then be safely repeated within the lock without any further
allocations being required.
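A minimal sketch of the resulting prepare/commit split follows, reusing the
struct file_region sketch above. The names reserve_prepare(), reserve_commit()
and record_reservation() are hypothetical, chosen for illustration; in the
patched code the prepare step is vma_needs_reservation(), called before the
lock is taken as the hunks below show (the underlying region bookkeeping in
mm/hugetlb.c is done by the region_chg()/region_add() pair).

/* Phase one: runs before the lock is taken, so kmalloc() may sleep. */
static struct file_region *reserve_prepare(long from, long to)
{
	struct file_region *nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);

	if (nrg) {
		nrg->from = from;
		nrg->to = to;
	}
	return nrg;
}

/* Phase two: runs under the lock; pure list manipulation, never sleeps. */
static void reserve_commit(struct list_head *head, struct file_region *nrg)
{
	list_add(&nrg->link, head);
}

/* The fixed ordering: allocate first, then take the lock and commit. */
static int record_reservation(spinlock_t *lock, struct list_head *head,
			      long from, long to)
{
	struct file_region *nrg = reserve_prepare(from, to);

	if (!nrg)
		return -ENOMEM;

	spin_lock(lock);
	reserve_commit(head, nrg);
	spin_unlock(lock);
	return 0;
}

If the check repeated under the lock finds that the change is no longer
needed, the pre-allocated structure can simply be freed once the lock is
dropped, which is why repeating the check inside the lock needs no allocation.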
Credit to Mel Gorman for diagnosing this failure and for initial versions
of the patch.
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Tested-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
 mm/hugetlb.c | 44 +++++++++++++++++++++++++++++++++++---------
 1 file changed, 35 insertions(+), 9 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 92155db888b9..4c97c174e2e1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1942,6 +1942,15 @@ retry:
 		lock_page(page);
 	}
 
+	/*
+	 * If we are going to COW a private mapping later, we examine the
+	 * pending reservations for this page now. This will ensure that
+	 * any allocations necessary to record that reservation occur outside
+	 * the spinlock.
+	 */
+	if (write_access && !(vma->vm_flags & VM_SHARED))
+		vma_needs_reservation(h, vma, address);
+
 	spin_lock(&mm->page_table_lock);
 	size = i_size_read(mapping->host) >> huge_page_shift(h);
 	if (idx >= size)
@@ -1978,6 +1987,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte_t *ptep;
 	pte_t entry;
 	int ret;
+	struct page *pagecache_page = NULL;
 	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
 	struct hstate *h = hstate_vma(vma);
 
@@ -2000,19 +2010,35 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	ret = 0;
 
+	/*
+	 * If we are going to COW the mapping later, we examine the pending
+	 * reservations for this page now. This will ensure that any
+	 * allocations necessary to record that reservation occur outside the
+	 * spinlock. For private mappings, we also lookup the pagecache
+	 * page now as it is used to determine if a reservation has been
+	 * consumed.
+	 */
+	if (write_access && !pte_write(entry)) {
+		vma_needs_reservation(h, vma, address);
+
+		if (!(vma->vm_flags & VM_SHARED))
+			pagecache_page = hugetlbfs_pagecache_page(h,
+								vma, address);
+	}
+
 	spin_lock(&mm->page_table_lock);
 	/* Check for a racing update before calling hugetlb_cow */
 	if (likely(pte_same(entry, huge_ptep_get(ptep))))
-		if (write_access && !pte_write(entry)) {
-			struct page *page;
-			page = hugetlbfs_pagecache_page(h, vma, address);
-			ret = hugetlb_cow(mm, vma, address, ptep, entry, page);
-			if (page) {
-				unlock_page(page);
-				put_page(page);
-			}
-		}
+		if (write_access && !pte_write(entry))
+			ret = hugetlb_cow(mm, vma, address, ptep, entry,
+							pagecache_page);
 	spin_unlock(&mm->page_table_lock);
+
+	if (pagecache_page) {
+		unlock_page(pagecache_page);
+		put_page(pagecache_page);
+	}
+
 	mutex_unlock(&hugetlb_instantiation_mutex);
 
 	return ret;