author		Ingo Molnar <mingo@elte.hu>	2008-09-05 12:56:57 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-09-05 12:56:57 -0400
commit		616ad8c44281c0c6711a72b560e01ec335ff27e0 (patch)
tree		0a20453ffedb09db6fb41a0c2208ccc2c7751d3a /mm/hugetlb.c
parent		99809963c99e1ed868d9ebeb4a5e7ee1cbe0309f (diff)
parent		b380b0d4f7dffcc235c0facefa537d4655619101 (diff)
Merge branch 'linus' into x86/defconfig
Diffstat (limited to 'mm/hugetlb.c')

-rw-r--r--	mm/hugetlb.c	62
1 file changed, 50 insertions(+), 12 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 757ca983fd99..67a71191136e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -565,7 +565,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 		huge_page_order(h));
 	if (page) {
 		if (arch_prepare_hugepage(page)) {
-			__free_pages(page, HUGETLB_PAGE_ORDER);
+			__free_pages(page, huge_page_order(h));
 			return NULL;
 		}
 		prep_new_huge_page(h, page, nid);
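
This hunk matters once multiple huge page sizes (hstates) exist: a page must be freed at the same order it was allocated with, and the fixed HUGETLB_PAGE_ORDER constant only describes the default size. Below is a user-space toy model of that buddy-allocator contract, not kernel code; all toy_* names are made up for illustration.

```c
#include <assert.h>
#include <stdlib.h>

#define PAGE_SIZE 4096
#define TOY_HUGETLB_PAGE_ORDER 9	/* order of the default huge page size only */

struct toy_page {
	void *mem;
	unsigned int order;	/* recorded so free can verify the contract */
};

/* Allocate 2^order contiguous pages, loosely modelling alloc_pages_node(). */
static struct toy_page *toy_alloc_pages(unsigned int order)
{
	struct toy_page *p = malloc(sizeof(*p));

	if (!p)
		return NULL;
	p->mem = malloc((size_t)PAGE_SIZE << order);
	if (!p->mem) {
		free(p);
		return NULL;
	}
	p->order = order;
	return p;
}

/* Free must be told the same order; the real buddy allocator corrupts
 * its free lists on a mismatch, the toy just asserts. */
static void toy_free_pages(struct toy_page *p, unsigned int order)
{
	assert(order == p->order);
	free(p->mem);
	free(p);
}

int main(void)
{
	unsigned int order = 10;	/* a non-default huge page size */
	struct toy_page *page = toy_alloc_pages(order);

	/* Freeing with TOY_HUGETLB_PAGE_ORDER here would trip the assert,
	 * which is exactly the mismatch the hunk fixes. */
	if (page)
		toy_free_pages(page, order);
	return 0;
}
```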
@@ -665,6 +665,11 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
 					__GFP_REPEAT|__GFP_NOWARN,
 					huge_page_order(h));
 
+	if (page && arch_prepare_hugepage(page)) {
+		__free_pages(page, huge_page_order(h));
+		return NULL;
+	}
+
 	spin_lock(&hugetlb_lock);
 	if (page) {
 		/*
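
The hunk above gives surplus pages allocated straight from the buddy allocator the same arch_prepare_hugepage() treatment the regular pool path already gets, freeing the page again if the architecture hook rejects it. A minimal user-space sketch of that check-after-allocate shape, with made-up names standing in for the kernel calls:

```c
#include <stdlib.h>

/* Stand-in for arch_prepare_hugepage(); returns nonzero on failure. */
static int arch_prepare(void *page)
{
	(void)page;
	return 0;	/* pretend the architecture always accepts the page */
}

/* Every allocation path runs the prepare hook, and a rejected page is
 * freed before reporting failure -- the shape the hunk adds. */
static void *alloc_prepared_page(size_t size)
{
	void *page = malloc(size);

	if (page && arch_prepare(page)) {
		free(page);
		return NULL;
	}
	return page;
}

int main(void)
{
	free(alloc_prepared_page(4096));
	return 0;
}
```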
@@ -1937,6 +1942,18 @@ retry:
 		lock_page(page);
 	}
 
+	/*
+	 * If we are going to COW a private mapping later, we examine the
+	 * pending reservations for this page now. This will ensure that
+	 * any allocations necessary to record that reservation occur outside
+	 * the spinlock.
+	 */
+	if (write_access && !(vma->vm_flags & VM_SHARED))
+		if (vma_needs_reservation(h, vma, address) < 0) {
+			ret = VM_FAULT_OOM;
+			goto backout_unlocked;
+		}
+
 	spin_lock(&mm->page_table_lock);
 	size = i_size_read(mapping->host) >> huge_page_shift(h);
 	if (idx >= size)
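
The new comment states the rule driving both fault-path hunks: vma_needs_reservation() may allocate memory to record the reservation, allocation may sleep, and sleeping is forbidden under a spinlock, so the check runs before page_table_lock is taken and failures bail out through a label that knows no lock is held. A user-space sketch of that ordering; the names fault_path and needs_reservation_record are illustrative, not kernel API:

```c
#include <pthread.h>
#include <stdlib.h>

static pthread_spinlock_t table_lock;
static int table_entry;

static int needs_reservation_record(void)
{
	/* Stand-in for vma_needs_reservation(): may allocate memory,
	 * which in the kernel could sleep, so it must run unlocked. */
	void *record = malloc(64);

	if (!record)
		return -1;
	free(record);
	return 0;
}

static int fault_path(int value)
{
	int ret = 0;

	if (needs_reservation_record() < 0) {
		ret = -1;		/* maps to VM_FAULT_OOM */
		goto backout_unlocked;	/* no lock held yet, as in the hunk */
	}

	pthread_spin_lock(&table_lock);
	table_entry = value;		/* the only work done under the lock */
	pthread_spin_unlock(&table_lock);

backout_unlocked:
	return ret;
}

int main(void)
{
	pthread_spin_init(&table_lock, PTHREAD_PROCESS_PRIVATE);
	fault_path(42);
	pthread_spin_destroy(&table_lock);
	return 0;
}
```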
@@ -1962,6 +1979,7 @@ out:
 
 backout:
 	spin_unlock(&mm->page_table_lock);
+backout_unlocked:
 	unlock_page(page);
 	put_page(page);
 	goto out;
@@ -1973,6 +1991,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte_t *ptep;
 	pte_t entry;
 	int ret;
+	struct page *pagecache_page = NULL;
 	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
 	struct hstate *h = hstate_vma(vma);
 
@@ -1989,25 +2008,44 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	entry = huge_ptep_get(ptep);
 	if (huge_pte_none(entry)) {
 		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
-		mutex_unlock(&hugetlb_instantiation_mutex);
-		return ret;
+		goto out_unlock;
 	}
 
 	ret = 0;
 
+	/*
+	 * If we are going to COW the mapping later, we examine the pending
+	 * reservations for this page now. This will ensure that any
+	 * allocations necessary to record that reservation occur outside the
+	 * spinlock. For private mappings, we also lookup the pagecache
+	 * page now as it is used to determine if a reservation has been
+	 * consumed.
+	 */
+	if (write_access && !pte_write(entry)) {
+		if (vma_needs_reservation(h, vma, address) < 0) {
+			ret = VM_FAULT_OOM;
+			goto out_unlock;
+		}
+
+		if (!(vma->vm_flags & VM_SHARED))
+			pagecache_page = hugetlbfs_pagecache_page(h,
+								vma, address);
+	}
+
 	spin_lock(&mm->page_table_lock);
 	/* Check for a racing update before calling hugetlb_cow */
 	if (likely(pte_same(entry, huge_ptep_get(ptep))))
-		if (write_access && !pte_write(entry)) {
-			struct page *page;
-			page = hugetlbfs_pagecache_page(h, vma, address);
-			ret = hugetlb_cow(mm, vma, address, ptep, entry, page);
-			if (page) {
-				unlock_page(page);
-				put_page(page);
-			}
-		}
+		if (write_access && !pte_write(entry))
+			ret = hugetlb_cow(mm, vma, address, ptep, entry,
+							pagecache_page);
 	spin_unlock(&mm->page_table_lock);
+
+	if (pagecache_page) {
+		unlock_page(pagecache_page);
+		put_page(pagecache_page);
+	}
+
+out_unlock:
 	mutex_unlock(&hugetlb_instantiation_mutex);
 
 	return ret;
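
Taken together, the hugetlb_fault() hunks move the page-cache lookup ahead of page_table_lock and funnel every exit through a single out_unlock label, so the instantiation mutex can no longer be leaked by an early return and the page reference is always dropped after the spinlocked section. A compressed user-space sketch of that control flow, with all names illustrative rather than kernel API:

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t instantiation_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-ins for the page-cache lookup and release. */
static char *lookup_pagecache_page(void) { return malloc(1); }
static void release_page(char *page) { free(page); }

static int fault(int entry_is_none, int need_cow)
{
	int ret = 0;
	char *pagecache_page = NULL;

	pthread_mutex_lock(&instantiation_mutex);

	if (entry_is_none) {
		ret = 1;		/* hugetlb_no_page() result */
		goto out_unlock;	/* was: a duplicated unlock + return */
	}

	/* Pin the resource before the inner locked section needs it. */
	if (need_cow)
		pagecache_page = lookup_pagecache_page();

	/* ... the spinlocked section would call hugetlb_cow() here ... */

	if (pagecache_page)
		release_page(pagecache_page);	/* unlock_page()/put_page() */

out_unlock:
	pthread_mutex_unlock(&instantiation_mutex);
	return ret;
}

int main(void)
{
	printf("%d %d\n", fault(1, 0), fault(0, 1));
	return 0;
}
```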
