about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorHugh Dickins <hugh.dickins@tiscali.co.uk>2009-06-23 08:49:05 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2009-06-23 14:23:33 -0400
commit788c7df451467df71638dd79a2d63d78c6e13b9c (patch)
tree30714df634b620784b7a04fefc51d8c09291de38 /mm
parenta800faec1b21d7133b5f0c8c6dac593b7c4e118d (diff)
hugetlb: fault flags instead of write_access
handle_mm_fault() is now passing fault flags rather than write_access down to hugetlb_fault(), so better recognize that in hugetlb_fault(), and in hugetlb_no_page().

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Acked-by: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- mm/hugetlb.c 17
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a56e6f3ce979..d0351e31f474 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1985,7 +1985,7 @@ static struct page *hugetlbfs_pagecache_page(struct hstate *h,
1985} 1985}
1986 1986
1987static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, 1987static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
1988 unsigned long address, pte_t *ptep, int write_access) 1988 unsigned long address, pte_t *ptep, unsigned int flags)
1989{ 1989{
1990 struct hstate *h = hstate_vma(vma); 1990 struct hstate *h = hstate_vma(vma);
1991 int ret = VM_FAULT_SIGBUS; 1991 int ret = VM_FAULT_SIGBUS;
@@ -2053,7 +2053,7 @@ retry:
2053 * any allocations necessary to record that reservation occur outside 2053 * any allocations necessary to record that reservation occur outside
2054 * the spinlock. 2054 * the spinlock.
2055 */ 2055 */
2056 if (write_access && !(vma->vm_flags & VM_SHARED)) 2056 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
2057 if (vma_needs_reservation(h, vma, address) < 0) { 2057 if (vma_needs_reservation(h, vma, address) < 0) {
2058 ret = VM_FAULT_OOM; 2058 ret = VM_FAULT_OOM;
2059 goto backout_unlocked; 2059 goto backout_unlocked;
@@ -2072,7 +2072,7 @@ retry:
2072 && (vma->vm_flags & VM_SHARED))); 2072 && (vma->vm_flags & VM_SHARED)));
2073 set_huge_pte_at(mm, address, ptep, new_pte); 2073 set_huge_pte_at(mm, address, ptep, new_pte);
2074 2074
2075 if (write_access && !(vma->vm_flags & VM_SHARED)) { 2075 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
2076 /* Optimization, do the COW without a second fault */ 2076 /* Optimization, do the COW without a second fault */
2077 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page); 2077 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
2078 } 2078 }
@@ -2091,7 +2091,7 @@ backout_unlocked:
2091} 2091}
2092 2092
2093int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 2093int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2094 unsigned long address, int write_access) 2094 unsigned long address, unsigned int flags)
2095{ 2095{
2096 pte_t *ptep; 2096 pte_t *ptep;
2097 pte_t entry; 2097 pte_t entry;
@@ -2112,7 +2112,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2112 mutex_lock(&hugetlb_instantiation_mutex); 2112 mutex_lock(&hugetlb_instantiation_mutex);
2113 entry = huge_ptep_get(ptep); 2113 entry = huge_ptep_get(ptep);
2114 if (huge_pte_none(entry)) { 2114 if (huge_pte_none(entry)) {
2115 ret = hugetlb_no_page(mm, vma, address, ptep, write_access); 2115 ret = hugetlb_no_page(mm, vma, address, ptep, flags);
2116 goto out_mutex; 2116 goto out_mutex;
2117 } 2117 }
2118 2118
@@ -2126,7 +2126,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2126 * page now as it is used to determine if a reservation has been 2126 * page now as it is used to determine if a reservation has been
2127 * consumed. 2127 * consumed.
2128 */ 2128 */
2129 if (write_access && !pte_write(entry)) { 2129 if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
2130 if (vma_needs_reservation(h, vma, address) < 0) { 2130 if (vma_needs_reservation(h, vma, address) < 0) {
2131 ret = VM_FAULT_OOM; 2131 ret = VM_FAULT_OOM;
2132 goto out_mutex; 2132 goto out_mutex;
@@ -2143,7 +2143,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2143 goto out_page_table_lock; 2143 goto out_page_table_lock;
2144 2144
2145 2145
2146 if (write_access) { 2146 if (flags & FAULT_FLAG_WRITE) {
2147 if (!pte_write(entry)) { 2147 if (!pte_write(entry)) {
2148 ret = hugetlb_cow(mm, vma, address, ptep, entry, 2148 ret = hugetlb_cow(mm, vma, address, ptep, entry,
2149 pagecache_page); 2149 pagecache_page);
@@ -2152,7 +2152,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2152 entry = pte_mkdirty(entry); 2152 entry = pte_mkdirty(entry);
2153 } 2153 }
2154 entry = pte_mkyoung(entry); 2154 entry = pte_mkyoung(entry);
2155 if (huge_ptep_set_access_flags(vma, address, ptep, entry, write_access)) 2155 if (huge_ptep_set_access_flags(vma, address, ptep, entry,
2156 flags & FAULT_FLAG_WRITE))
2156 update_mmu_cache(vma, address, entry); 2157 update_mmu_cache(vma, address, entry);
2157 2158
2158out_page_table_lock: 2159out_page_table_lock: