author		David Gibson <david@gibson.dropbear.id.au>	2008-10-16 01:01:11 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-16 14:21:29 -0400
commit		b4d1d99fdd8b98fb03dfd6ef9b0ece220de38640 (patch)
tree		e2bdf12aa53b71430edd312f441c79fdd6114535 /mm/hugetlb.c
parent		db99100d2ed40dd9736fcb1adb3657a98f9bcfd9 (diff)
hugetlb: handle updating of ACCESSED and DIRTY in hugetlb_fault()
The page fault path for normal pages, if the fault is neither a no-page
fault nor a write-protect fault, will update the DIRTY and ACCESSED bits
in the page table appropriately.

The hugepage fault path, however, does not do this, handling only no-page
or write-protect type faults.  It assumes that either the ACCESSED and
DIRTY bits are irrelevant for hugepages (usually true, since they are
never swapped) or that they are handled by the arch code.

This is inconvenient for some software-loaded TLB architectures, where
the _PAGE_ACCESSED (_PAGE_DIRTY) bits need to be set to enable read
(write) access to the page at TLB miss time.  This could be worked around
in the arch TLB miss code, but the TLB miss fast path is easier to keep
simple if the hugetlb_fault() path handles this, as the normal page fault
path does.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Adam Litke <agl@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
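[Editor's note: for context, the "normal page fault path" the message refers
to is the tail of handle_pte_fault() in mm/memory.c.  The sketch below is a
simplified rendering of that era's logic, not part of this patch; the
else-branch that calls flush_tlb_page() when the PTE did not change, and the
surrounding page_table_lock handling, are elided.]

	/*
	 * Simplified sketch of the normal-page fault tail, cf.
	 * handle_pte_fault() circa 2.6.27 (not part of this patch).
	 * A write fault on a write-protected PTE goes to COW; a write
	 * fault on a writable PTE marks it dirty; every fault marks it
	 * accessed; the arch hook then reports whether anything changed
	 * and the MMU cache needs updating.
	 */
	if (write_access) {
		if (!pte_write(entry))
			return do_wp_page(mm, vma, address,
					pte, pmd, ptl, entry);
		entry = pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	if (ptep_set_access_flags(vma, address, pte, entry, write_access))
		update_mmu_cache(vma, address, entry);

The patch below makes hugetlb_fault() follow the same pattern, using the
hugepage variants of the helpers.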
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	23
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 67a71191136e..38633864a93e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2008,7 +2008,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	entry = huge_ptep_get(ptep);
 	if (huge_pte_none(entry)) {
 		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
-		goto out_unlock;
+		goto out_mutex;
 	}
 
 	ret = 0;
@@ -2024,7 +2024,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (write_access && !pte_write(entry)) {
 		if (vma_needs_reservation(h, vma, address) < 0) {
 			ret = VM_FAULT_OOM;
-			goto out_unlock;
+			goto out_mutex;
 		}
 
 		if (!(vma->vm_flags & VM_SHARED))
@@ -2034,10 +2034,23 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	spin_lock(&mm->page_table_lock);
 	/* Check for a racing update before calling hugetlb_cow */
-	if (likely(pte_same(entry, huge_ptep_get(ptep))))
-		if (write_access && !pte_write(entry))
+	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
+		goto out_page_table_lock;
+
+
+	if (write_access) {
+		if (!pte_write(entry)) {
 			ret = hugetlb_cow(mm, vma, address, ptep, entry,
 							pagecache_page);
+			goto out_page_table_lock;
+		}
+		entry = pte_mkdirty(entry);
+	}
+	entry = pte_mkyoung(entry);
+	if (huge_ptep_set_access_flags(vma, address, ptep, entry, write_access))
+		update_mmu_cache(vma, address, entry);
+
+out_page_table_lock:
 	spin_unlock(&mm->page_table_lock);
 
 	if (pagecache_page) {
@@ -2045,7 +2058,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		put_page(pagecache_page);
 	}
 
-out_unlock:
+out_mutex:
 	mutex_unlock(&hugetlb_instantiation_mutex);
 
 	return ret;
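[Editor's note: to see why a software-loaded TLB architecture benefits,
consider a hugepage TLB refill handler along the following lines.  This is
a hypothetical sketch, not code from this patch or from any particular
port; lookup_huge_pte() and load_tlb_entry() are stand-ins for
arch-specific primitives.]

	/*
	 * Hypothetical software-TLB refill fast path for a hugepage miss.
	 * lookup_huge_pte() and load_tlb_entry() are illustrative stand-ins,
	 * not real kernel interfaces.
	 */
	static int huge_tlb_refill(struct mm_struct *mm, unsigned long addr,
				   int write)
	{
		pte_t *ptep = lookup_huge_pte(mm, addr);	/* hypothetical */
		pte_t entry = huge_ptep_get(ptep);

		/*
		 * Refuse to load an entry whose ACCESSED (or, for writes,
		 * DIRTY) bit is clear, and fall back to the generic fault
		 * path instead.  With this patch, hugetlb_fault() sets those
		 * bits via pte_mkyoung()/pte_mkdirty(), so the retried
		 * access succeeds here without any arch-specific fixups in
		 * the miss handler itself.
		 */
		if (!(pte_val(entry) & _PAGE_ACCESSED) ||
		    (write && !(pte_val(entry) & _PAGE_DIRTY)))
			return -1;	/* take the full page fault path */

		load_tlb_entry(addr, entry);			/* hypothetical */
		return 0;
	}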