aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorWill Deacon <will.deacon@arm.com>2012-12-11 19:01:27 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2012-12-11 20:22:24 -0500
commita1dd450bcb1a05e8218b9aac0ee36f8755d8a140 (patch)
treea46f3306e3802b59ed5df6eef0e03b8b11cca30c
parenteb2db439a3203ae86c35ad277ac4a3268a94baa1 (diff)
mm: thp: set the accessed flag for old pages on access fault
On x86 memory accesses to pages without the ACCESSED flag set result in the
ACCESSED flag being set automatically.  With the ARM architecture a page access
fault is raised instead (and it will continue to be raised until the ACCESSED
flag is set for the appropriate PTE/PMD).

For normal memory pages, handle_pte_fault will call pte_mkyoung (effectively
setting the ACCESSED flag).  For transparent huge pages, pmd_mkyoung will only
be called for a write fault.

This patch ensures that faults on transparent hugepages which do not result in
a CoW update the access flags for the faulting pmd.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Ni zhan Chen <nizhan.chen@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/huge_mm.h4
-rw-r--r--mm/huge_memory.c22
-rw-r--r--mm/memory.c8
3 files changed, 32 insertions, 2 deletions
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index b31cb7da0346..1af477552459 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -8,6 +8,10 @@ extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
 extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
 			 struct vm_area_struct *vma);
+extern void huge_pmd_set_accessed(struct mm_struct *mm,
+				  struct vm_area_struct *vma,
+				  unsigned long address, pmd_t *pmd,
+				  pmd_t orig_pmd, int dirty);
 extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			       unsigned long address, pmd_t *pmd,
 			       pmd_t orig_pmd);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ea5fb93a53a9..5f902e20e8c0 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -784,6 +784,28 @@ out:
 	return ret;
 }
 
+void huge_pmd_set_accessed(struct mm_struct *mm,
+			   struct vm_area_struct *vma,
+			   unsigned long address,
+			   pmd_t *pmd, pmd_t orig_pmd,
+			   int dirty)
+{
+	pmd_t entry;
+	unsigned long haddr;
+
+	spin_lock(&mm->page_table_lock);
+	if (unlikely(!pmd_same(*pmd, orig_pmd)))
+		goto unlock;
+
+	entry = pmd_mkyoung(orig_pmd);
+	haddr = address & HPAGE_PMD_MASK;
+	if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
+		update_mmu_cache_pmd(vma, address, pmd);
+
+unlock:
+	spin_unlock(&mm->page_table_lock);
+}
+
 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 					struct vm_area_struct *vma,
 					unsigned long address,
diff --git a/mm/memory.c b/mm/memory.c
index 221fc9ffcab1..765377385632 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3537,8 +3537,9 @@ retry:
 
 	barrier();
 	if (pmd_trans_huge(orig_pmd)) {
-		if (flags & FAULT_FLAG_WRITE &&
-		    !pmd_write(orig_pmd) &&
+		unsigned int dirty = flags & FAULT_FLAG_WRITE;
+
+		if (dirty && !pmd_write(orig_pmd) &&
 		    !pmd_trans_splitting(orig_pmd)) {
 			ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
 						  orig_pmd);
@@ -3550,6 +3551,9 @@ retry:
 		if (unlikely(ret & VM_FAULT_OOM))
 			goto retry;
 		return ret;
+	} else {
+		huge_pmd_set_accessed(mm, vma, address, pmd,
+				      orig_pmd, dirty);
 	}
 	return 0;
 }