author		Johannes Weiner <hannes@cmpxchg.org>	2011-01-13 18:47:04 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 20:32:44 -0500
commit		cd7548ab360c462118568eebb8c6da3bc303b02e
tree		44d145e6139c8753659ee773ae35810dc3d92d0b /mm
parent		b36f5b0710e9e3b92484de32920fddcb17278664
thp: mprotect: transparent huge page support
Natively handle huge pmds when changing page tables on behalf of
mprotect().
I left out update_mmu_cache() because we do not need it on x86 anyway,
and more importantly because the interface works on ptes, not pmds.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
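
[Editor's note: the declaration that makes change_huge_pmd() visible to
mm/mprotect.c lives outside the 'mm' directory this diffstat is limited
to, so it does not appear below. A sketch of the assumed prototype,
presumably added to include/linux/huge_mm.h by this patch:

/*
 * Assumed declaration, not shown in the mm-only diffstat below.
 * Returns 1 if the huge pmd was updated in place, 0 if the caller
 * must fall back (split and/or take the pte-level path).
 */
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			   unsigned long addr, pgprot_t newprot);
]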
Diffstat (limited to 'mm')
-rw-r--r--	mm/huge_memory.c	27
-rw-r--r--	mm/mprotect.c	8
2 files changed, 34 insertions, 1 deletion
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 37e89a32a0b1..7b55fe0e998b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -948,6 +948,33 @@ int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	return ret;
 }
 
+int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+		unsigned long addr, pgprot_t newprot)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	int ret = 0;
+
+	spin_lock(&mm->page_table_lock);
+	if (likely(pmd_trans_huge(*pmd))) {
+		if (unlikely(pmd_trans_splitting(*pmd))) {
+			spin_unlock(&mm->page_table_lock);
+			wait_split_huge_page(vma->anon_vma, pmd);
+		} else {
+			pmd_t entry;
+
+			entry = pmdp_get_and_clear(mm, addr, pmd);
+			entry = pmd_modify(entry, newprot);
+			set_pmd_at(mm, addr, pmd, entry);
+			spin_unlock(&vma->vm_mm->page_table_lock);
+			flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
+			ret = 1;
+		}
+	} else
+		spin_unlock(&vma->vm_mm->page_table_lock);
+
+	return ret;
+}
+
 pmd_t *page_check_address_pmd(struct page *page,
 			      struct mm_struct *mm,
 			      unsigned long address,
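
[Editor's note: the new helper applies the same clear/modify/set
discipline the pte-level path uses, just at pmd granularity:
pmdp_get_and_clear() atomically removes the entry so concurrent
hardware access/dirty-bit updates cannot be lost, pmd_modify() applies
newprot, and set_pmd_at() reinstalls the entry before the TLB flush.
For comparison, a simplified sketch of the pte-level loop that a
successful change_huge_pmd() call short-circuits, condensed from the
mm/mprotect.c of this era and not part of the patch:

/*
 * Simplified illustration of the pte-level path: one successful
 * change_huge_pmd() replaces HPAGE_PMD_NR iterations of this loop.
 * Details (lazy MMU mode, dirty accounting) omitted.
 */
static void change_pte_range_sketch(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		if (pte_present(*pte)) {
			pte_t ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);
			ptep_modify_prot_commit(mm, addr, pte, ptent);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
}
]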
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 9402080d0d4a..5a688a2756be 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -88,7 +88,13 @@ static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
-		split_huge_page_pmd(vma->vm_mm, pmd);
+		if (pmd_trans_huge(*pmd)) {
+			if (next - addr != HPAGE_PMD_SIZE)
+				split_huge_page_pmd(vma->vm_mm, pmd);
+			else if (change_huge_pmd(vma, pmd, addr, newprot))
+				continue;
+			/* fall through */
+		}
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
 		change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
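
[Editor's note: the change_huge_pmd() branch fires only when the huge
pmd spans the entire [addr, next) range, i.e. a whole aligned
HPAGE_PMD_SIZE region; partial coverage still splits the page. A
minimal userspace sketch of the case that now takes the single-pmd
path, assuming x86-64's 2MB huge pages and MADV_HUGEPAGE (added in the
same THP series); whether the mapping is actually THP-backed depends
on kernel configuration:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define HPAGE_SIZE (2UL << 20)	/* assumes 2MB huge pages (x86-64) */

int main(void)
{
	/* Over-allocate so a 2MB-aligned start address can be chosen. */
	size_t len = 2 * HPAGE_SIZE;
	char *raw = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (raw == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	char *huge = (char *)(((unsigned long)raw + HPAGE_SIZE - 1)
			      & ~(HPAGE_SIZE - 1));
	madvise(huge, HPAGE_SIZE, MADV_HUGEPAGE);	/* request THP backing */
	memset(huge, 1, HPAGE_SIZE);			/* fault the region in */

	/*
	 * Exactly one aligned pmd is covered: with this patch the kernel
	 * can update the huge pmd in place instead of splitting it.
	 */
	if (mprotect(huge, HPAGE_SIZE, PROT_READ)) {
		perror("mprotect");
		return 1;
	}
	puts("mprotect over one huge page done");
	return 0;
}
]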