author		Johannes Weiner <hannes@cmpxchg.org>	2011-01-13 18:47:04 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 20:32:44 -0500
commit		cd7548ab360c462118568eebb8c6da3bc303b02e (patch)
tree		44d145e6139c8753659ee773ae35810dc3d92d0b /mm/huge_memory.c
parent		b36f5b0710e9e3b92484de32920fddcb17278664 (diff)
thp: mprotect: transparent huge page support
Natively handle huge pmds when changing page tables on behalf of
mprotect().
I left out update_mmu_cache() because we do not need it on x86 anyway, and,
more importantly, because its interface works on ptes, not pmds.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--	mm/huge_memory.c	27
1 file changed, 27 insertions(+), 0 deletions(-)
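
The diffstat above is limited to mm/huge_memory.c, so the mprotect() side of
the patch is not shown here. A plausible sketch of the call site in
mm/mprotect.c's change_pmd_range() — an assumption reconstructed from the
interface below, not a quote from the patch — would look like:

	if (pmd_trans_huge(*pmd)) {
		if (next - addr != HPAGE_PMD_SIZE)
			/* range covers only part of the huge pmd: split it */
			split_huge_page_pmd(vma->vm_mm, pmd);
		else if (change_huge_pmd(vma, pmd, addr, newprot))
			/* reprotected the whole 2M mapping in one pmd update */
			continue;
		/* fall through: the pmd was split, handle the ptes below */
	}

A nonzero return from change_huge_pmd() means the pmd was still huge and has
been reprotected as a unit; zero means it was not (or was being split), so
the caller falls back to the per-pte loop.
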
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 37e89a32a0b1..7b55fe0e998b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -948,6 +948,33 @@ int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	return ret;
 }
 
+int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+		unsigned long addr, pgprot_t newprot)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	int ret = 0;
+
+	spin_lock(&mm->page_table_lock);
+	if (likely(pmd_trans_huge(*pmd))) {
+		if (unlikely(pmd_trans_splitting(*pmd))) {
+			spin_unlock(&mm->page_table_lock);
+			wait_split_huge_page(vma->anon_vma, pmd);
+		} else {
+			pmd_t entry;
+
+			entry = pmdp_get_and_clear(mm, addr, pmd);
+			entry = pmd_modify(entry, newprot);
+			set_pmd_at(mm, addr, pmd, entry);
+			spin_unlock(&vma->vm_mm->page_table_lock);
+			flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
+			ret = 1;
+		}
+	} else
+		spin_unlock(&vma->vm_mm->page_table_lock);
+
+	return ret;
+}
+
 pmd_t *page_check_address_pmd(struct page *page,
 		struct mm_struct *mm,
 		unsigned long address,
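
To see this path fire at runtime, a small userspace probe (a hypothetical
test program, not part of this patch) can map an anonymous region, mark it
THP-eligible, fault it in, and then mprotect() the full huge page:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define SZ (2UL * 1024 * 1024)	/* one pmd-sized huge page on x86-64 */

int main(void)
{
	/* Over-allocate so the buffer can be aligned to a 2M boundary. */
	char *raw = mmap(NULL, 2 * SZ, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (raw == MAP_FAILED)
		return 1;
	char *p = (char *)(((unsigned long)raw + SZ - 1) & ~(SZ - 1));

	madvise(p, SZ, MADV_HUGEPAGE);	/* mark the VMA as THP-eligible */
	memset(p, 1, SZ);		/* fault the range in */

	/*
	 * If the fault was served by a huge pmd, this mprotect() call
	 * reaches change_huge_pmd() and flips the protection of the
	 * whole 2M range with a single pmd update and one
	 * flush_tlb_range(), instead of 512 pte updates.
	 */
	if (mprotect(p, SZ, PROT_READ))
		perror("mprotect");
	return 0;
}

Whether the range is actually backed by a huge page depends on
/sys/kernel/mm/transparent_hugepage/enabled; the AnonHugePages counter in
/proc/<pid>/smaps shows whether the mapping got one.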