aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
 include/linux/huge_mm.h |  2 ++
 mm/huge_memory.c        | 27 +++++++++++++++++++++++++++
 mm/mprotect.c           |  8 +++++++-
 3 files changed, 36 insertions(+), 1 deletion(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 25125fb6acf..c590b08c6fa 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -22,6 +22,8 @@ extern int zap_huge_pmd(struct mmu_gather *tlb,
 extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			unsigned long addr, unsigned long end,
 			unsigned char *vec);
+extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+			unsigned long addr, pgprot_t newprot);
 
 enum transparent_hugepage_flag {
 	TRANSPARENT_HUGEPAGE_FLAG,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 37e89a32a0b..7b55fe0e998 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -948,6 +948,33 @@ int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	return ret;
 }
 
+int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+		unsigned long addr, pgprot_t newprot)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	int ret = 0;
+
+	spin_lock(&mm->page_table_lock);
+	if (likely(pmd_trans_huge(*pmd))) {
+		if (unlikely(pmd_trans_splitting(*pmd))) {
+			spin_unlock(&mm->page_table_lock);
+			wait_split_huge_page(vma->anon_vma, pmd);
+		} else {
+			pmd_t entry;
+
+			entry = pmdp_get_and_clear(mm, addr, pmd);
+			entry = pmd_modify(entry, newprot);
+			set_pmd_at(mm, addr, pmd, entry);
+			spin_unlock(&vma->vm_mm->page_table_lock);
+			flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
+			ret = 1;
+		}
+	} else
+		spin_unlock(&vma->vm_mm->page_table_lock);
+
+	return ret;
+}
+
 pmd_t *page_check_address_pmd(struct page *page,
 			      struct mm_struct *mm,
 			      unsigned long address,
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 9402080d0d4..5a688a2756b 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -88,7 +88,13 @@ static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
-		split_huge_page_pmd(vma->vm_mm, pmd);
+		if (pmd_trans_huge(*pmd)) {
+			if (next - addr != HPAGE_PMD_SIZE)
+				split_huge_page_pmd(vma->vm_mm, pmd);
+			else if (change_huge_pmd(vma, pmd, addr, newprot))
+				continue;
+			/* fall through */
+		}
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
 		change_pte_range(vma->vm_mm, pmd, addr, next, newprot,