Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--	mm/huge_memory.c	28	++++++++++++++++++----------
1 file changed, 18 insertions(+), 10 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d8b3b850150c..243e710c6039 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -729,8 +729,8 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 		pmd_t entry;
 		entry = mk_huge_pmd(page, vma);
 		page_add_new_anon_rmap(page, vma, haddr);
+		pgtable_trans_huge_deposit(mm, pmd, pgtable);
 		set_pmd_at(mm, haddr, pmd, entry);
-		pgtable_trans_huge_deposit(mm, pgtable);
 		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
 		mm->nr_ptes++;
 		spin_unlock(&mm->page_table_lock);
@@ -771,8 +771,8 @@ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
 	entry = mk_pmd(zero_page, vma->vm_page_prot);
 	entry = pmd_wrprotect(entry);
 	entry = pmd_mkhuge(entry);
+	pgtable_trans_huge_deposit(mm, pmd, pgtable);
 	set_pmd_at(mm, haddr, pmd, entry);
-	pgtable_trans_huge_deposit(mm, pgtable);
 	mm->nr_ptes++;
 	return true;
 }
@@ -916,8 +916,8 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 
 	pmdp_set_wrprotect(src_mm, addr, src_pmd);
 	pmd = pmd_mkold(pmd_wrprotect(pmd));
+	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
 	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
-	pgtable_trans_huge_deposit(dst_mm, pgtable);
 	dst_mm->nr_ptes++;
 
 	ret = 0;
@@ -987,7 +987,7 @@ static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
 	pmdp_clear_flush(vma, haddr, pmd);
 	/* leave pmd empty until pte is filled */
 
-	pgtable = pgtable_trans_huge_withdraw(mm);
+	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
 	pmd_populate(mm, &_pmd, pgtable);
 
 	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
@@ -1085,7 +1085,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 	pmdp_clear_flush(vma, haddr, pmd);
 	/* leave pmd empty until pte is filled */
 
-	pgtable = pgtable_trans_huge_withdraw(mm);
+	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
 	pmd_populate(mm, &_pmd, pgtable);
 
 	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
@@ -1265,7 +1265,9 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 		 * young bit, instead of the current set_pmd_at.
 		 */
 		_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
-		set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
+		if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
+					  pmd, _pmd, 1))
+			update_mmu_cache_pmd(vma, addr, pmd);
 	}
 	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
 		if (page->mapping && trylock_page(page)) {
@@ -1358,9 +1360,15 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		struct page *page;
 		pgtable_t pgtable;
 		pmd_t orig_pmd;
-		pgtable = pgtable_trans_huge_withdraw(tlb->mm);
+		/*
+		 * For architectures like ppc64 we look at deposited pgtable
+		 * when calling pmdp_get_and_clear. So do the
+		 * pgtable_trans_huge_withdraw after finishing pmdp related
+		 * operations.
+		 */
 		orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd);
 		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
+		pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
 		if (is_huge_zero_pmd(orig_pmd)) {
 			tlb->mm->nr_ptes--;
 			spin_unlock(&tlb->mm->page_table_lock);
@@ -1691,7 +1699,7 @@ static int __split_huge_page_map(struct page *page,
 	pmd = page_check_address_pmd(page, mm, address,
 				     PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
 	if (pmd) {
-		pgtable = pgtable_trans_huge_withdraw(mm);
+		pgtable = pgtable_trans_huge_withdraw(mm, pmd);
 		pmd_populate(mm, &_pmd, pgtable);
 
 		haddr = address;
@@ -2359,9 +2367,9 @@ static void collapse_huge_page(struct mm_struct *mm,
 	spin_lock(&mm->page_table_lock);
 	BUG_ON(!pmd_none(*pmd));
 	page_add_new_anon_rmap(new_page, vma, address);
+	pgtable_trans_huge_deposit(mm, pmd, pgtable);
 	set_pmd_at(mm, address, pmd, _pmd);
 	update_mmu_cache_pmd(vma, address, pmd);
-	pgtable_trans_huge_deposit(mm, pgtable);
 	spin_unlock(&mm->page_table_lock);
 
 	*hpage = NULL;
@@ -2667,7 +2675,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
 	pmdp_clear_flush(vma, haddr, pmd);
 	/* leave pmd empty until pte is filled */
 
-	pgtable = pgtable_trans_huge_withdraw(mm);
+	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
 	pmd_populate(mm, &_pmd, pgtable);
 
 	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
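Taken together, the hunks do two things: pgtable_trans_huge_deposit() and pgtable_trans_huge_withdraw() now take the pmd pointer, and the calls are reordered so the deposit happens before set_pmd_at() while the withdraw happens only after the pmdp operations. The condensed sketch below is not a literal excerpt from the file; it assumes the surrounding kernel context of the functions above (mm, vma, haddr, pmd, entry, pgtable and orig_pmd already set up, mm->page_table_lock held) and only restates the ordering the patch establishes, using the same calls and signatures that appear in the hunks:

	/* install a huge pmd: deposit the preallocated pte page first, so
	 * that an architecture such as ppc64 can reach it from the pmd as
	 * soon as the entry becomes visible */
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);

	/* tear it down: clear the pmd first; withdraw the deposited pte
	 * page only afterwards, because (per the comment added in
	 * zap_huge_pmd) pmdp_get_and_clear() may consult the deposited
	 * pgtable on ppc64 */
	orig_pmd = pmdp_get_and_clear(mm, haddr, pmd);
	pgtable = pgtable_trans_huge_withdraw(mm, pmd);

The extra pmd argument presumably lets architecture-specific implementations associate the deposited page table with that particular pmd rather than only with the mm; the generic helpers are free to ignore it.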