aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>2013-06-05 20:14:06 -0400
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2013-06-20 02:55:08 -0400
commitfce144b477fb0313f6612d5e3e22b67d7bdf935e (patch)
treebb704a7d5f787dcb6c72a67f03f37fe13b12a60e
parentfde52796d487b675cde55427e3347ff3e59f9a7f (diff)
mm/THP: deposit the transparent huge pgtable before set_pmd
Architectures like powerpc use the deposited pgtable to store hash index values. We need to make sure the deposited pgtable is visible to other cpus before we are ready to take a hash fault. Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: David Gibson <david@gibson.dropbear.id.au> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-rw-r--r--mm/huge_memory.c8
1 files changed, 4 insertions, 4 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5c4fac2d239e..59d9384b6bbf 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -729,8 +729,8 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
729 pmd_t entry; 729 pmd_t entry;
730 entry = mk_huge_pmd(page, vma); 730 entry = mk_huge_pmd(page, vma);
731 page_add_new_anon_rmap(page, vma, haddr); 731 page_add_new_anon_rmap(page, vma, haddr);
732 set_pmd_at(mm, haddr, pmd, entry);
733 pgtable_trans_huge_deposit(mm, pmd, pgtable); 732 pgtable_trans_huge_deposit(mm, pmd, pgtable);
733 set_pmd_at(mm, haddr, pmd, entry);
734 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); 734 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
735 mm->nr_ptes++; 735 mm->nr_ptes++;
736 spin_unlock(&mm->page_table_lock); 736 spin_unlock(&mm->page_table_lock);
@@ -771,8 +771,8 @@ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
771 entry = mk_pmd(zero_page, vma->vm_page_prot); 771 entry = mk_pmd(zero_page, vma->vm_page_prot);
772 entry = pmd_wrprotect(entry); 772 entry = pmd_wrprotect(entry);
773 entry = pmd_mkhuge(entry); 773 entry = pmd_mkhuge(entry);
774 set_pmd_at(mm, haddr, pmd, entry);
775 pgtable_trans_huge_deposit(mm, pmd, pgtable); 774 pgtable_trans_huge_deposit(mm, pmd, pgtable);
775 set_pmd_at(mm, haddr, pmd, entry);
776 mm->nr_ptes++; 776 mm->nr_ptes++;
777 return true; 777 return true;
778} 778}
@@ -916,8 +916,8 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
916 916
917 pmdp_set_wrprotect(src_mm, addr, src_pmd); 917 pmdp_set_wrprotect(src_mm, addr, src_pmd);
918 pmd = pmd_mkold(pmd_wrprotect(pmd)); 918 pmd = pmd_mkold(pmd_wrprotect(pmd));
919 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
920 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); 919 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
920 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
921 dst_mm->nr_ptes++; 921 dst_mm->nr_ptes++;
922 922
923 ret = 0; 923 ret = 0;
@@ -2367,9 +2367,9 @@ static void collapse_huge_page(struct mm_struct *mm,
2367 spin_lock(&mm->page_table_lock); 2367 spin_lock(&mm->page_table_lock);
2368 BUG_ON(!pmd_none(*pmd)); 2368 BUG_ON(!pmd_none(*pmd));
2369 page_add_new_anon_rmap(new_page, vma, address); 2369 page_add_new_anon_rmap(new_page, vma, address);
2370 pgtable_trans_huge_deposit(mm, pmd, pgtable);
2370 set_pmd_at(mm, address, pmd, _pmd); 2371 set_pmd_at(mm, address, pmd, _pmd);
2371 update_mmu_cache_pmd(vma, address, pmd); 2372 update_mmu_cache_pmd(vma, address, pmd);
2372 pgtable_trans_huge_deposit(mm, pmd, pgtable);
2373 spin_unlock(&mm->page_table_lock); 2373 spin_unlock(&mm->page_table_lock);
2374 2374
2375 *hpage = NULL; 2375 *hpage = NULL;