-rw-r--r--  include/asm-generic/pgtable.h    8
-rw-r--r--  include/linux/huge_mm.h          1
-rw-r--r--  mm/huge_memory.c                50
-rw-r--r--  mm/pgtable-generic.c            39
4 files changed, 55 insertions, 43 deletions
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index c9a612069c8e..044939f21c16 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -162,6 +162,14 @@ extern void pmdp_splitting_flush(struct vm_area_struct *vma,
                                  unsigned long address, pmd_t *pmdp);
 #endif
 
+#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
+#endif
+
+#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);
+#endif
+
 #ifndef __HAVE_ARCH_PTE_SAME
 static inline int pte_same(pte_t pte_a, pte_t pte_b)
 {
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 4c59b1131187..6ab47af5a849 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -11,7 +11,6 @@ extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                unsigned long address, pmd_t *pmd,
                                pmd_t orig_pmd);
-extern pgtable_t get_pmd_huge_pte(struct mm_struct *mm);
 extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
                                           unsigned long addr,
                                           pmd_t *pmd,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e19cc426c522..9ea6d1953765 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -598,19 +598,6 @@ out:
 }
 __setup("transparent_hugepage=", setup_transparent_hugepage);
 
-static void prepare_pmd_huge_pte(pgtable_t pgtable,
-                                 struct mm_struct *mm)
-{
-        assert_spin_locked(&mm->page_table_lock);
-
-        /* FIFO */
-        if (!mm->pmd_huge_pte)
-                INIT_LIST_HEAD(&pgtable->lru);
-        else
-                list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
-        mm->pmd_huge_pte = pgtable;
-}
-
 static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 {
         if (likely(vma->vm_flags & VM_WRITE))
@@ -652,7 +639,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
          */
         page_add_new_anon_rmap(page, vma, haddr);
         set_pmd_at(mm, haddr, pmd, entry);
-        prepare_pmd_huge_pte(pgtable, mm);
+        pgtable_trans_huge_deposit(mm, pgtable);
         add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
         mm->nr_ptes++;
         spin_unlock(&mm->page_table_lock);
@@ -778,7 +765,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
         pmdp_set_wrprotect(src_mm, addr, src_pmd);
         pmd = pmd_mkold(pmd_wrprotect(pmd));
         set_pmd_at(dst_mm, addr, dst_pmd, pmd);
-        prepare_pmd_huge_pte(pgtable, dst_mm);
+        pgtable_trans_huge_deposit(dst_mm, pgtable);
         dst_mm->nr_ptes++;
 
         ret = 0;
@@ -789,25 +776,6 @@ out:
         return ret;
 }
 
-/* no "address" argument so destroys page coloring of some arch */
-pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
-{
-        pgtable_t pgtable;
-
-        assert_spin_locked(&mm->page_table_lock);
-
-        /* FIFO */
-        pgtable = mm->pmd_huge_pte;
-        if (list_empty(&pgtable->lru))
-                mm->pmd_huge_pte = NULL;
-        else {
-                mm->pmd_huge_pte = list_entry(pgtable->lru.next,
-                                              struct page, lru);
-                list_del(&pgtable->lru);
-        }
-        return pgtable;
-}
-
 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
                                         struct vm_area_struct *vma,
                                         unsigned long address,
@@ -863,7 +831,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
         pmdp_clear_flush_notify(vma, haddr, pmd);
         /* leave pmd empty until pte is filled */
 
-        pgtable = get_pmd_huge_pte(mm);
+        pgtable = pgtable_trans_huge_withdraw(mm);
         pmd_populate(mm, &_pmd, pgtable);
 
         for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
@@ -1028,7 +996,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
         if (__pmd_trans_huge_lock(pmd, vma) == 1) {
                 struct page *page;
                 pgtable_t pgtable;
-                pgtable = get_pmd_huge_pte(tlb->mm);
+                pgtable = pgtable_trans_huge_withdraw(tlb->mm);
                 page = pmd_page(*pmd);
                 pmd_clear(pmd);
                 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
@@ -1345,11 +1313,11 @@ static int __split_huge_page_map(struct page *page,
                 pmd = page_check_address_pmd(page, mm, address,
                                              PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
                 if (pmd) {
-                        pgtable = get_pmd_huge_pte(mm);
+                        pgtable = pgtable_trans_huge_withdraw(mm);
                         pmd_populate(mm, &_pmd, pgtable);
 
-                        for (i = 0, haddr = address; i < HPAGE_PMD_NR;
-                             i++, haddr += PAGE_SIZE) {
+                        haddr = address;
+                        for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
                                 pte_t *pte, entry;
                                 BUG_ON(PageCompound(page+i));
                                 entry = mk_pte(page + i, vma->vm_page_prot);
@@ -2017,8 +1985,6 @@ static void collapse_huge_page(struct mm_struct *mm,
         pte_unmap(pte);
         __SetPageUptodate(new_page);
         pgtable = pmd_pgtable(_pmd);
-        VM_BUG_ON(page_count(pgtable) != 1);
-        VM_BUG_ON(page_mapcount(pgtable) != 0);
 
         _pmd = mk_pmd(new_page, vma->vm_page_prot);
         _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
@@ -2036,7 +2002,7 @@ static void collapse_huge_page(struct mm_struct *mm,
         page_add_new_anon_rmap(new_page, vma, address);
         set_pmd_at(mm, address, pmd, _pmd);
         update_mmu_cache(vma, address, _pmd);
-        prepare_pmd_huge_pte(pgtable, mm);
+        pgtable_trans_huge_deposit(mm, pgtable);
         spin_unlock(&mm->page_table_lock);
 
         *hpage = NULL;
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 74c0ddaa6fa0..29867e083d37 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -120,3 +120,42 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
+
+#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
+{
+        assert_spin_locked(&mm->page_table_lock);
+
+        /* FIFO */
+        if (!mm->pmd_huge_pte)
+                INIT_LIST_HEAD(&pgtable->lru);
+        else
+                list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
+        mm->pmd_huge_pte = pgtable;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/* no "address" argument so destroys page coloring of some arch */
+pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
+{
+        pgtable_t pgtable;
+
+        assert_spin_locked(&mm->page_table_lock);
+
+        /* FIFO */
+        pgtable = mm->pmd_huge_pte;
+        if (list_empty(&pgtable->lru))
+                mm->pmd_huge_pte = NULL;
+        else {
+                mm->pmd_huge_pte = list_entry(pgtable->lru.next,
+                                              struct page, lru);
+                list_del(&pgtable->lru);
+        }
+        return pgtable;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
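Note on the calling convention established by this patch, as visible in the huge_memory.c hunks above: a preallocated pte page table is deposited when a huge pmd is installed and withdrawn when that pmd is zapped or split, both under mm->page_table_lock. The sketch below only illustrates that pairing; install_huge_pmd_sketch() and remove_huge_pmd_sketch() are hypothetical wrappers invented for illustration, while set_pmd_at(), pmd_clear(), pgtable_trans_huge_deposit() and pgtable_trans_huge_withdraw() are the interfaces actually touched by the patch.

/*
 * Illustrative only; the real call sites are __do_huge_pmd_anonymous_page(),
 * copy_huge_pmd(), zap_huge_pmd() and __split_huge_page_map() above.
 */
static void install_huge_pmd_sketch(struct mm_struct *mm, unsigned long haddr,
                                    pmd_t *pmd, pmd_t entry, pgtable_t pgtable)
{
        spin_lock(&mm->page_table_lock);
        set_pmd_at(mm, haddr, pmd, entry);
        /* stash the preallocated pte page; a later split or zap withdraws it */
        pgtable_trans_huge_deposit(mm, pgtable);
        mm->nr_ptes++;
        spin_unlock(&mm->page_table_lock);
}

static pgtable_t remove_huge_pmd_sketch(struct mm_struct *mm, pmd_t *pmd)
{
        pgtable_t pgtable;

        spin_lock(&mm->page_table_lock);
        /* take back the pte page deposited when the huge pmd was installed */
        pgtable = pgtable_trans_huge_withdraw(mm);
        pmd_clear(pmd);
        mm->nr_ptes--;
        spin_unlock(&mm->page_table_lock);
        return pgtable;
}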