author		Gerald Schaefer <gerald.schaefer@de.ibm.com>	2012-10-08 19:30:07 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-09 03:22:29 -0400
commit		e3ebcf64381188a2744a9829a4eb5c2b60f1974c (patch)
tree		6c2bc18729bd6fc12b93d82403fad6afec319c3b /mm
parent		15626062f4a98279c59a2a5208c496cf65cbf8c0 (diff)
thp: remove assumptions on pgtable_t type
The THP page table pre-allocation code currently assumes that pgtable_t is
of type "struct page *". This may not be true for all architectures, so
this patch removes that assumption by replacing the functions
prepare_pmd_huge_pte() and get_pmd_huge_pte() with two new functions,
pgtable_trans_huge_deposit() and pgtable_trans_huge_withdraw(), which
architectures can override with their own implementations.

It also removes two VM_BUG_ON checks for page_count() and page_mapcount()
operating on a pgtable_t. Apart from the VM_BUG_ON removal, this patch
introduces no functional change.
Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
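
The two hooks are overridable per architecture: the generic implementations added below in mm/pgtable-generic.c are compiled only when an architecture does not define __HAVE_ARCH_PGTABLE_DEPOSIT and __HAVE_ARCH_PGTABLE_WITHDRAW. To illustrate why the "struct page *" assumption matters, here is a minimal, hypothetical sketch of an override for an architecture where pgtable_t is "pte_t *". The hook names and guard macros come from this patch; the chaining scheme (embedding a list_head in the otherwise-unused page table memory) is an assumption made for illustration, not code from any real architecture.

/* arch/<arch>/include/asm/pgtable.h (hypothetical) */
#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);

/*
 * arch/<arch>/mm/pgtable.c (hypothetical): pgtable_t is "pte_t *" here,
 * so page->lru cannot be used for the chain; instead, chain deposited
 * tables through a list_head placed at the start of the (empty, not yet
 * populated) page table memory itself.
 */
void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO, mirroring the generic version */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
	mm->pmd_huge_pte = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	pgtable = mm->pmd_huge_pte;
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		mm->pmd_huge_pte = NULL;
	else {
		/* the list_head sits at offset 0 of the next table */
		mm->pmd_huge_pte = (pgtable_t) lh->next;
		list_del(lh);
	}
	return pgtable;
}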
Diffstat (limited to 'mm')

-rw-r--r--	mm/huge_memory.c	50
-rw-r--r--	mm/pgtable-generic.c	39

2 files changed, 47 insertions(+), 42 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e19cc426c522..9ea6d1953765 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -598,19 +598,6 @@ out:
 }
 __setup("transparent_hugepage=", setup_transparent_hugepage);
 
-static void prepare_pmd_huge_pte(pgtable_t pgtable,
-				 struct mm_struct *mm)
-{
-	assert_spin_locked(&mm->page_table_lock);
-
-	/* FIFO */
-	if (!mm->pmd_huge_pte)
-		INIT_LIST_HEAD(&pgtable->lru);
-	else
-		list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
-	mm->pmd_huge_pte = pgtable;
-}
-
 static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 {
 	if (likely(vma->vm_flags & VM_WRITE))
@@ -652,7 +639,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 		 */
 		page_add_new_anon_rmap(page, vma, haddr);
 		set_pmd_at(mm, haddr, pmd, entry);
-		prepare_pmd_huge_pte(pgtable, mm);
+		pgtable_trans_huge_deposit(mm, pgtable);
 		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
 		mm->nr_ptes++;
 		spin_unlock(&mm->page_table_lock);
@@ -778,7 +765,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	pmdp_set_wrprotect(src_mm, addr, src_pmd);
 	pmd = pmd_mkold(pmd_wrprotect(pmd));
 	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
-	prepare_pmd_huge_pte(pgtable, dst_mm);
+	pgtable_trans_huge_deposit(dst_mm, pgtable);
 	dst_mm->nr_ptes++;
 
 	ret = 0;
@@ -789,25 +776,6 @@ out:
 	return ret;
 }
 
-/* no "address" argument so destroys page coloring of some arch */
-pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
-{
-	pgtable_t pgtable;
-
-	assert_spin_locked(&mm->page_table_lock);
-
-	/* FIFO */
-	pgtable = mm->pmd_huge_pte;
-	if (list_empty(&pgtable->lru))
-		mm->pmd_huge_pte = NULL;
-	else {
-		mm->pmd_huge_pte = list_entry(pgtable->lru.next,
-					      struct page, lru);
-		list_del(&pgtable->lru);
-	}
-	return pgtable;
-}
-
 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 					struct vm_area_struct *vma,
 					unsigned long address,
@@ -863,7 +831,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 	pmdp_clear_flush_notify(vma, haddr, pmd);
 	/* leave pmd empty until pte is filled */
 
-	pgtable = get_pmd_huge_pte(mm);
+	pgtable = pgtable_trans_huge_withdraw(mm);
 	pmd_populate(mm, &_pmd, pgtable);
 
 	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
@@ -1028,7 +996,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	if (__pmd_trans_huge_lock(pmd, vma) == 1) {
 		struct page *page;
 		pgtable_t pgtable;
-		pgtable = get_pmd_huge_pte(tlb->mm);
+		pgtable = pgtable_trans_huge_withdraw(tlb->mm);
 		page = pmd_page(*pmd);
 		pmd_clear(pmd);
 		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
@@ -1345,11 +1313,11 @@ static int __split_huge_page_map(struct page *page,
 	pmd = page_check_address_pmd(page, mm, address,
 				     PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
 	if (pmd) {
-		pgtable = get_pmd_huge_pte(mm);
+		pgtable = pgtable_trans_huge_withdraw(mm);
 		pmd_populate(mm, &_pmd, pgtable);
 
-		for (i = 0, haddr = address; i < HPAGE_PMD_NR;
-		     i++, haddr += PAGE_SIZE) {
+		haddr = address;
+		for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
 			pte_t *pte, entry;
 			BUG_ON(PageCompound(page+i));
 			entry = mk_pte(page + i, vma->vm_page_prot);
@@ -2017,8 +1985,6 @@ static void collapse_huge_page(struct mm_struct *mm,
 	pte_unmap(pte);
 	__SetPageUptodate(new_page);
 	pgtable = pmd_pgtable(_pmd);
-	VM_BUG_ON(page_count(pgtable) != 1);
-	VM_BUG_ON(page_mapcount(pgtable) != 0);
 
 	_pmd = mk_pmd(new_page, vma->vm_page_prot);
 	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
@@ -2036,7 +2002,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	page_add_new_anon_rmap(new_page, vma, address);
 	set_pmd_at(mm, address, pmd, _pmd);
 	update_mmu_cache(vma, address, _pmd);
-	prepare_pmd_huge_pte(pgtable, mm);
+	pgtable_trans_huge_deposit(mm, pgtable);
 	spin_unlock(&mm->page_table_lock);
 
 	*hpage = NULL;
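
Since the diffstat above is limited to 'mm', the companion hunk declaring the two new hooks is not shown here. Presumably the declarations sit in a shared header behind the same guards, so that an arch-specific definition simply suppresses the generic one; a sketch of what that header side would look like (the exact file and placement are not visible in this diff):

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);
#endif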
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 74c0ddaa6fa0..29867e083d37 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -120,3 +120,42 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
+
+#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
+{
+	assert_spin_locked(&mm->page_table_lock);
+
+	/* FIFO */
+	if (!mm->pmd_huge_pte)
+		INIT_LIST_HEAD(&pgtable->lru);
+	else
+		list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
+	mm->pmd_huge_pte = pgtable;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/* no "address" argument so destroys page coloring of some arch */
+pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
+{
+	pgtable_t pgtable;
+
+	assert_spin_locked(&mm->page_table_lock);
+
+	/* FIFO */
+	pgtable = mm->pmd_huge_pte;
+	if (list_empty(&pgtable->lru))
+		mm->pmd_huge_pte = NULL;
+	else {
+		mm->pmd_huge_pte = list_entry(pgtable->lru.next,
+					      struct page, lru);
+		list_del(&pgtable->lru);
+	}
+	return pgtable;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
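
To make the deposit/withdraw bookkeeping easy to poke at, here is a small self-contained userspace model of the generic implementation above. The miniature list helpers stand in for <linux/list.h>, and mm->page_table_lock plus the actual page table allocation are elided. It shows that every deposited table comes back exactly once; note the head table is returned first, so the ordering is not strictly FIFO despite the comment, which appears harmless since callers only need some preallocated, empty page table.

/* model_deposit.c: illustrative userspace model, not kernel code */
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h->prev = h;
}

static void list_add(struct list_head *item, struct list_head *head)
{
	item->next = head->next;
	item->prev = head;
	head->next->prev = item;
	head->next = item;
}

static void list_del(struct list_head *item)
{
	item->prev->next = item->next;
	item->next->prev = item->prev;
}

static int list_empty(const struct list_head *h)
{
	return h->next == h;
}

/* stand-in for struct page; lru is first so the cast below is valid */
struct page {
	struct list_head lru;
	int id;
};
typedef struct page *pgtable_t;

static pgtable_t pmd_huge_pte;		/* a per-mm field in the kernel */

static void deposit(pgtable_t pgtable)
{
	if (!pmd_huge_pte)
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte->lru);
	pmd_huge_pte = pgtable;
}

static pgtable_t withdraw(void)
{
	pgtable_t pgtable = pmd_huge_pte;

	if (list_empty(&pgtable->lru))
		pmd_huge_pte = NULL;
	else {
		/* lru.next points at the embedded lru of the next page */
		pmd_huge_pte = (pgtable_t) pgtable->lru.next;
		list_del(&pgtable->lru);
	}
	return pgtable;
}

int main(void)
{
	struct page a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

	deposit(&a);
	deposit(&b);
	deposit(&c);
	/* prints "3 1 2": head first, then oldest-first for the rest */
	printf("%d %d %d\n", withdraw()->id, withdraw()->id, withdraw()->id);
	return 0;
}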