author     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>  2016-04-29 09:26:29 -0400
committer  Michael Ellerman <mpe@ellerman.id.au>  2016-05-11 07:53:57 -0400
commit     3df33f12be2f6fa88dff12564ab1d069482e3224 (patch)
tree       70a1a974df6f748741a5768a71c305a370812537
parent     6a1ea36260f69f1aea85bbf8afcd6a8c193485b9 (diff)

powerpc/mm/thp: Abstraction for THP functions

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
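
The patch carries no body text, but the diff below follows one pattern throughout: each hash-MMU THP helper is renamed with a hash__ prefix, the generic name becomes a thin wrapper over it, and the MMU-neutral THP code moves into a new pgtable-book3s64.c. A standalone C sketch of that layering (toy types and flag values, not the kernel's definitions):

#include <stdio.h>

#define PAGE_PTE        0x1UL   /* stand-in for _PAGE_PTE */
#define PAGE_THP_HUGE   0x2UL   /* stand-in for H_PAGE_THP_HUGE */

typedef struct { unsigned long val; } pmd_t;    /* toy pmd_t */

/* backend, shaped like hash-64k.h's hash__pmd_mkhuge() */
static pmd_t hash__pmd_mkhuge(pmd_t pmd)
{
        return (pmd_t){ pmd.val | (PAGE_PTE | PAGE_THP_HUGE) };
}

/* generic entry point, shaped like pgtable-64k.h's pmd_mkhuge() */
static pmd_t pmd_mkhuge(pmd_t pmd)
{
        return hash__pmd_mkhuge(pmd);   /* a radix__ variant can slot in here */
}

int main(void)
{
        printf("huge pmd: %#lx\n", pmd_mkhuge((pmd_t){ 0x1000 }).val);
        return 0;
}

With the wrapper in place, a radix backend can later be dispatched from the same entry point without touching callers.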
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash-64k.h     |  23
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable-64k.h  |  42
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h      |  83
-rw-r--r--  arch/powerpc/mm/Makefile                          |   2
-rw-r--r--  arch/powerpc/mm/pgtable-book3s64.c                | 118
-rw-r--r--  arch/powerpc/mm/pgtable-hash64.c                  | 137
6 files changed, 226 insertions(+), 179 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index 80d2abe25280..5aae4f530c21 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -119,11 +119,6 @@ static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long a
 #define H_PGD_TABLE_SIZE        (sizeof(pgd_t) << PGD_INDEX_SIZE)
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
-                                         unsigned long addr,
-                                         pmd_t *pmdp,
-                                         unsigned long clr,
-                                         unsigned long set);
 static inline char *get_hpte_slot_array(pmd_t *pmdp)
 {
         /*
@@ -193,6 +188,24 @@ static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
         return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
 }
 
+static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
+{
+        return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE));
+}
+
+extern unsigned long hash__pmd_hugepage_update(struct mm_struct *mm,
+                                               unsigned long addr, pmd_t *pmdp,
+                                               unsigned long clr, unsigned long set);
+extern pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma,
+                                       unsigned long address, pmd_t *pmdp);
+extern void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+                                             pgtable_t pgtable);
+extern pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+extern void hash__pmdp_huge_split_prepare(struct vm_area_struct *vma,
+                                          unsigned long address, pmd_t *pmdp);
+extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
+                                           unsigned long addr, pmd_t *pmdp);
+extern int hash__has_transparent_hugepage(void);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
index 27b5e34abe24..79331cf77613 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
@@ -54,41 +54,6 @@ static inline int hugepd_ok(hugepd_t hpd)
 #endif /* CONFIG_HUGETLB_PAGE */
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline int pmd_large(pmd_t pmd)
-{
-        return !!(pmd_val(pmd) & _PAGE_PTE);
-}
-
-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
-{
-        return __pmd(pmd_val(pmd) & ~_PAGE_PRESENT);
-}
-/*
- * For radix we should always find H_PAGE_HASHPTE zero. Hence
- * the below will work for radix too
- */
-static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
-                                              unsigned long addr, pmd_t *pmdp)
-{
-        unsigned long old;
-
-        if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
-                return 0;
-        old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
-        return ((old & _PAGE_ACCESSED) != 0);
-}
-
-#define __HAVE_ARCH_PMDP_SET_WRPROTECT
-static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
-                                      pmd_t *pmdp)
-{
-
-        if ((pmd_val(*pmdp) & _PAGE_WRITE) == 0)
-                return;
-
-        pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
-}
-
 static inline int pmd_trans_huge(pmd_t pmd)
 {
         if (radix_enabled())
@@ -103,6 +68,12 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
                 return radix__pmd_same(pmd_a, pmd_b);
         return hash__pmd_same(pmd_a, pmd_b);
 }
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+        return hash__pmd_mkhuge(pmd);
+}
+
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 static inline int remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
@@ -111,7 +82,6 @@ static inline int remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
         if (radix_enabled())
                 BUG();
         return hash__remap_4k_pfn(vma, addr, pfn, prot);
-
 }
 #endif /* __ASSEMBLY__ */
 #endif /*_ASM_POWERPC_BOOK3S_64_PGTABLE_64K_H */
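
Note how pmd_trans_huge() and pmd_same() above pick their backend at runtime via radix_enabled(), while the new pmd_mkhuge() still calls the hash flavour unconditionally. A minimal standalone sketch of that runtime dispatch (stubbed radix_enabled() and invented flag masks, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

typedef struct { unsigned long val; } pmd_t;

/* stand-in for radix_enabled(); the kernel tests an MMU feature bit */
static bool radix_enabled(void) { return false; }

static int radix__pmd_same(pmd_t a, pmd_t b) { return a.val == b.val; }

static int hash__pmd_same(pmd_t a, pmd_t b)
{
        /* the hash flavour ignores HPTE bookkeeping bits (mask invented) */
        return ((a.val ^ b.val) & ~0xff0UL) == 0;
}

static int pmd_same(pmd_t a, pmd_t b)
{
        if (radix_enabled())
                return radix__pmd_same(a, b);
        return hash__pmd_same(a, b);
}

int main(void)
{
        pmd_t a = { 0x1010 }, b = { 0x1020 };

        printf("same: %d\n", pmd_same(a, b));
        return 0;
}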
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index dfdd1f4e4cf0..5f290d39e563 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -769,7 +769,6 @@ static inline int __meminit vmemmap_create_mapping(unsigned long start,
 static inline void vmemmap_remove_mapping(unsigned long start,
                                           unsigned long page_size)
 {
-
         if (radix_enabled())
                 return radix__vmemmap_remove_mapping(start, page_size);
         return hash__vmemmap_remove_mapping(start, page_size);
@@ -825,11 +824,52 @@ extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                        pmd_t *pmdp, pmd_t pmd);
 extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
                                  pmd_t *pmd);
-extern int has_transparent_hugepage(void);
+extern int hash__has_transparent_hugepage(void);
+static inline int has_transparent_hugepage(void)
+{
+        return hash__has_transparent_hugepage();
+}
 
-static inline pmd_t pmd_mkhuge(pmd_t pmd)
+static inline unsigned long
+pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp,
+                    unsigned long clr, unsigned long set)
 {
-        return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE));
+        return hash__pmd_hugepage_update(mm, addr, pmdp, clr, set);
+}
+
+static inline int pmd_large(pmd_t pmd)
+{
+        return !!(pmd_val(pmd) & _PAGE_PTE);
+}
+
+static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+{
+        return __pmd(pmd_val(pmd) & ~_PAGE_PRESENT);
+}
+/*
+ * For radix we should always find H_PAGE_HASHPTE zero. Hence
+ * the below will work for radix too
+ */
+static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
+                                              unsigned long addr, pmd_t *pmdp)
+{
+        unsigned long old;
+
+        if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
+                return 0;
+        old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
+        return ((old & _PAGE_ACCESSED) != 0);
+}
+
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+                                      pmd_t *pmdp)
+{
+
+        if ((pmd_val(*pmdp) & _PAGE_WRITE) == 0)
+                return;
+
+        pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
 }
 
 #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
@@ -842,26 +882,43 @@ extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                  unsigned long address, pmd_t *pmdp);
 
 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
-extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
-                                     unsigned long addr, pmd_t *pmdp);
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+                                            unsigned long addr, pmd_t *pmdp)
+{
+        return hash__pmdp_huge_get_and_clear(mm, addr, pmdp);
+}
 
-extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
-                                 unsigned long address, pmd_t *pmdp);
+static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+                                        unsigned long address, pmd_t *pmdp)
+{
+        return hash__pmdp_collapse_flush(vma, address, pmdp);
+}
 #define pmdp_collapse_flush pmdp_collapse_flush
 
 #define __HAVE_ARCH_PGTABLE_DEPOSIT
-extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
-                                       pgtable_t pgtable);
+static inline void pgtable_trans_huge_deposit(struct mm_struct *mm,
+                                              pmd_t *pmdp, pgtable_t pgtable)
+{
+        return hash__pgtable_trans_huge_deposit(mm, pmdp, pgtable);
+}
+
 #define __HAVE_ARCH_PGTABLE_WITHDRAW
-extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+static inline pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm,
+                                                    pmd_t *pmdp)
+{
+        return hash__pgtable_trans_huge_withdraw(mm, pmdp);
+}
 
 #define __HAVE_ARCH_PMDP_INVALIDATE
 extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                             pmd_t *pmdp);
 
 #define __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
-extern void pmdp_huge_split_prepare(struct vm_area_struct *vma,
-                                    unsigned long address, pmd_t *pmdp);
+static inline void pmdp_huge_split_prepare(struct vm_area_struct *vma,
+                                           unsigned long address, pmd_t *pmdp)
+{
+        return hash__pmdp_huge_split_prepare(vma, address, pmdp);
+}
 
 #define pmd_move_must_withdraw pmd_move_must_withdraw
 struct spinlock;
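
Of the helpers now hosted in pgtable.h, __pmdp_test_and_clear_young() is the interesting one: it skips the update entirely when neither _PAGE_ACCESSED nor H_PAGE_HASHPTE is set, otherwise clears the accessed bit through pmd_hugepage_update() and reports whether it had been set. A standalone sketch of that test-and-clear shape (flag values invented; the kernel's update is atomic):

#include <stdio.h>

#define PAGE_ACCESSED 0x1UL     /* invented stand-in for _PAGE_ACCESSED */
#define PAGE_HASHPTE  0x2UL     /* invented stand-in for H_PAGE_HASHPTE */

typedef struct { unsigned long val; } pmd_t;

/* stand-in for pmd_hugepage_update(): clear/set bits, return the old value */
static unsigned long pmd_update(pmd_t *pmdp, unsigned long clr, unsigned long set)
{
        unsigned long old = pmdp->val;  /* the kernel does this atomically */

        pmdp->val = (old & ~clr) | set;
        return old;
}

static int pmdp_test_and_clear_young(pmd_t *pmdp)
{
        unsigned long old;

        /* nothing to do if the entry is neither accessed nor hashed */
        if ((pmdp->val & (PAGE_ACCESSED | PAGE_HASHPTE)) == 0)
                return 0;
        old = pmd_update(pmdp, PAGE_ACCESSED, 0);
        return (old & PAGE_ACCESSED) != 0;
}

int main(void)
{
        pmd_t pmd = { PAGE_ACCESSED };

        printf("young: %d, now: %#lx\n", pmdp_test_and_clear_young(&pmd), pmd.val);
        return 0;
}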
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 47511dd00599..f2cea6d5e764 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -14,7 +14,7 @@ obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \
 obj-$(CONFIG_PPC_BOOK3E)        += tlb_low_$(CONFIG_WORD_SIZE)e.o
 hash64-$(CONFIG_PPC_NATIVE)     := hash_native_64.o
 obj-$(CONFIG_PPC_BOOK3E_64)     += pgtable-book3e.o
-obj-$(CONFIG_PPC_STD_MMU_64)    += pgtable-hash64.o hash_utils_64.o slb_low.o slb.o $(hash64-y) mmu_context_book3s64.o
+obj-$(CONFIG_PPC_STD_MMU_64)    += pgtable-hash64.o hash_utils_64.o slb_low.o slb.o $(hash64-y) mmu_context_book3s64.o pgtable-book3s64.o
 obj-$(CONFIG_PPC_RADIX_MMU)     += pgtable-radix.o tlb-radix.o
 obj-$(CONFIG_PPC_STD_MMU_32)    += ppc_mmu_32.o hash_low_32.o mmu_context_hash32.o
 obj-$(CONFIG_PPC_STD_MMU)       += tlb_hash$(CONFIG_WORD_SIZE).o
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
new file mode 100644
index 000000000000..d566a250164d
--- /dev/null
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+
+#include "mmu_decl.h"
+#include <trace/events/thp.h>
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * This is called when relaxing access to a hugepage. It's also called in the page
+ * fault path when we don't hit any of the major fault cases, ie, a minor
+ * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have
+ * handled those two for us, we additionally deal with missing execute
+ * permission here on some processors
+ */
+int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+                          pmd_t *pmdp, pmd_t entry, int dirty)
+{
+        int changed;
+#ifdef CONFIG_DEBUG_VM
+        WARN_ON(!pmd_trans_huge(*pmdp));
+        assert_spin_locked(&vma->vm_mm->page_table_lock);
+#endif
+        changed = !pmd_same(*(pmdp), entry);
+        if (changed) {
+                __ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
+                /*
+                 * Since we are not supporting SW TLB systems, we don't
+                 * have any thing similar to flush_tlb_page_nohash()
+                 */
+        }
+        return changed;
+}
+
+int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+                              unsigned long address, pmd_t *pmdp)
+{
+        return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
+}
+/*
+ * set a new huge pmd. We should not be called for updating
+ * an existing pmd entry. That should go via pmd_hugepage_update.
+ */
+void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+                pmd_t *pmdp, pmd_t pmd)
+{
+#ifdef CONFIG_DEBUG_VM
+        WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
+        assert_spin_locked(&mm->page_table_lock);
+        WARN_ON(!pmd_trans_huge(pmd));
+#endif
+        trace_hugepage_set_pmd(addr, pmd_val(pmd));
+        return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
+}
+/*
+ * We use this to invalidate a pmdp entry before switching from a
+ * hugepte to regular pmd entry.
+ */
+void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+                     pmd_t *pmdp)
+{
+        pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
+
+        /*
+         * This ensures that generic code that rely on IRQ disabling
+         * to prevent a parallel THP split work as expected.
+         */
+        kick_all_cpus_sync();
+}
+
+static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
+{
+        return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
+}
+
+pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
+{
+        unsigned long pmdv;
+
+        pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
+        return pmd_set_protbits(__pmd(pmdv), pgprot);
+}
+
+pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
+{
+        return pfn_pmd(page_to_pfn(page), pgprot);
+}
+
+pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+        unsigned long pmdv;
+
+        pmdv = pmd_val(pmd);
+        pmdv &= _HPAGE_CHG_MASK;
+        return pmd_set_protbits(__pmd(pmdv), newprot);
+}
+
+/*
+ * This is called at the end of handling a user page fault, when the
+ * fault has been handled by updating a HUGE PMD entry in the linux page tables.
+ * We use it to preload an HPTE into the hash table corresponding to
+ * the updated linux HUGE PMD entry.
+ */
+void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+                          pmd_t *pmd)
+{
+        return;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
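
The pmd constructors moved into this new file are plain bit assembly: pfn_pmd() places the page frame number in the RPN field and ORs in the protection bits, and pmd_modify() keeps only the bits in _HPAGE_CHG_MASK before applying the new protection. A standalone sketch with invented masks (the kernel's PTE_RPN_MASK and _HPAGE_CHG_MASK are MMU-specific):

#include <stdio.h>

#define PAGE_SHIFT     12
#define PTE_RPN_MASK   0x0000fffffffff000UL  /* invented: physical page number bits */
#define HPAGE_CHG_MASK 0x0000fffffffff0ffUL  /* invented: bits preserved by pmd_modify() */

typedef struct { unsigned long val; } pmd_t;

static pmd_t pfn_pmd(unsigned long pfn, unsigned long prot)
{
        /* place the pfn in the RPN field, then OR in the protection bits */
        unsigned long pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;

        return (pmd_t){ pmdv | prot };
}

static pmd_t pmd_modify(pmd_t pmd, unsigned long newprot)
{
        /* keep the pfn and the bits that must survive a protection change */
        return (pmd_t){ (pmd.val & HPAGE_CHG_MASK) | newprot };
}

int main(void)
{
        pmd_t pmd = pfn_pmd(0x1234, 0x5);

        pmd = pmd_modify(pmd, 0x3);
        printf("pmd: %#lx\n", pmd.val);
        return 0;
}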
diff --git a/arch/powerpc/mm/pgtable-hash64.c b/arch/powerpc/mm/pgtable-hash64.c
index 9699a1ccedb5..c23e286a6b8f 100644
--- a/arch/powerpc/mm/pgtable-hash64.c
+++ b/arch/powerpc/mm/pgtable-hash64.c
@@ -99,35 +99,9 @@ int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flag
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
-/*
- * This is called when relaxing access to a hugepage. It's also called in the page
- * fault path when we don't hit any of the major fault cases, ie, a minor
- * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have
- * handled those two for us, we additionally deal with missing execute
- * permission here on some processors
- */
-int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
-                          pmd_t *pmdp, pmd_t entry, int dirty)
-{
-        int changed;
-#ifdef CONFIG_DEBUG_VM
-        WARN_ON(!pmd_trans_huge(*pmdp));
-        assert_spin_locked(&vma->vm_mm->page_table_lock);
-#endif
-        changed = !pmd_same(*(pmdp), entry);
-        if (changed) {
-                __ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
-                /*
-                 * Since we are not supporting SW TLB systems, we don't
-                 * have any thing similar to flush_tlb_page_nohash()
-                 */
-        }
-        return changed;
-}
-
-unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
-                                  pmd_t *pmdp, unsigned long clr,
-                                  unsigned long set)
+unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
+                                        pmd_t *pmdp, unsigned long clr,
+                                        unsigned long set)
 {
         __be64 old_be, tmp;
         unsigned long old;
@@ -158,8 +132,8 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
         return old;
 }
 
-pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
-                          pmd_t *pmdp)
+pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
+                                pmd_t *pmdp)
 {
         pmd_t pmd;
 
@@ -198,24 +172,11 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
 }
 
 /*
- * We currently remove entries from the hashtable regardless of whether
- * the entry was young or dirty.
- *
- * We should be more intelligent about this but for the moment we override
- * these functions and force a tlb flush unconditionally
- */
-int pmdp_test_and_clear_young(struct vm_area_struct *vma,
-                              unsigned long address, pmd_t *pmdp)
-{
-        return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
-}
-
-/*
  * We want to put the pgtable in pmd and use pgtable for tracking
  * the base page size hptes
  */
-void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
-                               pgtable_t pgtable)
+void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+                                      pgtable_t pgtable)
 {
         pgtable_t *pgtable_slot;
         assert_spin_locked(&mm->page_table_lock);
@@ -233,7 +194,7 @@ void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
         smp_wmb();
 }
 
-pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
+pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
 {
         pgtable_t pgtable;
         pgtable_t *pgtable_slot;
@@ -253,8 +214,8 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
         return pgtable;
 }
 
-void pmdp_huge_split_prepare(struct vm_area_struct *vma,
-                             unsigned long address, pmd_t *pmdp)
+void hash__pmdp_huge_split_prepare(struct vm_area_struct *vma,
+                                   unsigned long address, pmd_t *pmdp)
 {
         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
         VM_BUG_ON(REGION_ID(address) != USER_REGION_ID);
@@ -274,39 +235,6 @@ void pmdp_huge_split_prepare(struct vm_area_struct *vma,
         pmd_hugepage_update(vma->vm_mm, address, pmdp, 0, _PAGE_PRIVILEGED);
 }
 
-
-/*
- * set a new huge pmd. We should not be called for updating
- * an existing pmd entry. That should go via pmd_hugepage_update.
- */
-void set_pmd_at(struct mm_struct *mm, unsigned long addr,
-                pmd_t *pmdp, pmd_t pmd)
-{
-#ifdef CONFIG_DEBUG_VM
-        WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
-        assert_spin_locked(&mm->page_table_lock);
-        WARN_ON(!pmd_trans_huge(pmd));
-#endif
-        trace_hugepage_set_pmd(addr, pmd_val(pmd));
-        return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
-}
-
-/*
- * We use this to invalidate a pmdp entry before switching from a
- * hugepte to regular pmd entry.
- */
-void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
-                     pmd_t *pmdp)
-{
-        pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
-
-        /*
-         * This ensures that generic code that rely on IRQ disabling
-         * to prevent a parallel THP split work as expected.
-         */
-        kick_all_cpus_sync();
-}
-
 /*
  * A linux hugepage PMD was changed and the corresponding hash table entries
  * neesd to be flushed.
@@ -346,47 +274,8 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
         return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
 }
 
-static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
-{
-        return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
-}
-
-pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
-{
-        unsigned long pmdv;
-
-        pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
-        return pmd_set_protbits(__pmd(pmdv), pgprot);
-}
-
-pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
-{
-        return pfn_pmd(page_to_pfn(page), pgprot);
-}
-
-pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
-{
-        unsigned long pmdv;
-
-        pmdv = pmd_val(pmd);
-        pmdv &= _HPAGE_CHG_MASK;
-        return pmd_set_protbits(__pmd(pmdv), newprot);
-}
-
-/*
- * This is called at the end of handling a user page fault, when the
- * fault has been handled by updating a HUGE PMD entry in the linux page tables.
- * We use it to preload an HPTE into the hash table corresponding to
- * the updated linux HUGE PMD entry.
- */
-void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
-                          pmd_t *pmd)
-{
-        return;
-}
-
-pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
-                              unsigned long addr, pmd_t *pmdp)
+pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
+                                    unsigned long addr, pmd_t *pmdp)
 {
         pmd_t old_pmd;
         pgtable_t pgtable;
@@ -421,7 +310,7 @@ pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
         return old_pmd;
 }
 
-int has_transparent_hugepage(void)
+int hash__has_transparent_hugepage(void)
 {
 
         if (!mmu_has_feature(MMU_FTR_16M_PAGE))
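
hash__has_transparent_hugepage() gates THP support on the MMU actually providing 16M pages. A toy version of that feature gate (flag value invented; only the check visible in this hunk is modelled):

#include <stdbool.h>
#include <stdio.h>

#define MMU_FTR_16M_PAGE 0x1UL  /* invented flag value for the sketch */

static unsigned long mmu_features = MMU_FTR_16M_PAGE;

static bool mmu_has_feature(unsigned long ftr)
{
        return (mmu_features & ftr) != 0;
}

/* shaped like hash__has_transparent_hugepage(): no 16M pages, no THP */
static int hash__has_transparent_hugepage(void)
{
        if (!mmu_has_feature(MMU_FTR_16M_PAGE))
                return 0;
        return 1;
}

int main(void)
{
        printf("THP supported: %d\n", hash__has_transparent_hugepage());
        return 0;
}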