aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>2016-08-24 05:33:38 -0400
committerMichael Ellerman <mpe@ellerman.id.au>2016-09-13 03:37:10 -0400
commitc6d1a767b9eaa74f0969378ec47791ff8318a65c (patch)
tree466508d46813a767307358da73a45c77f2cab7b9
parent694c4951922d114e789f669deb409b2aef440ae9 (diff)
powerpc/mm/radix: Use different pte update sequence for different POWER9 revs
POWER9 DD1 requires pte to be marked invalid (V=0) before updating it with the new value. This makes this distinction for the different revisions.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgtable.h3
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h5
-rw-r--r--arch/powerpc/include/asm/book3s/64/radix.h75
-rw-r--r--arch/powerpc/include/asm/nohash/32/pgtable.h3
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable.h3
-rw-r--r--arch/powerpc/mm/pgtable-book3s64.c2
-rw-r--r--arch/powerpc/mm/pgtable.c2
7 files changed, 71 insertions, 22 deletions
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 38b33dcfcc9d..6b8b2d57fdc8 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -223,7 +223,8 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
223} 223}
224 224
225 225
226static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry) 226static inline void __ptep_set_access_flags(struct mm_struct *mm,
227 pte_t *ptep, pte_t entry)
227{ 228{
228 unsigned long set = pte_val(entry) & 229 unsigned long set = pte_val(entry) &
229 (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); 230 (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 263bf39ced40..8ec8be9495ba 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -565,10 +565,11 @@ static inline bool check_pte_access(unsigned long access, unsigned long ptev)
565 * Generic functions with hash/radix callbacks 565 * Generic functions with hash/radix callbacks
566 */ 566 */
567 567
568static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry) 568static inline void __ptep_set_access_flags(struct mm_struct *mm,
569 pte_t *ptep, pte_t entry)
569{ 570{
570 if (radix_enabled()) 571 if (radix_enabled())
571 return radix__ptep_set_access_flags(ptep, entry); 572 return radix__ptep_set_access_flags(mm, ptep, entry);
572 return hash__ptep_set_access_flags(ptep, entry); 573 return hash__ptep_set_access_flags(ptep, entry);
573} 574}
574 575
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index a2fe8fbfbd3d..2a46dea8e1b1 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -11,6 +11,11 @@
11#include <asm/book3s/64/radix-4k.h> 11#include <asm/book3s/64/radix-4k.h>
12#endif 12#endif
13 13
14#ifndef __ASSEMBLY__
15#include <asm/book3s/64/tlbflush-radix.h>
16#include <asm/cpu_has_feature.h>
17#endif
18
14/* An empty PTE can still have a R or C writeback */ 19/* An empty PTE can still have a R or C writeback */
15#define RADIX_PTE_NONE_MASK (_PAGE_DIRTY | _PAGE_ACCESSED) 20#define RADIX_PTE_NONE_MASK (_PAGE_DIRTY | _PAGE_ACCESSED)
16 21
@@ -105,11 +110,8 @@
105#define RADIX_PUD_TABLE_SIZE (sizeof(pud_t) << RADIX_PUD_INDEX_SIZE) 110#define RADIX_PUD_TABLE_SIZE (sizeof(pud_t) << RADIX_PUD_INDEX_SIZE)
106#define RADIX_PGD_TABLE_SIZE (sizeof(pgd_t) << RADIX_PGD_INDEX_SIZE) 111#define RADIX_PGD_TABLE_SIZE (sizeof(pgd_t) << RADIX_PGD_INDEX_SIZE)
107 112
108static inline unsigned long radix__pte_update(struct mm_struct *mm, 113static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,
109 unsigned long addr, 114 unsigned long set)
110 pte_t *ptep, unsigned long clr,
111 unsigned long set,
112 int huge)
113{ 115{
114 pte_t pte; 116 pte_t pte;
115 unsigned long old_pte, new_pte; 117 unsigned long old_pte, new_pte;
@@ -121,9 +123,39 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
121 123
122 } while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte))); 124 } while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
123 125
124 /* We already do a sync in cmpxchg, is ptesync needed ?*/ 126 return old_pte;
127}
128
129
130static inline unsigned long radix__pte_update(struct mm_struct *mm,
131 unsigned long addr,
132 pte_t *ptep, unsigned long clr,
133 unsigned long set,
134 int huge)
135{
136 unsigned long old_pte;
137
138 if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
139
140 unsigned long new_pte;
141
142 old_pte = __radix_pte_update(ptep, ~0, 0);
143 asm volatile("ptesync" : : : "memory");
144 /*
145 * new value of pte
146 */
147 new_pte = (old_pte | set) & ~clr;
148
149 /*
150 * For now let's do heavy pid flush
151 * radix__flush_tlb_page_psize(mm, addr, mmu_virtual_psize);
152 */
153 radix__flush_tlb_mm(mm);
154
155 __radix_pte_update(ptep, 0, new_pte);
156 } else
157 old_pte = __radix_pte_update(ptep, clr, set);
125 asm volatile("ptesync" : : : "memory"); 158 asm volatile("ptesync" : : : "memory");
126 /* huge pages use the old page table lock */
127 if (!huge) 159 if (!huge)
128 assert_pte_locked(mm, addr); 160 assert_pte_locked(mm, addr);
129 161
@@ -134,20 +166,33 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
134 * Set the dirty and/or accessed bits atomically in a linux PTE, this 166 * Set the dirty and/or accessed bits atomically in a linux PTE, this
135 * function doesn't need to invalidate tlb. 167 * function doesn't need to invalidate tlb.
136 */ 168 */
137static inline void radix__ptep_set_access_flags(pte_t *ptep, pte_t entry) 169static inline void radix__ptep_set_access_flags(struct mm_struct *mm,
170 pte_t *ptep, pte_t entry)
138{ 171{
139 pte_t pte; 172
140 unsigned long old_pte, new_pte;
141 unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | 173 unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
142 _PAGE_RW | _PAGE_EXEC); 174 _PAGE_RW | _PAGE_EXEC);
143 do { 175
144 pte = READ_ONCE(*ptep); 176 if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
145 old_pte = pte_val(pte); 177
178 unsigned long old_pte, new_pte;
179
180 old_pte = __radix_pte_update(ptep, ~0, 0);
181 asm volatile("ptesync" : : : "memory");
182 /*
183 * new value of pte
184 */
146 new_pte = old_pte | set; 185 new_pte = old_pte | set;
147 186
148 } while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte))); 187 /*
188 * For now let's do heavy pid flush
189 * radix__flush_tlb_page_psize(mm, addr, mmu_virtual_psize);
190 */
191 radix__flush_tlb_mm(mm);
149 192
150 /* We already do a sync in cmpxchg, is ptesync needed ?*/ 193 __radix_pte_update(ptep, 0, new_pte);
194 } else
195 __radix_pte_update(ptep, 0, set);
151 asm volatile("ptesync" : : : "memory"); 196 asm volatile("ptesync" : : : "memory");
152} 197}
153 198
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 780847597514..c219ef7be53b 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -267,7 +267,8 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
267} 267}
268 268
269 269
270static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry) 270static inline void __ptep_set_access_flags(struct mm_struct *mm,
271 pte_t *ptep, pte_t entry)
271{ 272{
272 unsigned long set = pte_val(entry) & 273 unsigned long set = pte_val(entry) &
273 (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); 274 (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index d4d808cf905e..653a1838469d 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -300,7 +300,8 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
300/* Set the dirty and/or accessed bits atomically in a linux PTE, this 300/* Set the dirty and/or accessed bits atomically in a linux PTE, this
301 * function doesn't need to flush the hash entry 301 * function doesn't need to flush the hash entry
302 */ 302 */
303static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry) 303static inline void __ptep_set_access_flags(struct mm_struct *mm,
304 pte_t *ptep, pte_t entry)
304{ 305{
305 unsigned long bits = pte_val(entry) & 306 unsigned long bits = pte_val(entry) &
306 (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); 307 (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 34079302cc17..7328886bca4c 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -35,7 +35,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
35#endif 35#endif
36 changed = !pmd_same(*(pmdp), entry); 36 changed = !pmd_same(*(pmdp), entry);
37 if (changed) { 37 if (changed) {
38 __ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry)); 38 __ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp), pmd_pte(entry));
39 flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); 39 flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
40 } 40 }
41 return changed; 41 return changed;
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 0b6fb244d0a1..911fdfb63ec1 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -224,7 +224,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
224 if (changed) { 224 if (changed) {
225 if (!is_vm_hugetlb_page(vma)) 225 if (!is_vm_hugetlb_page(vma))
226 assert_pte_locked(vma->vm_mm, address); 226 assert_pte_locked(vma->vm_mm, address);
227 __ptep_set_access_flags(ptep, entry); 227 __ptep_set_access_flags(vma->vm_mm, ptep, entry);
228 flush_tlb_page(vma, address); 228 flush_tlb_page(vma, address);
229 } 229 }
230 return changed; 230 return changed;