aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>2012-09-09 22:52:50 -0400
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2012-09-17 02:31:49 -0400
commit5524a27d39b68770f203d8d42eb5a95dde4933bc (patch)
treea238d67d29bce6fd893cd4c5545b5daf058ba5fd
parentdcda287a9b26309ae43a091d0ecde16f8f61b4c0 (diff)
powerpc/mm: Convert virtual address to vpn
This patch convert different functions to take virtual page number instead of virtual address. Virtual page number is virtual address shifted right by VPN_SHIFT (12) bits. This enable us to have an address range of upto 76 bits. Reviewed-by: Paul Mackerras <paulus@samba.org> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h2
-rw-r--r--arch/powerpc/include/asm/machdep.h6
-rw-r--r--arch/powerpc/include/asm/mmu-hash64.h78
-rw-r--r--arch/powerpc/include/asm/pte-hash64-64k.h18
-rw-r--r--arch/powerpc/include/asm/tlbflush.h4
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu_host.c8
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c17
-rw-r--r--arch/powerpc/kvm/trace.h14
-rw-r--r--arch/powerpc/mm/hash_low_64.S97
-rw-r--r--arch/powerpc/mm/hash_native_64.c121
-rw-r--r--arch/powerpc/mm/hash_utils_64.c30
-rw-r--r--arch/powerpc/mm/hugetlbpage-hash64.c15
-rw-r--r--arch/powerpc/mm/tlb_hash64.c11
-rw-r--r--arch/powerpc/platforms/cell/beat_htab.c45
-rw-r--r--arch/powerpc/platforms/ps3/htab.c22
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c76
16 files changed, 324 insertions, 240 deletions
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index f0e0c6a66d97..7aefdb3e1ce4 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -59,7 +59,7 @@ struct hpte_cache {
59 struct hlist_node list_vpte; 59 struct hlist_node list_vpte;
60 struct hlist_node list_vpte_long; 60 struct hlist_node list_vpte_long;
61 struct rcu_head rcu_head; 61 struct rcu_head rcu_head;
62 u64 host_va; 62 u64 host_vpn;
63 u64 pfn; 63 u64 pfn;
64 ulong slot; 64 ulong slot;
65 struct kvmppc_pte pte; 65 struct kvmppc_pte pte;
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 8111e1b78f7f..c4231973edd3 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -34,19 +34,19 @@ struct machdep_calls {
34 char *name; 34 char *name;
35#ifdef CONFIG_PPC64 35#ifdef CONFIG_PPC64
36 void (*hpte_invalidate)(unsigned long slot, 36 void (*hpte_invalidate)(unsigned long slot,
37 unsigned long va, 37 unsigned long vpn,
38 int psize, int ssize, 38 int psize, int ssize,
39 int local); 39 int local);
40 long (*hpte_updatepp)(unsigned long slot, 40 long (*hpte_updatepp)(unsigned long slot,
41 unsigned long newpp, 41 unsigned long newpp,
42 unsigned long va, 42 unsigned long vpn,
43 int psize, int ssize, 43 int psize, int ssize,
44 int local); 44 int local);
45 void (*hpte_updateboltedpp)(unsigned long newpp, 45 void (*hpte_updateboltedpp)(unsigned long newpp,
46 unsigned long ea, 46 unsigned long ea,
47 int psize, int ssize); 47 int psize, int ssize);
48 long (*hpte_insert)(unsigned long hpte_group, 48 long (*hpte_insert)(unsigned long hpte_group,
49 unsigned long va, 49 unsigned long vpn,
50 unsigned long prpn, 50 unsigned long prpn,
51 unsigned long rflags, 51 unsigned long rflags,
52 unsigned long vflags, 52 unsigned long vflags,
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 1c65a59881ea..6aeb4986a373 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -154,9 +154,25 @@ struct mmu_psize_def
154#define MMU_SEGSIZE_256M 0 154#define MMU_SEGSIZE_256M 0
155#define MMU_SEGSIZE_1T 1 155#define MMU_SEGSIZE_1T 1
156 156
157/*
158 * encode page number shift.
159 * in order to fit the 78 bit va in a 64 bit variable we shift the va by
160 * 12 bits. This enable us to address upto 76 bit va.
161 * For hpt hash from a va we can ignore the page size bits of va and for
162 * hpte encoding we ignore up to 23 bits of va. So ignoring lower 12 bits ensure
163 * we work in all cases including 4k page size.
164 */
165#define VPN_SHIFT 12
157 166
158#ifndef __ASSEMBLY__ 167#ifndef __ASSEMBLY__
159 168
169static inline int segment_shift(int ssize)
170{
171 if (ssize == MMU_SEGSIZE_256M)
172 return SID_SHIFT;
173 return SID_SHIFT_1T;
174}
175
160/* 176/*
161 * The current system page and segment sizes 177 * The current system page and segment sizes
162 */ 178 */
@@ -180,18 +196,39 @@ extern unsigned long tce_alloc_start, tce_alloc_end;
180extern int mmu_ci_restrictions; 196extern int mmu_ci_restrictions;
181 197
182/* 198/*
199 * This computes the AVPN and B fields of the first dword of a HPTE,
200 * for use when we want to match an existing PTE. The bottom 7 bits
201 * of the returned value are zero.
202 */
203static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
204 int ssize)
205{
206 unsigned long v;
207 /*
208 * The AVA field omits the low-order 23 bits of the 78 bits VA.
209 * These bits are not needed in the PTE, because the
210 * low-order b of these bits are part of the byte offset
211 * into the virtual page and, if b < 23, the high-order
212 * 23-b of these bits are always used in selecting the
213 * PTEGs to be searched
214 */
215 v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
216 v <<= HPTE_V_AVPN_SHIFT;
217 v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
218 return v;
219}
220
221/*
183 * This function sets the AVPN and L fields of the HPTE appropriately 222 * This function sets the AVPN and L fields of the HPTE appropriately
184 * for the page size 223 * for the page size
185 */ 224 */
186static inline unsigned long hpte_encode_v(unsigned long va, int psize, 225static inline unsigned long hpte_encode_v(unsigned long vpn,
187 int ssize) 226 int psize, int ssize)
188{ 227{
189 unsigned long v; 228 unsigned long v;
190 v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm); 229 v = hpte_encode_avpn(vpn, psize, ssize);
191 v <<= HPTE_V_AVPN_SHIFT;
192 if (psize != MMU_PAGE_4K) 230 if (psize != MMU_PAGE_4K)
193 v |= HPTE_V_LARGE; 231 v |= HPTE_V_LARGE;
194 v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
195 return v; 232 return v;
196} 233}
197 234
@@ -216,30 +253,37 @@ static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
216} 253}
217 254
218/* 255/*
219 * Build a VA given VSID, EA and segment size 256 * Build a VPN_SHIFT bit shifted va given VSID, EA and segment size.
220 */ 257 */
221static inline unsigned long hpt_va(unsigned long ea, unsigned long vsid, 258static inline unsigned long hpt_vpn(unsigned long ea,
222 int ssize) 259 unsigned long vsid, int ssize)
223{ 260{
224 if (ssize == MMU_SEGSIZE_256M) 261 unsigned long mask;
225 return (vsid << 28) | (ea & 0xfffffffUL); 262 int s_shift = segment_shift(ssize);
226 return (vsid << 40) | (ea & 0xffffffffffUL); 263
264 mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
265 return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
227} 266}
228 267
229/* 268/*
230 * This hashes a virtual address 269 * This hashes a virtual address
231 */ 270 */
232 271static inline unsigned long hpt_hash(unsigned long vpn,
233static inline unsigned long hpt_hash(unsigned long va, unsigned int shift, 272 unsigned int shift, int ssize)
234 int ssize)
235{ 273{
274 int mask;
236 unsigned long hash, vsid; 275 unsigned long hash, vsid;
237 276
277 /* VPN_SHIFT can be atmost 12 */
238 if (ssize == MMU_SEGSIZE_256M) { 278 if (ssize == MMU_SEGSIZE_256M) {
239 hash = (va >> 28) ^ ((va & 0x0fffffffUL) >> shift); 279 mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
280 hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
281 ((vpn & mask) >> (shift - VPN_SHIFT));
240 } else { 282 } else {
241 vsid = va >> 40; 283 mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
242 hash = vsid ^ (vsid << 25) ^ ((va & 0xffffffffffUL) >> shift); 284 vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
285 hash = vsid ^ (vsid << 25) ^
286 ((vpn & mask) >> (shift - VPN_SHIFT)) ;
243 } 287 }
244 return hash & 0x7fffffffffUL; 288 return hash & 0x7fffffffffUL;
245} 289}
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index 59247e816ac5..eedf427c9124 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -58,14 +58,16 @@
58/* Trick: we set __end to va + 64k, which happens works for 58/* Trick: we set __end to va + 64k, which happens works for
59 * a 16M page as well as we want only one iteration 59 * a 16M page as well as we want only one iteration
60 */ 60 */
61#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ 61#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) \
62 do { \ 62 do { \
63 unsigned long __end = va + PAGE_SIZE; \ 63 unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT)); \
64 unsigned __split = (psize == MMU_PAGE_4K || \ 64 unsigned __split = (psize == MMU_PAGE_4K || \
65 psize == MMU_PAGE_64K_AP); \ 65 psize == MMU_PAGE_64K_AP); \
66 shift = mmu_psize_defs[psize].shift; \ 66 shift = mmu_psize_defs[psize].shift; \
67 for (index = 0; va < __end; index++, va += (1L << shift)) { \ 67 for (index = 0; vpn < __end; index++, \
68 if (!__split || __rpte_sub_valid(rpte, index)) do { \ 68 vpn += (1L << (shift - VPN_SHIFT))) { \
69 if (!__split || __rpte_sub_valid(rpte, index)) \
70 do {
69 71
70#define pte_iterate_hashed_end() } while(0); } } while(0) 72#define pte_iterate_hashed_end() } while(0); } } while(0)
71 73
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index 81143fcbd113..fc02d1dee955 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -95,7 +95,7 @@ struct ppc64_tlb_batch {
95 unsigned long index; 95 unsigned long index;
96 struct mm_struct *mm; 96 struct mm_struct *mm;
97 real_pte_t pte[PPC64_TLB_BATCH_NR]; 97 real_pte_t pte[PPC64_TLB_BATCH_NR];
98 unsigned long vaddr[PPC64_TLB_BATCH_NR]; 98 unsigned long vpn[PPC64_TLB_BATCH_NR];
99 unsigned int psize; 99 unsigned int psize;
100 int ssize; 100 int ssize;
101}; 101};
@@ -127,7 +127,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
127#define arch_flush_lazy_mmu_mode() do {} while (0) 127#define arch_flush_lazy_mmu_mode() do {} while (0)
128 128
129 129
130extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize, 130extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
131 int ssize, int local); 131 int ssize, int local);
132extern void flush_hash_range(unsigned long number, int local); 132extern void flush_hash_range(unsigned long number, int local);
133 133
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 837f13e7b6bf..00aa61268e0d 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -141,7 +141,7 @@ extern char etext[];
141int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) 141int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
142{ 142{
143 pfn_t hpaddr; 143 pfn_t hpaddr;
144 u64 va; 144 u64 vpn;
145 u64 vsid; 145 u64 vsid;
146 struct kvmppc_sid_map *map; 146 struct kvmppc_sid_map *map;
147 volatile u32 *pteg; 147 volatile u32 *pteg;
@@ -173,7 +173,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
173 BUG_ON(!map); 173 BUG_ON(!map);
174 174
175 vsid = map->host_vsid; 175 vsid = map->host_vsid;
176 va = (vsid << SID_SHIFT) | (eaddr & ~ESID_MASK); 176 vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) | ((eaddr & ~ESID_MASK) >> VPN_SHIFT)
177 177
178next_pteg: 178next_pteg:
179 if (rr == 16) { 179 if (rr == 16) {
@@ -244,11 +244,11 @@ next_pteg:
244 dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n", 244 dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
245 orig_pte->may_write ? 'w' : '-', 245 orig_pte->may_write ? 'w' : '-',
246 orig_pte->may_execute ? 'x' : '-', 246 orig_pte->may_execute ? 'x' : '-',
247 orig_pte->eaddr, (ulong)pteg, va, 247 orig_pte->eaddr, (ulong)pteg, vpn,
248 orig_pte->vpage, hpaddr); 248 orig_pte->vpage, hpaddr);
249 249
250 pte->slot = (ulong)&pteg[rr]; 250 pte->slot = (ulong)&pteg[rr];
251 pte->host_va = va; 251 pte->host_vpn = vpn;
252 pte->pte = *orig_pte; 252 pte->pte = *orig_pte;
253 pte->pfn = hpaddr >> PAGE_SHIFT; 253 pte->pfn = hpaddr >> PAGE_SHIFT;
254 254
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 0688b6b39585..4d72f9ebc554 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -33,7 +33,7 @@
33 33
34void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) 34void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
35{ 35{
36 ppc_md.hpte_invalidate(pte->slot, pte->host_va, 36 ppc_md.hpte_invalidate(pte->slot, pte->host_vpn,
37 MMU_PAGE_4K, MMU_SEGSIZE_256M, 37 MMU_PAGE_4K, MMU_SEGSIZE_256M,
38 false); 38 false);
39} 39}
@@ -80,8 +80,9 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
80 80
81int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) 81int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
82{ 82{
83 unsigned long vpn;
83 pfn_t hpaddr; 84 pfn_t hpaddr;
84 ulong hash, hpteg, va; 85 ulong hash, hpteg;
85 u64 vsid; 86 u64 vsid;
86 int ret; 87 int ret;
87 int rflags = 0x192; 88 int rflags = 0x192;
@@ -117,7 +118,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
117 } 118 }
118 119
119 vsid = map->host_vsid; 120 vsid = map->host_vsid;
120 va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M); 121 vpn = hpt_vpn(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);
121 122
122 if (!orig_pte->may_write) 123 if (!orig_pte->may_write)
123 rflags |= HPTE_R_PP; 124 rflags |= HPTE_R_PP;
@@ -129,7 +130,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
129 else 130 else
130 kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT); 131 kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
131 132
132 hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M); 133 hash = hpt_hash(vpn, PTE_SIZE, MMU_SEGSIZE_256M);
133 134
134map_again: 135map_again:
135 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 136 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
@@ -141,7 +142,8 @@ map_again:
141 goto out; 142 goto out;
142 } 143 }
143 144
144 ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags, MMU_PAGE_4K, MMU_SEGSIZE_256M); 145 ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
146 MMU_PAGE_4K, MMU_SEGSIZE_256M);
145 147
146 if (ret < 0) { 148 if (ret < 0) {
147 /* If we couldn't map a primary PTE, try a secondary */ 149 /* If we couldn't map a primary PTE, try a secondary */
@@ -152,7 +154,8 @@ map_again:
152 } else { 154 } else {
153 struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu); 155 struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu);
154 156
155 trace_kvm_book3s_64_mmu_map(rflags, hpteg, va, hpaddr, orig_pte); 157 trace_kvm_book3s_64_mmu_map(rflags, hpteg,
158 vpn, hpaddr, orig_pte);
156 159
157 /* The ppc_md code may give us a secondary entry even though we 160 /* The ppc_md code may give us a secondary entry even though we
158 asked for a primary. Fix up. */ 161 asked for a primary. Fix up. */
@@ -162,7 +165,7 @@ map_again:
162 } 165 }
163 166
164 pte->slot = hpteg + (ret & 7); 167 pte->slot = hpteg + (ret & 7);
165 pte->host_va = va; 168 pte->host_vpn = vpn;
166 pte->pte = *orig_pte; 169 pte->pte = *orig_pte;
167 pte->pfn = hpaddr >> PAGE_SHIFT; 170 pte->pfn = hpaddr >> PAGE_SHIFT;
168 171
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index 877186b7b1c3..ddb6a2149d44 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -189,7 +189,7 @@ TRACE_EVENT(kvm_book3s_mmu_map,
189 TP_ARGS(pte), 189 TP_ARGS(pte),
190 190
191 TP_STRUCT__entry( 191 TP_STRUCT__entry(
192 __field( u64, host_va ) 192 __field( u64, host_vpn )
193 __field( u64, pfn ) 193 __field( u64, pfn )
194 __field( ulong, eaddr ) 194 __field( ulong, eaddr )
195 __field( u64, vpage ) 195 __field( u64, vpage )
@@ -198,7 +198,7 @@ TRACE_EVENT(kvm_book3s_mmu_map,
198 ), 198 ),
199 199
200 TP_fast_assign( 200 TP_fast_assign(
201 __entry->host_va = pte->host_va; 201 __entry->host_vpn = pte->host_vpn;
202 __entry->pfn = pte->pfn; 202 __entry->pfn = pte->pfn;
203 __entry->eaddr = pte->pte.eaddr; 203 __entry->eaddr = pte->pte.eaddr;
204 __entry->vpage = pte->pte.vpage; 204 __entry->vpage = pte->pte.vpage;
@@ -208,8 +208,8 @@ TRACE_EVENT(kvm_book3s_mmu_map,
208 (pte->pte.may_execute ? 0x1 : 0); 208 (pte->pte.may_execute ? 0x1 : 0);
209 ), 209 ),
210 210
211 TP_printk("Map: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]", 211 TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
212 __entry->host_va, __entry->pfn, __entry->eaddr, 212 __entry->host_vpn, __entry->pfn, __entry->eaddr,
213 __entry->vpage, __entry->raddr, __entry->flags) 213 __entry->vpage, __entry->raddr, __entry->flags)
214); 214);
215 215
@@ -218,7 +218,7 @@ TRACE_EVENT(kvm_book3s_mmu_invalidate,
218 TP_ARGS(pte), 218 TP_ARGS(pte),
219 219
220 TP_STRUCT__entry( 220 TP_STRUCT__entry(
221 __field( u64, host_va ) 221 __field( u64, host_vpn )
222 __field( u64, pfn ) 222 __field( u64, pfn )
223 __field( ulong, eaddr ) 223 __field( ulong, eaddr )
224 __field( u64, vpage ) 224 __field( u64, vpage )
@@ -227,7 +227,7 @@ TRACE_EVENT(kvm_book3s_mmu_invalidate,
227 ), 227 ),
228 228
229 TP_fast_assign( 229 TP_fast_assign(
230 __entry->host_va = pte->host_va; 230 __entry->host_vpn = pte->host_vpn;
231 __entry->pfn = pte->pfn; 231 __entry->pfn = pte->pfn;
232 __entry->eaddr = pte->pte.eaddr; 232 __entry->eaddr = pte->pte.eaddr;
233 __entry->vpage = pte->pte.vpage; 233 __entry->vpage = pte->pte.vpage;
@@ -238,7 +238,7 @@ TRACE_EVENT(kvm_book3s_mmu_invalidate,
238 ), 238 ),
239 239
240 TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]", 240 TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
241 __entry->host_va, __entry->pfn, __entry->eaddr, 241 __entry->host_vpn, __entry->pfn, __entry->eaddr,
242 __entry->vpage, __entry->raddr, __entry->flags) 242 __entry->vpage, __entry->raddr, __entry->flags)
243); 243);
244 244
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 602aeb06d298..56585086413a 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -63,7 +63,7 @@ _GLOBAL(__hash_page_4K)
63 /* Save non-volatile registers. 63 /* Save non-volatile registers.
64 * r31 will hold "old PTE" 64 * r31 will hold "old PTE"
65 * r30 is "new PTE" 65 * r30 is "new PTE"
66 * r29 is "va" 66 * r29 is vpn
67 * r28 is a hash value 67 * r28 is a hash value
68 * r27 is hashtab mask (maybe dynamic patched instead ?) 68 * r27 is hashtab mask (maybe dynamic patched instead ?)
69 */ 69 */
@@ -111,10 +111,10 @@ BEGIN_FTR_SECTION
111 cmpdi r9,0 /* check segment size */ 111 cmpdi r9,0 /* check segment size */
112 bne 3f 112 bne 3f
113END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) 113END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
114 /* Calc va and put it in r29 */ 114 /* Calc vpn and put it in r29 */
115 rldicr r29,r5,28,63-28 115 sldi r29,r5,SID_SHIFT - VPN_SHIFT
116 rldicl r3,r3,0,36 116 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
117 or r29,r3,r29 117 or r29,r28,r29
118 118
119 /* Calculate hash value for primary slot and store it in r28 */ 119 /* Calculate hash value for primary slot and store it in r28 */
120 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ 120 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
@@ -122,14 +122,19 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
122 xor r28,r5,r0 122 xor r28,r5,r0
123 b 4f 123 b 4f
124 124
1253: /* Calc VA and hash in r29 and r28 for 1T segment */ 1253: /* Calc vpn and put it in r29 */
126 sldi r29,r5,40 /* vsid << 40 */ 126 sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
127 clrldi r3,r3,24 /* ea & 0xffffffffff */ 127 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
128 or r29,r28,r29
129
130 /*
131 * calculate hash value for primary slot and
132 * store it in r28 for 1T segment
133 */
128 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ 134 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
129 clrldi r5,r5,40 /* vsid & 0xffffff */ 135 clrldi r5,r5,40 /* vsid & 0xffffff */
130 rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ 136 rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */
131 xor r28,r28,r5 137 xor r28,r28,r5
132 or r29,r3,r29 /* VA */
133 xor r28,r28,r0 /* hash */ 138 xor r28,r28,r0 /* hash */
134 139
135 /* Convert linux PTE bits into HW equivalents */ 140 /* Convert linux PTE bits into HW equivalents */
@@ -185,7 +190,7 @@ htab_insert_pte:
185 190
186 /* Call ppc_md.hpte_insert */ 191 /* Call ppc_md.hpte_insert */
187 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 192 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
188 mr r4,r29 /* Retrieve va */ 193 mr r4,r29 /* Retrieve vpn */
189 li r7,0 /* !bolted, !secondary */ 194 li r7,0 /* !bolted, !secondary */
190 li r8,MMU_PAGE_4K /* page size */ 195 li r8,MMU_PAGE_4K /* page size */
191 ld r9,STK_PARAM(R9)(r1) /* segment size */ 196 ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -208,7 +213,7 @@ _GLOBAL(htab_call_hpte_insert1)
208 213
209 /* Call ppc_md.hpte_insert */ 214 /* Call ppc_md.hpte_insert */
210 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 215 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
211 mr r4,r29 /* Retrieve va */ 216 mr r4,r29 /* Retrieve vpn */
212 li r7,HPTE_V_SECONDARY /* !bolted, secondary */ 217 li r7,HPTE_V_SECONDARY /* !bolted, secondary */
213 li r8,MMU_PAGE_4K /* page size */ 218 li r8,MMU_PAGE_4K /* page size */
214 ld r9,STK_PARAM(R9)(r1) /* segment size */ 219 ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -278,7 +283,7 @@ htab_modify_pte:
278 add r3,r0,r3 /* add slot idx */ 283 add r3,r0,r3 /* add slot idx */
279 284
280 /* Call ppc_md.hpte_updatepp */ 285 /* Call ppc_md.hpte_updatepp */
281 mr r5,r29 /* va */ 286 mr r5,r29 /* vpn */
282 li r6,MMU_PAGE_4K /* page size */ 287 li r6,MMU_PAGE_4K /* page size */
283 ld r7,STK_PARAM(R9)(r1) /* segment size */ 288 ld r7,STK_PARAM(R9)(r1) /* segment size */
284 ld r8,STK_PARAM(R8)(r1) /* get "local" param */ 289 ld r8,STK_PARAM(R8)(r1) /* get "local" param */
@@ -339,7 +344,7 @@ _GLOBAL(__hash_page_4K)
339 /* Save non-volatile registers. 344 /* Save non-volatile registers.
340 * r31 will hold "old PTE" 345 * r31 will hold "old PTE"
341 * r30 is "new PTE" 346 * r30 is "new PTE"
342 * r29 is "va" 347 * r29 is vpn
343 * r28 is a hash value 348 * r28 is a hash value
344 * r27 is hashtab mask (maybe dynamic patched instead ?) 349 * r27 is hashtab mask (maybe dynamic patched instead ?)
345 * r26 is the hidx mask 350 * r26 is the hidx mask
@@ -394,10 +399,14 @@ BEGIN_FTR_SECTION
394 cmpdi r9,0 /* check segment size */ 399 cmpdi r9,0 /* check segment size */
395 bne 3f 400 bne 3f
396END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) 401END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
397 /* Calc va and put it in r29 */ 402 /* Calc vpn and put it in r29 */
398 rldicr r29,r5,28,63-28 /* r29 = (vsid << 28) */ 403 sldi r29,r5,SID_SHIFT - VPN_SHIFT
399 rldicl r3,r3,0,36 /* r3 = (ea & 0x0fffffff) */ 404 /*
400 or r29,r3,r29 /* r29 = va */ 405 * clrldi r3,r3,64 - SID_SHIFT --> ea & 0xfffffff
406 * srdi r28,r3,VPN_SHIFT
407 */
408 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
409 or r29,r28,r29
401 410
402 /* Calculate hash value for primary slot and store it in r28 */ 411 /* Calculate hash value for primary slot and store it in r28 */
403 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ 412 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
@@ -405,14 +414,23 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
405 xor r28,r5,r0 414 xor r28,r5,r0
406 b 4f 415 b 4f
407 416
4083: /* Calc VA and hash in r29 and r28 for 1T segment */ 4173: /* Calc vpn and put it in r29 */
409 sldi r29,r5,40 /* vsid << 40 */ 418 sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
410 clrldi r3,r3,24 /* ea & 0xffffffffff */ 419 /*
420 * clrldi r3,r3,64 - SID_SHIFT_1T --> ea & 0xffffffffff
421 * srdi r28,r3,VPN_SHIFT
422 */
423 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
424 or r29,r28,r29
425
426 /*
427 * Calculate hash value for primary slot and
428 * store it in r28 for 1T segment
429 */
411 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ 430 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
412 clrldi r5,r5,40 /* vsid & 0xffffff */ 431 clrldi r5,r5,40 /* vsid & 0xffffff */
413 rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ 432 rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */
414 xor r28,r28,r5 433 xor r28,r28,r5
415 or r29,r3,r29 /* VA */
416 xor r28,r28,r0 /* hash */ 434 xor r28,r28,r0 /* hash */
417 435
418 /* Convert linux PTE bits into HW equivalents */ 436 /* Convert linux PTE bits into HW equivalents */
@@ -488,7 +506,7 @@ htab_special_pfn:
488 506
489 /* Call ppc_md.hpte_insert */ 507 /* Call ppc_md.hpte_insert */
490 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 508 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
491 mr r4,r29 /* Retrieve va */ 509 mr r4,r29 /* Retrieve vpn */
492 li r7,0 /* !bolted, !secondary */ 510 li r7,0 /* !bolted, !secondary */
493 li r8,MMU_PAGE_4K /* page size */ 511 li r8,MMU_PAGE_4K /* page size */
494 ld r9,STK_PARAM(R9)(r1) /* segment size */ 512 ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -515,7 +533,7 @@ _GLOBAL(htab_call_hpte_insert1)
515 533
516 /* Call ppc_md.hpte_insert */ 534 /* Call ppc_md.hpte_insert */
517 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 535 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
518 mr r4,r29 /* Retrieve va */ 536 mr r4,r29 /* Retrieve vpn */
519 li r7,HPTE_V_SECONDARY /* !bolted, secondary */ 537 li r7,HPTE_V_SECONDARY /* !bolted, secondary */
520 li r8,MMU_PAGE_4K /* page size */ 538 li r8,MMU_PAGE_4K /* page size */
521 ld r9,STK_PARAM(R9)(r1) /* segment size */ 539 ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -547,7 +565,7 @@ _GLOBAL(htab_call_hpte_remove)
547 * useless now that the segment has been switched to 4k pages. 565 * useless now that the segment has been switched to 4k pages.
548 */ 566 */
549htab_inval_old_hpte: 567htab_inval_old_hpte:
550 mr r3,r29 /* virtual addr */ 568 mr r3,r29 /* vpn */
551 mr r4,r31 /* PTE.pte */ 569 mr r4,r31 /* PTE.pte */
552 li r5,0 /* PTE.hidx */ 570 li r5,0 /* PTE.hidx */
553 li r6,MMU_PAGE_64K /* psize */ 571 li r6,MMU_PAGE_64K /* psize */
@@ -620,7 +638,7 @@ htab_modify_pte:
620 add r3,r0,r3 /* add slot idx */ 638 add r3,r0,r3 /* add slot idx */
621 639
622 /* Call ppc_md.hpte_updatepp */ 640 /* Call ppc_md.hpte_updatepp */
623 mr r5,r29 /* va */ 641 mr r5,r29 /* vpn */
624 li r6,MMU_PAGE_4K /* page size */ 642 li r6,MMU_PAGE_4K /* page size */
625 ld r7,STK_PARAM(R9)(r1) /* segment size */ 643 ld r7,STK_PARAM(R9)(r1) /* segment size */
626 ld r8,STK_PARAM(R8)(r1) /* get "local" param */ 644 ld r8,STK_PARAM(R8)(r1) /* get "local" param */
@@ -676,7 +694,7 @@ _GLOBAL(__hash_page_64K)
676 /* Save non-volatile registers. 694 /* Save non-volatile registers.
677 * r31 will hold "old PTE" 695 * r31 will hold "old PTE"
678 * r30 is "new PTE" 696 * r30 is "new PTE"
679 * r29 is "va" 697 * r29 is vpn
680 * r28 is a hash value 698 * r28 is a hash value
681 * r27 is hashtab mask (maybe dynamic patched instead ?) 699 * r27 is hashtab mask (maybe dynamic patched instead ?)
682 */ 700 */
@@ -729,10 +747,10 @@ BEGIN_FTR_SECTION
729 cmpdi r9,0 /* check segment size */ 747 cmpdi r9,0 /* check segment size */
730 bne 3f 748 bne 3f
731END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) 749END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
732 /* Calc va and put it in r29 */ 750 /* Calc vpn and put it in r29 */
733 rldicr r29,r5,28,63-28 751 sldi r29,r5,SID_SHIFT - VPN_SHIFT
734 rldicl r3,r3,0,36 752 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
735 or r29,r3,r29 753 or r29,r28,r29
736 754
737 /* Calculate hash value for primary slot and store it in r28 */ 755 /* Calculate hash value for primary slot and store it in r28 */
738 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ 756 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
@@ -740,14 +758,19 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
740 xor r28,r5,r0 758 xor r28,r5,r0
741 b 4f 759 b 4f
742 760
7433: /* Calc VA and hash in r29 and r28 for 1T segment */ 7613: /* Calc vpn and put it in r29 */
744 sldi r29,r5,40 /* vsid << 40 */ 762 sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
745 clrldi r3,r3,24 /* ea & 0xffffffffff */ 763 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
764 or r29,r28,r29
765
766 /*
767 * calculate hash value for primary slot and
768 * store it in r28 for 1T segment
769 */
746 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ 770 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
747 clrldi r5,r5,40 /* vsid & 0xffffff */ 771 clrldi r5,r5,40 /* vsid & 0xffffff */
748 rldicl r0,r3,64-16,40 /* (ea >> 16) & 0xffffff */ 772 rldicl r0,r3,64-16,40 /* (ea >> 16) & 0xffffff */
749 xor r28,r28,r5 773 xor r28,r28,r5
750 or r29,r3,r29 /* VA */
751 xor r28,r28,r0 /* hash */ 774 xor r28,r28,r0 /* hash */
752 775
753 /* Convert linux PTE bits into HW equivalents */ 776 /* Convert linux PTE bits into HW equivalents */
@@ -806,7 +829,7 @@ ht64_insert_pte:
806 829
807 /* Call ppc_md.hpte_insert */ 830 /* Call ppc_md.hpte_insert */
808 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 831 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
809 mr r4,r29 /* Retrieve va */ 832 mr r4,r29 /* Retrieve vpn */
810 li r7,0 /* !bolted, !secondary */ 833 li r7,0 /* !bolted, !secondary */
811 li r8,MMU_PAGE_64K 834 li r8,MMU_PAGE_64K
812 ld r9,STK_PARAM(R9)(r1) /* segment size */ 835 ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -829,7 +852,7 @@ _GLOBAL(ht64_call_hpte_insert1)
829 852
830 /* Call ppc_md.hpte_insert */ 853 /* Call ppc_md.hpte_insert */
831 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 854 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
832 mr r4,r29 /* Retrieve va */ 855 mr r4,r29 /* Retrieve vpn */
833 li r7,HPTE_V_SECONDARY /* !bolted, secondary */ 856 li r7,HPTE_V_SECONDARY /* !bolted, secondary */
834 li r8,MMU_PAGE_64K 857 li r8,MMU_PAGE_64K
835 ld r9,STK_PARAM(R9)(r1) /* segment size */ 858 ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -899,7 +922,7 @@ ht64_modify_pte:
899 add r3,r0,r3 /* add slot idx */ 922 add r3,r0,r3 /* add slot idx */
900 923
901 /* Call ppc_md.hpte_updatepp */ 924 /* Call ppc_md.hpte_updatepp */
902 mr r5,r29 /* va */ 925 mr r5,r29 /* vpn */
903 li r6,MMU_PAGE_64K 926 li r6,MMU_PAGE_64K
904 ld r7,STK_PARAM(R9)(r1) /* segment size */ 927 ld r7,STK_PARAM(R9)(r1) /* segment size */
905 ld r8,STK_PARAM(R8)(r1) /* get "local" param */ 928 ld r8,STK_PARAM(R8)(r1) /* get "local" param */
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index ebf685af4af2..a4a1c728f269 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -39,22 +39,35 @@
39 39
40DEFINE_RAW_SPINLOCK(native_tlbie_lock); 40DEFINE_RAW_SPINLOCK(native_tlbie_lock);
41 41
42static inline void __tlbie(unsigned long va, int psize, int ssize) 42static inline void __tlbie(unsigned long vpn, int psize, int ssize)
43{ 43{
44 unsigned long va;
44 unsigned int penc; 45 unsigned int penc;
45 46
46 /* clear top 16 bits, non SLS segment */ 47 /*
48 * We need 14 to 65 bits of va for a tlibe of 4K page
49 * With vpn we ignore the lower VPN_SHIFT bits already.
50 * And top two bits are already ignored because we can
51 * only accomadate 76 bits in a 64 bit vpn with a VPN_SHIFT
52 * of 12.
53 */
54 va = vpn << VPN_SHIFT;
55 /*
56 * clear top 16 bits of 64bit va, non SLS segment
57 * Older versions of the architecture (2.02 and earler) require the
58 * masking of the top 16 bits.
59 */
47 va &= ~(0xffffULL << 48); 60 va &= ~(0xffffULL << 48);
48 61
49 switch (psize) { 62 switch (psize) {
50 case MMU_PAGE_4K: 63 case MMU_PAGE_4K:
51 va &= ~0xffful;
52 va |= ssize << 8; 64 va |= ssize << 8;
53 asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2) 65 asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
54 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) 66 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
55 : "memory"); 67 : "memory");
56 break; 68 break;
57 default: 69 default:
70 /* We need 14 to 14 + i bits of va */
58 penc = mmu_psize_defs[psize].penc; 71 penc = mmu_psize_defs[psize].penc;
59 va &= ~((1ul << mmu_psize_defs[psize].shift) - 1); 72 va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
60 va |= penc << 12; 73 va |= penc << 12;
@@ -67,21 +80,28 @@ static inline void __tlbie(unsigned long va, int psize, int ssize)
67 } 80 }
68} 81}
69 82
70static inline void __tlbiel(unsigned long va, int psize, int ssize) 83static inline void __tlbiel(unsigned long vpn, int psize, int ssize)
71{ 84{
85 unsigned long va;
72 unsigned int penc; 86 unsigned int penc;
73 87
 74 /* clear top 16 bits, non SLS segment */ 88 /* VPN_SHIFT can be at most 12 */
89 va = vpn << VPN_SHIFT;
90 /*
91 * clear top 16 bits of 64 bit va, non SLS segment
 92 * Older versions of the architecture (2.02 and earlier) require the
93 * masking of the top 16 bits.
94 */
75 va &= ~(0xffffULL << 48); 95 va &= ~(0xffffULL << 48);
76 96
77 switch (psize) { 97 switch (psize) {
78 case MMU_PAGE_4K: 98 case MMU_PAGE_4K:
79 va &= ~0xffful;
80 va |= ssize << 8; 99 va |= ssize << 8;
81 asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)" 100 asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
82 : : "r"(va) : "memory"); 101 : : "r"(va) : "memory");
83 break; 102 break;
84 default: 103 default:
104 /* We need 14 to 14 + i bits of va */
85 penc = mmu_psize_defs[psize].penc; 105 penc = mmu_psize_defs[psize].penc;
86 va &= ~((1ul << mmu_psize_defs[psize].shift) - 1); 106 va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
87 va |= penc << 12; 107 va |= penc << 12;
@@ -94,7 +114,7 @@ static inline void __tlbiel(unsigned long va, int psize, int ssize)
94 114
95} 115}
96 116
97static inline void tlbie(unsigned long va, int psize, int ssize, int local) 117static inline void tlbie(unsigned long vpn, int psize, int ssize, int local)
98{ 118{
99 unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL); 119 unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
100 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); 120 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
@@ -105,10 +125,10 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
105 raw_spin_lock(&native_tlbie_lock); 125 raw_spin_lock(&native_tlbie_lock);
106 asm volatile("ptesync": : :"memory"); 126 asm volatile("ptesync": : :"memory");
107 if (use_local) { 127 if (use_local) {
108 __tlbiel(va, psize, ssize); 128 __tlbiel(vpn, psize, ssize);
109 asm volatile("ptesync": : :"memory"); 129 asm volatile("ptesync": : :"memory");
110 } else { 130 } else {
111 __tlbie(va, psize, ssize); 131 __tlbie(vpn, psize, ssize);
112 asm volatile("eieio; tlbsync; ptesync": : :"memory"); 132 asm volatile("eieio; tlbsync; ptesync": : :"memory");
113 } 133 }
114 if (lock_tlbie && !use_local) 134 if (lock_tlbie && !use_local)
@@ -134,7 +154,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
134 clear_bit_unlock(HPTE_LOCK_BIT, word); 154 clear_bit_unlock(HPTE_LOCK_BIT, word);
135} 155}
136 156
137static long native_hpte_insert(unsigned long hpte_group, unsigned long va, 157static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
138 unsigned long pa, unsigned long rflags, 158 unsigned long pa, unsigned long rflags,
139 unsigned long vflags, int psize, int ssize) 159 unsigned long vflags, int psize, int ssize)
140{ 160{
@@ -143,9 +163,9 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
143 int i; 163 int i;
144 164
145 if (!(vflags & HPTE_V_BOLTED)) { 165 if (!(vflags & HPTE_V_BOLTED)) {
146 DBG_LOW(" insert(group=%lx, va=%016lx, pa=%016lx," 166 DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx,"
147 " rflags=%lx, vflags=%lx, psize=%d)\n", 167 " rflags=%lx, vflags=%lx, psize=%d)\n",
148 hpte_group, va, pa, rflags, vflags, psize); 168 hpte_group, vpn, pa, rflags, vflags, psize);
149 } 169 }
150 170
151 for (i = 0; i < HPTES_PER_GROUP; i++) { 171 for (i = 0; i < HPTES_PER_GROUP; i++) {
@@ -163,7 +183,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
163 if (i == HPTES_PER_GROUP) 183 if (i == HPTES_PER_GROUP)
164 return -1; 184 return -1;
165 185
166 hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID; 186 hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
167 hpte_r = hpte_encode_r(pa, psize) | rflags; 187 hpte_r = hpte_encode_r(pa, psize) | rflags;
168 188
169 if (!(vflags & HPTE_V_BOLTED)) { 189 if (!(vflags & HPTE_V_BOLTED)) {
@@ -225,17 +245,17 @@ static long native_hpte_remove(unsigned long hpte_group)
225} 245}
226 246
227static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, 247static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
228 unsigned long va, int psize, int ssize, 248 unsigned long vpn, int psize, int ssize,
229 int local) 249 int local)
230{ 250{
231 struct hash_pte *hptep = htab_address + slot; 251 struct hash_pte *hptep = htab_address + slot;
232 unsigned long hpte_v, want_v; 252 unsigned long hpte_v, want_v;
233 int ret = 0; 253 int ret = 0;
234 254
235 want_v = hpte_encode_v(va, psize, ssize); 255 want_v = hpte_encode_v(vpn, psize, ssize);
236 256
237 DBG_LOW(" update(va=%016lx, avpnv=%016lx, hash=%016lx, newpp=%x)", 257 DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
238 va, want_v & HPTE_V_AVPN, slot, newpp); 258 vpn, want_v & HPTE_V_AVPN, slot, newpp);
239 259
240 native_lock_hpte(hptep); 260 native_lock_hpte(hptep);
241 261
@@ -254,12 +274,12 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
254 native_unlock_hpte(hptep); 274 native_unlock_hpte(hptep);
255 275
256 /* Ensure it is out of the tlb too. */ 276 /* Ensure it is out of the tlb too. */
257 tlbie(va, psize, ssize, local); 277 tlbie(vpn, psize, ssize, local);
258 278
259 return ret; 279 return ret;
260} 280}
261 281
262static long native_hpte_find(unsigned long va, int psize, int ssize) 282static long native_hpte_find(unsigned long vpn, int psize, int ssize)
263{ 283{
264 struct hash_pte *hptep; 284 struct hash_pte *hptep;
265 unsigned long hash; 285 unsigned long hash;
@@ -267,8 +287,8 @@ static long native_hpte_find(unsigned long va, int psize, int ssize)
267 long slot; 287 long slot;
268 unsigned long want_v, hpte_v; 288 unsigned long want_v, hpte_v;
269 289
270 hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize); 290 hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
271 want_v = hpte_encode_v(va, psize, ssize); 291 want_v = hpte_encode_v(vpn, psize, ssize);
272 292
273 /* Bolted mappings are only ever in the primary group */ 293 /* Bolted mappings are only ever in the primary group */
274 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 294 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -295,14 +315,15 @@ static long native_hpte_find(unsigned long va, int psize, int ssize)
295static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, 315static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
296 int psize, int ssize) 316 int psize, int ssize)
297{ 317{
298 unsigned long vsid, va; 318 unsigned long vpn;
319 unsigned long vsid;
299 long slot; 320 long slot;
300 struct hash_pte *hptep; 321 struct hash_pte *hptep;
301 322
302 vsid = get_kernel_vsid(ea, ssize); 323 vsid = get_kernel_vsid(ea, ssize);
303 va = hpt_va(ea, vsid, ssize); 324 vpn = hpt_vpn(ea, vsid, ssize);
304 325
305 slot = native_hpte_find(va, psize, ssize); 326 slot = native_hpte_find(vpn, psize, ssize);
306 if (slot == -1) 327 if (slot == -1)
307 panic("could not find page to bolt\n"); 328 panic("could not find page to bolt\n");
308 hptep = htab_address + slot; 329 hptep = htab_address + slot;
@@ -312,10 +333,10 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
312 (newpp & (HPTE_R_PP | HPTE_R_N)); 333 (newpp & (HPTE_R_PP | HPTE_R_N));
313 334
314 /* Ensure it is out of the tlb too. */ 335 /* Ensure it is out of the tlb too. */
315 tlbie(va, psize, ssize, 0); 336 tlbie(vpn, psize, ssize, 0);
316} 337}
317 338
318static void native_hpte_invalidate(unsigned long slot, unsigned long va, 339static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
319 int psize, int ssize, int local) 340 int psize, int ssize, int local)
320{ 341{
321 struct hash_pte *hptep = htab_address + slot; 342 struct hash_pte *hptep = htab_address + slot;
@@ -325,9 +346,9 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
325 346
326 local_irq_save(flags); 347 local_irq_save(flags);
327 348
328 DBG_LOW(" invalidate(va=%016lx, hash: %x)\n", va, slot); 349 DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);
329 350
330 want_v = hpte_encode_v(va, psize, ssize); 351 want_v = hpte_encode_v(vpn, psize, ssize);
331 native_lock_hpte(hptep); 352 native_lock_hpte(hptep);
332 hpte_v = hptep->v; 353 hpte_v = hptep->v;
333 354
@@ -339,7 +360,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
339 hptep->v = 0; 360 hptep->v = 0;
340 361
341 /* Invalidate the TLB */ 362 /* Invalidate the TLB */
342 tlbie(va, psize, ssize, local); 363 tlbie(vpn, psize, ssize, local);
343 364
344 local_irq_restore(flags); 365 local_irq_restore(flags);
345} 366}
@@ -349,7 +370,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
349#define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT) 370#define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT)
350 371
351static void hpte_decode(struct hash_pte *hpte, unsigned long slot, 372static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
352 int *psize, int *ssize, unsigned long *va) 373 int *psize, int *ssize, unsigned long *vpn)
353{ 374{
354 unsigned long avpn, pteg, vpi; 375 unsigned long avpn, pteg, vpi;
355 unsigned long hpte_r = hpte->r; 376 unsigned long hpte_r = hpte->r;
@@ -399,7 +420,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
399 vpi = (vsid ^ pteg) & htab_hash_mask; 420 vpi = (vsid ^ pteg) & htab_hash_mask;
400 seg_off |= vpi << shift; 421 seg_off |= vpi << shift;
401 } 422 }
402 *va = vsid << SID_SHIFT | seg_off; 423 *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
403 case MMU_SEGSIZE_1T: 424 case MMU_SEGSIZE_1T:
404 /* We only have 40 - 23 bits of seg_off in avpn */ 425 /* We only have 40 - 23 bits of seg_off in avpn */
405 seg_off = (avpn & 0x1ffff) << 23; 426 seg_off = (avpn & 0x1ffff) << 23;
@@ -408,9 +429,9 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
408 vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask; 429 vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
409 seg_off |= vpi << shift; 430 seg_off |= vpi << shift;
410 } 431 }
411 *va = vsid << SID_SHIFT_1T | seg_off; 432 *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
412 default: 433 default:
413 *va = size = 0; 434 *vpn = size = 0;
414 } 435 }
415 *psize = size; 436 *psize = size;
416} 437}
@@ -425,9 +446,10 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
425 */ 446 */
426static void native_hpte_clear(void) 447static void native_hpte_clear(void)
427{ 448{
449 unsigned long vpn = 0;
428 unsigned long slot, slots, flags; 450 unsigned long slot, slots, flags;
429 struct hash_pte *hptep = htab_address; 451 struct hash_pte *hptep = htab_address;
430 unsigned long hpte_v, va; 452 unsigned long hpte_v;
431 unsigned long pteg_count; 453 unsigned long pteg_count;
432 int psize, ssize; 454 int psize, ssize;
433 455
@@ -455,9 +477,9 @@ static void native_hpte_clear(void)
455 * already hold the native_tlbie_lock. 477 * already hold the native_tlbie_lock.
456 */ 478 */
457 if (hpte_v & HPTE_V_VALID) { 479 if (hpte_v & HPTE_V_VALID) {
458 hpte_decode(hptep, slot, &psize, &ssize, &va); 480 hpte_decode(hptep, slot, &psize, &ssize, &vpn);
459 hptep->v = 0; 481 hptep->v = 0;
460 __tlbie(va, psize, ssize); 482 __tlbie(vpn, psize, ssize);
461 } 483 }
462 } 484 }
463 485
@@ -472,7 +494,8 @@ static void native_hpte_clear(void)
472 */ 494 */
473static void native_flush_hash_range(unsigned long number, int local) 495static void native_flush_hash_range(unsigned long number, int local)
474{ 496{
475 unsigned long va, hash, index, hidx, shift, slot; 497 unsigned long vpn;
498 unsigned long hash, index, hidx, shift, slot;
476 struct hash_pte *hptep; 499 struct hash_pte *hptep;
477 unsigned long hpte_v; 500 unsigned long hpte_v;
478 unsigned long want_v; 501 unsigned long want_v;
@@ -486,18 +509,18 @@ static void native_flush_hash_range(unsigned long number, int local)
486 local_irq_save(flags); 509 local_irq_save(flags);
487 510
488 for (i = 0; i < number; i++) { 511 for (i = 0; i < number; i++) {
489 va = batch->vaddr[i]; 512 vpn = batch->vpn[i];
490 pte = batch->pte[i]; 513 pte = batch->pte[i];
491 514
492 pte_iterate_hashed_subpages(pte, psize, va, index, shift) { 515 pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
493 hash = hpt_hash(va, shift, ssize); 516 hash = hpt_hash(vpn, shift, ssize);
494 hidx = __rpte_to_hidx(pte, index); 517 hidx = __rpte_to_hidx(pte, index);
495 if (hidx & _PTEIDX_SECONDARY) 518 if (hidx & _PTEIDX_SECONDARY)
496 hash = ~hash; 519 hash = ~hash;
497 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 520 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
498 slot += hidx & _PTEIDX_GROUP_IX; 521 slot += hidx & _PTEIDX_GROUP_IX;
499 hptep = htab_address + slot; 522 hptep = htab_address + slot;
500 want_v = hpte_encode_v(va, psize, ssize); 523 want_v = hpte_encode_v(vpn, psize, ssize);
501 native_lock_hpte(hptep); 524 native_lock_hpte(hptep);
502 hpte_v = hptep->v; 525 hpte_v = hptep->v;
503 if (!HPTE_V_COMPARE(hpte_v, want_v) || 526 if (!HPTE_V_COMPARE(hpte_v, want_v) ||
@@ -512,12 +535,12 @@ static void native_flush_hash_range(unsigned long number, int local)
512 mmu_psize_defs[psize].tlbiel && local) { 535 mmu_psize_defs[psize].tlbiel && local) {
513 asm volatile("ptesync":::"memory"); 536 asm volatile("ptesync":::"memory");
514 for (i = 0; i < number; i++) { 537 for (i = 0; i < number; i++) {
515 va = batch->vaddr[i]; 538 vpn = batch->vpn[i];
516 pte = batch->pte[i]; 539 pte = batch->pte[i];
517 540
518 pte_iterate_hashed_subpages(pte, psize, va, index, 541 pte_iterate_hashed_subpages(pte, psize,
519 shift) { 542 vpn, index, shift) {
520 __tlbiel(va, psize, ssize); 543 __tlbiel(vpn, psize, ssize);
521 } pte_iterate_hashed_end(); 544 } pte_iterate_hashed_end();
522 } 545 }
523 asm volatile("ptesync":::"memory"); 546 asm volatile("ptesync":::"memory");
@@ -529,12 +552,12 @@ static void native_flush_hash_range(unsigned long number, int local)
529 552
530 asm volatile("ptesync":::"memory"); 553 asm volatile("ptesync":::"memory");
531 for (i = 0; i < number; i++) { 554 for (i = 0; i < number; i++) {
532 va = batch->vaddr[i]; 555 vpn = batch->vpn[i];
533 pte = batch->pte[i]; 556 pte = batch->pte[i];
534 557
535 pte_iterate_hashed_subpages(pte, psize, va, index, 558 pte_iterate_hashed_subpages(pte, psize,
536 shift) { 559 vpn, index, shift) {
537 __tlbie(va, psize, ssize); 560 __tlbie(vpn, psize, ssize);
538 } pte_iterate_hashed_end(); 561 } pte_iterate_hashed_end();
539 } 562 }
540 asm volatile("eieio; tlbsync; ptesync":::"memory"); 563 asm volatile("eieio; tlbsync; ptesync":::"memory");
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index ba45739bdfe8..7d4ffd79cc82 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -191,18 +191,18 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
191 vaddr += step, paddr += step) { 191 vaddr += step, paddr += step) {
192 unsigned long hash, hpteg; 192 unsigned long hash, hpteg;
193 unsigned long vsid = get_kernel_vsid(vaddr, ssize); 193 unsigned long vsid = get_kernel_vsid(vaddr, ssize);
194 unsigned long va = hpt_va(vaddr, vsid, ssize); 194 unsigned long vpn = hpt_vpn(vaddr, vsid, ssize);
195 unsigned long tprot = prot; 195 unsigned long tprot = prot;
196 196
197 /* Make kernel text executable */ 197 /* Make kernel text executable */
198 if (overlaps_kernel_text(vaddr, vaddr + step)) 198 if (overlaps_kernel_text(vaddr, vaddr + step))
199 tprot &= ~HPTE_R_N; 199 tprot &= ~HPTE_R_N;
200 200
201 hash = hpt_hash(va, shift, ssize); 201 hash = hpt_hash(vpn, shift, ssize);
202 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 202 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
203 203
204 BUG_ON(!ppc_md.hpte_insert); 204 BUG_ON(!ppc_md.hpte_insert);
205 ret = ppc_md.hpte_insert(hpteg, va, paddr, tprot, 205 ret = ppc_md.hpte_insert(hpteg, vpn, paddr, tprot,
206 HPTE_V_BOLTED, psize, ssize); 206 HPTE_V_BOLTED, psize, ssize);
207 207
208 if (ret < 0) 208 if (ret < 0)
@@ -1152,21 +1152,21 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
1152/* WARNING: This is called from hash_low_64.S, if you change this prototype, 1152/* WARNING: This is called from hash_low_64.S, if you change this prototype,
1153 * do not forget to update the assembly call site ! 1153 * do not forget to update the assembly call site !
1154 */ 1154 */
1155void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize, 1155void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
1156 int local) 1156 int local)
1157{ 1157{
1158 unsigned long hash, index, shift, hidx, slot; 1158 unsigned long hash, index, shift, hidx, slot;
1159 1159
1160 DBG_LOW("flush_hash_page(va=%016lx)\n", va); 1160 DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn);
1161 pte_iterate_hashed_subpages(pte, psize, va, index, shift) { 1161 pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
1162 hash = hpt_hash(va, shift, ssize); 1162 hash = hpt_hash(vpn, shift, ssize);
1163 hidx = __rpte_to_hidx(pte, index); 1163 hidx = __rpte_to_hidx(pte, index);
1164 if (hidx & _PTEIDX_SECONDARY) 1164 if (hidx & _PTEIDX_SECONDARY)
1165 hash = ~hash; 1165 hash = ~hash;
1166 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 1166 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
1167 slot += hidx & _PTEIDX_GROUP_IX; 1167 slot += hidx & _PTEIDX_GROUP_IX;
1168 DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx); 1168 DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
1169 ppc_md.hpte_invalidate(slot, va, psize, ssize, local); 1169 ppc_md.hpte_invalidate(slot, vpn, psize, ssize, local);
1170 } pte_iterate_hashed_end(); 1170 } pte_iterate_hashed_end();
1171} 1171}
1172 1172
@@ -1180,7 +1180,7 @@ void flush_hash_range(unsigned long number, int local)
1180 &__get_cpu_var(ppc64_tlb_batch); 1180 &__get_cpu_var(ppc64_tlb_batch);
1181 1181
1182 for (i = 0; i < number; i++) 1182 for (i = 0; i < number; i++)
1183 flush_hash_page(batch->vaddr[i], batch->pte[i], 1183 flush_hash_page(batch->vpn[i], batch->pte[i],
1184 batch->psize, batch->ssize, local); 1184 batch->psize, batch->ssize, local);
1185 } 1185 }
1186} 1186}
@@ -1207,14 +1207,14 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
1207{ 1207{
1208 unsigned long hash, hpteg; 1208 unsigned long hash, hpteg;
1209 unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); 1209 unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
1210 unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize); 1210 unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
1211 unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL); 1211 unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL);
1212 int ret; 1212 int ret;
1213 1213
1214 hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize); 1214 hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
1215 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 1215 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
1216 1216
1217 ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr), 1217 ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr),
1218 mode, HPTE_V_BOLTED, 1218 mode, HPTE_V_BOLTED,
1219 mmu_linear_psize, mmu_kernel_ssize); 1219 mmu_linear_psize, mmu_kernel_ssize);
1220 BUG_ON (ret < 0); 1220 BUG_ON (ret < 0);
@@ -1228,9 +1228,9 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
1228{ 1228{
1229 unsigned long hash, hidx, slot; 1229 unsigned long hash, hidx, slot;
1230 unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); 1230 unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
1231 unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize); 1231 unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
1232 1232
1233 hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize); 1233 hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
1234 spin_lock(&linear_map_hash_lock); 1234 spin_lock(&linear_map_hash_lock);
1235 BUG_ON(!(linear_map_hash_slots[lmi] & 0x80)); 1235 BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
1236 hidx = linear_map_hash_slots[lmi] & 0x7f; 1236 hidx = linear_map_hash_slots[lmi] & 0x7f;
@@ -1240,7 +1240,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
1240 hash = ~hash; 1240 hash = ~hash;
1241 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 1241 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
1242 slot += hidx & _PTEIDX_GROUP_IX; 1242 slot += hidx & _PTEIDX_GROUP_IX;
1243 ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0); 1243 ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_kernel_ssize, 0);
1244} 1244}
1245 1245
1246void kernel_map_pages(struct page *page, int numpages, int enable) 1246void kernel_map_pages(struct page *page, int numpages, int enable)
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index cc5c273086cf..cecad348f604 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -18,14 +18,15 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
18 pte_t *ptep, unsigned long trap, int local, int ssize, 18 pte_t *ptep, unsigned long trap, int local, int ssize,
19 unsigned int shift, unsigned int mmu_psize) 19 unsigned int shift, unsigned int mmu_psize)
20{ 20{
21 unsigned long vpn;
21 unsigned long old_pte, new_pte; 22 unsigned long old_pte, new_pte;
22 unsigned long va, rflags, pa, sz; 23 unsigned long rflags, pa, sz;
23 long slot; 24 long slot;
24 25
25 BUG_ON(shift != mmu_psize_defs[mmu_psize].shift); 26 BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);
26 27
27 /* Search the Linux page table for a match with va */ 28 /* Search the Linux page table for a match with va */
28 va = hpt_va(ea, vsid, ssize); 29 vpn = hpt_vpn(ea, vsid, ssize);
29 30
30 /* At this point, we have a pte (old_pte) which can be used to build 31 /* At this point, we have a pte (old_pte) which can be used to build
31 * or update an HPTE. There are 2 cases: 32 * or update an HPTE. There are 2 cases:
@@ -69,19 +70,19 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
69 /* There MIGHT be an HPTE for this pte */ 70 /* There MIGHT be an HPTE for this pte */
70 unsigned long hash, slot; 71 unsigned long hash, slot;
71 72
72 hash = hpt_hash(va, shift, ssize); 73 hash = hpt_hash(vpn, shift, ssize);
73 if (old_pte & _PAGE_F_SECOND) 74 if (old_pte & _PAGE_F_SECOND)
74 hash = ~hash; 75 hash = ~hash;
75 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 76 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
76 slot += (old_pte & _PAGE_F_GIX) >> 12; 77 slot += (old_pte & _PAGE_F_GIX) >> 12;
77 78
78 if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_psize, 79 if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize,
79 ssize, local) == -1) 80 ssize, local) == -1)
80 old_pte &= ~_PAGE_HPTEFLAGS; 81 old_pte &= ~_PAGE_HPTEFLAGS;
81 } 82 }
82 83
83 if (likely(!(old_pte & _PAGE_HASHPTE))) { 84 if (likely(!(old_pte & _PAGE_HASHPTE))) {
84 unsigned long hash = hpt_hash(va, shift, ssize); 85 unsigned long hash = hpt_hash(vpn, shift, ssize);
85 unsigned long hpte_group; 86 unsigned long hpte_group;
86 87
87 pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT; 88 pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
@@ -101,14 +102,14 @@ repeat:
101 _PAGE_COHERENT | _PAGE_GUARDED)); 102 _PAGE_COHERENT | _PAGE_GUARDED));
102 103
103 /* Insert into the hash table, primary slot */ 104 /* Insert into the hash table, primary slot */
104 slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0, 105 slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
105 mmu_psize, ssize); 106 mmu_psize, ssize);
106 107
107 /* Primary is full, try the secondary */ 108 /* Primary is full, try the secondary */
108 if (unlikely(slot == -1)) { 109 if (unlikely(slot == -1)) {
109 hpte_group = ((~hash & htab_hash_mask) * 110 hpte_group = ((~hash & htab_hash_mask) *
110 HPTES_PER_GROUP) & ~0x7UL; 111 HPTES_PER_GROUP) & ~0x7UL;
111 slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 112 slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags,
112 HPTE_V_SECONDARY, 113 HPTE_V_SECONDARY,
113 mmu_psize, ssize); 114 mmu_psize, ssize);
114 if (slot == -1) { 115 if (slot == -1) {
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 31f18207970b..ae758b3ff72c 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -42,8 +42,9 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
42void hpte_need_flush(struct mm_struct *mm, unsigned long addr, 42void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
43 pte_t *ptep, unsigned long pte, int huge) 43 pte_t *ptep, unsigned long pte, int huge)
44{ 44{
45 unsigned long vpn;
45 struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch); 46 struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
46 unsigned long vsid, vaddr; 47 unsigned long vsid;
47 unsigned int psize; 48 unsigned int psize;
48 int ssize; 49 int ssize;
49 real_pte_t rpte; 50 real_pte_t rpte;
@@ -86,7 +87,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
86 vsid = get_kernel_vsid(addr, mmu_kernel_ssize); 87 vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
87 ssize = mmu_kernel_ssize; 88 ssize = mmu_kernel_ssize;
88 } 89 }
89 vaddr = hpt_va(addr, vsid, ssize); 90 vpn = hpt_vpn(addr, vsid, ssize);
90 rpte = __real_pte(__pte(pte), ptep); 91 rpte = __real_pte(__pte(pte), ptep);
91 92
92 /* 93 /*
@@ -96,7 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
96 * and decide to use local invalidates instead... 97 * and decide to use local invalidates instead...
97 */ 98 */
98 if (!batch->active) { 99 if (!batch->active) {
99 flush_hash_page(vaddr, rpte, psize, ssize, 0); 100 flush_hash_page(vpn, rpte, psize, ssize, 0);
100 put_cpu_var(ppc64_tlb_batch); 101 put_cpu_var(ppc64_tlb_batch);
101 return; 102 return;
102 } 103 }
@@ -122,7 +123,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
122 batch->ssize = ssize; 123 batch->ssize = ssize;
123 } 124 }
124 batch->pte[i] = rpte; 125 batch->pte[i] = rpte;
125 batch->vaddr[i] = vaddr; 126 batch->vpn[i] = vpn;
126 batch->index = ++i; 127 batch->index = ++i;
127 if (i >= PPC64_TLB_BATCH_NR) 128 if (i >= PPC64_TLB_BATCH_NR)
128 __flush_tlb_pending(batch); 129 __flush_tlb_pending(batch);
@@ -146,7 +147,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
146 if (cpumask_equal(mm_cpumask(batch->mm), tmp)) 147 if (cpumask_equal(mm_cpumask(batch->mm), tmp))
147 local = 1; 148 local = 1;
148 if (i == 1) 149 if (i == 1)
149 flush_hash_page(batch->vaddr[0], batch->pte[0], 150 flush_hash_page(batch->vpn[0], batch->pte[0],
150 batch->psize, batch->ssize, local); 151 batch->psize, batch->ssize, local);
151 else 152 else
152 flush_hash_range(i, local); 153 flush_hash_range(i, local);
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
index b83077e13161..0f6f83988b3d 100644
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ b/arch/powerpc/platforms/cell/beat_htab.c
@@ -88,7 +88,7 @@ static inline unsigned int beat_read_mask(unsigned hpte_group)
88} 88}
89 89
90static long beat_lpar_hpte_insert(unsigned long hpte_group, 90static long beat_lpar_hpte_insert(unsigned long hpte_group,
91 unsigned long va, unsigned long pa, 91 unsigned long vpn, unsigned long pa,
92 unsigned long rflags, unsigned long vflags, 92 unsigned long rflags, unsigned long vflags,
93 int psize, int ssize) 93 int psize, int ssize)
94{ 94{
@@ -103,7 +103,7 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group,
103 "rflags=%lx, vflags=%lx, psize=%d)\n", 103 "rflags=%lx, vflags=%lx, psize=%d)\n",
104 hpte_group, va, pa, rflags, vflags, psize); 104 hpte_group, va, pa, rflags, vflags, psize);
105 105
106 hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) | 106 hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) |
107 vflags | HPTE_V_VALID; 107 vflags | HPTE_V_VALID;
108 hpte_r = hpte_encode_r(pa, psize) | rflags; 108 hpte_r = hpte_encode_r(pa, psize) | rflags;
109 109
@@ -184,14 +184,14 @@ static void beat_lpar_hptab_clear(void)
184 */ 184 */
185static long beat_lpar_hpte_updatepp(unsigned long slot, 185static long beat_lpar_hpte_updatepp(unsigned long slot,
186 unsigned long newpp, 186 unsigned long newpp,
187 unsigned long va, 187 unsigned long vpn,
188 int psize, int ssize, int local) 188 int psize, int ssize, int local)
189{ 189{
190 unsigned long lpar_rc; 190 unsigned long lpar_rc;
191 u64 dummy0, dummy1; 191 u64 dummy0, dummy1;
192 unsigned long want_v; 192 unsigned long want_v;
193 193
194 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 194 want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
195 195
196 DBG_LOW(" update: " 196 DBG_LOW(" update: "
197 "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ", 197 "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
@@ -220,15 +220,15 @@ static long beat_lpar_hpte_updatepp(unsigned long slot,
220 return 0; 220 return 0;
221} 221}
222 222
223static long beat_lpar_hpte_find(unsigned long va, int psize) 223static long beat_lpar_hpte_find(unsigned long vpn, int psize)
224{ 224{
225 unsigned long hash; 225 unsigned long hash;
226 unsigned long i, j; 226 unsigned long i, j;
227 long slot; 227 long slot;
228 unsigned long want_v, hpte_v; 228 unsigned long want_v, hpte_v;
229 229
230 hash = hpt_hash(va, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M); 230 hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M);
231 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 231 want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
232 232
233 for (j = 0; j < 2; j++) { 233 for (j = 0; j < 2; j++) {
234 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 234 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -255,14 +255,15 @@ static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
255 unsigned long ea, 255 unsigned long ea,
256 int psize, int ssize) 256 int psize, int ssize)
257{ 257{
258 unsigned long lpar_rc, slot, vsid, va; 258 unsigned long vpn;
259 unsigned long lpar_rc, slot, vsid;
259 u64 dummy0, dummy1; 260 u64 dummy0, dummy1;
260 261
261 vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M); 262 vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
262 va = hpt_va(ea, vsid, MMU_SEGSIZE_256M); 263 vpn = hpt_vpn(ea, vsid, MMU_SEGSIZE_256M);
263 264
264 raw_spin_lock(&beat_htab_lock); 265 raw_spin_lock(&beat_htab_lock);
265 slot = beat_lpar_hpte_find(va, psize); 266 slot = beat_lpar_hpte_find(vpn, psize);
266 BUG_ON(slot == -1); 267 BUG_ON(slot == -1);
267 268
268 lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7, 269 lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7,
@@ -272,7 +273,7 @@ static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
272 BUG_ON(lpar_rc != 0); 273 BUG_ON(lpar_rc != 0);
273} 274}
274 275
275static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va, 276static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
276 int psize, int ssize, int local) 277 int psize, int ssize, int local)
277{ 278{
278 unsigned long want_v; 279 unsigned long want_v;
@@ -282,7 +283,7 @@ static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
282 283
283 DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", 284 DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
284 slot, va, psize, local); 285 slot, va, psize, local);
285 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 286 want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
286 287
287 raw_spin_lock_irqsave(&beat_htab_lock, flags); 288 raw_spin_lock_irqsave(&beat_htab_lock, flags);
288 dummy1 = beat_lpar_hpte_getword0(slot); 289 dummy1 = beat_lpar_hpte_getword0(slot);
@@ -311,7 +312,7 @@ void __init hpte_init_beat(void)
311} 312}
312 313
313static long beat_lpar_hpte_insert_v3(unsigned long hpte_group, 314static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
314 unsigned long va, unsigned long pa, 315 unsigned long vpn, unsigned long pa,
315 unsigned long rflags, unsigned long vflags, 316 unsigned long rflags, unsigned long vflags,
316 int psize, int ssize) 317 int psize, int ssize)
317{ 318{
@@ -322,11 +323,11 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
322 return -1; 323 return -1;
323 324
324 if (!(vflags & HPTE_V_BOLTED)) 325 if (!(vflags & HPTE_V_BOLTED))
325 DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, " 326 DBG_LOW("hpte_insert(group=%lx, vpn=%016lx, pa=%016lx, "
326 "rflags=%lx, vflags=%lx, psize=%d)\n", 327 "rflags=%lx, vflags=%lx, psize=%d)\n",
327 hpte_group, va, pa, rflags, vflags, psize); 328 hpte_group, vpn, pa, rflags, vflags, psize);
328 329
329 hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) | 330 hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) |
330 vflags | HPTE_V_VALID; 331 vflags | HPTE_V_VALID;
331 hpte_r = hpte_encode_r(pa, psize) | rflags; 332 hpte_r = hpte_encode_r(pa, psize) | rflags;
332 333
@@ -364,14 +365,14 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
364 */ 365 */
365static long beat_lpar_hpte_updatepp_v3(unsigned long slot, 366static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
366 unsigned long newpp, 367 unsigned long newpp,
367 unsigned long va, 368 unsigned long vpn,
368 int psize, int ssize, int local) 369 int psize, int ssize, int local)
369{ 370{
370 unsigned long lpar_rc; 371 unsigned long lpar_rc;
371 unsigned long want_v; 372 unsigned long want_v;
372 unsigned long pss; 373 unsigned long pss;
373 374
374 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 375 want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
375 pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc; 376 pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;
376 377
377 DBG_LOW(" update: " 378 DBG_LOW(" update: "
@@ -392,16 +393,16 @@ static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
392 return 0; 393 return 0;
393} 394}
394 395
395static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long va, 396static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long vpn,
396 int psize, int ssize, int local) 397 int psize, int ssize, int local)
397{ 398{
398 unsigned long want_v; 399 unsigned long want_v;
399 unsigned long lpar_rc; 400 unsigned long lpar_rc;
400 unsigned long pss; 401 unsigned long pss;
401 402
402 DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", 403 DBG_LOW(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
403 slot, va, psize, local); 404 slot, vpn, psize, local);
404 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 405 want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
405 pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc; 406 pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;
406 407
407 lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss); 408 lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss);
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
index 3124cf791ebb..d00d7b0a3bda 100644
--- a/arch/powerpc/platforms/ps3/htab.c
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -43,7 +43,7 @@ enum ps3_lpar_vas_id {
43 43
44static DEFINE_SPINLOCK(ps3_htab_lock); 44static DEFINE_SPINLOCK(ps3_htab_lock);
45 45
46static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va, 46static long ps3_hpte_insert(unsigned long hpte_group, unsigned long vpn,
47 unsigned long pa, unsigned long rflags, unsigned long vflags, 47 unsigned long pa, unsigned long rflags, unsigned long vflags,
48 int psize, int ssize) 48 int psize, int ssize)
49{ 49{
@@ -61,7 +61,7 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va,
61 */ 61 */
62 vflags &= ~HPTE_V_SECONDARY; 62 vflags &= ~HPTE_V_SECONDARY;
63 63
64 hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID; 64 hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
65 hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags; 65 hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags;
66 66
67 spin_lock_irqsave(&ps3_htab_lock, flags); 67 spin_lock_irqsave(&ps3_htab_lock, flags);
@@ -75,8 +75,8 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va,
75 75
76 if (result) { 76 if (result) {
77 /* all entries bolted !*/ 77 /* all entries bolted !*/
78 pr_info("%s:result=%d va=%lx pa=%lx ix=%lx v=%llx r=%llx\n", 78 pr_info("%s:result=%d vpn=%lx pa=%lx ix=%lx v=%llx r=%llx\n",
79 __func__, result, va, pa, hpte_group, hpte_v, hpte_r); 79 __func__, result, vpn, pa, hpte_group, hpte_v, hpte_r);
80 BUG(); 80 BUG();
81 } 81 }
82 82
@@ -107,7 +107,7 @@ static long ps3_hpte_remove(unsigned long hpte_group)
107} 107}
108 108
109static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp, 109static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
110 unsigned long va, int psize, int ssize, int local) 110 unsigned long vpn, int psize, int ssize, int local)
111{ 111{
112 int result; 112 int result;
113 u64 hpte_v, want_v, hpte_rs; 113 u64 hpte_v, want_v, hpte_rs;
@@ -115,7 +115,7 @@ static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
115 unsigned long flags; 115 unsigned long flags;
116 long ret; 116 long ret;
117 117
118 want_v = hpte_encode_v(va, psize, ssize); 118 want_v = hpte_encode_v(vpn, psize, ssize);
119 119
120 spin_lock_irqsave(&ps3_htab_lock, flags); 120 spin_lock_irqsave(&ps3_htab_lock, flags);
121 121
@@ -125,8 +125,8 @@ static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
125 &hpte_rs); 125 &hpte_rs);
126 126
127 if (result) { 127 if (result) {
128 pr_info("%s: res=%d read va=%lx slot=%lx psize=%d\n", 128 pr_info("%s: res=%d read vpn=%lx slot=%lx psize=%d\n",
129 __func__, result, va, slot, psize); 129 __func__, result, vpn, slot, psize);
130 BUG(); 130 BUG();
131 } 131 }
132 132
@@ -159,7 +159,7 @@ static void ps3_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
159 panic("ps3_hpte_updateboltedpp() not implemented"); 159 panic("ps3_hpte_updateboltedpp() not implemented");
160} 160}
161 161
162static void ps3_hpte_invalidate(unsigned long slot, unsigned long va, 162static void ps3_hpte_invalidate(unsigned long slot, unsigned long vpn,
163 int psize, int ssize, int local) 163 int psize, int ssize, int local)
164{ 164{
165 unsigned long flags; 165 unsigned long flags;
@@ -170,8 +170,8 @@ static void ps3_hpte_invalidate(unsigned long slot, unsigned long va,
170 result = lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT, slot, 0, 0); 170 result = lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT, slot, 0, 0);
171 171
172 if (result) { 172 if (result) {
173 pr_info("%s: res=%d va=%lx slot=%lx psize=%d\n", 173 pr_info("%s: res=%d vpn=%lx slot=%lx psize=%d\n",
174 __func__, result, va, slot, psize); 174 __func__, result, vpn, slot, psize);
175 BUG(); 175 BUG();
176 } 176 }
177 177
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 177055d0186b..0da39fed355a 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -107,9 +107,9 @@ void vpa_init(int cpu)
107} 107}
108 108
109static long pSeries_lpar_hpte_insert(unsigned long hpte_group, 109static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
110 unsigned long va, unsigned long pa, 110 unsigned long vpn, unsigned long pa,
111 unsigned long rflags, unsigned long vflags, 111 unsigned long rflags, unsigned long vflags,
112 int psize, int ssize) 112 int psize, int ssize)
113{ 113{
114 unsigned long lpar_rc; 114 unsigned long lpar_rc;
115 unsigned long flags; 115 unsigned long flags;
@@ -117,11 +117,11 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
117 unsigned long hpte_v, hpte_r; 117 unsigned long hpte_v, hpte_r;
118 118
119 if (!(vflags & HPTE_V_BOLTED)) 119 if (!(vflags & HPTE_V_BOLTED))
120 pr_devel("hpte_insert(group=%lx, va=%016lx, pa=%016lx, " 120 pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
121 "rflags=%lx, vflags=%lx, psize=%d)\n", 121 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
122 hpte_group, va, pa, rflags, vflags, psize); 122 hpte_group, vpn, pa, rflags, vflags, psize);
123 123
124 hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID; 124 hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
125 hpte_r = hpte_encode_r(pa, psize) | rflags; 125 hpte_r = hpte_encode_r(pa, psize) | rflags;
126 126
127 if (!(vflags & HPTE_V_BOLTED)) 127 if (!(vflags & HPTE_V_BOLTED))
@@ -226,22 +226,6 @@ static void pSeries_lpar_hptab_clear(void)
226} 226}
227 227
228/* 228/*
229 * This computes the AVPN and B fields of the first dword of a HPTE,
230 * for use when we want to match an existing PTE. The bottom 7 bits
231 * of the returned value are zero.
232 */
233static inline unsigned long hpte_encode_avpn(unsigned long va, int psize,
234 int ssize)
235{
236 unsigned long v;
237
238 v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
239 v <<= HPTE_V_AVPN_SHIFT;
240 v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
241 return v;
242}
243
244/*
245 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and 229 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
246 * the low 3 bits of flags happen to line up. So no transform is needed. 230 * the low 3 bits of flags happen to line up. So no transform is needed.
247 * We can probably optimize here and assume the high bits of newpp are 231 * We can probably optimize here and assume the high bits of newpp are
@@ -249,14 +233,14 @@ static inline unsigned long hpte_encode_avpn(unsigned long va, int psize,
249 */ 233 */
250static long pSeries_lpar_hpte_updatepp(unsigned long slot, 234static long pSeries_lpar_hpte_updatepp(unsigned long slot,
251 unsigned long newpp, 235 unsigned long newpp,
252 unsigned long va, 236 unsigned long vpn,
253 int psize, int ssize, int local) 237 int psize, int ssize, int local)
254{ 238{
255 unsigned long lpar_rc; 239 unsigned long lpar_rc;
256 unsigned long flags = (newpp & 7) | H_AVPN; 240 unsigned long flags = (newpp & 7) | H_AVPN;
257 unsigned long want_v; 241 unsigned long want_v;
258 242
259 want_v = hpte_encode_avpn(va, psize, ssize); 243 want_v = hpte_encode_avpn(vpn, psize, ssize);
260 244
261 pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...", 245 pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
262 want_v, slot, flags, psize); 246 want_v, slot, flags, psize);
@@ -294,15 +278,15 @@ static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
294 return dword0; 278 return dword0;
295} 279}
296 280
297static long pSeries_lpar_hpte_find(unsigned long va, int psize, int ssize) 281static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
298{ 282{
299 unsigned long hash; 283 unsigned long hash;
300 unsigned long i; 284 unsigned long i;
301 long slot; 285 long slot;
302 unsigned long want_v, hpte_v; 286 unsigned long want_v, hpte_v;
303 287
304 hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize); 288 hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
305 want_v = hpte_encode_avpn(va, psize, ssize); 289 want_v = hpte_encode_avpn(vpn, psize, ssize);
306 290
307 /* Bolted entries are always in the primary group */ 291 /* Bolted entries are always in the primary group */
308 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 292 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -322,12 +306,13 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
322 unsigned long ea, 306 unsigned long ea,
323 int psize, int ssize) 307 int psize, int ssize)
324{ 308{
325 unsigned long lpar_rc, slot, vsid, va, flags; 309 unsigned long vpn;
310 unsigned long lpar_rc, slot, vsid, flags;
326 311
327 vsid = get_kernel_vsid(ea, ssize); 312 vsid = get_kernel_vsid(ea, ssize);
328 va = hpt_va(ea, vsid, ssize); 313 vpn = hpt_vpn(ea, vsid, ssize);
329 314
330 slot = pSeries_lpar_hpte_find(va, psize, ssize); 315 slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
331 BUG_ON(slot == -1); 316 BUG_ON(slot == -1);
332 317
333 flags = newpp & 7; 318 flags = newpp & 7;
@@ -336,17 +321,17 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
336 BUG_ON(lpar_rc != H_SUCCESS); 321 BUG_ON(lpar_rc != H_SUCCESS);
337} 322}
338 323
339static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va, 324static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
340 int psize, int ssize, int local) 325 int psize, int ssize, int local)
341{ 326{
342 unsigned long want_v; 327 unsigned long want_v;
343 unsigned long lpar_rc; 328 unsigned long lpar_rc;
344 unsigned long dummy1, dummy2; 329 unsigned long dummy1, dummy2;
345 330
346 pr_devel(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", 331 pr_devel(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
347 slot, va, psize, local); 332 slot, vpn, psize, local);
348 333
349 want_v = hpte_encode_avpn(va, psize, ssize); 334 want_v = hpte_encode_avpn(vpn, psize, ssize);
350 lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2); 335 lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
351 if (lpar_rc == H_NOT_FOUND) 336 if (lpar_rc == H_NOT_FOUND)
352 return; 337 return;
@@ -357,15 +342,16 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
357static void pSeries_lpar_hpte_removebolted(unsigned long ea, 342static void pSeries_lpar_hpte_removebolted(unsigned long ea,
358 int psize, int ssize) 343 int psize, int ssize)
359{ 344{
360 unsigned long slot, vsid, va; 345 unsigned long vpn;
346 unsigned long slot, vsid;
361 347
362 vsid = get_kernel_vsid(ea, ssize); 348 vsid = get_kernel_vsid(ea, ssize);
363 va = hpt_va(ea, vsid, ssize); 349 vpn = hpt_vpn(ea, vsid, ssize);
364 350
365 slot = pSeries_lpar_hpte_find(va, psize, ssize); 351 slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
366 BUG_ON(slot == -1); 352 BUG_ON(slot == -1);
367 353
368 pSeries_lpar_hpte_invalidate(slot, va, psize, ssize, 0); 354 pSeries_lpar_hpte_invalidate(slot, vpn, psize, ssize, 0);
369} 355}
370 356
371/* Flag bits for H_BULK_REMOVE */ 357/* Flag bits for H_BULK_REMOVE */
@@ -381,12 +367,12 @@ static void pSeries_lpar_hpte_removebolted(unsigned long ea,
381 */ 367 */
382static void pSeries_lpar_flush_hash_range(unsigned long number, int local) 368static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
383{ 369{
370 unsigned long vpn;
384 unsigned long i, pix, rc; 371 unsigned long i, pix, rc;
385 unsigned long flags = 0; 372 unsigned long flags = 0;
386 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 373 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
387 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); 374 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
388 unsigned long param[9]; 375 unsigned long param[9];
389 unsigned long va;
390 unsigned long hash, index, shift, hidx, slot; 376 unsigned long hash, index, shift, hidx, slot;
391 real_pte_t pte; 377 real_pte_t pte;
392 int psize, ssize; 378 int psize, ssize;
@@ -398,21 +384,21 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
398 ssize = batch->ssize; 384 ssize = batch->ssize;
399 pix = 0; 385 pix = 0;
400 for (i = 0; i < number; i++) { 386 for (i = 0; i < number; i++) {
401 va = batch->vaddr[i]; 387 vpn = batch->vpn[i];
402 pte = batch->pte[i]; 388 pte = batch->pte[i];
403 pte_iterate_hashed_subpages(pte, psize, va, index, shift) { 389 pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
404 hash = hpt_hash(va, shift, ssize); 390 hash = hpt_hash(vpn, shift, ssize);
405 hidx = __rpte_to_hidx(pte, index); 391 hidx = __rpte_to_hidx(pte, index);
406 if (hidx & _PTEIDX_SECONDARY) 392 if (hidx & _PTEIDX_SECONDARY)
407 hash = ~hash; 393 hash = ~hash;
408 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 394 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
409 slot += hidx & _PTEIDX_GROUP_IX; 395 slot += hidx & _PTEIDX_GROUP_IX;
410 if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { 396 if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
411 pSeries_lpar_hpte_invalidate(slot, va, psize, 397 pSeries_lpar_hpte_invalidate(slot, vpn, psize,
412 ssize, local); 398 ssize, local);
413 } else { 399 } else {
414 param[pix] = HBR_REQUEST | HBR_AVPN | slot; 400 param[pix] = HBR_REQUEST | HBR_AVPN | slot;
415 param[pix+1] = hpte_encode_avpn(va, psize, 401 param[pix+1] = hpte_encode_avpn(vpn, psize,
416 ssize); 402 ssize);
417 pix += 2; 403 pix += 2;
418 if (pix == 8) { 404 if (pix == 8) {