author     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>  2013-04-28 05:37:35 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2013-04-30 02:00:14 -0400
commit     b1022fbd293564de91596b8775340cf41ad5214c (patch)
tree       b8224a20090350249d0254bc8e15af10e837eb91
parent     74f227b22897e0db52a58f8f634e9ce6f67cc652 (diff)
powerpc: Decode the pte-lp-encoding bits correctly.
We look at both the segment base page size and actual page size and store
the pte-lp-encodings in an array per base page size.

We also update all relevant functions to take actual page size argument
so that we can use the correct PTE LP encoding in HPTE. This should also
get the basic Multiple Page Size per Segment (MPSS) support. This is
needed to enable THP on ppc64.

[Fixed PR KVM build --BenH]

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
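A minimal userspace sketch of the resulting lookup (not part of the patch; the
page-size indices and encoding values below are illustrative assumptions, not
the real ISA encodings): the LP encoding is now selected by the pair
(segment base page size, actual page size), with -1 marking unsupported
combinations, exactly as the penc[] array introduced in mmu_psize_def.

    #include <stdio.h>

    /* Simplified stand-ins for the kernel's MMU page-size indices (assumption). */
    enum { MMU_PAGE_4K, MMU_PAGE_64K, MMU_PAGE_16M, MMU_PAGE_COUNT };

    #define LP_SHIFT 12

    struct mmu_psize_def {
    	unsigned int shift;           /* log2 of the page size */
    	int penc[MMU_PAGE_COUNT];     /* LP encoding per actual page size, -1 = unsupported */
    };

    static struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

    /* Mirrors the patch's mmu_psize_set_default_penc(): mark everything unsupported. */
    static void set_default_penc(void)
    {
    	for (int b = 0; b < MMU_PAGE_COUNT; b++)
    		for (int a = 0; a < MMU_PAGE_COUNT; a++)
    			mmu_psize_defs[b].penc[a] = -1;
    }

    /*
     * Shape of the new hpte_encode_r(): the LP encoding is picked by
     * (base page size, actual page size); the low-order mask comes from the
     * actual page size. The RPN masking is simplified here.
     */
    static unsigned long encode_r(unsigned long pa, int base_psize, int actual_psize)
    {
    	if (actual_psize == MMU_PAGE_4K)
    		return pa;                 /* a 4K page needs no LP encoding */
    	unsigned int penc  = mmu_psize_defs[base_psize].penc[actual_psize];
    	unsigned int shift = mmu_psize_defs[actual_psize].shift;
    	return (pa & ~((1ul << shift) - 1)) | ((unsigned long)penc << LP_SHIFT);
    }

    int main(void)
    {
    	set_default_penc();
    	mmu_psize_defs[MMU_PAGE_4K].shift  = 12;
    	mmu_psize_defs[MMU_PAGE_64K].shift = 16;
    	mmu_psize_defs[MMU_PAGE_16M].shift = 24;

    	/* Illustrative encodings only: 64K base supporting 64K actual pages,
    	 * 16M base supporting 16M actual pages. */
    	mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_64K] = 0x1;
    	mmu_psize_defs[MMU_PAGE_16M].penc[MMU_PAGE_16M] = 0x0;

    	printf("r field for pa=0x1230000, base=64K, actual=64K: 0x%lx\n",
    	       encode_r(0x1230000ul, MMU_PAGE_64K, MMU_PAGE_64K));
    	return 0;
    }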
-rw-r--r--  arch/powerpc/include/asm/machdep.h         3
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h     33
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c      2
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c               8
-rw-r--r--  arch/powerpc/mm/hash_low_64.S             18
-rw-r--r--  arch/powerpc/mm/hash_native_64.c         135
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c          125
-rw-r--r--  arch/powerpc/platforms/cell/beat_htab.c   16
-rw-r--r--  arch/powerpc/platforms/ps3/htab.c          6
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c      6
10 files changed, 234 insertions(+), 118 deletions(-)
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 3d6b4100dac1..3f3f691be2e7 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -50,7 +50,8 @@ struct machdep_calls {
 				       unsigned long prpn,
 				       unsigned long rflags,
 				       unsigned long vflags,
-				       int psize, int ssize);
+				       int psize, int apsize,
+				       int ssize);
 	long		(*hpte_remove)(unsigned long hpte_group);
 	void		(*hpte_removebolted)(unsigned long ea,
 					     int psize, int ssize);
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index de9e577f329c..18171a86bff6 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -155,7 +155,7 @@ extern unsigned long htab_hash_mask;
 struct mmu_psize_def
 {
 	unsigned int	shift;	/* number of bits */
-	unsigned int	penc;	/* HPTE encoding */
+	int		penc[MMU_PAGE_COUNT];	/* HPTE encoding */
 	unsigned int	tlbiel;	/* tlbiel supported for that page size */
 	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
 	unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
@@ -200,6 +200,13 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
  */
 #define VPN_SHIFT	12
 
+/*
+ * HPTE Large Page (LP) details
+ */
+#define LP_SHIFT	12
+#define LP_BITS		8
+#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
+
 #ifndef __ASSEMBLY__
 
 static inline int segment_shift(int ssize)
@@ -255,14 +262,14 @@ static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
 
 /*
  * This function sets the AVPN and L fields of the HPTE appropriately
- * for the page size
+ * using the base page size and actual page size.
  */
-static inline unsigned long hpte_encode_v(unsigned long vpn,
-					  int psize, int ssize)
+static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
+					  int actual_psize, int ssize)
 {
 	unsigned long v;
-	v = hpte_encode_avpn(vpn, psize, ssize);
-	if (psize != MMU_PAGE_4K)
+	v = hpte_encode_avpn(vpn, base_psize, ssize);
+	if (actual_psize != MMU_PAGE_4K)
 		v |= HPTE_V_LARGE;
 	return v;
 }
@@ -272,19 +279,17 @@ static inline unsigned long hpte_encode_v(unsigned long vpn,
  * for the page size. We assume the pa is already "clean" that is properly
  * aligned for the requested page size
  */
-static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
+static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
+					  int actual_psize)
 {
-	unsigned long r;
-
 	/* A 4K page needs no special encoding */
-	if (psize == MMU_PAGE_4K)
+	if (actual_psize == MMU_PAGE_4K)
 		return pa & HPTE_R_RPN;
 	else {
-		unsigned int penc = mmu_psize_defs[psize].penc;
-		unsigned int shift = mmu_psize_defs[psize].shift;
-		return (pa & ~((1ul << shift) - 1)) | (penc << 12);
+		unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
+		unsigned int shift = mmu_psize_defs[actual_psize].shift;
+		return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
 	}
-	return r;
 }
 
 /*
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 5d7d29a313eb..3a9a1aceb14f 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -143,7 +143,7 @@ map_again:
 	}
 
 	ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
-				 MMU_PAGE_4K, MMU_SEGSIZE_256M);
+				 MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M);
 
 	if (ret < 0) {
 		/* If we couldn't map a primary PTE, try a secondary */
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 80dcc53a1aba..c794a4cd0225 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1515,7 +1515,13 @@ static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
 	(*sps)->page_shift = def->shift;
 	(*sps)->slb_enc = def->sllp;
 	(*sps)->enc[0].page_shift = def->shift;
-	(*sps)->enc[0].pte_enc = def->penc;
+	/*
+	 * Only return base page encoding. We don't want to return
+	 * all the supporting pte_enc, because our H_ENTER doesn't
+	 * support MPSS yet. Once they do, we can start passing all
+	 * support pte_enc here
+	 */
+	(*sps)->enc[0].pte_enc = def->penc[linux_psize];
 	(*sps)++;
 }
 
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index abdd5e21cdb6..0e980acae67c 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -196,7 +196,8 @@ htab_insert_pte:
 	mr	r4,r29			/* Retrieve vpn */
 	li	r7,0			/* !bolted, !secondary */
 	li	r8,MMU_PAGE_4K		/* page size */
-	ld	r9,STK_PARAM(R9)(r1)	/* segment size */
+	li	r9,MMU_PAGE_4K		/* actual page size */
+	ld	r10,STK_PARAM(R9)(r1)	/* segment size */
 _GLOBAL(htab_call_hpte_insert1)
 	bl	.			/* Patched by htab_finish_init() */
 	cmpdi	0,r3,0
@@ -219,7 +220,8 @@ _GLOBAL(htab_call_hpte_insert1)
 	mr	r4,r29			/* Retrieve vpn */
 	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
 	li	r8,MMU_PAGE_4K		/* page size */
-	ld	r9,STK_PARAM(R9)(r1)	/* segment size */
+	li	r9,MMU_PAGE_4K		/* actual page size */
+	ld	r10,STK_PARAM(R9)(r1)	/* segment size */
 _GLOBAL(htab_call_hpte_insert2)
 	bl	.			/* Patched by htab_finish_init() */
 	cmpdi	0,r3,0
@@ -515,7 +517,8 @@ htab_special_pfn:
 	mr	r4,r29			/* Retrieve vpn */
 	li	r7,0			/* !bolted, !secondary */
 	li	r8,MMU_PAGE_4K		/* page size */
-	ld	r9,STK_PARAM(R9)(r1)	/* segment size */
+	li	r9,MMU_PAGE_4K		/* actual page size */
+	ld	r10,STK_PARAM(R9)(r1)	/* segment size */
 _GLOBAL(htab_call_hpte_insert1)
 	bl	.			/* patched by htab_finish_init() */
 	cmpdi	0,r3,0
@@ -542,7 +545,8 @@ _GLOBAL(htab_call_hpte_insert1)
 	mr	r4,r29			/* Retrieve vpn */
 	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
 	li	r8,MMU_PAGE_4K		/* page size */
-	ld	r9,STK_PARAM(R9)(r1)	/* segment size */
+	li	r9,MMU_PAGE_4K		/* actual page size */
+	ld	r10,STK_PARAM(R9)(r1)	/* segment size */
 _GLOBAL(htab_call_hpte_insert2)
 	bl	.			/* patched by htab_finish_init() */
 	cmpdi	0,r3,0
@@ -840,7 +844,8 @@ ht64_insert_pte:
 	mr	r4,r29			/* Retrieve vpn */
 	li	r7,0			/* !bolted, !secondary */
 	li	r8,MMU_PAGE_64K
-	ld	r9,STK_PARAM(R9)(r1)	/* segment size */
+	li	r9,MMU_PAGE_64K		/* actual page size */
+	ld	r10,STK_PARAM(R9)(r1)	/* segment size */
 _GLOBAL(ht64_call_hpte_insert1)
 	bl	.			/* patched by htab_finish_init() */
 	cmpdi	0,r3,0
@@ -863,7 +868,8 @@ _GLOBAL(ht64_call_hpte_insert1)
 	mr	r4,r29			/* Retrieve vpn */
 	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
 	li	r8,MMU_PAGE_64K
-	ld	r9,STK_PARAM(R9)(r1)	/* segment size */
+	li	r9,MMU_PAGE_64K		/* actual page size */
+	ld	r10,STK_PARAM(R9)(r1)	/* segment size */
 _GLOBAL(ht64_call_hpte_insert2)
 	bl	.			/* patched by htab_finish_init() */
 	cmpdi	0,r3,0
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 9d8983ad1dec..14e3fe896baf 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -39,7 +39,7 @@
 
 DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
-static inline void __tlbie(unsigned long vpn, int psize, int ssize)
+static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
 {
 	unsigned long va;
 	unsigned int penc;
@@ -68,7 +68,7 @@ static inline void __tlbie(unsigned long vpn, int psize, int ssize)
 		break;
 	default:
 		/* We need 14 to 14 + i bits of va */
-		penc = mmu_psize_defs[psize].penc;
+		penc = mmu_psize_defs[psize].penc[apsize];
 		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
 		va |= penc << 12;
 		va |= ssize << 8;
@@ -80,7 +80,7 @@ static inline void __tlbiel(unsigned long vpn, int psize, int ssize)
 	}
 }
 
-static inline void __tlbiel(unsigned long vpn, int psize, int ssize)
+static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
 {
 	unsigned long va;
 	unsigned int penc;
@@ -102,7 +102,7 @@ static inline void __tlbiel(unsigned long vpn, int psize, int ssize)
 		break;
 	default:
 		/* We need 14 to 14 + i bits of va */
-		penc = mmu_psize_defs[psize].penc;
+		penc = mmu_psize_defs[psize].penc[apsize];
 		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
 		va |= penc << 12;
 		va |= ssize << 8;
@@ -114,7 +114,8 @@ static inline void __tlbiel(unsigned long vpn, int psize, int ssize)
 
 }
 
-static inline void tlbie(unsigned long vpn, int psize, int ssize, int local)
+static inline void tlbie(unsigned long vpn, int psize, int apsize,
+			 int ssize, int local)
 {
 	unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
 	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
@@ -125,10 +126,10 @@ static inline void tlbie(unsigned long vpn, int psize, int ssize, int local)
 		raw_spin_lock(&native_tlbie_lock);
 	asm volatile("ptesync": : :"memory");
 	if (use_local) {
-		__tlbiel(vpn, psize, ssize);
+		__tlbiel(vpn, psize, apsize, ssize);
 		asm volatile("ptesync": : :"memory");
 	} else {
-		__tlbie(vpn, psize, ssize);
+		__tlbie(vpn, psize, apsize, ssize);
 		asm volatile("eieio; tlbsync; ptesync": : :"memory");
 	}
 	if (lock_tlbie && !use_local)
@@ -156,7 +157,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
 
 static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 			unsigned long pa, unsigned long rflags,
-			unsigned long vflags, int psize, int ssize)
+			unsigned long vflags, int psize, int apsize, int ssize)
 {
 	struct hash_pte *hptep = htab_address + hpte_group;
 	unsigned long hpte_v, hpte_r;
@@ -183,8 +184,8 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 	if (i == HPTES_PER_GROUP)
 		return -1;
 
-	hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
-	hpte_r = hpte_encode_r(pa, psize) | rflags;
+	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
+	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
 
 	if (!(vflags & HPTE_V_BOLTED)) {
 		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
@@ -244,6 +245,45 @@ static long native_hpte_remove(unsigned long hpte_group)
 	return i;
 }
 
+static inline int hpte_actual_psize(struct hash_pte *hptep, int psize)
+{
+	int i, shift;
+	unsigned int mask;
+	/* Look at the 8 bit LP value */
+	unsigned int lp = (hptep->r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
+
+	if (!(hptep->v & HPTE_V_VALID))
+		return -1;
+
+	/* First check if it is large page */
+	if (!(hptep->v & HPTE_V_LARGE))
+		return MMU_PAGE_4K;
+
+	/* start from 1 ignoring MMU_PAGE_4K */
+	for (i = 1; i < MMU_PAGE_COUNT; i++) {
+
+		/* invalid penc */
+		if (mmu_psize_defs[psize].penc[i] == -1)
+			continue;
+		/*
+		 * encoding bits per actual page size
+		 *        PTE LP     actual page size
+		 *    rrrr rrrz		>=8KB
+		 *    rrrr rrzz		>=16KB
+		 *    rrrr rzzz		>=32KB
+		 *    rrrr zzzz		>=64KB
+		 * .......
+		 */
+		shift = mmu_psize_defs[i].shift - LP_SHIFT;
+		if (shift > LP_BITS)
+			shift = LP_BITS;
+		mask = (1 << shift) - 1;
+		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
+			return i;
+	}
+	return -1;
+}
+
 static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 				 unsigned long vpn, int psize, int ssize,
 				 int local)
@@ -251,6 +291,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	struct hash_pte *hptep = htab_address + slot;
 	unsigned long hpte_v, want_v;
 	int ret = 0;
+	int actual_psize;
 
 	want_v = hpte_encode_avpn(vpn, psize, ssize);
 
@@ -260,9 +301,13 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	native_lock_hpte(hptep);
 
 	hpte_v = hptep->v;
-
+	actual_psize = hpte_actual_psize(hptep, psize);
+	if (actual_psize < 0) {
+		native_unlock_hpte(hptep);
+		return -1;
+	}
 	/* Even if we miss, we need to invalidate the TLB */
-	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
+	if (!HPTE_V_COMPARE(hpte_v, want_v)) {
 		DBG_LOW(" -> miss\n");
 		ret = -1;
 	} else {
@@ -274,7 +319,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	native_unlock_hpte(hptep);
 
 	/* Ensure it is out of the tlb too. */
-	tlbie(vpn, psize, ssize, local);
+	tlbie(vpn, psize, actual_psize, ssize, local);
 
 	return ret;
 }
@@ -315,6 +360,7 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize)
 static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 				       int psize, int ssize)
 {
+	int actual_psize;
 	unsigned long vpn;
 	unsigned long vsid;
 	long slot;
@@ -327,13 +373,16 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 	if (slot == -1)
 		panic("could not find page to bolt\n");
 	hptep = htab_address + slot;
+	actual_psize = hpte_actual_psize(hptep, psize);
+	if (actual_psize < 0)
+		return;
 
 	/* Update the HPTE */
 	hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
 		(newpp & (HPTE_R_PP | HPTE_R_N));
 
 	/* Ensure it is out of the tlb too. */
-	tlbie(vpn, psize, ssize, 0);
+	tlbie(vpn, psize, actual_psize, ssize, 0);
 }
 
 static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
@@ -343,6 +392,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
 	unsigned long hpte_v;
 	unsigned long want_v;
 	unsigned long flags;
+	int actual_psize;
 
 	local_irq_save(flags);
 
@@ -352,35 +402,38 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
 	native_lock_hpte(hptep);
 	hpte_v = hptep->v;
 
+	actual_psize = hpte_actual_psize(hptep, psize);
+	if (actual_psize < 0) {
+		native_unlock_hpte(hptep);
+		local_irq_restore(flags);
+		return;
+	}
 	/* Even if we miss, we need to invalidate the TLB */
-	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
+	if (!HPTE_V_COMPARE(hpte_v, want_v))
 		native_unlock_hpte(hptep);
 	else
 		/* Invalidate the hpte. NOTE: this also unlocks it */
 		hptep->v = 0;
 
 	/* Invalidate the TLB */
-	tlbie(vpn, psize, ssize, local);
+	tlbie(vpn, psize, actual_psize, ssize, local);
 
 	local_irq_restore(flags);
 }
 
-#define LP_SHIFT	12
-#define LP_BITS		8
-#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
-
 static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
-			int *psize, int *ssize, unsigned long *vpn)
+			int *psize, int *apsize, int *ssize, unsigned long *vpn)
 {
 	unsigned long avpn, pteg, vpi;
 	unsigned long hpte_r = hpte->r;
 	unsigned long hpte_v = hpte->v;
 	unsigned long vsid, seg_off;
-	int i, size, shift, penc;
+	int i, size, a_size, shift, penc;
 
-	if (!(hpte_v & HPTE_V_LARGE))
+	if (!(hpte_v & HPTE_V_LARGE)) {
 		size = MMU_PAGE_4K;
-	else {
+		a_size = MMU_PAGE_4K;
+	} else {
 		for (i = 0; i < LP_BITS; i++) {
 			if ((hpte_r & LP_MASK(i+1)) == LP_MASK(i+1))
 				break;
@@ -388,19 +441,26 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
 		penc = LP_MASK(i+1) >> LP_SHIFT;
 		for (size = 0; size < MMU_PAGE_COUNT; size++) {
 
-			/* 4K pages are not represented by LP */
-			if (size == MMU_PAGE_4K)
-				continue;
-
 			/* valid entries have a shift value */
 			if (!mmu_psize_defs[size].shift)
 				continue;
+			for (a_size = 0; a_size < MMU_PAGE_COUNT; a_size++) {
 
-			if (penc == mmu_psize_defs[size].penc)
-				break;
+				/* 4K pages are not represented by LP */
+				if (a_size == MMU_PAGE_4K)
+					continue;
+
+				/* valid entries have a shift value */
+				if (!mmu_psize_defs[a_size].shift)
+					continue;
+
+				if (penc == mmu_psize_defs[size].penc[a_size])
+					goto out;
+			}
 		}
 	}
 
+out:
 	/* This works for all page sizes, and for 256M and 1T segments */
 	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
 	shift = mmu_psize_defs[size].shift;
@@ -433,7 +493,8 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
 	default:
 		*vpn = size = 0;
 	}
 	*psize = size;
+	*apsize = a_size;
 }
 
 /*
@@ -451,7 +512,7 @@ static void native_hpte_clear(void)
 	struct hash_pte *hptep = htab_address;
 	unsigned long hpte_v;
 	unsigned long pteg_count;
-	int psize, ssize;
+	int psize, apsize, ssize;
 
 	pteg_count = htab_hash_mask + 1;
 
@@ -477,9 +538,9 @@ static void native_hpte_clear(void)
 		 * already hold the native_tlbie_lock.
 		 */
 		if (hpte_v & HPTE_V_VALID) {
-			hpte_decode(hptep, slot, &psize, &ssize, &vpn);
+			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
 			hptep->v = 0;
-			__tlbie(vpn, psize, ssize);
+			__tlbie(vpn, psize, apsize, ssize);
 		}
 	}
 
@@ -540,7 +601,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 
 		pte_iterate_hashed_subpages(pte, psize,
 					    vpn, index, shift) {
-			__tlbiel(vpn, psize, ssize);
+			__tlbiel(vpn, psize, psize, ssize);
 		} pte_iterate_hashed_end();
 	}
 	asm volatile("ptesync":::"memory");
@@ -557,7 +618,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 
 			pte_iterate_hashed_subpages(pte, psize,
 						    vpn, index, shift) {
-				__tlbie(vpn, psize, ssize);
+				__tlbie(vpn, psize, psize, ssize);
 			} pte_iterate_hashed_end();
 		}
 		asm volatile("eieio; tlbsync; ptesync":::"memory");
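(Illustration, not part of the patch.) The comment block added in
hpte_actual_psize() above compresses a lot of detail: only the low
shift - LP_SHIFT bits of the 8-bit LP field are significant for a given
actual page size, capped at LP_BITS. A small standalone C sketch of the same
decode, using assumed page-size indices and illustrative encodings rather
than the real ISA values:

    #include <stdio.h>

    enum { MMU_PAGE_4K, MMU_PAGE_64K, MMU_PAGE_16M, MMU_PAGE_COUNT };

    #define LP_SHIFT 12
    #define LP_BITS  8

    struct mmu_psize_def {
    	unsigned int shift;
    	int penc[MMU_PAGE_COUNT];
    };

    /* Illustrative table: penc values here are assumptions, not the real encodings. */
    static struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
    	[MMU_PAGE_4K]  = { .shift = 12, .penc = { -1,  -1, -1 } },
    	[MMU_PAGE_64K] = { .shift = 16, .penc = { -1, 0x1, -1 } },
    	[MMU_PAGE_16M] = { .shift = 24, .penc = { -1,  -1, 0x0 } },
    };

    /*
     * Same walk as the patch's hpte_actual_psize(): for each candidate actual
     * page size, mask the LP value down to its significant bits before
     * comparing it with the stored encoding for this base page size.
     */
    static int actual_psize_from_lp(unsigned int lp, int base_psize)
    {
    	for (int i = 1; i < MMU_PAGE_COUNT; i++) {   /* skip 4K, it has no LP */
    		if (mmu_psize_defs[base_psize].penc[i] == -1)
    			continue;
    		int shift = mmu_psize_defs[i].shift - LP_SHIFT;
    		if (shift > LP_BITS)
    			shift = LP_BITS;
    		unsigned int mask = (1u << shift) - 1;
    		if ((lp & mask) == (unsigned int)mmu_psize_defs[base_psize].penc[i])
    			return i;
    	}
    	return -1;
    }

    int main(void)
    {
    	/* LP would come from an HPTE: (r >> LP_SHIFT) & ((1 << LP_BITS) - 1) */
    	printf("lp=0x1, base=64K -> actual index %d\n",
    	       actual_psize_from_lp(0x1, MMU_PAGE_64K));
    	printf("lp=0x0, base=16M -> actual index %d\n",
    	       actual_psize_from_lp(0x0, MMU_PAGE_16M));
    	return 0;
    }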
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 1ed4419c533b..cd75d68a730b 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -126,7 +126,7 @@ static struct mmu_psize_def mmu_psize_defaults_old[] = {
 	[MMU_PAGE_4K] = {
 		.shift	= 12,
 		.sllp	= 0,
-		.penc	= 0,
+		.penc   = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},
 		.avpnm	= 0,
 		.tlbiel = 0,
 	},
@@ -140,14 +140,15 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
 	[MMU_PAGE_4K] = {
 		.shift	= 12,
 		.sllp	= 0,
-		.penc	= 0,
+		.penc   = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},
 		.avpnm	= 0,
 		.tlbiel = 1,
 	},
 	[MMU_PAGE_16M] = {
 		.shift	= 24,
 		.sllp	= SLB_VSID_L,
-		.penc	= 0,
+		.penc   = {[0 ... MMU_PAGE_16M - 1] = -1, [MMU_PAGE_16M] = 0,
+			    [MMU_PAGE_16M + 1 ... MMU_PAGE_COUNT - 1] = -1 },
 		.avpnm	= 0x1UL,
 		.tlbiel = 0,
 	},
@@ -209,7 +210,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 
 		BUG_ON(!ppc_md.hpte_insert);
 		ret = ppc_md.hpte_insert(hpteg, vpn, paddr, tprot,
-					 HPTE_V_BOLTED, psize, ssize);
+					 HPTE_V_BOLTED, psize, psize, ssize);
 
 		if (ret < 0)
 			break;
@@ -276,6 +277,30 @@ static void __init htab_init_seg_sizes(void)
 	of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
 }
 
+static int __init get_idx_from_shift(unsigned int shift)
+{
+	int idx = -1;
+
+	switch (shift) {
+	case 0xc:
+		idx = MMU_PAGE_4K;
+		break;
+	case 0x10:
+		idx = MMU_PAGE_64K;
+		break;
+	case 0x14:
+		idx = MMU_PAGE_1M;
+		break;
+	case 0x18:
+		idx = MMU_PAGE_16M;
+		break;
+	case 0x22:
+		idx = MMU_PAGE_16G;
+		break;
+	}
+	return idx;
+}
+
 static int __init htab_dt_scan_page_sizes(unsigned long node,
 					  const char *uname, int depth,
 					  void *data)
@@ -295,60 +320,61 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
 		size /= 4;
 		cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);
 		while(size > 0) {
-			unsigned int shift = prop[0];
+			unsigned int base_shift = prop[0];
 			unsigned int slbenc = prop[1];
 			unsigned int lpnum = prop[2];
-			unsigned int lpenc = 0;
 			struct mmu_psize_def *def;
-			int idx = -1;
+			int idx, base_idx;
 
 			size -= 3; prop += 3;
-			while(size > 0 && lpnum) {
-				if (prop[0] == shift)
-					lpenc = prop[1];
-				prop += 2; size -= 2;
-				lpnum--;
+			base_idx = get_idx_from_shift(base_shift);
+			if (base_idx < 0) {
+				/*
+				 * skip the pte encoding also
+				 */
+				prop += lpnum * 2; size -= lpnum * 2;
+				continue;
 			}
-			switch(shift) {
-			case 0xc:
-				idx = MMU_PAGE_4K;
-				break;
-			case 0x10:
-				idx = MMU_PAGE_64K;
-				break;
-			case 0x14:
-				idx = MMU_PAGE_1M;
-				break;
-			case 0x18:
-				idx = MMU_PAGE_16M;
+			def = &mmu_psize_defs[base_idx];
+			if (base_idx == MMU_PAGE_16M)
 				cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE;
-				break;
-			case 0x22:
-				idx = MMU_PAGE_16G;
-				break;
-			}
-			if (idx < 0)
-				continue;
-			def = &mmu_psize_defs[idx];
-			def->shift = shift;
-			if (shift <= 23)
+
+			def->shift = base_shift;
+			if (base_shift <= 23)
 				def->avpnm = 0;
 			else
-				def->avpnm = (1 << (shift - 23)) - 1;
+				def->avpnm = (1 << (base_shift - 23)) - 1;
 			def->sllp = slbenc;
-			def->penc = lpenc;
-			/* We don't know for sure what's up with tlbiel, so
+			/*
+			 * We don't know for sure what's up with tlbiel, so
 			 * for now we only set it for 4K and 64K pages
 			 */
-			if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
+			if (base_idx == MMU_PAGE_4K || base_idx == MMU_PAGE_64K)
 				def->tlbiel = 1;
 			else
 				def->tlbiel = 0;
 
-			DBG(" %d: shift=%02x, sllp=%04lx, avpnm=%08lx, "
-				"tlbiel=%d, penc=%d\n",
-				idx, shift, def->sllp, def->avpnm, def->tlbiel,
-				def->penc);
+			while (size > 0 && lpnum) {
+				unsigned int shift = prop[0];
+				int penc = prop[1];
+
+				prop += 2; size -= 2;
+				lpnum--;
+
+				idx = get_idx_from_shift(shift);
+				if (idx < 0)
+					continue;
+
+				if (penc == -1)
+					pr_err("Invalid penc for base_shift=%d "
+					       "shift=%d\n", base_shift, shift);
+
+				def->penc[idx] = penc;
+				DBG(" %d: shift=%02x, sllp=%04lx, "
+					"avpnm=%08lx, tlbiel=%d, penc=%d\n",
+					idx, shift, def->sllp, def->avpnm,
+					def->tlbiel, def->penc[idx]);
+			}
 		}
 		return 1;
 	}
@@ -397,10 +423,21 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
 }
 #endif /* CONFIG_HUGETLB_PAGE */
 
+static void mmu_psize_set_default_penc(void)
+{
+	int bpsize, apsize;
+	for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++)
+		for (apsize = 0; apsize < MMU_PAGE_COUNT; apsize++)
+			mmu_psize_defs[bpsize].penc[apsize] = -1;
+}
+
 static void __init htab_init_page_sizes(void)
 {
 	int rc;
 
+	/* se the invalid penc to -1 */
+	mmu_psize_set_default_penc();
+
 	/* Default to 4K pages only */
 	memcpy(mmu_psize_defs, mmu_psize_defaults_old,
 	       sizeof(mmu_psize_defaults_old));
@@ -1243,7 +1280,7 @@ repeat:
 
 	/* Insert into the hash table, primary slot */
 	slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, vflags,
-				  psize, ssize);
+				  psize, psize, ssize);
 
 	/* Primary is full, try the secondary */
 	if (unlikely(slot == -1)) {
@@ -1251,7 +1288,7 @@ repeat:
 					      HPTES_PER_GROUP) & ~0x7UL;
 		slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags,
 					  vflags | HPTE_V_SECONDARY,
-					  psize, ssize);
+					  psize, psize, ssize);
 		if (slot == -1) {
 			if (mftb() & 0x1)
 				hpte_group = ((hash & htab_hash_mask) *
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
index 472f9a7609cf..246e1d8b3af3 100644
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ b/arch/powerpc/platforms/cell/beat_htab.c
@@ -90,7 +90,7 @@ static inline unsigned int beat_read_mask(unsigned hpte_group)
 static long beat_lpar_hpte_insert(unsigned long hpte_group,
 				  unsigned long vpn, unsigned long pa,
 				  unsigned long rflags, unsigned long vflags,
-				  int psize, int ssize)
+				  int psize, int apsize, int ssize)
 {
 	unsigned long lpar_rc;
 	u64 hpte_v, hpte_r, slot;
@@ -103,9 +103,9 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group,
 			"rflags=%lx, vflags=%lx, psize=%d)\n",
 			hpte_group, va, pa, rflags, vflags, psize);
 
-	hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) |
+	hpte_v = hpte_encode_v(vpn, psize, apsize, MMU_SEGSIZE_256M) |
 		vflags | HPTE_V_VALID;
-	hpte_r = hpte_encode_r(pa, psize) | rflags;
+	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
 
 	if (!(vflags & HPTE_V_BOLTED))
 		DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
@@ -314,7 +314,7 @@ void __init hpte_init_beat(void)
 static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
 				  unsigned long vpn, unsigned long pa,
 				  unsigned long rflags, unsigned long vflags,
-				  int psize, int ssize)
+				  int psize, int apsize, int ssize)
 {
 	unsigned long lpar_rc;
 	u64 hpte_v, hpte_r, slot;
@@ -327,9 +327,9 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
 			"rflags=%lx, vflags=%lx, psize=%d)\n",
 			hpte_group, vpn, pa, rflags, vflags, psize);
 
-	hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) |
+	hpte_v = hpte_encode_v(vpn, psize, apsize, MMU_SEGSIZE_256M) |
 		vflags | HPTE_V_VALID;
-	hpte_r = hpte_encode_r(pa, psize) | rflags;
+	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
 
 	if (!(vflags & HPTE_V_BOLTED))
 		DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
@@ -373,7 +373,7 @@ static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
 	unsigned long pss;
 
 	want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
-	pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;
+	pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc[psize];
 
 	DBG_LOW(" update: "
 		"avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
@@ -403,7 +403,7 @@ static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long vpn,
 	DBG_LOW(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
 		slot, vpn, psize, local);
 	want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
-	pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;
+	pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc[psize];
 
 	lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss);
 
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
index cd8f2fbb6d1a..177a2f70700c 100644
--- a/arch/powerpc/platforms/ps3/htab.c
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -46,7 +46,7 @@ static DEFINE_SPINLOCK(ps3_htab_lock);
 
 static long ps3_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 	unsigned long pa, unsigned long rflags, unsigned long vflags,
-	int psize, int ssize)
+	int psize, int apsize, int ssize)
 {
 	int result;
 	u64 hpte_v, hpte_r;
@@ -62,8 +62,8 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 	 */
 	vflags &= ~HPTE_V_SECONDARY;
 
-	hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
-	hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags;
+	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
+	hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize, apsize) | rflags;
 
 	spin_lock_irqsave(&ps3_htab_lock, flags);
 
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 9b02ab14a5cd..6d62072a7d5a 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -109,7 +109,7 @@ void vpa_init(int cpu)
 static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
 				     unsigned long vpn, unsigned long pa,
 				     unsigned long rflags, unsigned long vflags,
-				     int psize, int ssize)
+				     int psize, int apsize, int ssize)
 {
 	unsigned long lpar_rc;
 	unsigned long flags;
@@ -121,8 +121,8 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
 			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
 			 hpte_group, vpn, pa, rflags, vflags, psize);
 
-	hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
-	hpte_r = hpte_encode_r(pa, psize) | rflags;
+	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
+	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
 
 	if (!(vflags & HPTE_V_BOLTED))
 		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);