aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/mm/hash_native_64.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/mm/hash_native_64.c')
-rw-r--r--arch/powerpc/mm/hash_native_64.c178
1 files changed, 129 insertions, 49 deletions
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index ffc1e00f7a22..6a2aead5b0e5 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -39,7 +39,7 @@
39 39
40DEFINE_RAW_SPINLOCK(native_tlbie_lock); 40DEFINE_RAW_SPINLOCK(native_tlbie_lock);
41 41
42static inline void __tlbie(unsigned long vpn, int psize, int ssize) 42static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
43{ 43{
44 unsigned long va; 44 unsigned long va;
45 unsigned int penc; 45 unsigned int penc;
@@ -61,17 +61,31 @@ static inline void __tlbie(unsigned long vpn, int psize, int ssize)
61 61
62 switch (psize) { 62 switch (psize) {
63 case MMU_PAGE_4K: 63 case MMU_PAGE_4K:
64 /* clear out bits after (52) [0....52.....63] */
65 va &= ~((1ul << (64 - 52)) - 1);
64 va |= ssize << 8; 66 va |= ssize << 8;
67 va |= mmu_psize_defs[apsize].sllp << 6;
65 asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2) 68 asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
66 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) 69 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
67 : "memory"); 70 : "memory");
68 break; 71 break;
69 default: 72 default:
70 /* We need 14 to 14 + i bits of va */ 73 /* We need 14 to 14 + i bits of va */
71 penc = mmu_psize_defs[psize].penc; 74 penc = mmu_psize_defs[psize].penc[apsize];
72 va &= ~((1ul << mmu_psize_defs[psize].shift) - 1); 75 va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
73 va |= penc << 12; 76 va |= penc << 12;
74 va |= ssize << 8; 77 va |= ssize << 8;
78 /* Add AVAL part */
79 if (psize != apsize) {
80 /*
81 * MPSS, 64K base page size and 16MB large page size
82 * We don't need all the bits, but rest of the bits
83 * must be ignored by the processor.
84 * vpn covers up to 65 bits of va (0...65) and we need
85 * bits 58..64 of va.
86 */
87 va |= (vpn & 0xfe);
88 }
75 va |= 1; /* L */ 89 va |= 1; /* L */
76 asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2) 90 asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
77 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) 91 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
@@ -80,7 +94,7 @@ static inline void __tlbie(unsigned long vpn, int psize, int ssize)
80 } 94 }
81} 95}
82 96
83static inline void __tlbiel(unsigned long vpn, int psize, int ssize) 97static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
84{ 98{
85 unsigned long va; 99 unsigned long va;
86 unsigned int penc; 100 unsigned int penc;
@@ -96,16 +110,30 @@ static inline void __tlbiel(unsigned long vpn, int psize, int ssize)
96 110
97 switch (psize) { 111 switch (psize) {
98 case MMU_PAGE_4K: 112 case MMU_PAGE_4K:
113 /* clear out bits after (52) [0....52.....63] */
114 va &= ~((1ul << (64 - 52)) - 1);
99 va |= ssize << 8; 115 va |= ssize << 8;
116 va |= mmu_psize_defs[apsize].sllp << 6;
100 asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)" 117 asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
101 : : "r"(va) : "memory"); 118 : : "r"(va) : "memory");
102 break; 119 break;
103 default: 120 default:
104 /* We need 14 to 14 + i bits of va */ 121 /* We need 14 to 14 + i bits of va */
105 penc = mmu_psize_defs[psize].penc; 122 penc = mmu_psize_defs[psize].penc[apsize];
106 va &= ~((1ul << mmu_psize_defs[psize].shift) - 1); 123 va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
107 va |= penc << 12; 124 va |= penc << 12;
108 va |= ssize << 8; 125 va |= ssize << 8;
126 /* Add AVAL part */
127 if (psize != apsize) {
128 /*
129 * MPSS, 64K base page size and 16MB large page size
130 * We don't need all the bits, but rest of the bits
131 * must be ignored by the processor.
132 * vpn covers up to 65 bits of va (0...65) and we need
133 * bits 58..64 of va.
134 */
135 va |= (vpn & 0xfe);
136 }
109 va |= 1; /* L */ 137 va |= 1; /* L */
110 asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)" 138 asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
111 : : "r"(va) : "memory"); 139 : : "r"(va) : "memory");
@@ -114,7 +142,8 @@ static inline void __tlbiel(unsigned long vpn, int psize, int ssize)
114 142
115} 143}
116 144
117static inline void tlbie(unsigned long vpn, int psize, int ssize, int local) 145static inline void tlbie(unsigned long vpn, int psize, int apsize,
146 int ssize, int local)
118{ 147{
119 unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL); 148 unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
120 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); 149 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
@@ -125,10 +154,10 @@ static inline void tlbie(unsigned long vpn, int psize, int ssize, int local)
125 raw_spin_lock(&native_tlbie_lock); 154 raw_spin_lock(&native_tlbie_lock);
126 asm volatile("ptesync": : :"memory"); 155 asm volatile("ptesync": : :"memory");
127 if (use_local) { 156 if (use_local) {
128 __tlbiel(vpn, psize, ssize); 157 __tlbiel(vpn, psize, apsize, ssize);
129 asm volatile("ptesync": : :"memory"); 158 asm volatile("ptesync": : :"memory");
130 } else { 159 } else {
131 __tlbie(vpn, psize, ssize); 160 __tlbie(vpn, psize, apsize, ssize);
132 asm volatile("eieio; tlbsync; ptesync": : :"memory"); 161 asm volatile("eieio; tlbsync; ptesync": : :"memory");
133 } 162 }
134 if (lock_tlbie && !use_local) 163 if (lock_tlbie && !use_local)
@@ -156,7 +185,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
156 185
157static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn, 186static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
158 unsigned long pa, unsigned long rflags, 187 unsigned long pa, unsigned long rflags,
159 unsigned long vflags, int psize, int ssize) 188 unsigned long vflags, int psize, int apsize, int ssize)
160{ 189{
161 struct hash_pte *hptep = htab_address + hpte_group; 190 struct hash_pte *hptep = htab_address + hpte_group;
162 unsigned long hpte_v, hpte_r; 191 unsigned long hpte_v, hpte_r;
@@ -183,8 +212,8 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
183 if (i == HPTES_PER_GROUP) 212 if (i == HPTES_PER_GROUP)
184 return -1; 213 return -1;
185 214
186 hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID; 215 hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
187 hpte_r = hpte_encode_r(pa, psize) | rflags; 216 hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
188 217
189 if (!(vflags & HPTE_V_BOLTED)) { 218 if (!(vflags & HPTE_V_BOLTED)) {
190 DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n", 219 DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
@@ -244,6 +273,51 @@ static long native_hpte_remove(unsigned long hpte_group)
244 return i; 273 return i;
245} 274}
246 275
276static inline int __hpte_actual_psize(unsigned int lp, int psize)
277{
278 int i, shift;
279 unsigned int mask;
280
281 /* start from 1 ignoring MMU_PAGE_4K */
282 for (i = 1; i < MMU_PAGE_COUNT; i++) {
283
284 /* invalid penc */
285 if (mmu_psize_defs[psize].penc[i] == -1)
286 continue;
287 /*
288 * encoding bits per actual page size
289 * PTE LP actual page size
290 * rrrr rrrz >=8KB
291 * rrrr rrzz >=16KB
292 * rrrr rzzz >=32KB
293 * rrrr zzzz >=64KB
294 * .......
295 */
296 shift = mmu_psize_defs[i].shift - LP_SHIFT;
297 if (shift > LP_BITS)
298 shift = LP_BITS;
299 mask = (1 << shift) - 1;
300 if ((lp & mask) == mmu_psize_defs[psize].penc[i])
301 return i;
302 }
303 return -1;
304}
305
306static inline int hpte_actual_psize(struct hash_pte *hptep, int psize)
307{
308 /* Look at the 8 bit LP value */
309 unsigned int lp = (hptep->r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
310
311 if (!(hptep->v & HPTE_V_VALID))
312 return -1;
313
314 /* First check if it is large page */
315 if (!(hptep->v & HPTE_V_LARGE))
316 return MMU_PAGE_4K;
317
318 return __hpte_actual_psize(lp, psize);
319}
320
247static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, 321static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
248 unsigned long vpn, int psize, int ssize, 322 unsigned long vpn, int psize, int ssize,
249 int local) 323 int local)
@@ -251,8 +325,9 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
251 struct hash_pte *hptep = htab_address + slot; 325 struct hash_pte *hptep = htab_address + slot;
252 unsigned long hpte_v, want_v; 326 unsigned long hpte_v, want_v;
253 int ret = 0; 327 int ret = 0;
328 int actual_psize;
254 329
255 want_v = hpte_encode_v(vpn, psize, ssize); 330 want_v = hpte_encode_avpn(vpn, psize, ssize);
256 331
257 DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)", 332 DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
258 vpn, want_v & HPTE_V_AVPN, slot, newpp); 333 vpn, want_v & HPTE_V_AVPN, slot, newpp);
@@ -260,9 +335,13 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
260 native_lock_hpte(hptep); 335 native_lock_hpte(hptep);
261 336
262 hpte_v = hptep->v; 337 hpte_v = hptep->v;
263 338 actual_psize = hpte_actual_psize(hptep, psize);
339 if (actual_psize < 0) {
340 native_unlock_hpte(hptep);
341 return -1;
342 }
264 /* Even if we miss, we need to invalidate the TLB */ 343 /* Even if we miss, we need to invalidate the TLB */
265 if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) { 344 if (!HPTE_V_COMPARE(hpte_v, want_v)) {
266 DBG_LOW(" -> miss\n"); 345 DBG_LOW(" -> miss\n");
267 ret = -1; 346 ret = -1;
268 } else { 347 } else {
@@ -274,7 +353,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
274 native_unlock_hpte(hptep); 353 native_unlock_hpte(hptep);
275 354
276 /* Ensure it is out of the tlb too. */ 355 /* Ensure it is out of the tlb too. */
277 tlbie(vpn, psize, ssize, local); 356 tlbie(vpn, psize, actual_psize, ssize, local);
278 357
279 return ret; 358 return ret;
280} 359}
@@ -288,7 +367,7 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize)
288 unsigned long want_v, hpte_v; 367 unsigned long want_v, hpte_v;
289 368
290 hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize); 369 hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
291 want_v = hpte_encode_v(vpn, psize, ssize); 370 want_v = hpte_encode_avpn(vpn, psize, ssize);
292 371
293 /* Bolted mappings are only ever in the primary group */ 372 /* Bolted mappings are only ever in the primary group */
294 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 373 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -315,6 +394,7 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize)
315static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, 394static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
316 int psize, int ssize) 395 int psize, int ssize)
317{ 396{
397 int actual_psize;
318 unsigned long vpn; 398 unsigned long vpn;
319 unsigned long vsid; 399 unsigned long vsid;
320 long slot; 400 long slot;
@@ -327,13 +407,16 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
327 if (slot == -1) 407 if (slot == -1)
328 panic("could not find page to bolt\n"); 408 panic("could not find page to bolt\n");
329 hptep = htab_address + slot; 409 hptep = htab_address + slot;
410 actual_psize = hpte_actual_psize(hptep, psize);
411 if (actual_psize < 0)
412 return;
330 413
331 /* Update the HPTE */ 414 /* Update the HPTE */
332 hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) | 415 hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
333 (newpp & (HPTE_R_PP | HPTE_R_N)); 416 (newpp & (HPTE_R_PP | HPTE_R_N));
334 417
335 /* Ensure it is out of the tlb too. */ 418 /* Ensure it is out of the tlb too. */
336 tlbie(vpn, psize, ssize, 0); 419 tlbie(vpn, psize, actual_psize, ssize, 0);
337} 420}
338 421
339static void native_hpte_invalidate(unsigned long slot, unsigned long vpn, 422static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
@@ -343,64 +426,60 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
343 unsigned long hpte_v; 426 unsigned long hpte_v;
344 unsigned long want_v; 427 unsigned long want_v;
345 unsigned long flags; 428 unsigned long flags;
429 int actual_psize;
346 430
347 local_irq_save(flags); 431 local_irq_save(flags);
348 432
349 DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot); 433 DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);
350 434
351 want_v = hpte_encode_v(vpn, psize, ssize); 435 want_v = hpte_encode_avpn(vpn, psize, ssize);
352 native_lock_hpte(hptep); 436 native_lock_hpte(hptep);
353 hpte_v = hptep->v; 437 hpte_v = hptep->v;
354 438
439 actual_psize = hpte_actual_psize(hptep, psize);
440 if (actual_psize < 0) {
441 native_unlock_hpte(hptep);
442 local_irq_restore(flags);
443 return;
444 }
355 /* Even if we miss, we need to invalidate the TLB */ 445 /* Even if we miss, we need to invalidate the TLB */
356 if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) 446 if (!HPTE_V_COMPARE(hpte_v, want_v))
357 native_unlock_hpte(hptep); 447 native_unlock_hpte(hptep);
358 else 448 else
359 /* Invalidate the hpte. NOTE: this also unlocks it */ 449 /* Invalidate the hpte. NOTE: this also unlocks it */
360 hptep->v = 0; 450 hptep->v = 0;
361 451
362 /* Invalidate the TLB */ 452 /* Invalidate the TLB */
363 tlbie(vpn, psize, ssize, local); 453 tlbie(vpn, psize, actual_psize, ssize, local);
364 454
365 local_irq_restore(flags); 455 local_irq_restore(flags);
366} 456}
367 457
368#define LP_SHIFT 12
369#define LP_BITS 8
370#define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT)
371
372static void hpte_decode(struct hash_pte *hpte, unsigned long slot, 458static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
373 int *psize, int *ssize, unsigned long *vpn) 459 int *psize, int *apsize, int *ssize, unsigned long *vpn)
374{ 460{
375 unsigned long avpn, pteg, vpi; 461 unsigned long avpn, pteg, vpi;
376 unsigned long hpte_r = hpte->r;
377 unsigned long hpte_v = hpte->v; 462 unsigned long hpte_v = hpte->v;
378 unsigned long vsid, seg_off; 463 unsigned long vsid, seg_off;
379 int i, size, shift, penc; 464 int size, a_size, shift;
465 /* Look at the 8 bit LP value */
466 unsigned int lp = (hpte->r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
380 467
381 if (!(hpte_v & HPTE_V_LARGE)) 468 if (!(hpte_v & HPTE_V_LARGE)) {
382 size = MMU_PAGE_4K; 469 size = MMU_PAGE_4K;
383 else { 470 a_size = MMU_PAGE_4K;
384 for (i = 0; i < LP_BITS; i++) { 471 } else {
385 if ((hpte_r & LP_MASK(i+1)) == LP_MASK(i+1))
386 break;
387 }
388 penc = LP_MASK(i+1) >> LP_SHIFT;
389 for (size = 0; size < MMU_PAGE_COUNT; size++) { 472 for (size = 0; size < MMU_PAGE_COUNT; size++) {
390 473
391 /* 4K pages are not represented by LP */
392 if (size == MMU_PAGE_4K)
393 continue;
394
395 /* valid entries have a shift value */ 474 /* valid entries have a shift value */
396 if (!mmu_psize_defs[size].shift) 475 if (!mmu_psize_defs[size].shift)
397 continue; 476 continue;
398 477
399 if (penc == mmu_psize_defs[size].penc) 478 a_size = __hpte_actual_psize(lp, size);
479 if (a_size != -1)
400 break; 480 break;
401 } 481 }
402 } 482 }
403
404 /* This works for all page sizes, and for 256M and 1T segments */ 483 /* This works for all page sizes, and for 256M and 1T segments */
405 *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT; 484 *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
406 shift = mmu_psize_defs[size].shift; 485 shift = mmu_psize_defs[size].shift;
@@ -433,7 +512,8 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
433 default: 512 default:
434 *vpn = size = 0; 513 *vpn = size = 0;
435 } 514 }
436 *psize = size; 515 *psize = size;
516 *apsize = a_size;
437} 517}
438 518
439/* 519/*
@@ -451,7 +531,7 @@ static void native_hpte_clear(void)
451 struct hash_pte *hptep = htab_address; 531 struct hash_pte *hptep = htab_address;
452 unsigned long hpte_v; 532 unsigned long hpte_v;
453 unsigned long pteg_count; 533 unsigned long pteg_count;
454 int psize, ssize; 534 int psize, apsize, ssize;
455 535
456 pteg_count = htab_hash_mask + 1; 536 pteg_count = htab_hash_mask + 1;
457 537
@@ -477,9 +557,9 @@ static void native_hpte_clear(void)
477 * already hold the native_tlbie_lock. 557 * already hold the native_tlbie_lock.
478 */ 558 */
479 if (hpte_v & HPTE_V_VALID) { 559 if (hpte_v & HPTE_V_VALID) {
480 hpte_decode(hptep, slot, &psize, &ssize, &vpn); 560 hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
481 hptep->v = 0; 561 hptep->v = 0;
482 __tlbie(vpn, psize, ssize); 562 __tlbie(vpn, psize, apsize, ssize);
483 } 563 }
484 } 564 }
485 565
@@ -520,7 +600,7 @@ static void native_flush_hash_range(unsigned long number, int local)
520 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 600 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
521 slot += hidx & _PTEIDX_GROUP_IX; 601 slot += hidx & _PTEIDX_GROUP_IX;
522 hptep = htab_address + slot; 602 hptep = htab_address + slot;
523 want_v = hpte_encode_v(vpn, psize, ssize); 603 want_v = hpte_encode_avpn(vpn, psize, ssize);
524 native_lock_hpte(hptep); 604 native_lock_hpte(hptep);
525 hpte_v = hptep->v; 605 hpte_v = hptep->v;
526 if (!HPTE_V_COMPARE(hpte_v, want_v) || 606 if (!HPTE_V_COMPARE(hpte_v, want_v) ||
@@ -540,7 +620,7 @@ static void native_flush_hash_range(unsigned long number, int local)
540 620
541 pte_iterate_hashed_subpages(pte, psize, 621 pte_iterate_hashed_subpages(pte, psize,
542 vpn, index, shift) { 622 vpn, index, shift) {
543 __tlbiel(vpn, psize, ssize); 623 __tlbiel(vpn, psize, psize, ssize);
544 } pte_iterate_hashed_end(); 624 } pte_iterate_hashed_end();
545 } 625 }
546 asm volatile("ptesync":::"memory"); 626 asm volatile("ptesync":::"memory");
@@ -557,7 +637,7 @@ static void native_flush_hash_range(unsigned long number, int local)
557 637
558 pte_iterate_hashed_subpages(pte, psize, 638 pte_iterate_hashed_subpages(pte, psize,
559 vpn, index, shift) { 639 vpn, index, shift) {
560 __tlbie(vpn, psize, ssize); 640 __tlbie(vpn, psize, psize, ssize);
561 } pte_iterate_hashed_end(); 641 } pte_iterate_hashed_end();
562 } 642 }
563 asm volatile("eieio; tlbsync; ptesync":::"memory"); 643 asm volatile("eieio; tlbsync; ptesync":::"memory");