author	Paul Mackerras <paulus@samba.org>	2007-10-11 06:37:10 -0400
committer	Paul Mackerras <paulus@samba.org>	2007-10-12 00:05:17 -0400
commit	1189be6508d45183013ddb82b18f4934193de274 (patch)
tree	58924481b4de56699e4a884dce8dc601e71cf7d1
parent	287e5d6fcccfa38b953cebe307e1ddfd32363355 (diff)
[POWERPC] Use 1TB segments
This makes the kernel use 1TB segments for all kernel mappings and for
user addresses of 1TB and above, on machines which support them
(currently POWER5+, POWER6 and PA6T).

We detect that the machine supports 1TB segments by looking at the
ibm,processor-segment-sizes property in the device tree.

We don't currently use 1TB segments for user addresses < 1T, since that
would effectively prevent 32-bit processes from using huge pages unless
we also had a way to revert to using 256MB segments.  That would be
possible but would involve extra complications (such as keeping track
of which segment size was used when HPTEs were inserted) and is not
addressed here.

Parts of this patch were originally written by Ben Herrenschmidt.

Signed-off-by: Paul Mackerras <paulus@samba.org>
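The user-address policy above comes down to a small segment-size check.
As a rough C sketch, assuming only what this message states (the real
helper, user_segment_size(), lands in include/asm-powerpc/page_64.h,
which appears in the diffstat below but not in this excerpt):

	/* Sketch only -- not the verbatim helper from page_64.h */
	static inline int user_segment_size(unsigned long addr)
	{
		/* user addresses of 1TB and above use 1TB segments, if supported */
		if (addr >= (1UL << 40))
			return mmu_highuser_ssize;	/* MMU_SEGSIZE_1T when detected */
		return MMU_SEGSIZE_256M;
	}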
-rw-r--r--	arch/powerpc/kernel/entry_64.S	14
-rw-r--r--	arch/powerpc/kernel/head_64.S	2
-rw-r--r--	arch/powerpc/kernel/process.c	9
-rw-r--r--	arch/powerpc/mm/hash_low_64.S	73
-rw-r--r--	arch/powerpc/mm/hash_native_64.c	92
-rw-r--r--	arch/powerpc/mm/hash_utils_64.c	114
-rw-r--r--	arch/powerpc/mm/hugetlbpage.c	13
-rw-r--r--	arch/powerpc/mm/pgtable_64.c	4
-rw-r--r--	arch/powerpc/mm/slb.c	67
-rw-r--r--	arch/powerpc/mm/slb_low.S	37
-rw-r--r--	arch/powerpc/mm/stab.c	6
-rw-r--r--	arch/powerpc/mm/tlb_64.c	20
-rw-r--r--	arch/powerpc/platforms/cell/spu_base.c	6
-rw-r--r--	arch/powerpc/platforms/cell/spufs/switch.c	2
-rw-r--r--	arch/powerpc/platforms/celleb/htab.c	34
-rw-r--r--	arch/powerpc/platforms/iseries/htab.c	13
-rw-r--r--	arch/powerpc/platforms/ps3/htab.c	14
-rw-r--r--	arch/powerpc/platforms/pseries/lpar.c	89
-rw-r--r--	include/asm-powerpc/cputable.h	3
-rw-r--r--	include/asm-powerpc/machdep.h	8
-rw-r--r--	include/asm-powerpc/mmu-hash64.h	140
-rw-r--r--	include/asm-powerpc/page_64.h	8
-rw-r--r--	include/asm-powerpc/tlbflush.h	3
23 files changed, 513 insertions(+), 258 deletions(-)
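Throughout the diff below, the 256M and 1T variants of the virtual
address and hash-table hash differ only in shift and mask constants.
A rough C rendering of what the assembly in hash_low_64.S computes (a
sketch derived from the instruction comments in the diff; the kernel's
actual hpt_va()/hpt_hash() definitions live in
include/asm-powerpc/mmu-hash64.h, which is in the diffstat but not
shown here):

	/* Sketch derived from the rldicr/sldi/xor sequences below */
	static unsigned long va_sketch(unsigned long ea, unsigned long vsid, int ssize)
	{
		if (ssize == MMU_SEGSIZE_256M)
			return (vsid << 28) | (ea & 0x0fffffffUL);
		return (vsid << 40) | (ea & 0xffffffffffUL);	/* 1T segment */
	}

	static unsigned long hash_sketch(unsigned long ea, unsigned long vsid,
					 int shift, int ssize)
	{
		if (ssize == MMU_SEGSIZE_256M)
			return (vsid & 0x7fffffffffUL) ^ ((ea & 0x0fffffffUL) >> shift);
		/* 1T: fold the low and shifted vsid bits, then mix in the page index */
		return (vsid & 0xffffffUL) ^ ((vsid << 25) & 0x7fffffffffUL) ^
		       ((ea & 0xffffffffffUL) >> shift);
	}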
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index fbbd3f6f0064..0ec134034899 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -373,8 +373,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 
 	ld	r8,KSP(r4)	/* new stack pointer */
 BEGIN_FTR_SECTION
+	b	2f
+END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
+BEGIN_FTR_SECTION
 	clrrdi	r6,r8,28	/* get its ESID */
 	clrrdi	r9,r1,28	/* get current sp ESID */
+END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
+BEGIN_FTR_SECTION
+	clrrdi	r6,r8,40	/* get its 1T ESID */
+	clrrdi	r9,r1,40	/* get current sp 1T ESID */
+END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
 	clrldi.	r0,r6,2		/* is new ESID c00000000? */
 	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
 	cror	eq,4*cr1+eq,eq
@@ -384,6 +392,11 @@ BEGIN_FTR_SECTION
 	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
 	oris	r0,r6,(SLB_ESID_V)@h
 	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
+BEGIN_FTR_SECTION
+	li	r9,MMU_SEGSIZE_1T	/* insert B field */
+	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
+	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
+END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
 
 	/* Update the last bolted SLB.  No write barriers are needed
 	 * here, provided we only update the current CPU's SLB shadow
@@ -401,7 +414,6 @@ BEGIN_FTR_SECTION
 	isync
 
 2:
-END_FTR_SECTION_IFSET(CPU_FTR_SLB)
 	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
 	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
 	   because we don't need to leave the 288-byte ABI gap at the
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 22ac245bd59a..97c5857faf00 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -935,7 +935,7 @@ _GLOBAL(do_stab_bolted)
 
 	/* Calculate VSID */
 	/* This is a kernel address, so protovsid = ESID */
-	ASM_VSID_SCRAMBLE(r11, r9)
+	ASM_VSID_SCRAMBLE(r11, r9, 256M)
 	rldic	r9,r11,12,16	/* r9 = vsid << 12 */
 
 	/* Search the primary group for a free entry */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 15998b57767c..7949c203cb89 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -564,10 +564,15 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 
 #ifdef CONFIG_PPC64
 	if (cpu_has_feature(CPU_FTR_SLB)) {
-		unsigned long sp_vsid = get_kernel_vsid(sp);
+		unsigned long sp_vsid;
 		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
 
-		sp_vsid <<= SLB_VSID_SHIFT;
+		if (cpu_has_feature(CPU_FTR_1T_SEGMENT))
+			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
+				<< SLB_VSID_SHIFT_1T;
+		else
+			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
+				<< SLB_VSID_SHIFT;
 		sp_vsid |= SLB_VSID_KERNEL | llp;
 		p->thread.ksp_vsid = sp_vsid;
 	}
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 35eabfb50723..ad253b959030 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -54,7 +54,7 @@
 
 /*
  * _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
- *		 pte_t *ptep, unsigned long trap, int local)
+ *		 pte_t *ptep, unsigned long trap, int local, int ssize)
  *
  * Adds a 4K page to the hash table in a segment of 4K pages only
  */
@@ -66,6 +66,7 @@ _GLOBAL(__hash_page_4K)
 	/* Save all params that we need after a function call */
 	std	r6,STK_PARM(r6)(r1)
 	std	r8,STK_PARM(r8)(r1)
+	std	r9,STK_PARM(r9)(r1)
 
 	/* Add _PAGE_PRESENT to access */
 	ori	r4,r4,_PAGE_PRESENT
@@ -117,6 +118,10 @@ _GLOBAL(__hash_page_4K)
 	 * r4 (access) is re-useable, we use it for the new HPTE flags
 	 */
 
+BEGIN_FTR_SECTION
+	cmpdi	r9,0			/* check segment size */
+	bne	3f
+END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
 	/* Calc va and put it in r29 */
 	rldicr	r29,r5,28,63-28
 	rldicl	r3,r3,0,36
@@ -126,9 +131,20 @@ _GLOBAL(__hash_page_4K)
 	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
 	rldicl	r0,r3,64-12,48		/* (ea >> 12) & 0xffff */
 	xor	r28,r5,r0
+	b	4f
+
+3:	/* Calc VA and hash in r29 and r28 for 1T segment */
+	sldi	r29,r5,40		/* vsid << 40 */
+	clrldi	r3,r3,24		/* ea & 0xffffffffff */
+	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */
+	clrldi	r5,r5,40		/* vsid & 0xffffff */
+	rldicl	r0,r3,64-12,36		/* (ea >> 12) & 0xfffffff */
+	xor	r28,r28,r5
+	or	r29,r3,r29		/* VA */
+	xor	r28,r28,r0		/* hash */
 
 	/* Convert linux PTE bits into HW equivalents */
-	andi.	r3,r30,0x1fe		/* Get basic set of flags */
+4:	andi.	r3,r30,0x1fe		/* Get basic set of flags */
 	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
 	rlwinm	r0,r30,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
 	rlwinm	r4,r30,32-7+1,30,30	/* _PAGE_DIRTY -> _PAGE_USER (r4) */
@@ -183,6 +199,7 @@ htab_insert_pte:
 	mr	r4,r29			/* Retreive va */
 	li	r7,0			/* !bolted, !secondary */
 	li	r8,MMU_PAGE_4K		/* page size */
+	ld	r9,STK_PARM(r9)(r1)	/* segment size */
 _GLOBAL(htab_call_hpte_insert1)
 	bl	.			/* Patched by htab_finish_init() */
 	cmpdi	0,r3,0
@@ -205,6 +222,7 @@ _GLOBAL(htab_call_hpte_insert1)
 	mr	r4,r29			/* Retreive va */
 	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
 	li	r8,MMU_PAGE_4K		/* page size */
+	ld	r9,STK_PARM(r9)(r1)	/* segment size */
 _GLOBAL(htab_call_hpte_insert2)
 	bl	.			/* Patched by htab_finish_init() */
 	cmpdi	0,r3,0
@@ -273,7 +291,8 @@ htab_modify_pte:
 	/* Call ppc_md.hpte_updatepp */
 	mr	r5,r29			/* va */
 	li	r6,MMU_PAGE_4K		/* page size */
-	ld	r7,STK_PARM(r8)(r1)	/* get "local" param */
+	ld	r7,STK_PARM(r9)(r1)	/* segment size */
+	ld	r8,STK_PARM(r8)(r1)	/* get "local" param */
 _GLOBAL(htab_call_hpte_updatepp)
 	bl	.			/* Patched by htab_finish_init() */
 
@@ -325,6 +344,7 @@ _GLOBAL(__hash_page_4K)
 	/* Save all params that we need after a function call */
 	std	r6,STK_PARM(r6)(r1)
 	std	r8,STK_PARM(r8)(r1)
+	std	r9,STK_PARM(r9)(r1)
 
 	/* Add _PAGE_PRESENT to access */
 	ori	r4,r4,_PAGE_PRESENT
@@ -383,18 +403,33 @@ _GLOBAL(__hash_page_4K)
 	/* Load the hidx index */
 	rldicl	r25,r3,64-12,60
 
+BEGIN_FTR_SECTION
+	cmpdi	r9,0			/* check segment size */
+	bne	3f
+END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
 	/* Calc va and put it in r29 */
 	rldicr	r29,r5,28,63-28		/* r29 = (vsid << 28) */
 	rldicl	r3,r3,0,36		/* r3 = (ea & 0x0fffffff) */
-	or	r29,r3,r29		/* r29 = va
+	or	r29,r3,r29		/* r29 = va */
 
 	/* Calculate hash value for primary slot and store it in r28 */
 	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
 	rldicl	r0,r3,64-12,48		/* (ea >> 12) & 0xffff */
 	xor	r28,r5,r0
+	b	4f
+
+3:	/* Calc VA and hash in r29 and r28 for 1T segment */
+	sldi	r29,r5,40		/* vsid << 40 */
+	clrldi	r3,r3,24		/* ea & 0xffffffffff */
+	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */
+	clrldi	r5,r5,40		/* vsid & 0xffffff */
+	rldicl	r0,r3,64-12,36		/* (ea >> 12) & 0xfffffff */
+	xor	r28,r28,r5
+	or	r29,r3,r29		/* VA */
+	xor	r28,r28,r0		/* hash */
 
 	/* Convert linux PTE bits into HW equivalents */
-	andi.	r3,r30,0x1fe		/* Get basic set of flags */
+4:	andi.	r3,r30,0x1fe		/* Get basic set of flags */
 	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
 	rlwinm	r0,r30,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
 	rlwinm	r4,r30,32-7+1,30,30	/* _PAGE_DIRTY -> _PAGE_USER (r4) */
@@ -462,6 +497,7 @@ htab_special_pfn:
 	mr	r4,r29			/* Retreive va */
 	li	r7,0			/* !bolted, !secondary */
 	li	r8,MMU_PAGE_4K		/* page size */
+	ld	r9,STK_PARM(r9)(r1)	/* segment size */
 _GLOBAL(htab_call_hpte_insert1)
 	bl	.			/* patched by htab_finish_init() */
 	cmpdi	0,r3,0
@@ -488,6 +524,7 @@ _GLOBAL(htab_call_hpte_insert1)
 	mr	r4,r29			/* Retreive va */
 	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
 	li	r8,MMU_PAGE_4K		/* page size */
+	ld	r9,STK_PARM(r9)(r1)	/* segment size */
 _GLOBAL(htab_call_hpte_insert2)
 	bl	.			/* patched by htab_finish_init() */
 	cmpdi	0,r3,0
@@ -586,7 +623,8 @@ htab_modify_pte:
 	/* Call ppc_md.hpte_updatepp */
 	mr	r5,r29			/* va */
 	li	r6,MMU_PAGE_4K		/* page size */
-	ld	r7,STK_PARM(r8)(r1)	/* get "local" param */
+	ld	r7,STK_PARM(r9)(r1)	/* segment size */
+	ld	r8,STK_PARM(r8)(r1)	/* get "local" param */
 _GLOBAL(htab_call_hpte_updatepp)
 	bl	.			/* patched by htab_finish_init() */
 
@@ -634,6 +672,7 @@ _GLOBAL(__hash_page_64K)
 	/* Save all params that we need after a function call */
 	std	r6,STK_PARM(r6)(r1)
 	std	r8,STK_PARM(r8)(r1)
+	std	r9,STK_PARM(r9)(r1)
 
 	/* Add _PAGE_PRESENT to access */
 	ori	r4,r4,_PAGE_PRESENT
@@ -690,6 +729,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
 	 * r4 (access) is re-useable, we use it for the new HPTE flags
 	 */
 
+BEGIN_FTR_SECTION
+	cmpdi	r9,0			/* check segment size */
+	bne	3f
+END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
 	/* Calc va and put it in r29 */
 	rldicr	r29,r5,28,63-28
 	rldicl	r3,r3,0,36
@@ -699,9 +742,20 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
 	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
 	rldicl	r0,r3,64-16,52		/* (ea >> 16) & 0xfff */
 	xor	r28,r5,r0
+	b	4f
+
+3:	/* Calc VA and hash in r29 and r28 for 1T segment */
+	sldi	r29,r5,40		/* vsid << 40 */
+	clrldi	r3,r3,24		/* ea & 0xffffffffff */
+	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */
+	clrldi	r5,r5,40		/* vsid & 0xffffff */
+	rldicl	r0,r3,64-16,40		/* (ea >> 16) & 0xffffff */
+	xor	r28,r28,r5
+	or	r29,r3,r29		/* VA */
+	xor	r28,r28,r0		/* hash */
 
 	/* Convert linux PTE bits into HW equivalents */
-	andi.	r3,r30,0x1fe		/* Get basic set of flags */
+4:	andi.	r3,r30,0x1fe		/* Get basic set of flags */
 	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
 	rlwinm	r0,r30,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
 	rlwinm	r4,r30,32-7+1,30,30	/* _PAGE_DIRTY -> _PAGE_USER (r4) */
@@ -756,6 +810,7 @@ ht64_insert_pte:
 	mr	r4,r29			/* Retreive va */
 	li	r7,0			/* !bolted, !secondary */
 	li	r8,MMU_PAGE_64K
+	ld	r9,STK_PARM(r9)(r1)	/* segment size */
 _GLOBAL(ht64_call_hpte_insert1)
 	bl	.			/* patched by htab_finish_init() */
 	cmpdi	0,r3,0
@@ -778,6 +833,7 @@ _GLOBAL(ht64_call_hpte_insert1)
 	mr	r4,r29			/* Retreive va */
 	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
 	li	r8,MMU_PAGE_64K
+	ld	r9,STK_PARM(r9)(r1)	/* segment size */
 _GLOBAL(ht64_call_hpte_insert2)
 	bl	.			/* patched by htab_finish_init() */
 	cmpdi	0,r3,0
@@ -846,7 +902,8 @@ ht64_modify_pte:
 	/* Call ppc_md.hpte_updatepp */
 	mr	r5,r29			/* va */
 	li	r6,MMU_PAGE_64K
-	ld	r7,STK_PARM(r8)(r1)	/* get "local" param */
+	ld	r7,STK_PARM(r9)(r1)	/* segment size */
+	ld	r8,STK_PARM(r8)(r1)	/* get "local" param */
 _GLOBAL(ht64_call_hpte_updatepp)
 	bl	.			/* patched by htab_finish_init() */
 
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 6ba9b47e55af..34e5c0b219b9 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -38,7 +38,7 @@
 
 static DEFINE_SPINLOCK(native_tlbie_lock);
 
-static inline void __tlbie(unsigned long va, unsigned int psize)
+static inline void __tlbie(unsigned long va, int psize, int ssize)
 {
 	unsigned int penc;
 
@@ -48,18 +48,20 @@ static inline void __tlbie(unsigned long va, unsigned int psize)
 	switch (psize) {
 	case MMU_PAGE_4K:
 		va &= ~0xffful;
+		va |= ssize << 8;
 		asm volatile("tlbie %0,0" : : "r" (va) : "memory");
 		break;
 	default:
 		penc = mmu_psize_defs[psize].penc;
 		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
 		va |= penc << 12;
+		va |= ssize << 8;
 		asm volatile("tlbie %0,1" : : "r" (va) : "memory");
 		break;
 	}
 }
 
-static inline void __tlbiel(unsigned long va, unsigned int psize)
+static inline void __tlbiel(unsigned long va, int psize, int ssize)
 {
 	unsigned int penc;
 
@@ -69,6 +71,7 @@ static inline void __tlbiel(unsigned long va, unsigned int psize)
 	switch (psize) {
 	case MMU_PAGE_4K:
 		va &= ~0xffful;
+		va |= ssize << 8;
 		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
 			     : : "r"(va) : "memory");
 		break;
@@ -76,6 +79,7 @@ static inline void __tlbiel(unsigned long va, unsigned int psize)
 		penc = mmu_psize_defs[psize].penc;
 		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
 		va |= penc << 12;
+		va |= ssize << 8;
 		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
 			     : : "r"(va) : "memory");
 		break;
@@ -83,7 +87,7 @@ static inline void __tlbiel(unsigned long va, unsigned int psize)
 
 }
 
-static inline void tlbie(unsigned long va, int psize, int local)
+static inline void tlbie(unsigned long va, int psize, int ssize, int local)
 {
 	unsigned int use_local = local && cpu_has_feature(CPU_FTR_TLBIEL);
 	int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
@@ -94,10 +98,10 @@ static inline void tlbie(unsigned long va, int psize, int local)
 		spin_lock(&native_tlbie_lock);
 	asm volatile("ptesync": : :"memory");
 	if (use_local) {
-		__tlbiel(va, psize);
+		__tlbiel(va, psize, ssize);
 		asm volatile("ptesync": : :"memory");
 	} else {
-		__tlbie(va, psize);
+		__tlbie(va, psize, ssize);
 		asm volatile("eieio; tlbsync; ptesync": : :"memory");
 	}
 	if (lock_tlbie && !use_local)
@@ -126,7 +130,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
 
 static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 			unsigned long pa, unsigned long rflags,
-			unsigned long vflags, int psize)
+			unsigned long vflags, int psize, int ssize)
 {
 	struct hash_pte *hptep = htab_address + hpte_group;
 	unsigned long hpte_v, hpte_r;
@@ -153,7 +157,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 	if (i == HPTES_PER_GROUP)
 		return -1;
 
-	hpte_v = hpte_encode_v(va, psize) | vflags | HPTE_V_VALID;
+	hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
 	hpte_r = hpte_encode_r(pa, psize) | rflags;
 
 	if (!(vflags & HPTE_V_BOLTED)) {
@@ -215,13 +219,14 @@ static long native_hpte_remove(unsigned long hpte_group)
 }
 
 static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
-				 unsigned long va, int psize, int local)
+				 unsigned long va, int psize, int ssize,
+				 int local)
 {
 	struct hash_pte *hptep = htab_address + slot;
 	unsigned long hpte_v, want_v;
 	int ret = 0;
 
-	want_v = hpte_encode_v(va, psize);
+	want_v = hpte_encode_v(va, psize, ssize);
 
 	DBG_LOW("    update(va=%016lx, avpnv=%016lx, hash=%016lx, newpp=%x)",
 		va, want_v & HPTE_V_AVPN, slot, newpp);
@@ -243,39 +248,32 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	native_unlock_hpte(hptep);
 
 	/* Ensure it is out of the tlb too. */
-	tlbie(va, psize, local);
+	tlbie(va, psize, ssize, local);
 
 	return ret;
 }
 
-static long native_hpte_find(unsigned long va, int psize)
+static long native_hpte_find(unsigned long va, int psize, int ssize)
 {
 	struct hash_pte *hptep;
 	unsigned long hash;
-	unsigned long i, j;
+	unsigned long i;
 	long slot;
 	unsigned long want_v, hpte_v;
 
-	hash = hpt_hash(va, mmu_psize_defs[psize].shift);
-	want_v = hpte_encode_v(va, psize);
+	hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize);
+	want_v = hpte_encode_v(va, psize, ssize);
 
-	for (j = 0; j < 2; j++) {
-		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
-		for (i = 0; i < HPTES_PER_GROUP; i++) {
-			hptep = htab_address + slot;
-			hpte_v = hptep->v;
+	/* Bolted mappings are only ever in the primary group */
+	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+	for (i = 0; i < HPTES_PER_GROUP; i++) {
+		hptep = htab_address + slot;
+		hpte_v = hptep->v;
 
-			if (HPTE_V_COMPARE(hpte_v, want_v)
-			    && (hpte_v & HPTE_V_VALID)
-			    && ( !!(hpte_v & HPTE_V_SECONDARY) == j)) {
-				/* HPTE matches */
-				if (j)
-					slot = -slot;
-				return slot;
-			}
-			++slot;
-		}
-		hash = ~hash;
+		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
+			/* HPTE matches */
+			return slot;
+		++slot;
 	}
 
 	return -1;
@@ -289,16 +287,16 @@ static long native_hpte_find(unsigned long va, int psize)
  * No need to lock here because we should be the only user.
  */
 static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
-				       int psize)
+				       int psize, int ssize)
 {
 	unsigned long vsid, va;
 	long slot;
 	struct hash_pte *hptep;
 
-	vsid = get_kernel_vsid(ea);
-	va = (vsid << 28) | (ea & 0x0fffffff);
+	vsid = get_kernel_vsid(ea, ssize);
+	va = hpt_va(ea, vsid, ssize);
 
-	slot = native_hpte_find(va, psize);
+	slot = native_hpte_find(va, psize, ssize);
 	if (slot == -1)
 		panic("could not find page to bolt\n");
 	hptep = htab_address + slot;
@@ -308,11 +306,11 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 		(newpp & (HPTE_R_PP | HPTE_R_N));
 
 	/* Ensure it is out of the tlb too. */
-	tlbie(va, psize, 0);
+	tlbie(va, psize, ssize, 0);
 }
 
 static void native_hpte_invalidate(unsigned long slot, unsigned long va,
-				   int psize, int local)
+				   int psize, int ssize, int local)
 {
 	struct hash_pte *hptep = htab_address + slot;
 	unsigned long hpte_v;
@@ -323,7 +321,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 
 	DBG_LOW("    invalidate(va=%016lx, hash: %x)\n", va, slot);
 
-	want_v = hpte_encode_v(va, psize);
+	want_v = hpte_encode_v(va, psize, ssize);
 	native_lock_hpte(hptep);
 	hpte_v = hptep->v;
 
@@ -335,7 +333,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 		hptep->v = 0;
 
 	/* Invalidate the TLB */
-	tlbie(va, psize, local);
+	tlbie(va, psize, ssize, local);
 
 	local_irq_restore(flags);
 }
@@ -345,7 +343,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 #define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
 
 static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
-			int *psize, unsigned long *va)
+			int *psize, int *ssize, unsigned long *va)
 {
 	unsigned long hpte_r = hpte->r;
 	unsigned long hpte_v = hpte->v;
@@ -401,6 +399,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
 
 	*va = avpn;
 	*psize = size;
+	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
 }
 
 /*
@@ -417,7 +416,7 @@ static void native_hpte_clear(void)
 	struct hash_pte *hptep = htab_address;
 	unsigned long hpte_v, va;
 	unsigned long pteg_count;
-	int psize;
+	int psize, ssize;
 
 	pteg_count = htab_hash_mask + 1;
 
@@ -443,9 +442,9 @@ static void native_hpte_clear(void)
 		 * already hold the native_tlbie_lock.
 		 */
 		if (hpte_v & HPTE_V_VALID) {
-			hpte_decode(hptep, slot, &psize, &va);
+			hpte_decode(hptep, slot, &psize, &ssize, &va);
 			hptep->v = 0;
-			__tlbie(va, psize);
+			__tlbie(va, psize, ssize);
 		}
 	}
 
@@ -468,6 +467,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 	real_pte_t pte;
 	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 	unsigned long psize = batch->psize;
+	int ssize = batch->ssize;
 	int i;
 
 	local_irq_save(flags);
@@ -477,14 +477,14 @@ static void native_flush_hash_range(unsigned long number, int local)
 		pte = batch->pte[i];
 
 		pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
-			hash = hpt_hash(va, shift);
+			hash = hpt_hash(va, shift, ssize);
 			hidx = __rpte_to_hidx(pte, index);
 			if (hidx & _PTEIDX_SECONDARY)
 				hash = ~hash;
 			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 			slot += hidx & _PTEIDX_GROUP_IX;
 			hptep = htab_address + slot;
-			want_v = hpte_encode_v(va, psize);
+			want_v = hpte_encode_v(va, psize, ssize);
 			native_lock_hpte(hptep);
 			hpte_v = hptep->v;
 			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
@@ -504,7 +504,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 
 			pte_iterate_hashed_subpages(pte, psize, va, index,
 						    shift) {
-				__tlbiel(va, psize);
+				__tlbiel(va, psize, ssize);
 			} pte_iterate_hashed_end();
 		}
 		asm volatile("ptesync":::"memory");
@@ -521,7 +521,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 
 			pte_iterate_hashed_subpages(pte, psize, va, index,
 						    shift) {
-				__tlbie(va, psize);
+				__tlbie(va, psize, ssize);
 			} pte_iterate_hashed_end();
 		}
 		asm volatile("eieio; tlbsync; ptesync":::"memory");
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index d525f2eba313..611ad084b7e7 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -93,6 +93,8 @@ int mmu_linear_psize = MMU_PAGE_4K;
 int mmu_virtual_psize = MMU_PAGE_4K;
 int mmu_vmalloc_psize = MMU_PAGE_4K;
 int mmu_io_psize = MMU_PAGE_4K;
+int mmu_kernel_ssize = MMU_SEGSIZE_256M;
+int mmu_highuser_ssize = MMU_SEGSIZE_256M;
 #ifdef CONFIG_HUGETLB_PAGE
 int mmu_huge_psize = MMU_PAGE_16M;
 unsigned int HPAGE_SHIFT;
@@ -145,7 +147,8 @@ struct mmu_psize_def mmu_psize_defaults_gp[] = {
 
 
 int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
-		      unsigned long pstart, unsigned long mode, int psize)
+		      unsigned long pstart, unsigned long mode,
+		      int psize, int ssize)
 {
 	unsigned long vaddr, paddr;
 	unsigned int step, shift;
@@ -158,8 +161,8 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 	for (vaddr = vstart, paddr = pstart; vaddr < vend;
 	     vaddr += step, paddr += step) {
 		unsigned long hash, hpteg;
-		unsigned long vsid = get_kernel_vsid(vaddr);
-		unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
+		unsigned long vsid = get_kernel_vsid(vaddr, ssize);
+		unsigned long va = hpt_va(vaddr, vsid, ssize);
 
 		tmp_mode = mode;
 
@@ -167,14 +170,14 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 		if (!in_kernel_text(vaddr))
 			tmp_mode = mode | HPTE_R_N;
 
-		hash = hpt_hash(va, shift);
+		hash = hpt_hash(va, shift, ssize);
 		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
 		DBG("htab_bolt_mapping: calling %p\n", ppc_md.hpte_insert);
 
 		BUG_ON(!ppc_md.hpte_insert);
 		ret = ppc_md.hpte_insert(hpteg, va, paddr,
-				tmp_mode, HPTE_V_BOLTED, psize);
+				tmp_mode, HPTE_V_BOLTED, psize, ssize);
 
 		if (ret < 0)
 			break;
@@ -186,6 +189,37 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 	return ret < 0 ? ret : 0;
 }
 
+static int __init htab_dt_scan_seg_sizes(unsigned long node,
+					 const char *uname, int depth,
+					 void *data)
+{
+	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+	u32 *prop;
+	unsigned long size = 0;
+
+	/* We are scanning "cpu" nodes only */
+	if (type == NULL || strcmp(type, "cpu") != 0)
+		return 0;
+
+	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes",
+					  &size);
+	if (prop == NULL)
+		return 0;
+	for (; size >= 4; size -= 4, ++prop) {
+		if (prop[0] == 40) {
+			DBG("1T segment support detected\n");
+			cur_cpu_spec->cpu_features |= CPU_FTR_1T_SEGMENT;
+		}
+		return 1;
+	}
+	return 0;
+}
+
+static void __init htab_init_seg_sizes(void)
+{
+	of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
+}
+
 static int __init htab_dt_scan_page_sizes(unsigned long node,
 					  const char *uname, int depth,
 					  void *data)
@@ -265,7 +299,6 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
 	return 0;
 }
 
-
 static void __init htab_init_page_sizes(void)
 {
 	int rc;
@@ -398,7 +431,7 @@ void create_section_mapping(unsigned long start, unsigned long end)
 {
 	BUG_ON(htab_bolt_mapping(start, end, __pa(start),
 			_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
-			mmu_linear_psize));
+			mmu_linear_psize, mmu_kernel_ssize));
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
@@ -449,9 +482,18 @@ void __init htab_initialize(void)
 
 	DBG(" -> htab_initialize()\n");
 
+	/* Initialize segment sizes */
+	htab_init_seg_sizes();
+
 	/* Initialize page sizes */
 	htab_init_page_sizes();
 
+	if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
+		mmu_kernel_ssize = MMU_SEGSIZE_1T;
+		mmu_highuser_ssize = MMU_SEGSIZE_1T;
+		printk(KERN_INFO "Using 1TB segments\n");
+	}
+
 	/*
 	 * Calculate the required size of the htab.  We want the number of
 	 * PTEGs to equal one half the number of real pages.
@@ -523,18 +565,20 @@ void __init htab_initialize(void)
 			if (base != dart_tablebase)
 				BUG_ON(htab_bolt_mapping(base, dart_tablebase,
 							__pa(base), mode_rw,
-							mmu_linear_psize));
+							mmu_linear_psize,
+							mmu_kernel_ssize));
 			if ((base + size) > dart_table_end)
 				BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
 							base + size,
 							__pa(dart_table_end),
 							mode_rw,
-							mmu_linear_psize));
+							mmu_linear_psize,
+							mmu_kernel_ssize));
 			continue;
 		}
 #endif /* CONFIG_U3_DART */
 		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
-				mode_rw, mmu_linear_psize));
+				mode_rw, mmu_linear_psize, mmu_kernel_ssize));
 	}
 
 	/*
@@ -553,7 +597,7 @@ void __init htab_initialize(void)
 
 		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
 					 __pa(tce_alloc_start), mode_rw,
-					 mmu_linear_psize));
+					 mmu_linear_psize, mmu_kernel_ssize));
 	}
 
 	htab_finish_init();
@@ -621,7 +665,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	pte_t *ptep;
 	cpumask_t tmp;
 	int rc, user_region = 0, local = 0;
-	int psize;
+	int psize, ssize;
 
 	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
 		ea, access, trap);
@@ -640,20 +684,22 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 			DBG_LOW(" user region with no mm !\n");
 			return 1;
 		}
-		vsid = get_vsid(mm->context.id, ea);
 #ifdef CONFIG_PPC_MM_SLICES
 		psize = get_slice_psize(mm, ea);
 #else
 		psize = mm->context.user_psize;
 #endif
+		ssize = user_segment_size(ea);
+		vsid = get_vsid(mm->context.id, ea, ssize);
 		break;
 	case VMALLOC_REGION_ID:
 		mm = &init_mm;
-		vsid = get_kernel_vsid(ea);
+		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
 		if (ea < VMALLOC_END)
 			psize = mmu_vmalloc_psize;
 		else
 			psize = mmu_io_psize;
+		ssize = mmu_kernel_ssize;
 		break;
 	default:
 		/* Not a valid range
@@ -758,10 +804,10 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 
 #ifdef CONFIG_PPC_HAS_HASH_64K
 	if (psize == MMU_PAGE_64K)
-		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
+		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
 	else
 #endif /* CONFIG_PPC_HAS_HASH_64K */
-		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
+		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize);
 
 #ifndef CONFIG_PPC_64K_PAGES
 	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
@@ -783,6 +829,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	cpumask_t mask;
 	unsigned long flags;
 	int local = 0;
+	int ssize;
 
 	BUG_ON(REGION_ID(ea) != USER_REGION_ID);
 
@@ -815,7 +862,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 #endif /* CONFIG_PPC_64K_PAGES */
 
 	/* Get VSID */
-	vsid = get_vsid(mm->context.id, ea);
+	ssize = user_segment_size(ea);
+	vsid = get_vsid(mm->context.id, ea, ssize);
 
 	/* Hash doesn't like irqs */
 	local_irq_save(flags);
@@ -828,28 +876,29 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	/* Hash it in */
 #ifdef CONFIG_PPC_HAS_HASH_64K
 	if (mm->context.user_psize == MMU_PAGE_64K)
-		__hash_page_64K(ea, access, vsid, ptep, trap, local);
+		__hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
 	else
 #endif /* CONFIG_PPC_HAS_HASH_64K */
-		__hash_page_4K(ea, access, vsid, ptep, trap, local);
+		__hash_page_4K(ea, access, vsid, ptep, trap, local, ssize);
 
 	local_irq_restore(flags);
 }
 
-void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int local)
+void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
+		     int local)
 {
 	unsigned long hash, index, shift, hidx, slot;
 
 	DBG_LOW("flush_hash_page(va=%016x)\n", va);
 	pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
-		hash = hpt_hash(va, shift);
+		hash = hpt_hash(va, shift, ssize);
 		hidx = __rpte_to_hidx(pte, index);
 		if (hidx & _PTEIDX_SECONDARY)
 			hash = ~hash;
 		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 		slot += hidx & _PTEIDX_GROUP_IX;
 		DBG_LOW(" sub %d: hash=%x, hidx=%x\n", index, slot, hidx);
-		ppc_md.hpte_invalidate(slot, va, psize, local);
+		ppc_md.hpte_invalidate(slot, va, psize, ssize, local);
 	} pte_iterate_hashed_end();
 }
 
@@ -864,7 +913,7 @@ void flush_hash_range(unsigned long number, int local)
 
 		for (i = 0; i < number; i++)
 			flush_hash_page(batch->vaddr[i], batch->pte[i],
-					batch->psize, local);
+					batch->psize, batch->ssize, local);
 	}
 }
 
@@ -890,17 +939,19 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address)
 #ifdef CONFIG_DEBUG_PAGEALLOC
 static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 {
-	unsigned long hash, hpteg, vsid = get_kernel_vsid(vaddr);
-	unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
+	unsigned long hash, hpteg;
+	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
+	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
 	unsigned long mode = _PAGE_ACCESSED | _PAGE_DIRTY |
 		_PAGE_COHERENT | PP_RWXX | HPTE_R_N;
 	int ret;
 
-	hash = hpt_hash(va, PAGE_SHIFT);
+	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
 	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
 	ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
-				 mode, HPTE_V_BOLTED, mmu_linear_psize);
+				 mode, HPTE_V_BOLTED,
+				 mmu_linear_psize, mmu_kernel_ssize);
 	BUG_ON (ret < 0);
 	spin_lock(&linear_map_hash_lock);
 	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
@@ -910,10 +961,11 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 
 static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 {
-	unsigned long hash, hidx, slot, vsid = get_kernel_vsid(vaddr);
-	unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
+	unsigned long hash, hidx, slot;
+	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
+	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
 
-	hash = hpt_hash(va, PAGE_SHIFT);
+	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
 	spin_lock(&linear_map_hash_lock);
 	BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
 	hidx = linear_map_hash_slots[lmi] & 0x7f;
@@ -923,7 +975,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 		hash = ~hash;
 	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 	slot += hidx & _PTEIDX_GROUP_IX;
-	ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, 0);
+	ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0);
 }
 
 void kernel_map_pages(struct page *page, int numpages, int enable)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index ba5f12a60467..08f0d9ff7712 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -403,11 +403,12 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 	unsigned long va, rflags, pa;
 	long slot;
 	int err = 1;
+	int ssize = user_segment_size(ea);
 
 	ptep = huge_pte_offset(mm, ea);
 
 	/* Search the Linux page table for a match with va */
-	va = (vsid << 28) | (ea & 0x0fffffff);
+	va = hpt_va(ea, vsid, ssize);
 
 	/*
 	 * If no pte found or not present, send the problem up to
@@ -458,19 +459,19 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 		/* There MIGHT be an HPTE for this pte */
 		unsigned long hash, slot;
 
-		hash = hpt_hash(va, HPAGE_SHIFT);
+		hash = hpt_hash(va, HPAGE_SHIFT, ssize);
 		if (old_pte & _PAGE_F_SECOND)
 			hash = ~hash;
 		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 		slot += (old_pte & _PAGE_F_GIX) >> 12;
 
 		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
-					 local) == -1)
+					 ssize, local) == -1)
 			old_pte &= ~_PAGE_HPTEFLAGS;
 	}
 
 	if (likely(!(old_pte & _PAGE_HASHPTE))) {
-		unsigned long hash = hpt_hash(va, HPAGE_SHIFT);
+		unsigned long hash = hpt_hash(va, HPAGE_SHIFT, ssize);
 		unsigned long hpte_group;
 
 		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
@@ -489,7 +490,7 @@ repeat:
 
 		/* Insert into the hash table, primary slot */
 		slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
-					  mmu_huge_psize);
+					  mmu_huge_psize, ssize);
 
 		/* Primary is full, try the secondary */
 		if (unlikely(slot == -1)) {
@@ -497,7 +498,7 @@ repeat:
 				      HPTES_PER_GROUP) & ~0x7UL;
 			slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
 						  HPTE_V_SECONDARY,
-						  mmu_huge_psize);
+						  mmu_huge_psize, ssize);
 			if (slot == -1) {
 				if (mftb() & 0x1)
 					hpte_group = ((hash & htab_hash_mask) *
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 60fd52cd270f..3ef0ad2f9ca0 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -87,8 +87,8 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
 	 * entry in the hardware page table.
 	 *
 	 */
-	if (htab_bolt_mapping(ea, (unsigned long)ea + PAGE_SIZE,
-			      pa, flags, mmu_io_psize)) {
+	if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
+			      mmu_io_psize, mmu_kernel_ssize)) {
 		printk(KERN_ERR "Failed to do bolted mapping IO "
 		       "memory at %016lx !\n", pa);
 		return -ENOMEM;
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 4bee1cfa9dea..6c164cec9d2c 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -43,17 +43,26 @@ static void slb_allocate(unsigned long ea)
 	slb_allocate_realmode(ea);
 }
 
-static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
+static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
+					 unsigned long slot)
 {
-	return (ea & ESID_MASK) | SLB_ESID_V | slot;
+	unsigned long mask;
+
+	mask = (ssize == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T;
+	return (ea & mask) | SLB_ESID_V | slot;
 }
 
-static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
+#define slb_vsid_shift(ssize)	\
+	((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T)
+
+static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
+					 unsigned long flags)
 {
-	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
+	return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
+		((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
 }
 
-static inline void slb_shadow_update(unsigned long ea,
+static inline void slb_shadow_update(unsigned long ea, int ssize,
 				     unsigned long flags,
 				     unsigned long entry)
 {
@@ -63,8 +72,8 @@ static inline void slb_shadow_update(unsigned long ea,
 	 * we only update the current CPU's SLB shadow buffer.
 	 */
 	get_slb_shadow()->save_area[entry].esid = 0;
-	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
-	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
+	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, ssize, flags);
+	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, ssize, entry);
 }
 
 static inline void slb_shadow_clear(unsigned long entry)
@@ -72,7 +81,8 @@ static inline void slb_shadow_clear(unsigned long entry)
 	get_slb_shadow()->save_area[entry].esid = 0;
 }
 
-static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
+static inline void create_shadowed_slbe(unsigned long ea, int ssize,
+					unsigned long flags,
 					unsigned long entry)
 {
 	/*
@@ -80,11 +90,11 @@ static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
 	 * we don't get a stale entry here if we get preempted by PHYP
 	 * between these two statements.
 	 */
-	slb_shadow_update(ea, flags, entry);
+	slb_shadow_update(ea, ssize, flags, entry);
 
 	asm volatile("slbmte  %0,%1" :
-		     : "r" (mk_vsid_data(ea, flags)),
-		       "r" (mk_esid_data(ea, entry))
+		     : "r" (mk_vsid_data(ea, ssize, flags)),
+		       "r" (mk_esid_data(ea, ssize, entry))
 		     : "memory" );
 }
 
@@ -93,7 +103,7 @@ void slb_flush_and_rebolt(void)
 	/* If you change this make sure you change SLB_NUM_BOLTED
 	 * appropriately too. */
 	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
-	unsigned long ksp_esid_data;
+	unsigned long ksp_esid_data, ksp_vsid_data;
 
 	WARN_ON(!irqs_disabled());
 
@@ -102,13 +112,15 @@ void slb_flush_and_rebolt(void)
 	lflags = SLB_VSID_KERNEL | linear_llp;
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
-	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
-	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET) {
+	ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2);
+	if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
 		ksp_esid_data &= ~SLB_ESID_V;
+		ksp_vsid_data = 0;
 		slb_shadow_clear(2);
 	} else {
 		/* Update stack entry; others don't change */
-		slb_shadow_update(get_paca()->kstack, lflags, 2);
+		slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
+		ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
 	}
 
 	/* We need to do this all in asm, so we're sure we don't touch
@@ -120,9 +132,9 @@ void slb_flush_and_rebolt(void)
 		     /* Slot 2 - kernel stack */
 		     "slbmte	%2,%3\n"
 		     "isync"
-		     :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
-		        "r"(mk_esid_data(VMALLOC_START, 1)),
-		        "r"(mk_vsid_data(ksp_esid_data, lflags)),
+		     :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
+		        "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, 1)),
+		        "r"(ksp_vsid_data),
 		        "r"(ksp_esid_data)
 		     : "memory");
 }
@@ -132,7 +144,7 @@ void slb_vmalloc_update(void)
132 unsigned long vflags; 144 unsigned long vflags;
133 145
134 vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp; 146 vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
135 slb_shadow_update(VMALLOC_START, vflags, 1); 147 slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
136 slb_flush_and_rebolt(); 148 slb_flush_and_rebolt();
137} 149}
138 150
@@ -140,7 +152,7 @@ void slb_vmalloc_update(void)
140void switch_slb(struct task_struct *tsk, struct mm_struct *mm) 152void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
141{ 153{
142 unsigned long offset = get_paca()->slb_cache_ptr; 154 unsigned long offset = get_paca()->slb_cache_ptr;
143 unsigned long esid_data = 0; 155 unsigned long slbie_data = 0;
144 unsigned long pc = KSTK_EIP(tsk); 156 unsigned long pc = KSTK_EIP(tsk);
145 unsigned long stack = KSTK_ESP(tsk); 157 unsigned long stack = KSTK_ESP(tsk);
146 unsigned long unmapped_base; 158 unsigned long unmapped_base;
@@ -149,9 +161,12 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
149 int i; 161 int i;
150 asm volatile("isync" : : : "memory"); 162 asm volatile("isync" : : : "memory");
151 for (i = 0; i < offset; i++) { 163 for (i = 0; i < offset; i++) {
152 esid_data = ((unsigned long)get_paca()->slb_cache[i] 164 slbie_data = (unsigned long)get_paca()->slb_cache[i]
153 << SID_SHIFT) | SLBIE_C; 165 << SID_SHIFT; /* EA */
154 asm volatile("slbie %0" : : "r" (esid_data)); 166 slbie_data |= user_segment_size(slbie_data)
167 << SLBIE_SSIZE_SHIFT;
168 slbie_data |= SLBIE_C; /* C set for user addresses */
169 asm volatile("slbie %0" : : "r" (slbie_data));
155 } 170 }
156 asm volatile("isync" : : : "memory"); 171 asm volatile("isync" : : : "memory");
157 } else { 172 } else {
@@ -160,7 +175,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
160 175
161 /* Workaround POWER5 < DD2.1 issue */ 176 /* Workaround POWER5 < DD2.1 issue */
162 if (offset == 1 || offset > SLB_CACHE_ENTRIES) 177 if (offset == 1 || offset > SLB_CACHE_ENTRIES)
163 asm volatile("slbie %0" : : "r" (esid_data)); 178 asm volatile("slbie %0" : : "r" (slbie_data));
164 179
165 get_paca()->slb_cache_ptr = 0; 180 get_paca()->slb_cache_ptr = 0;
166 get_paca()->context = mm->context; 181 get_paca()->context = mm->context;
@@ -243,9 +258,9 @@ void slb_initialize(void)
243 asm volatile("isync":::"memory"); 258 asm volatile("isync":::"memory");
244 asm volatile("slbmte %0,%0"::"r" (0) : "memory"); 259 asm volatile("slbmte %0,%0"::"r" (0) : "memory");
245 asm volatile("isync; slbia; isync":::"memory"); 260 asm volatile("isync; slbia; isync":::"memory");
246 create_shadowed_slbe(PAGE_OFFSET, lflags, 0); 261 create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
247 262
248 create_shadowed_slbe(VMALLOC_START, vflags, 1); 263 create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
249 264
250 /* We don't bolt the stack for the time being - we're in boot, 265 /* We don't bolt the stack for the time being - we're in boot,
251 * so the stack is in the bolted segment. By the time it goes 266 * so the stack is in the bolted segment. By the time it goes
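
The slb.c changes above all funnel through two small helpers that now take the segment size. Their updated shape is roughly the following (a sketch reconstructed from the call sites in these hunks, not quoted from the patch):

	static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
						 unsigned long slot)
	{
		unsigned long mask = (ssize == MMU_SEGSIZE_256M) ?
					ESID_MASK : ESID_MASK_1T;
		return (ea & mask) | SLB_ESID_V | slot;
	}

	static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
						 unsigned long flags)
	{
		int shift = (ssize == MMU_SEGSIZE_256M) ?
					SLB_VSID_SHIFT : SLB_VSID_SHIFT_1T;
		return (get_kernel_vsid(ea, ssize) << shift) | flags |
		       ((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT);
	}

This is also why slb_flush_and_rebolt() now reloads the stack VSID from the shadow save area instead of recomputing it: when the kernel stack lies in the bolted linear segment the ESID word has SLB_ESID_V cleared, and the matching VSID word must be a plain 0 rather than anything derived from the address.
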
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index cd1a93d4948c..1328a81a84aa 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -57,7 +57,10 @@ _GLOBAL(slb_allocate_realmode)
57 */ 57 */
58_GLOBAL(slb_miss_kernel_load_linear) 58_GLOBAL(slb_miss_kernel_load_linear)
59 li r11,0 59 li r11,0
60BEGIN_FTR_SECTION
60 b slb_finish_load 61 b slb_finish_load
62END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
63 b slb_finish_load_1T
61 64
621: /* vmalloc/ioremap mapping encoding bits, the "li" instructions below 651: /* vmalloc/ioremap mapping encoding bits, the "li" instructions below
63 * will be patched by the kernel at boot 66 * will be patched by the kernel at boot
@@ -68,13 +71,16 @@ BEGIN_FTR_SECTION
68 cmpldi r11,(VMALLOC_SIZE >> 28) - 1 71 cmpldi r11,(VMALLOC_SIZE >> 28) - 1
69 bgt 5f 72 bgt 5f
70 lhz r11,PACAVMALLOCSLLP(r13) 73 lhz r11,PACAVMALLOCSLLP(r13)
71 b slb_finish_load 74 b 6f
725: 755:
73END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE) 76END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
74_GLOBAL(slb_miss_kernel_load_io) 77_GLOBAL(slb_miss_kernel_load_io)
75 li r11,0 78 li r11,0
796:
80BEGIN_FTR_SECTION
76 b slb_finish_load 81 b slb_finish_load
77 82END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
83 b slb_finish_load_1T
78 84
790: /* user address: proto-VSID = context << 15 | ESID. First check 850: /* user address: proto-VSID = context << 15 | ESID. First check
80 * if the address is within the boundaries of the user region 86 * if the address is within the boundaries of the user region
@@ -122,7 +128,13 @@ _GLOBAL(slb_miss_kernel_load_io)
122#endif /* CONFIG_PPC_MM_SLICES */ 128#endif /* CONFIG_PPC_MM_SLICES */
123 129
124 ld r9,PACACONTEXTID(r13) 130 ld r9,PACACONTEXTID(r13)
131BEGIN_FTR_SECTION
132 cmpldi r10,0x1000
133END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
125 rldimi r10,r9,USER_ESID_BITS,0 134 rldimi r10,r9,USER_ESID_BITS,0
135BEGIN_FTR_SECTION
136 bge slb_finish_load_1T
137END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
126 b slb_finish_load 138 b slb_finish_load
127 139
1288: /* invalid EA */ 1408: /* invalid EA */
@@ -188,7 +200,7 @@ _GLOBAL(slb_allocate_user)
188 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET 200 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
189 */ 201 */
190slb_finish_load: 202slb_finish_load:
191 ASM_VSID_SCRAMBLE(r10,r9) 203 ASM_VSID_SCRAMBLE(r10,r9,256M)
192 rldimi r11,r10,SLB_VSID_SHIFT,16 /* combine VSID and flags */ 204 rldimi r11,r10,SLB_VSID_SHIFT,16 /* combine VSID and flags */
193 205
194 /* r3 = EA, r11 = VSID data */ 206 /* r3 = EA, r11 = VSID data */
@@ -213,7 +225,7 @@ BEGIN_FW_FTR_SECTION
213END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 225END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
214#endif /* CONFIG_PPC_ISERIES */ 226#endif /* CONFIG_PPC_ISERIES */
215 227
216 ld r10,PACASTABRR(r13) 2287: ld r10,PACASTABRR(r13)
217 addi r10,r10,1 229 addi r10,r10,1
218 /* use a cpu feature mask if we ever change our slb size */ 230 /* use a cpu feature mask if we ever change our slb size */
219 cmpldi r10,SLB_NUM_ENTRIES 231 cmpldi r10,SLB_NUM_ENTRIES
@@ -259,3 +271,20 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
259 crclr 4*cr0+eq /* set result to "success" */ 271 crclr 4*cr0+eq /* set result to "success" */
260 blr 272 blr
261 273
274/*
275 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
276 * We assume legacy iSeries will never have 1T segments.
277 *
278 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
279 */
280slb_finish_load_1T:
281 srdi r10,r10,40-28 /* get 1T ESID */
282 ASM_VSID_SCRAMBLE(r10,r9,1T)
283 rldimi r11,r10,SLB_VSID_SHIFT_1T,16 /* combine VSID and flags */
284 li r10,MMU_SEGSIZE_1T
285 rldimi r11,r10,SLB_VSID_SSIZE_SHIFT,0 /* insert segment size */
286
287 /* r3 = EA, r11 = VSID data */
288 clrrdi r3,r3,SID_SHIFT_1T /* clear out non-ESID bits */
289 b 7b
290
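
Two things in slb_low.S deserve a note. First, the `cmpldi r10,0x1000` works because r10 holds the 256M ESID (EA >> 28), so the comparison is exactly "EA >= 2^40", the boundary above which user addresses get 1T segments. Second, the `srdi r10,r10,40-28` at slb_finish_load_1T runs after the context has already been inserted with the `rldimi ...,USER_ESID_BITS,0`; since USER_ESID_BITS - USER_ESID_BITS_1T == SID_SHIFT_1T - SID_SHIFT == 12, a single shift by 12 turns the 256M proto-VSID (context << 16 | EA >> 28) directly into the 1T one (context << 4 | EA >> 40). A C model of the user path (an illustration of the assembly above, not code from the patch):

	static unsigned long model_user_vsid(unsigned long context,
					     unsigned long ea)
	{
		unsigned long esid  = ea >> SID_SHIFT;	/* r10 on entry */
		unsigned long proto = (context << USER_ESID_BITS) | esid;

		if (cpu_has_feature(CPU_FTR_1T_SEGMENT) && esid >= 0x1000)
			/* slb_finish_load_1T: one shift by 12 re-bases both
			 * the context and the ESID at once */
			return vsid_scramble(proto >> (SID_SHIFT_1T - SID_SHIFT), 1T);
		return vsid_scramble(proto, 256M);
	}
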
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 28492bbdee8e..9e85bda76216 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -122,12 +122,12 @@ static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
122 122
123 /* Kernel or user address? */ 123 /* Kernel or user address? */
124 if (is_kernel_addr(ea)) { 124 if (is_kernel_addr(ea)) {
125 vsid = get_kernel_vsid(ea); 125 vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
126 } else { 126 } else {
127 if ((ea >= TASK_SIZE_USER64) || (! mm)) 127 if ((ea >= TASK_SIZE_USER64) || (! mm))
128 return 1; 128 return 1;
129 129
130 vsid = get_vsid(mm->context.id, ea); 130 vsid = get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M);
131 } 131 }
132 132
133 stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid); 133 stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);
@@ -261,7 +261,7 @@ void __init stabs_alloc(void)
261 */ 261 */
262void stab_initialize(unsigned long stab) 262void stab_initialize(unsigned long stab)
263{ 263{
264 unsigned long vsid = get_kernel_vsid(PAGE_OFFSET); 264 unsigned long vsid = get_kernel_vsid(PAGE_OFFSET, MMU_SEGSIZE_256M);
265 unsigned long stabreal; 265 unsigned long stabreal;
266 266
267 asm volatile("isync; slbia; isync":::"memory"); 267 asm volatile("isync; slbia; isync":::"memory");
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index cbd34fc813ee..eafbca52bff9 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -132,6 +132,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
132 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 132 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
133 unsigned long vsid, vaddr; 133 unsigned long vsid, vaddr;
134 unsigned int psize; 134 unsigned int psize;
135 int ssize;
135 real_pte_t rpte; 136 real_pte_t rpte;
136 int i; 137 int i;
137 138
@@ -161,11 +162,14 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
161 162
162 /* Build full vaddr */ 163 /* Build full vaddr */
163 if (!is_kernel_addr(addr)) { 164 if (!is_kernel_addr(addr)) {
164 vsid = get_vsid(mm->context.id, addr); 165 ssize = user_segment_size(addr);
166 vsid = get_vsid(mm->context.id, addr, ssize);
165 WARN_ON(vsid == 0); 167 WARN_ON(vsid == 0);
166 } else 168 } else {
167 vsid = get_kernel_vsid(addr); 169 vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
168 vaddr = (vsid << 28 ) | (addr & 0x0fffffff); 170 ssize = mmu_kernel_ssize;
171 }
172 vaddr = hpt_va(addr, vsid, ssize);
169 rpte = __real_pte(__pte(pte), ptep); 173 rpte = __real_pte(__pte(pte), ptep);
170 174
171 /* 175 /*
@@ -175,7 +179,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
175 * and decide to use local invalidates instead... 179 * and decide to use local invalidates instead...
176 */ 180 */
177 if (!batch->active) { 181 if (!batch->active) {
178 flush_hash_page(vaddr, rpte, psize, 0); 182 flush_hash_page(vaddr, rpte, psize, ssize, 0);
179 return; 183 return;
180 } 184 }
181 185
@@ -189,13 +193,15 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
189 * We also need to ensure only one page size is present in a given 193 * We also need to ensure only one page size is present in a given
190 * batch 194 * batch
191 */ 195 */
192 if (i != 0 && (mm != batch->mm || batch->psize != psize)) { 196 if (i != 0 && (mm != batch->mm || batch->psize != psize ||
197 batch->ssize != ssize)) {
193 __flush_tlb_pending(batch); 198 __flush_tlb_pending(batch);
194 i = 0; 199 i = 0;
195 } 200 }
196 if (i == 0) { 201 if (i == 0) {
197 batch->mm = mm; 202 batch->mm = mm;
198 batch->psize = psize; 203 batch->psize = psize;
204 batch->ssize = ssize;
199 } 205 }
200 batch->pte[i] = rpte; 206 batch->pte[i] = rpte;
201 batch->vaddr[i] = vaddr; 207 batch->vaddr[i] = vaddr;
@@ -222,7 +228,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
222 local = 1; 228 local = 1;
223 if (i == 1) 229 if (i == 1)
224 flush_hash_page(batch->vaddr[0], batch->pte[0], 230 flush_hash_page(batch->vaddr[0], batch->pte[0],
225 batch->psize, local); 231 batch->psize, batch->ssize, local);
226 else 232 else
227 flush_hash_range(i, local); 233 flush_hash_range(i, local);
228 batch->index = 0; 234 batch->index = 0;
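
A flush batch is handed to flush_hash_range() with a single psize, and now a single ssize, for all of its entries, so hpte_need_flush() must cut the batch whenever either changes. The invariant, written out as a hypothetical predicate (illustration only, not kernel code):

	/* True when (mm, psize, ssize) may be appended to the batch. */
	static int batch_accepts(const struct ppc64_tlb_batch *b, int i,
				 struct mm_struct *mm, int psize, int ssize)
	{
		/* an empty batch accepts anything; a non-empty one only
		 * entries that hash with the same parameters */
		return i == 0 ||
		       (b->mm == mm && b->psize == psize && b->ssize == ssize);
	}
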
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index b5a21177bb32..c83c3e3f5178 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -168,7 +168,7 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
168#else 168#else
169 psize = mm->context.user_psize; 169 psize = mm->context.user_psize;
170#endif 170#endif
171 vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | 171 vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
172 SLB_VSID_USER; 172 SLB_VSID_USER;
173 break; 173 break;
174 case VMALLOC_REGION_ID: 174 case VMALLOC_REGION_ID:
@@ -176,12 +176,12 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
176 psize = mmu_vmalloc_psize; 176 psize = mmu_vmalloc_psize;
177 else 177 else
178 psize = mmu_io_psize; 178 psize = mmu_io_psize;
179 vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | 179 vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
180 SLB_VSID_KERNEL; 180 SLB_VSID_KERNEL;
181 break; 181 break;
182 case KERNEL_REGION_ID: 182 case KERNEL_REGION_ID:
183 psize = mmu_linear_psize; 183 psize = mmu_linear_psize;
184 vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | 184 vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
185 SLB_VSID_KERNEL; 185 SLB_VSID_KERNEL;
186 break; 186 break;
187 default: 187 default:
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index de7e5ee451d2..3d64c81cc6e2 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -699,7 +699,7 @@ static inline void get_kernel_slb(u64 ea, u64 slb[2])
699 llp = mmu_psize_defs[mmu_linear_psize].sllp; 699 llp = mmu_psize_defs[mmu_linear_psize].sllp;
700 else 700 else
701 llp = mmu_psize_defs[mmu_virtual_psize].sllp; 701 llp = mmu_psize_defs[mmu_virtual_psize].sllp;
702 slb[0] = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | 702 slb[0] = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
703 SLB_VSID_KERNEL | llp; 703 SLB_VSID_KERNEL | llp;
704 slb[1] = (ea & ESID_MASK) | SLB_ESID_V; 704 slb[1] = (ea & ESID_MASK) | SLB_ESID_V;
705} 705}
diff --git a/arch/powerpc/platforms/celleb/htab.c b/arch/powerpc/platforms/celleb/htab.c
index 5e75c77ea8f3..fbf27c74ebda 100644
--- a/arch/powerpc/platforms/celleb/htab.c
+++ b/arch/powerpc/platforms/celleb/htab.c
@@ -90,7 +90,7 @@ static inline unsigned int beat_read_mask(unsigned hpte_group)
90static long beat_lpar_hpte_insert(unsigned long hpte_group, 90static long beat_lpar_hpte_insert(unsigned long hpte_group,
91 unsigned long va, unsigned long pa, 91 unsigned long va, unsigned long pa,
92 unsigned long rflags, unsigned long vflags, 92 unsigned long rflags, unsigned long vflags,
93 int psize) 93 int psize, int ssize)
94{ 94{
95 unsigned long lpar_rc; 95 unsigned long lpar_rc;
96 unsigned long slot; 96 unsigned long slot;
@@ -105,7 +105,8 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group,
105 "rflags=%lx, vflags=%lx, psize=%d)\n", 105 "rflags=%lx, vflags=%lx, psize=%d)\n",
106 hpte_group, va, pa, rflags, vflags, psize); 106 hpte_group, va, pa, rflags, vflags, psize);
107 107
108 hpte_v = hpte_encode_v(va, psize) | vflags | HPTE_V_VALID; 108 hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) |
109 vflags | HPTE_V_VALID;
109 hpte_r = hpte_encode_r(pa, psize) | rflags; 110 hpte_r = hpte_encode_r(pa, psize) | rflags;
110 111
111 if (!(vflags & HPTE_V_BOLTED)) 112 if (!(vflags & HPTE_V_BOLTED))
@@ -184,12 +185,12 @@ static void beat_lpar_hptab_clear(void)
184static long beat_lpar_hpte_updatepp(unsigned long slot, 185static long beat_lpar_hpte_updatepp(unsigned long slot,
185 unsigned long newpp, 186 unsigned long newpp,
186 unsigned long va, 187 unsigned long va,
187 int psize, int local) 188 int psize, int ssize, int local)
188{ 189{
189 unsigned long lpar_rc; 190 unsigned long lpar_rc;
190 unsigned long dummy0, dummy1, want_v; 191 unsigned long dummy0, dummy1, want_v;
191 192
192 want_v = hpte_encode_v(va, psize); 193 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
193 194
194 DBG_LOW(" update: " 195 DBG_LOW(" update: "
195 "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ", 196 "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
@@ -225,8 +226,8 @@ static long beat_lpar_hpte_find(unsigned long va, int psize)
225 long slot; 226 long slot;
226 unsigned long want_v, hpte_v; 227 unsigned long want_v, hpte_v;
227 228
228 hash = hpt_hash(va, mmu_psize_defs[psize].shift); 229 hash = hpt_hash(va, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M);
229 want_v = hpte_encode_v(va, psize); 230 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
230 231
231 for (j = 0; j < 2; j++) { 232 for (j = 0; j < 2; j++) {
232 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 233 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -251,11 +252,11 @@ static long beat_lpar_hpte_find(unsigned long va, int psize)
251 252
252static void beat_lpar_hpte_updateboltedpp(unsigned long newpp, 253static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
253 unsigned long ea, 254 unsigned long ea,
254 int psize) 255 int psize, int ssize)
255{ 256{
256 unsigned long lpar_rc, slot, vsid, va, dummy0, dummy1; 257 unsigned long lpar_rc, slot, vsid, va, dummy0, dummy1;
257 258
258 vsid = get_kernel_vsid(ea); 259 vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
259 va = (vsid << 28) | (ea & 0x0fffffff); 260 va = (vsid << 28) | (ea & 0x0fffffff);
260 261
261 spin_lock(&beat_htab_lock); 262 spin_lock(&beat_htab_lock);
@@ -270,7 +271,7 @@ static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
270} 271}
271 272
272static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va, 273static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
273 int psize, int local) 274 int psize, int ssize, int local)
274{ 275{
275 unsigned long want_v; 276 unsigned long want_v;
276 unsigned long lpar_rc; 277 unsigned long lpar_rc;
@@ -279,7 +280,7 @@ static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
279 280
280 DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", 281 DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
281 slot, va, psize, local); 282 slot, va, psize, local);
282 want_v = hpte_encode_v(va, psize); 283 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
283 284
284 spin_lock_irqsave(&beat_htab_lock, flags); 285 spin_lock_irqsave(&beat_htab_lock, flags);
285 dummy1 = beat_lpar_hpte_getword0(slot); 286 dummy1 = beat_lpar_hpte_getword0(slot);
@@ -310,7 +311,7 @@ void __init hpte_init_beat(void)
310static long beat_lpar_hpte_insert_v3(unsigned long hpte_group, 311static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
311 unsigned long va, unsigned long pa, 312 unsigned long va, unsigned long pa,
312 unsigned long rflags, unsigned long vflags, 313 unsigned long rflags, unsigned long vflags,
313 int psize) 314 int psize, int ssize)
314{ 315{
315 unsigned long lpar_rc; 316 unsigned long lpar_rc;
316 unsigned long slot; 317 unsigned long slot;
@@ -325,7 +326,8 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
325 "rflags=%lx, vflags=%lx, psize=%d)\n", 326 "rflags=%lx, vflags=%lx, psize=%d)\n",
326 hpte_group, va, pa, rflags, vflags, psize); 327 hpte_group, va, pa, rflags, vflags, psize);
327 328
328 hpte_v = hpte_encode_v(va, psize) | vflags | HPTE_V_VALID; 329 hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) |
330 vflags | HPTE_V_VALID;
329 hpte_r = hpte_encode_r(pa, psize) | rflags; 331 hpte_r = hpte_encode_r(pa, psize) | rflags;
330 332
331 if (!(vflags & HPTE_V_BOLTED)) 333 if (!(vflags & HPTE_V_BOLTED))
@@ -363,13 +365,13 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
363static long beat_lpar_hpte_updatepp_v3(unsigned long slot, 365static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
364 unsigned long newpp, 366 unsigned long newpp,
365 unsigned long va, 367 unsigned long va,
366 int psize, int local) 368 int psize, int ssize, int local)
367{ 369{
368 unsigned long lpar_rc; 370 unsigned long lpar_rc;
369 unsigned long want_v; 371 unsigned long want_v;
370 unsigned long pss; 372 unsigned long pss;
371 373
372 want_v = hpte_encode_v(va, psize); 374 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
373 pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc; 375 pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;
374 376
375 DBG_LOW(" update: " 377 DBG_LOW(" update: "
@@ -391,7 +393,7 @@ static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
391} 393}
392 394
393static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long va, 395static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long va,
394 int psize, int local) 396 int psize, int ssize, int local)
395{ 397{
396 unsigned long want_v; 398 unsigned long want_v;
397 unsigned long lpar_rc; 399 unsigned long lpar_rc;
@@ -399,7 +401,7 @@ static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long va,
399 401
400 DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", 402 DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
401 slot, va, psize, local); 403 slot, va, psize, local);
402 want_v = hpte_encode_v(va, psize); 404 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
403 pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc; 405 pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;
404 406
405 lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss); 407 lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss);
diff --git a/arch/powerpc/platforms/iseries/htab.c b/arch/powerpc/platforms/iseries/htab.c
index b4e2c7a038e1..15a7097e5dd7 100644
--- a/arch/powerpc/platforms/iseries/htab.c
+++ b/arch/powerpc/platforms/iseries/htab.c
@@ -86,7 +86,8 @@ long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
86 } 86 }
87 87
88 88
89 lhpte.v = hpte_encode_v(va, MMU_PAGE_4K) | vflags | HPTE_V_VALID; 89 lhpte.v = hpte_encode_v(va, MMU_PAGE_4K, MMU_SEGSIZE_256M) |
90 vflags | HPTE_V_VALID;
90 lhpte.r = hpte_encode_r(phys_to_abs(pa), MMU_PAGE_4K) | rflags; 91 lhpte.r = hpte_encode_r(phys_to_abs(pa), MMU_PAGE_4K) | rflags;
91 92
92 /* Now fill in the actual HPTE */ 93 /* Now fill in the actual HPTE */
@@ -142,7 +143,7 @@ static long iSeries_hpte_remove(unsigned long hpte_group)
142 * bits 61..63 : PP2,PP1,PP0 143 * bits 61..63 : PP2,PP1,PP0
143 */ 144 */
144static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp, 145static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
145 unsigned long va, int psize, int local) 146 unsigned long va, int psize, int ssize, int local)
146{ 147{
147 struct hash_pte hpte; 148 struct hash_pte hpte;
148 unsigned long want_v; 149 unsigned long want_v;
@@ -150,7 +151,7 @@ static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
150 iSeries_hlock(slot); 151 iSeries_hlock(slot);
151 152
152 HvCallHpt_get(&hpte, slot); 153 HvCallHpt_get(&hpte, slot);
153 want_v = hpte_encode_v(va, MMU_PAGE_4K); 154 want_v = hpte_encode_v(va, MMU_PAGE_4K, MMU_SEGSIZE_256M);
154 155
155 if (HPTE_V_COMPARE(hpte.v, want_v) && (hpte.v & HPTE_V_VALID)) { 156 if (HPTE_V_COMPARE(hpte.v, want_v) && (hpte.v & HPTE_V_VALID)) {
156 /* 157 /*
@@ -205,14 +206,14 @@ static long iSeries_hpte_find(unsigned long vpn)
205 * No need to lock here because we should be the only user. 206 * No need to lock here because we should be the only user.
206 */ 207 */
207static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, 208static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
208 int psize) 209 int psize, int ssize)
209{ 210{
210 unsigned long vsid,va,vpn; 211 unsigned long vsid,va,vpn;
211 long slot; 212 long slot;
212 213
213 BUG_ON(psize != MMU_PAGE_4K); 214 BUG_ON(psize != MMU_PAGE_4K);
214 215
215 vsid = get_kernel_vsid(ea); 216 vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
216 va = (vsid << 28) | (ea & 0x0fffffff); 217 va = (vsid << 28) | (ea & 0x0fffffff);
217 vpn = va >> HW_PAGE_SHIFT; 218 vpn = va >> HW_PAGE_SHIFT;
218 slot = iSeries_hpte_find(vpn); 219 slot = iSeries_hpte_find(vpn);
@@ -222,7 +223,7 @@ static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
222} 223}
223 224
224static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va, 225static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
225 int psize, int local) 226 int psize, int ssize, int local)
226{ 227{
227 unsigned long hpte_v; 228 unsigned long hpte_v;
228 unsigned long avpn = va >> 23; 229 unsigned long avpn = va >> 23;
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
index 5d2e176a1b18..7382f195c4f8 100644
--- a/arch/powerpc/platforms/ps3/htab.c
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -60,7 +60,8 @@ static void _debug_dump_hpte(unsigned long pa, unsigned long va,
60} 60}
61 61
62static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va, 62static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va,
63 unsigned long pa, unsigned long rflags, unsigned long vflags, int psize) 63 unsigned long pa, unsigned long rflags, unsigned long vflags,
64 int psize, int ssize)
64{ 65{
65 unsigned long slot; 66 unsigned long slot;
66 struct hash_pte lhpte; 67 struct hash_pte lhpte;
@@ -72,7 +73,8 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va,
72 73
73 vflags &= ~HPTE_V_SECONDARY; /* this bit is ignored */ 74 vflags &= ~HPTE_V_SECONDARY; /* this bit is ignored */
74 75
75 lhpte.v = hpte_encode_v(va, psize) | vflags | HPTE_V_VALID; 76 lhpte.v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) |
77 vflags | HPTE_V_VALID;
76 lhpte.r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags; 78 lhpte.r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags;
77 79
78 p_pteg = hpte_group / HPTES_PER_GROUP; 80 p_pteg = hpte_group / HPTES_PER_GROUP;
@@ -167,14 +169,14 @@ static long ps3_hpte_remove(unsigned long hpte_group)
167} 169}
168 170
169static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp, 171static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
170 unsigned long va, int psize, int local) 172 unsigned long va, int psize, int ssize, int local)
171{ 173{
172 unsigned long flags; 174 unsigned long flags;
173 unsigned long result; 175 unsigned long result;
174 unsigned long pteg, bit; 176 unsigned long pteg, bit;
175 unsigned long hpte_v, want_v; 177 unsigned long hpte_v, want_v;
176 178
177 want_v = hpte_encode_v(va, psize); 179 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
178 180
179 spin_lock_irqsave(&ps3_bolttab_lock, flags); 181 spin_lock_irqsave(&ps3_bolttab_lock, flags);
180 182
@@ -205,13 +207,13 @@ static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
205} 207}
206 208
207static void ps3_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, 209static void ps3_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
208 int psize) 210 int psize, int ssize)
209{ 211{
210 panic("ps3_hpte_updateboltedpp() not implemented"); 212 panic("ps3_hpte_updateboltedpp() not implemented");
211} 213}
212 214
213static void ps3_hpte_invalidate(unsigned long slot, unsigned long va, 215static void ps3_hpte_invalidate(unsigned long slot, unsigned long va,
214 int psize, int local) 216 int psize, int ssize, int local)
215{ 217{
216 unsigned long flags; 218 unsigned long flags;
217 unsigned long result; 219 unsigned long result;
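
The Beat, legacy iSeries and PS3 backends above all change the same way: the hpte_* hooks gain an ssize argument so they keep matching the machdep interface, but these hypervisors only ever see 256M segments, so the argument is ignored and MMU_SEGSIZE_256M stays hard-coded in every hpte_encode_v() and get_kernel_vsid() call. A minimal backend of this kind looks like (hypothetical sketch, not one of the platforms above):

	static void example_hpte_invalidate(unsigned long slot, unsigned long va,
					    int psize, int ssize, int local)
	{
		/* ssize is accepted for interface compatibility only; this
		 * platform creates nothing but 256M-segment HPTEs */
		unsigned long want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);

		/* ... compare want_v against the slot, then invalidate ... */
	}
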
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index ea327ca345c6..9a455d46379d 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -284,7 +284,7 @@ void vpa_init(int cpu)
284static long pSeries_lpar_hpte_insert(unsigned long hpte_group, 284static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
285 unsigned long va, unsigned long pa, 285 unsigned long va, unsigned long pa,
286 unsigned long rflags, unsigned long vflags, 286 unsigned long rflags, unsigned long vflags,
287 int psize) 287 int psize, int ssize)
288{ 288{
289 unsigned long lpar_rc; 289 unsigned long lpar_rc;
290 unsigned long flags; 290 unsigned long flags;
@@ -296,7 +296,7 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
296 "rflags=%lx, vflags=%lx, psize=%d)\n", 296 "rflags=%lx, vflags=%lx, psize=%d)\n",
297 hpte_group, va, pa, rflags, vflags, psize); 297 hpte_group, va, pa, rflags, vflags, psize);
298 298
299 hpte_v = hpte_encode_v(va, psize) | vflags | HPTE_V_VALID; 299 hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
300 hpte_r = hpte_encode_r(pa, psize) | rflags; 300 hpte_r = hpte_encode_r(pa, psize) | rflags;
301 301
302 if (!(vflags & HPTE_V_BOLTED)) 302 if (!(vflags & HPTE_V_BOLTED))
@@ -392,6 +392,22 @@ static void pSeries_lpar_hptab_clear(void)
392} 392}
393 393
394/* 394/*
395 * This computes the AVPN and B fields of the first dword of a HPTE,
396 * for use when we want to match an existing PTE. The bottom 7 bits
397 * of the returned value are zero.
398 */
399static inline unsigned long hpte_encode_avpn(unsigned long va, int psize,
400 int ssize)
401{
402 unsigned long v;
403
404 v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
405 v <<= HPTE_V_AVPN_SHIFT;
406 v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
407 return v;
408}
409
410/*
395 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and 411 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
396 * the low 3 bits of flags happen to line up. So no transform is needed. 412 * the low 3 bits of flags happen to line up. So no transform is needed.
397 * We can probably optimize here and assume the high bits of newpp are 413 * We can probably optimize here and assume the high bits of newpp are
@@ -400,18 +416,18 @@ static void pSeries_lpar_hptab_clear(void)
400static long pSeries_lpar_hpte_updatepp(unsigned long slot, 416static long pSeries_lpar_hpte_updatepp(unsigned long slot,
401 unsigned long newpp, 417 unsigned long newpp,
402 unsigned long va, 418 unsigned long va,
403 int psize, int local) 419 int psize, int ssize, int local)
404{ 420{
405 unsigned long lpar_rc; 421 unsigned long lpar_rc;
406 unsigned long flags = (newpp & 7) | H_AVPN; 422 unsigned long flags = (newpp & 7) | H_AVPN;
407 unsigned long want_v; 423 unsigned long want_v;
408 424
409 want_v = hpte_encode_v(va, psize); 425 want_v = hpte_encode_avpn(va, psize, ssize);
410 426
411 DBG_LOW(" update: avpnv=%016lx, hash=%016lx, f=%x, psize: %d ... ", 427 DBG_LOW(" update: avpnv=%016lx, hash=%016lx, f=%x, psize: %d ... ",
412 want_v & HPTE_V_AVPN, slot, flags, psize); 428 want_v, slot, flags, psize);
413 429
414 lpar_rc = plpar_pte_protect(flags, slot, want_v & HPTE_V_AVPN); 430 lpar_rc = plpar_pte_protect(flags, slot, want_v);
415 431
416 if (lpar_rc == H_NOT_FOUND) { 432 if (lpar_rc == H_NOT_FOUND) {
417 DBG_LOW("not found !\n"); 433 DBG_LOW("not found !\n");
@@ -444,32 +460,25 @@ static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
444 return dword0; 460 return dword0;
445} 461}
446 462
447static long pSeries_lpar_hpte_find(unsigned long va, int psize) 463static long pSeries_lpar_hpte_find(unsigned long va, int psize, int ssize)
448{ 464{
449 unsigned long hash; 465 unsigned long hash;
450 unsigned long i, j; 466 unsigned long i;
451 long slot; 467 long slot;
452 unsigned long want_v, hpte_v; 468 unsigned long want_v, hpte_v;
453 469
454 hash = hpt_hash(va, mmu_psize_defs[psize].shift); 470 hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize);
455 want_v = hpte_encode_v(va, psize); 471 want_v = hpte_encode_avpn(va, psize, ssize);
456 472
457 for (j = 0; j < 2; j++) { 473 /* Bolted entries are always in the primary group */
458 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 474 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
459 for (i = 0; i < HPTES_PER_GROUP; i++) { 475 for (i = 0; i < HPTES_PER_GROUP; i++) {
460 hpte_v = pSeries_lpar_hpte_getword0(slot); 476 hpte_v = pSeries_lpar_hpte_getword0(slot);
461 477
462 if (HPTE_V_COMPARE(hpte_v, want_v) 478 if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
463 && (hpte_v & HPTE_V_VALID) 479 /* HPTE matches */
464 && (!!(hpte_v & HPTE_V_SECONDARY) == j)) { 480 return slot;
465 /* HPTE matches */ 481 ++slot;
466 if (j)
467 slot = -slot;
468 return slot;
469 }
470 ++slot;
471 }
472 hash = ~hash;
473 } 482 }
474 483
475 return -1; 484 return -1;
@@ -477,14 +486,14 @@ static long pSeries_lpar_hpte_find(unsigned long va, int psize)
477 486
478static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp, 487static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
479 unsigned long ea, 488 unsigned long ea,
480 int psize) 489 int psize, int ssize)
481{ 490{
482 unsigned long lpar_rc, slot, vsid, va, flags; 491 unsigned long lpar_rc, slot, vsid, va, flags;
483 492
484 vsid = get_kernel_vsid(ea); 493 vsid = get_kernel_vsid(ea, ssize);
485 va = (vsid << 28) | (ea & 0x0fffffff); 494 va = hpt_va(ea, vsid, ssize);
486 495
487 slot = pSeries_lpar_hpte_find(va, psize); 496 slot = pSeries_lpar_hpte_find(va, psize, ssize);
488 BUG_ON(slot == -1); 497 BUG_ON(slot == -1);
489 498
490 flags = newpp & 7; 499 flags = newpp & 7;
@@ -494,7 +503,7 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
494} 503}
495 504
496static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va, 505static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
497 int psize, int local) 506 int psize, int ssize, int local)
498{ 507{
499 unsigned long want_v; 508 unsigned long want_v;
500 unsigned long lpar_rc; 509 unsigned long lpar_rc;
@@ -503,9 +512,8 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
503 DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d", 512 DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d",
504 slot, va, psize, local); 513 slot, va, psize, local);
505 514
506 want_v = hpte_encode_v(va, psize); 515 want_v = hpte_encode_avpn(va, psize, ssize);
507 lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v & HPTE_V_AVPN, 516 lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
508 &dummy1, &dummy2);
509 if (lpar_rc == H_NOT_FOUND) 517 if (lpar_rc == H_NOT_FOUND)
510 return; 518 return;
511 519
@@ -533,18 +541,19 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
533 unsigned long va; 541 unsigned long va;
534 unsigned long hash, index, shift, hidx, slot; 542 unsigned long hash, index, shift, hidx, slot;
535 real_pte_t pte; 543 real_pte_t pte;
536 int psize; 544 int psize, ssize;
537 545
538 if (lock_tlbie) 546 if (lock_tlbie)
539 spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags); 547 spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
540 548
541 psize = batch->psize; 549 psize = batch->psize;
550 ssize = batch->ssize;
542 pix = 0; 551 pix = 0;
543 for (i = 0; i < number; i++) { 552 for (i = 0; i < number; i++) {
544 va = batch->vaddr[i]; 553 va = batch->vaddr[i];
545 pte = batch->pte[i]; 554 pte = batch->pte[i];
546 pte_iterate_hashed_subpages(pte, psize, va, index, shift) { 555 pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
547 hash = hpt_hash(va, shift); 556 hash = hpt_hash(va, shift, ssize);
548 hidx = __rpte_to_hidx(pte, index); 557 hidx = __rpte_to_hidx(pte, index);
549 if (hidx & _PTEIDX_SECONDARY) 558 if (hidx & _PTEIDX_SECONDARY)
550 hash = ~hash; 559 hash = ~hash;
@@ -552,11 +561,11 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
552 slot += hidx & _PTEIDX_GROUP_IX; 561 slot += hidx & _PTEIDX_GROUP_IX;
553 if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { 562 if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
554 pSeries_lpar_hpte_invalidate(slot, va, psize, 563 pSeries_lpar_hpte_invalidate(slot, va, psize,
555 local); 564 ssize, local);
556 } else { 565 } else {
557 param[pix] = HBR_REQUEST | HBR_AVPN | slot; 566 param[pix] = HBR_REQUEST | HBR_AVPN | slot;
558 param[pix+1] = hpte_encode_v(va, psize) & 567 param[pix+1] = hpte_encode_avpn(va, psize,
559 HPTE_V_AVPN; 568 ssize);
560 pix += 2; 569 pix += 2;
561 if (pix == 8) { 570 if (pix == 8) {
562 rc = plpar_hcall9(H_BULK_REMOVE, param, 571 rc = plpar_hcall9(H_BULK_REMOVE, param,
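
hpte_encode_avpn() earns its keep here: hpte_encode_v() also sets HPTE_V_LARGE, which must not take part in an H_AVPN match, and the old `want_v & HPTE_V_AVPN` masking that removed it would now also strip the B (segment size) field out of the top bits. The new helper returns AVPN | B with the bottom seven bits clear, which is exactly the bit set the updated HPTE_V_COMPARE() tests (mask 0xffffffffffffff80), so want_v can go to plpar_pte_protect() and plpar_pte_remove() unmasked. Assuming HPTE_V_SSIZE_SHIFT is 62 (the B field occupying the top two bits of the first HPTE doubleword), the two properties can be stated as (illustrative fragment, not kernel code):

	unsigned long v = hpte_encode_avpn(va, psize, ssize);

	BUG_ON(v & 0x7f);	/* low seven bits are always zero */
	BUG_ON(!HPTE_V_COMPARE(v, hpte_encode_v(va, psize, ssize)));
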
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index d913f460e710..ae093ef68363 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -164,6 +164,7 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
164#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000800000000000) 164#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000800000000000)
165#define CPU_FTR_SPURR LONG_ASM_CONST(0x0001000000000000) 165#define CPU_FTR_SPURR LONG_ASM_CONST(0x0001000000000000)
166#define CPU_FTR_DSCR LONG_ASM_CONST(0x0002000000000000) 166#define CPU_FTR_DSCR LONG_ASM_CONST(0x0002000000000000)
167#define CPU_FTR_1T_SEGMENT LONG_ASM_CONST(0x0004000000000000)
167 168
168#ifndef __ASSEMBLY__ 169#ifndef __ASSEMBLY__
169 170
@@ -374,7 +375,7 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
374#define CPU_FTRS_POSSIBLE \ 375#define CPU_FTRS_POSSIBLE \
375 (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \ 376 (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \
376 CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \ 377 CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \
377 CPU_FTRS_CELL | CPU_FTRS_PA6T) 378 CPU_FTRS_CELL | CPU_FTRS_PA6T | CPU_FTR_1T_SEGMENT)
378#else 379#else
379enum { 380enum {
380 CPU_FTRS_POSSIBLE = 381 CPU_FTRS_POSSIBLE =
diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h
index cc7c17f16a9a..6968f4300dca 100644
--- a/include/asm-powerpc/machdep.h
+++ b/include/asm-powerpc/machdep.h
@@ -51,22 +51,22 @@ struct machdep_calls {
51#ifdef CONFIG_PPC64 51#ifdef CONFIG_PPC64
52 void (*hpte_invalidate)(unsigned long slot, 52 void (*hpte_invalidate)(unsigned long slot,
53 unsigned long va, 53 unsigned long va,
54 int psize, 54 int psize, int ssize,
55 int local); 55 int local);
56 long (*hpte_updatepp)(unsigned long slot, 56 long (*hpte_updatepp)(unsigned long slot,
57 unsigned long newpp, 57 unsigned long newpp,
58 unsigned long va, 58 unsigned long va,
59 int pize, 59 int psize, int ssize,
60 int local); 60 int local);
61 void (*hpte_updateboltedpp)(unsigned long newpp, 61 void (*hpte_updateboltedpp)(unsigned long newpp,
62 unsigned long ea, 62 unsigned long ea,
63 int psize); 63 int psize, int ssize);
64 long (*hpte_insert)(unsigned long hpte_group, 64 long (*hpte_insert)(unsigned long hpte_group,
65 unsigned long va, 65 unsigned long va,
66 unsigned long prpn, 66 unsigned long prpn,
67 unsigned long rflags, 67 unsigned long rflags,
68 unsigned long vflags, 68 unsigned long vflags,
69 int psize); 69 int psize, int ssize);
70 long (*hpte_remove)(unsigned long hpte_group); 70 long (*hpte_remove)(unsigned long hpte_group);
71 void (*flush_hash_range)(unsigned long number, int local); 71 void (*flush_hash_range)(unsigned long number, int local);
72 72
diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h
index b22b0d20e157..82328dec2b52 100644
--- a/include/asm-powerpc/mmu-hash64.h
+++ b/include/asm-powerpc/mmu-hash64.h
@@ -47,6 +47,8 @@ extern char initial_stab[];
47 47
48/* Bits in the SLB VSID word */ 48/* Bits in the SLB VSID word */
49#define SLB_VSID_SHIFT 12 49#define SLB_VSID_SHIFT 12
50#define SLB_VSID_SHIFT_1T 24
51#define SLB_VSID_SSIZE_SHIFT 62
50#define SLB_VSID_B ASM_CONST(0xc000000000000000) 52#define SLB_VSID_B ASM_CONST(0xc000000000000000)
51#define SLB_VSID_B_256M ASM_CONST(0x0000000000000000) 53#define SLB_VSID_B_256M ASM_CONST(0x0000000000000000)
52#define SLB_VSID_B_1T ASM_CONST(0x4000000000000000) 54#define SLB_VSID_B_1T ASM_CONST(0x4000000000000000)
@@ -66,6 +68,7 @@ extern char initial_stab[];
66#define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C) 68#define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)
67 69
68#define SLBIE_C (0x08000000) 70#define SLBIE_C (0x08000000)
71#define SLBIE_SSIZE_SHIFT 25
69 72
70/* 73/*
71 * Hash table 74 * Hash table
@@ -77,7 +80,7 @@ extern char initial_stab[];
77#define HPTE_V_AVPN_SHIFT 7 80#define HPTE_V_AVPN_SHIFT 7
78#define HPTE_V_AVPN ASM_CONST(0x3fffffffffffff80) 81#define HPTE_V_AVPN ASM_CONST(0x3fffffffffffff80)
79#define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT) 82#define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
80#define HPTE_V_COMPARE(x,y) (!(((x) ^ (y)) & HPTE_V_AVPN)) 83#define HPTE_V_COMPARE(x,y) (!(((x) ^ (y)) & 0xffffffffffffff80))
81#define HPTE_V_BOLTED ASM_CONST(0x0000000000000010) 84#define HPTE_V_BOLTED ASM_CONST(0x0000000000000010)
82#define HPTE_V_LOCK ASM_CONST(0x0000000000000008) 85#define HPTE_V_LOCK ASM_CONST(0x0000000000000008)
83#define HPTE_V_LARGE ASM_CONST(0x0000000000000004) 86#define HPTE_V_LARGE ASM_CONST(0x0000000000000004)
@@ -164,16 +167,19 @@ struct mmu_psize_def
164#define MMU_SEGSIZE_256M 0 167#define MMU_SEGSIZE_256M 0
165#define MMU_SEGSIZE_1T 1 168#define MMU_SEGSIZE_1T 1
166 169
170
167#ifndef __ASSEMBLY__ 171#ifndef __ASSEMBLY__
168 172
169/* 173/*
170 * The current system page sizes 174 * The current system page and segment sizes
171 */ 175 */
172extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; 176extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
173extern int mmu_linear_psize; 177extern int mmu_linear_psize;
174extern int mmu_virtual_psize; 178extern int mmu_virtual_psize;
175extern int mmu_vmalloc_psize; 179extern int mmu_vmalloc_psize;
176extern int mmu_io_psize; 180extern int mmu_io_psize;
181extern int mmu_kernel_ssize;
182extern int mmu_highuser_ssize;
177 183
178/* 184/*
179 * If the processor supports 64k normal pages but not 64k cache 185 * If the processor supports 64k normal pages but not 64k cache
@@ -195,13 +201,15 @@ extern int mmu_huge_psize;
195 * This function sets the AVPN and L fields of the HPTE appropriately 201 * This function sets the AVPN and L fields of the HPTE appropriately
196 * for the page size 202 * for the page size
197 */ 203 */
198static inline unsigned long hpte_encode_v(unsigned long va, int psize) 204static inline unsigned long hpte_encode_v(unsigned long va, int psize,
205 int ssize)
199{ 206{
200 unsigned long v = 207 unsigned long v;
201 v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm); 208 v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
202 v <<= HPTE_V_AVPN_SHIFT; 209 v <<= HPTE_V_AVPN_SHIFT;
203 if (psize != MMU_PAGE_4K) 210 if (psize != MMU_PAGE_4K)
204 v |= HPTE_V_LARGE; 211 v |= HPTE_V_LARGE;
212 v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
205 return v; 213 return v;
206} 214}
207 215
@@ -226,20 +234,40 @@ static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
226} 234}
227 235
228/* 236/*
229 * This hashes a virtual address for a 256Mb segment only for now 237 * Build a VA given VSID, EA and segment size
230 */ 238 */
239static inline unsigned long hpt_va(unsigned long ea, unsigned long vsid,
240 int ssize)
241{
242 if (ssize == MMU_SEGSIZE_256M)
243 return (vsid << 28) | (ea & 0xfffffffUL);
244 return (vsid << 40) | (ea & 0xffffffffffUL);
245}
231 246
232static inline unsigned long hpt_hash(unsigned long va, unsigned int shift) 247/*
248 * This hashes a virtual address
249 */
250
251static inline unsigned long hpt_hash(unsigned long va, unsigned int shift,
252 int ssize)
233{ 253{
234 return ((va >> 28) & 0x7fffffffffUL) ^ ((va & 0x0fffffffUL) >> shift); 254 unsigned long hash, vsid;
255
256 if (ssize == MMU_SEGSIZE_256M) {
257 hash = (va >> 28) ^ ((va & 0x0fffffffUL) >> shift);
258 } else {
259 vsid = va >> 40;
260 hash = vsid ^ (vsid << 25) ^ ((va & 0xffffffffffUL) >> shift);
261 }
262 return hash & 0x7fffffffffUL;
235} 263}
236 264
237extern int __hash_page_4K(unsigned long ea, unsigned long access, 265extern int __hash_page_4K(unsigned long ea, unsigned long access,
238 unsigned long vsid, pte_t *ptep, unsigned long trap, 266 unsigned long vsid, pte_t *ptep, unsigned long trap,
239 unsigned int local); 267 unsigned int local, int ssize);
240extern int __hash_page_64K(unsigned long ea, unsigned long access, 268extern int __hash_page_64K(unsigned long ea, unsigned long access,
241 unsigned long vsid, pte_t *ptep, unsigned long trap, 269 unsigned long vsid, pte_t *ptep, unsigned long trap,
242 unsigned int local); 270 unsigned int local, int ssize);
243struct mm_struct; 271struct mm_struct;
244extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); 272extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
245extern int hash_huge_page(struct mm_struct *mm, unsigned long access, 273extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
@@ -248,7 +276,7 @@ extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
248 276
249extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend, 277extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
250 unsigned long pstart, unsigned long mode, 278 unsigned long pstart, unsigned long mode,
251 int psize); 279 int psize, int ssize);
252 280
253extern void htab_initialize(void); 281extern void htab_initialize(void);
254extern void htab_initialize_secondary(void); 282extern void htab_initialize_secondary(void);
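
hpt_va() and hpt_hash() above are where the generic hash code sees the 256M/1T split. For a 1T segment the VSID sits at va >> 40 instead of va >> 28, and because the 1T VSID is only 24 bits wide it is additionally folded as vsid ^ (vsid << 25) to spread it across the full 39-bit hash. A usage sketch for a 4K kernel page in a 1T segment (values illustrative; get_kernel_vsid() is defined further down in this header):

	int shift = mmu_psize_defs[MMU_PAGE_4K].shift;		/* 12 */
	unsigned long ea    = 0xc000000001234000UL;
	unsigned long vsid  = get_kernel_vsid(ea, MMU_SEGSIZE_1T);
	unsigned long va    = hpt_va(ea, vsid, MMU_SEGSIZE_1T);
	unsigned long hash  = hpt_hash(va, shift, MMU_SEGSIZE_1T);
	unsigned long group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
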
@@ -317,12 +345,17 @@ extern void slb_vmalloc_update(void);
317 * which are used by the iSeries firmware. 345 * which are used by the iSeries firmware.
318 */ 346 */
319 347
320#define VSID_MULTIPLIER ASM_CONST(200730139) /* 28-bit prime */ 348#define VSID_MULTIPLIER_256M ASM_CONST(200730139) /* 28-bit prime */
321#define VSID_BITS 36 349#define VSID_BITS_256M 36
322#define VSID_MODULUS ((1UL<<VSID_BITS)-1) 350#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1)
323 351
324#define CONTEXT_BITS 19 352#define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */
325#define USER_ESID_BITS 16 353#define VSID_BITS_1T 24
354#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1)
355
356#define CONTEXT_BITS 19
357#define USER_ESID_BITS 16
358#define USER_ESID_BITS_1T 4
326 359
327#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT)) 360#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT))
328 361
@@ -336,17 +369,17 @@ extern void slb_vmalloc_update(void);
336 * rx = scratch register (clobbered) 369 * rx = scratch register (clobbered)
337 * 370 *
338 * - rt and rx must be different registers 371 * - rt and rx must be different registers
339 * - The answer will end up in the low 36 bits of rt. The higher 372 * - The answer will end up in the low VSID_BITS bits of rt. The higher
340 * bits may contain other garbage, so you may need to mask the 373 * bits may contain other garbage, so you may need to mask the
341 * result. 374 * result.
342 */ 375 */
343#define ASM_VSID_SCRAMBLE(rt, rx) \ 376#define ASM_VSID_SCRAMBLE(rt, rx, size) \
344 lis rx,VSID_MULTIPLIER@h; \ 377 lis rx,VSID_MULTIPLIER_##size@h; \
345 ori rx,rx,VSID_MULTIPLIER@l; \ 378 ori rx,rx,VSID_MULTIPLIER_##size@l; \
346 mulld rt,rt,rx; /* rt = rt * MULTIPLIER */ \ 379 mulld rt,rt,rx; /* rt = rt * MULTIPLIER */ \
347 \ 380 \
348 srdi rx,rt,VSID_BITS; \ 381 srdi rx,rt,VSID_BITS_##size; \
349 clrldi rt,rt,(64-VSID_BITS); \ 382 clrldi rt,rt,(64-VSID_BITS_##size); \
350 add rt,rt,rx; /* add high and low bits */ \ 383 add rt,rt,rx; /* add high and low bits */ \
351 /* Now, r3 == VSID (mod 2^36-1), and lies between 0 and \ 384 /* Now, r3 == VSID (mod 2^36-1), and lies between 0 and \
352 * 2^36-1+2^28-1. That in particular means that if r3 >= \ 385 * 2^36-1+2^28-1. That in particular means that if r3 >= \
@@ -355,7 +388,7 @@ extern void slb_vmalloc_update(void);
355 * doesn't, the answer is the low 36 bits of r3+1. So in all \ 388 * doesn't, the answer is the low 36 bits of r3+1. So in all \
356 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\ 389 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
357 addi rx,rt,1; \ 390 addi rx,rt,1; \
358 srdi rx,rx,VSID_BITS; /* extract 2^36 bit */ \ 391 srdi rx,rx,VSID_BITS_##size; /* extract 2^VSID_BITS bit */ \
359 add rt,rt,rx 392 add rt,rt,rx
360 393
361 394
@@ -377,37 +410,60 @@ typedef struct {
377} mm_context_t; 410} mm_context_t;
378 411
379 412
380static inline unsigned long vsid_scramble(unsigned long protovsid)
381{
382#if 0 413#if 0
383 /* The code below is equivalent to this function for arguments 414/*
384 * < 2^VSID_BITS, which is all this should ever be called 415 * The code below is equivalent to this function for arguments
385 * with. However gcc is not clever enough to compute the 416 * < 2^VSID_BITS, which is all this should ever be called
386 * modulus (2^n-1) without a second multiply. */ 417 * with. However gcc is not clever enough to compute the
387 return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS); 418 * modulus (2^n-1) without a second multiply.
388#else /* 1 */ 419 */
389 unsigned long x; 420#define vsid_scramble(protovsid, size) \
421 ((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))
390 422
391 x = protovsid * VSID_MULTIPLIER; 423#else /* 1 */
392 x = (x >> VSID_BITS) + (x & VSID_MODULUS); 424#define vsid_scramble(protovsid, size) \
393 return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS; 425 ({ \
426 unsigned long x; \
427 x = (protovsid) * VSID_MULTIPLIER_##size; \
428 x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
429 (x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
430 })
394#endif /* 1 */ 431#endif /* 1 */
395}
396 432
397/* This is only valid for addresses >= KERNELBASE */ 433/* This is only valid for addresses >= KERNELBASE */
398static inline unsigned long get_kernel_vsid(unsigned long ea) 434static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
399{ 435{
400 return vsid_scramble(ea >> SID_SHIFT); 436 if (ssize == MMU_SEGSIZE_256M)
437 return vsid_scramble(ea >> SID_SHIFT, 256M);
438 return vsid_scramble(ea >> SID_SHIFT_1T, 1T);
401} 439}
402 440
403/* This is only valid for user addresses (which are below 2^41) */ 441/* Returns the segment size indicator for a user address */
404static inline unsigned long get_vsid(unsigned long context, unsigned long ea) 442static inline int user_segment_size(unsigned long addr)
405{ 443{
406 return vsid_scramble((context << USER_ESID_BITS) 444 /* Use 1T segments if possible for addresses >= 1T */
407 | (ea >> SID_SHIFT)); 445 if (addr >= (1UL << SID_SHIFT_1T))
446 return mmu_highuser_ssize;
447 return MMU_SEGSIZE_256M;
408} 448}
409 449
410#define VSID_SCRAMBLE(pvsid) (((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS) 450/* This is only valid for user addresses (which are below 2^44) */
451static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
452 int ssize)
453{
454 if (ssize == MMU_SEGSIZE_256M)
455 return vsid_scramble((context << USER_ESID_BITS)
456 | (ea >> SID_SHIFT), 256M);
457 return vsid_scramble((context << USER_ESID_BITS_1T)
458 | (ea >> SID_SHIFT_1T), 1T);
459}
460
461/*
462 * This is only used on legacy iSeries in lparmap.c,
463 * hence the 256MB segment assumption.
464 */
465#define VSID_SCRAMBLE(pvsid) (((pvsid) * VSID_MULTIPLIER_256M) % \
466 VSID_MODULUS_256M)
411#define KERNEL_VSID(ea) VSID_SCRAMBLE(GET_ESID(ea)) 467#define KERNEL_VSID(ea) VSID_SCRAMBLE(GET_ESID(ea))
412 468
413/* Physical address used by some IO functions */ 469/* Physical address used by some IO functions */
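
vsid_scramble() computes protovsid * VSID_MULTIPLIER mod (2^VSID_BITS - 1) without a divide: writing the product as hi * 2^n + lo and using hi * 2^n == hi (mod 2^n - 1), one fold plus the final `x + ((x+1) >> n)` correction is enough for any protovsid below 2^VSID_BITS. A standalone check of that claim for the new 1T parameters (illustration, not kernel code):

	#include <assert.h>

	#define VSID_MULTIPLIER_1T	12538073UL	/* 24-bit prime */
	#define VSID_BITS_1T		24
	#define VSID_MODULUS_1T		((1UL << VSID_BITS_1T) - 1)

	static unsigned long scramble_1T(unsigned long protovsid)
	{
		unsigned long x = protovsid * VSID_MULTIPLIER_1T;

		x = (x >> VSID_BITS_1T) + (x & VSID_MODULUS_1T); /* fold hi into lo */
		return (x + ((x + 1) >> VSID_BITS_1T)) & VSID_MODULUS_1T;
	}

	int main(void)
	{
		unsigned long p;

		for (p = 0; p < (1UL << VSID_BITS_1T); p++)
			assert(scramble_1T(p) ==
			       (p * VSID_MULTIPLIER_1T) % VSID_MODULUS_1T);
		return 0;
	}
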
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h
index 56a2df0f6836..4ee82c61e4d7 100644
--- a/include/asm-powerpc/page_64.h
+++ b/include/asm-powerpc/page_64.h
@@ -26,12 +26,18 @@
26 */ 26 */
27#define PAGE_FACTOR (PAGE_SHIFT - HW_PAGE_SHIFT) 27#define PAGE_FACTOR (PAGE_SHIFT - HW_PAGE_SHIFT)
28 28
29/* Segment size */ 29/* Segment size; normal 256M segments */
30#define SID_SHIFT 28 30#define SID_SHIFT 28
31#define SID_MASK ASM_CONST(0xfffffffff) 31#define SID_MASK ASM_CONST(0xfffffffff)
32#define ESID_MASK 0xfffffffff0000000UL 32#define ESID_MASK 0xfffffffff0000000UL
33#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK) 33#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK)
34 34
35/* 1T segments */
36#define SID_SHIFT_1T 40
37#define SID_MASK_1T 0xffffffUL
38#define ESID_MASK_1T 0xffffff0000000000UL
39#define GET_ESID_1T(x) (((x) >> SID_SHIFT_1T) & SID_MASK_1T)
40
35#ifndef __ASSEMBLY__ 41#ifndef __ASSEMBLY__
36#include <asm/cache.h> 42#include <asm/cache.h>
37 43
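
The 1T constants are the 256M ones moved up by SID_SHIFT_1T - SID_SHIFT = 12 bits, with the ESID correspondingly 12 bits shorter (SID_MASK_1T is 24 bits against SID_MASK's 36). Worked values (illustrative):

	/* the first segment of the kernel linear mapping: */
	GET_ESID(0xc000000000000000UL)	  /* == 0xc00000000 (256M ESID) */
	GET_ESID_1T(0xc000000000000000UL) /* == 0xc00000    (1T ESID)   */
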
diff --git a/include/asm-powerpc/tlbflush.h b/include/asm-powerpc/tlbflush.h
index 99a0439baa50..a022f806bb21 100644
--- a/include/asm-powerpc/tlbflush.h
+++ b/include/asm-powerpc/tlbflush.h
@@ -97,6 +97,7 @@ struct ppc64_tlb_batch {
97 real_pte_t pte[PPC64_TLB_BATCH_NR]; 97 real_pte_t pte[PPC64_TLB_BATCH_NR];
98 unsigned long vaddr[PPC64_TLB_BATCH_NR]; 98 unsigned long vaddr[PPC64_TLB_BATCH_NR];
99 unsigned int psize; 99 unsigned int psize;
100 int ssize;
100}; 101};
101DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch); 102DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
102 103
@@ -127,7 +128,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
127 128
128 129
129extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize, 130extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
130 int local); 131 int ssize, int local);
131extern void flush_hash_range(unsigned long number, int local); 132extern void flush_hash_range(unsigned long number, int local);
132 133
133 134