Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/40x_mmu.c (renamed from arch/powerpc/mm/4xx_mmu.c) |   4
-rw-r--r--  arch/powerpc/mm/Makefile                                           |  15
-rw-r--r--  arch/powerpc/mm/fsl_booke_mmu.c                                    |   2
-rw-r--r--  arch/powerpc/mm/hash_low_64.S                                      |  73
-rw-r--r--  arch/powerpc/mm/hash_native_64.c                                   |  92
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c                                    | 121
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c                                      |  16
-rw-r--r--  arch/powerpc/mm/init_32.c                                          |  41
-rw-r--r--  arch/powerpc/mm/init_64.c                                          |   2
-rw-r--r--  arch/powerpc/mm/mem.c                                              |   1
-rw-r--r--  arch/powerpc/mm/mmu_context_64.c                                   |  11
-rw-r--r--  arch/powerpc/mm/pgtable_64.c                                       |   6
-rw-r--r--  arch/powerpc/mm/slb.c                                              |  73
-rw-r--r--  arch/powerpc/mm/slb_low.S                                          |  37
-rw-r--r--  arch/powerpc/mm/slice.c                                            |   1
-rw-r--r--  arch/powerpc/mm/stab.c                                             |   6
-rw-r--r--  arch/powerpc/mm/tlb_64.c                                           |  20
17 files changed, 355 insertions, 166 deletions
diff --git a/arch/powerpc/mm/4xx_mmu.c b/arch/powerpc/mm/40x_mmu.c
index 7ff2609b64d1..e067df836be2 100644
--- a/arch/powerpc/mm/4xx_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -108,7 +108,7 @@ unsigned long __init mmu_mapin_ram(void)
 	pmd_t *pmdp;
 	unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
-	pmdp = pmd_offset(pgd_offset_k(v), v);
+	pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
 	pmd_val(*pmdp++) = val;
 	pmd_val(*pmdp++) = val;
 	pmd_val(*pmdp++) = val;
@@ -123,7 +123,7 @@ unsigned long __init mmu_mapin_ram(void)
 	pmd_t *pmdp;
 	unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
-	pmdp = pmd_offset(pgd_offset_k(v), v);
+	pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
 	pmd_val(*pmdp) = val;
 
 	v += LARGE_PAGE_SIZE_4M;
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 7e4d27ad3dee..20629ae95c50 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -6,14 +6,17 @@ ifeq ($(CONFIG_PPC64),y)
 EXTRA_CFLAGS	+= -mno-minimal-toc
 endif
 
-obj-y				:= fault.o mem.o lmb.o
-obj-$(CONFIG_PPC32)		+= init_32.o pgtable_32.o mmu_context_32.o
+obj-y				:= fault.o mem.o lmb.o \
+				   init_$(CONFIG_WORD_SIZE).o \
+				   pgtable_$(CONFIG_WORD_SIZE).o \
+				   mmu_context_$(CONFIG_WORD_SIZE).o
 hash-$(CONFIG_PPC_NATIVE)	:= hash_native_64.o
-obj-$(CONFIG_PPC64)		+= init_64.o pgtable_64.o mmu_context_64.o \
-				   hash_utils_64.o hash_low_64.o tlb_64.o \
+obj-$(CONFIG_PPC64)		+= hash_utils_64.o \
 				   slb_low.o slb.o stab.o mmap.o $(hash-y)
-obj-$(CONFIG_PPC_STD_MMU_32)	+= ppc_mmu_32.o hash_low_32.o tlb_32.o
-obj-$(CONFIG_40x)		+= 4xx_mmu.o
+obj-$(CONFIG_PPC_STD_MMU_32)	+= ppc_mmu_32.o
+obj-$(CONFIG_PPC_STD_MMU)	+= hash_low_$(CONFIG_WORD_SIZE).o \
+				   tlb_$(CONFIG_WORD_SIZE).o
+obj-$(CONFIG_40x)		+= 40x_mmu.o
 obj-$(CONFIG_44x)		+= 44x_mmu.o
 obj-$(CONFIG_FSL_BOOKE)	+= fsl_booke_mmu.o
 obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index afab247d472f..17139daeaff4 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -59,6 +59,7 @@ unsigned int num_tlbcam_entries;
 static unsigned long __cam0, __cam1, __cam2;
 extern unsigned long total_lowmem;
 extern unsigned long __max_low_memory;
+extern unsigned long __initial_memory_limit;
 #define MAX_LOW_MEM	CONFIG_LOWMEM_SIZE
 
 #define NUM_TLBCAMS	(16)
@@ -232,4 +233,5 @@ adjust_total_lowmem(void)
 			__cam0 >> 20, __cam1 >> 20, __cam2 >> 20,
 			(total_lowmem - __cam0 - __cam1 - __cam2) >> 20);
 	__max_low_memory = max_low_mem = __cam0 + __cam1 + __cam2;
+	__initial_memory_limit = __max_low_memory;
 }
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 35eabfb50723..ad253b959030 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -54,7 +54,7 @@
 
 /*
  * _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
- *		 pte_t *ptep, unsigned long trap, int local)
+ *		 pte_t *ptep, unsigned long trap, int local, int ssize)
  *
  * Adds a 4K page to the hash table in a segment of 4K pages only
  */
@@ -66,6 +66,7 @@ _GLOBAL(__hash_page_4K)
 	/* Save all params that we need after a function call */
 	std	r6,STK_PARM(r6)(r1)
 	std	r8,STK_PARM(r8)(r1)
+	std	r9,STK_PARM(r9)(r1)
 
 	/* Add _PAGE_PRESENT to access */
 	ori	r4,r4,_PAGE_PRESENT
@@ -117,6 +118,10 @@ _GLOBAL(__hash_page_4K)
 	 * r4 (access) is re-useable, we use it for the new HPTE flags
 	 */
 
+BEGIN_FTR_SECTION
+	cmpdi	r9,0			/* check segment size */
+	bne	3f
+END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
 	/* Calc va and put it in r29 */
 	rldicr	r29,r5,28,63-28
 	rldicl	r3,r3,0,36
@@ -126,9 +131,20 @@ _GLOBAL(__hash_page_4K)
 	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
 	rldicl	r0,r3,64-12,48		/* (ea >> 12) & 0xffff */
 	xor	r28,r5,r0
+	b	4f
+
+3:	/* Calc VA and hash in r29 and r28 for 1T segment */
+	sldi	r29,r5,40		/* vsid << 40 */
+	clrldi	r3,r3,24		/* ea & 0xffffffffff */
+	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */
+	clrldi	r5,r5,40		/* vsid & 0xffffff */
+	rldicl	r0,r3,64-12,36		/* (ea >> 12) & 0xfffffff */
+	xor	r28,r28,r5
+	or	r29,r3,r29		/* VA */
+	xor	r28,r28,r0		/* hash */
 
 	/* Convert linux PTE bits into HW equivalents */
-	andi.	r3,r30,0x1fe		/* Get basic set of flags */
+4:	andi.	r3,r30,0x1fe		/* Get basic set of flags */
 	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
 	rlwinm	r0,r30,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
 	rlwinm	r4,r30,32-7+1,30,30	/* _PAGE_DIRTY -> _PAGE_USER (r4) */
@@ -183,6 +199,7 @@ htab_insert_pte:
 	mr	r4,r29			/* Retreive va */
 	li	r7,0			/* !bolted, !secondary */
 	li	r8,MMU_PAGE_4K		/* page size */
+	ld	r9,STK_PARM(r9)(r1)	/* segment size */
 _GLOBAL(htab_call_hpte_insert1)
 	bl	.			/* Patched by htab_finish_init() */
 	cmpdi	0,r3,0
@@ -205,6 +222,7 @@ _GLOBAL(htab_call_hpte_insert1)
 	mr	r4,r29			/* Retreive va */
 	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
 	li	r8,MMU_PAGE_4K		/* page size */
+	ld	r9,STK_PARM(r9)(r1)	/* segment size */
 _GLOBAL(htab_call_hpte_insert2)
 	bl	.			/* Patched by htab_finish_init() */
 	cmpdi	0,r3,0
@@ -273,7 +291,8 @@ htab_modify_pte:
 	/* Call ppc_md.hpte_updatepp */
 	mr	r5,r29			/* va */
 	li	r6,MMU_PAGE_4K		/* page size */
-	ld	r7,STK_PARM(r8)(r1)	/* get "local" param */
+	ld	r7,STK_PARM(r9)(r1)	/* segment size */
+	ld	r8,STK_PARM(r8)(r1)	/* get "local" param */
 _GLOBAL(htab_call_hpte_updatepp)
 	bl	.			/* Patched by htab_finish_init() */
 
@@ -325,6 +344,7 @@ _GLOBAL(__hash_page_4K)
 	/* Save all params that we need after a function call */
 	std	r6,STK_PARM(r6)(r1)
 	std	r8,STK_PARM(r8)(r1)
+	std	r9,STK_PARM(r9)(r1)
 
 	/* Add _PAGE_PRESENT to access */
 	ori	r4,r4,_PAGE_PRESENT
@@ -383,18 +403,33 @@ _GLOBAL(__hash_page_4K)
 	/* Load the hidx index */
 	rldicl	r25,r3,64-12,60
 
+BEGIN_FTR_SECTION
+	cmpdi	r9,0			/* check segment size */
+	bne	3f
+END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
 	/* Calc va and put it in r29 */
 	rldicr	r29,r5,28,63-28		/* r29 = (vsid << 28) */
 	rldicl	r3,r3,0,36		/* r3 = (ea & 0x0fffffff) */
-	or	r29,r3,r29		/* r29 = va
+	or	r29,r3,r29		/* r29 = va */
 
 	/* Calculate hash value for primary slot and store it in r28 */
 	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
 	rldicl	r0,r3,64-12,48		/* (ea >> 12) & 0xffff */
 	xor	r28,r5,r0
+	b	4f
+
+3:	/* Calc VA and hash in r29 and r28 for 1T segment */
+	sldi	r29,r5,40		/* vsid << 40 */
+	clrldi	r3,r3,24		/* ea & 0xffffffffff */
+	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */
+	clrldi	r5,r5,40		/* vsid & 0xffffff */
+	rldicl	r0,r3,64-12,36		/* (ea >> 12) & 0xfffffff */
+	xor	r28,r28,r5
+	or	r29,r3,r29		/* VA */
+	xor	r28,r28,r0		/* hash */
 
 	/* Convert linux PTE bits into HW equivalents */
-	andi.	r3,r30,0x1fe		/* Get basic set of flags */
+4:	andi.	r3,r30,0x1fe		/* Get basic set of flags */
 	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
 	rlwinm	r0,r30,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
 	rlwinm	r4,r30,32-7+1,30,30	/* _PAGE_DIRTY -> _PAGE_USER (r4) */
@@ -462,6 +497,7 @@ htab_special_pfn:
 	mr	r4,r29			/* Retreive va */
 	li	r7,0			/* !bolted, !secondary */
 	li	r8,MMU_PAGE_4K		/* page size */
+	ld	r9,STK_PARM(r9)(r1)	/* segment size */
 _GLOBAL(htab_call_hpte_insert1)
 	bl	.			/* patched by htab_finish_init() */
 	cmpdi	0,r3,0
@@ -488,6 +524,7 @@ _GLOBAL(htab_call_hpte_insert1)
 	mr	r4,r29			/* Retreive va */
 	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
 	li	r8,MMU_PAGE_4K		/* page size */
+	ld	r9,STK_PARM(r9)(r1)	/* segment size */
 _GLOBAL(htab_call_hpte_insert2)
 	bl	.			/* patched by htab_finish_init() */
 	cmpdi	0,r3,0
@@ -586,7 +623,8 @@ htab_modify_pte:
 	/* Call ppc_md.hpte_updatepp */
 	mr	r5,r29			/* va */
 	li	r6,MMU_PAGE_4K		/* page size */
-	ld	r7,STK_PARM(r8)(r1)	/* get "local" param */
+	ld	r7,STK_PARM(r9)(r1)	/* segment size */
+	ld	r8,STK_PARM(r8)(r1)	/* get "local" param */
 _GLOBAL(htab_call_hpte_updatepp)
 	bl	.			/* patched by htab_finish_init() */
 
@@ -634,6 +672,7 @@ _GLOBAL(__hash_page_64K)
 	/* Save all params that we need after a function call */
 	std	r6,STK_PARM(r6)(r1)
 	std	r8,STK_PARM(r8)(r1)
+	std	r9,STK_PARM(r9)(r1)
 
 	/* Add _PAGE_PRESENT to access */
 	ori	r4,r4,_PAGE_PRESENT
@@ -690,6 +729,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
 	 * r4 (access) is re-useable, we use it for the new HPTE flags
 	 */
 
+BEGIN_FTR_SECTION
+	cmpdi	r9,0			/* check segment size */
+	bne	3f
+END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
 	/* Calc va and put it in r29 */
 	rldicr	r29,r5,28,63-28
 	rldicl	r3,r3,0,36
@@ -699,9 +742,20 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
 	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
 	rldicl	r0,r3,64-16,52		/* (ea >> 16) & 0xfff */
 	xor	r28,r5,r0
+	b	4f
+
+3:	/* Calc VA and hash in r29 and r28 for 1T segment */
+	sldi	r29,r5,40		/* vsid << 40 */
+	clrldi	r3,r3,24		/* ea & 0xffffffffff */
+	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */
+	clrldi	r5,r5,40		/* vsid & 0xffffff */
+	rldicl	r0,r3,64-16,40		/* (ea >> 16) & 0xffffff */
+	xor	r28,r28,r5
+	or	r29,r3,r29		/* VA */
+	xor	r28,r28,r0		/* hash */
 
 	/* Convert linux PTE bits into HW equivalents */
-	andi.	r3,r30,0x1fe		/* Get basic set of flags */
+4:	andi.	r3,r30,0x1fe		/* Get basic set of flags */
 	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
 	rlwinm	r0,r30,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
 	rlwinm	r4,r30,32-7+1,30,30	/* _PAGE_DIRTY -> _PAGE_USER (r4) */
@@ -756,6 +810,7 @@ ht64_insert_pte:
 	mr	r4,r29			/* Retreive va */
 	li	r7,0			/* !bolted, !secondary */
 	li	r8,MMU_PAGE_64K
+	ld	r9,STK_PARM(r9)(r1)	/* segment size */
 _GLOBAL(ht64_call_hpte_insert1)
 	bl	.			/* patched by htab_finish_init() */
 	cmpdi	0,r3,0
@@ -778,6 +833,7 @@ _GLOBAL(ht64_call_hpte_insert1)
 	mr	r4,r29			/* Retreive va */
 	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
 	li	r8,MMU_PAGE_64K
+	ld	r9,STK_PARM(r9)(r1)	/* segment size */
 _GLOBAL(ht64_call_hpte_insert2)
 	bl	.			/* patched by htab_finish_init() */
 	cmpdi	0,r3,0
@@ -846,7 +902,8 @@ ht64_modify_pte:
 	/* Call ppc_md.hpte_updatepp */
 	mr	r5,r29			/* va */
 	li	r6,MMU_PAGE_64K
-	ld	r7,STK_PARM(r8)(r1)	/* get "local" param */
+	ld	r7,STK_PARM(r9)(r1)	/* segment size */
+	ld	r8,STK_PARM(r8)(r1)	/* get "local" param */
 _GLOBAL(ht64_call_hpte_updatepp)
 	bl	.			/* patched by htab_finish_init() */
 
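For reference (not part of the patch): the 256M- versus 1T-segment VA and hash setup encoded in the assembly above corresponds to roughly the following C, taking the same vsid/ea inputs as __hash_page_*. The standalone function and its name are illustrative only.

static unsigned long hash_for_ssize(unsigned long vsid, unsigned long ea,
				    unsigned int shift, int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		/* 256M segment: fold VSID with the page index in the segment */
		return (vsid & 0x0000007fffffffffUL) ^
			((ea & 0x0fffffffUL) >> shift);
	/* 1T segment: VSID folded in twice, 40-bit offset within the segment */
	return ((vsid << 25) & 0x0000007fffffffffUL) ^
		(vsid & 0xffffffUL) ^
		((ea & 0xffffffffffUL) >> shift);
}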
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 6ba9b47e55af..34e5c0b219b9 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -38,7 +38,7 @@
 
 static DEFINE_SPINLOCK(native_tlbie_lock);
 
-static inline void __tlbie(unsigned long va, unsigned int psize)
+static inline void __tlbie(unsigned long va, int psize, int ssize)
 {
 	unsigned int penc;
 
@@ -48,18 +48,20 @@ static inline void __tlbie(unsigned long va, unsigned int psize)
 	switch (psize) {
 	case MMU_PAGE_4K:
 		va &= ~0xffful;
+		va |= ssize << 8;
 		asm volatile("tlbie %0,0" : : "r" (va) : "memory");
 		break;
 	default:
 		penc = mmu_psize_defs[psize].penc;
 		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
 		va |= penc << 12;
+		va |= ssize << 8;
 		asm volatile("tlbie %0,1" : : "r" (va) : "memory");
 		break;
 	}
 }
 
-static inline void __tlbiel(unsigned long va, unsigned int psize)
+static inline void __tlbiel(unsigned long va, int psize, int ssize)
 {
 	unsigned int penc;
 
@@ -69,6 +71,7 @@ static inline void __tlbiel(unsigned long va, unsigned int psize)
 	switch (psize) {
 	case MMU_PAGE_4K:
 		va &= ~0xffful;
+		va |= ssize << 8;
 		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
 			     : : "r"(va) : "memory");
 		break;
@@ -76,6 +79,7 @@ static inline void __tlbiel(unsigned long va, unsigned int psize)
 		penc = mmu_psize_defs[psize].penc;
 		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
 		va |= penc << 12;
+		va |= ssize << 8;
 		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
 			     : : "r"(va) : "memory");
 		break;
@@ -83,7 +87,7 @@ static inline void __tlbiel(unsigned long va, unsigned int psize)
 
 }
 
-static inline void tlbie(unsigned long va, int psize, int local)
+static inline void tlbie(unsigned long va, int psize, int ssize, int local)
 {
 	unsigned int use_local = local && cpu_has_feature(CPU_FTR_TLBIEL);
 	int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
@@ -94,10 +98,10 @@ static inline void tlbie(unsigned long va, int psize, int local)
 		spin_lock(&native_tlbie_lock);
 	asm volatile("ptesync": : :"memory");
 	if (use_local) {
-		__tlbiel(va, psize);
+		__tlbiel(va, psize, ssize);
 		asm volatile("ptesync": : :"memory");
 	} else {
-		__tlbie(va, psize);
+		__tlbie(va, psize, ssize);
 		asm volatile("eieio; tlbsync; ptesync": : :"memory");
 	}
 	if (lock_tlbie && !use_local)
@@ -126,7 +130,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
 
 static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 			unsigned long pa, unsigned long rflags,
-			unsigned long vflags, int psize)
+			unsigned long vflags, int psize, int ssize)
 {
 	struct hash_pte *hptep = htab_address + hpte_group;
 	unsigned long hpte_v, hpte_r;
@@ -153,7 +157,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 	if (i == HPTES_PER_GROUP)
 		return -1;
 
-	hpte_v = hpte_encode_v(va, psize) | vflags | HPTE_V_VALID;
+	hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
 	hpte_r = hpte_encode_r(pa, psize) | rflags;
 
 	if (!(vflags & HPTE_V_BOLTED)) {
@@ -215,13 +219,14 @@ static long native_hpte_remove(unsigned long hpte_group)
 }
 
 static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
-				 unsigned long va, int psize, int local)
+				 unsigned long va, int psize, int ssize,
+				 int local)
 {
 	struct hash_pte *hptep = htab_address + slot;
 	unsigned long hpte_v, want_v;
 	int ret = 0;
 
-	want_v = hpte_encode_v(va, psize);
+	want_v = hpte_encode_v(va, psize, ssize);
 
 	DBG_LOW("	update(va=%016lx, avpnv=%016lx, hash=%016lx, newpp=%x)",
 		va, want_v & HPTE_V_AVPN, slot, newpp);
@@ -243,39 +248,32 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	native_unlock_hpte(hptep);
 
 	/* Ensure it is out of the tlb too. */
-	tlbie(va, psize, local);
+	tlbie(va, psize, ssize, local);
 
 	return ret;
 }
 
-static long native_hpte_find(unsigned long va, int psize)
+static long native_hpte_find(unsigned long va, int psize, int ssize)
 {
 	struct hash_pte *hptep;
 	unsigned long hash;
-	unsigned long i, j;
+	unsigned long i;
 	long slot;
 	unsigned long want_v, hpte_v;
 
-	hash = hpt_hash(va, mmu_psize_defs[psize].shift);
-	want_v = hpte_encode_v(va, psize);
+	hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize);
+	want_v = hpte_encode_v(va, psize, ssize);
 
-	for (j = 0; j < 2; j++) {
-		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
-		for (i = 0; i < HPTES_PER_GROUP; i++) {
-			hptep = htab_address + slot;
-			hpte_v = hptep->v;
+	/* Bolted mappings are only ever in the primary group */
+	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+	for (i = 0; i < HPTES_PER_GROUP; i++) {
+		hptep = htab_address + slot;
+		hpte_v = hptep->v;
 
-			if (HPTE_V_COMPARE(hpte_v, want_v)
-			    && (hpte_v & HPTE_V_VALID)
-			    && ( !!(hpte_v & HPTE_V_SECONDARY) == j)) {
-				/* HPTE matches */
-				if (j)
-					slot = -slot;
-				return slot;
-			}
-			++slot;
-		}
-		hash = ~hash;
+		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
+			/* HPTE matches */
+			return slot;
+		++slot;
 	}
 
 	return -1;
@@ -289,16 +287,16 @@ static long native_hpte_find(unsigned long va, int psize)
  * No need to lock here because we should be the only user.
  */
 static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
-				       int psize)
+				       int psize, int ssize)
 {
 	unsigned long vsid, va;
 	long slot;
 	struct hash_pte *hptep;
 
-	vsid = get_kernel_vsid(ea);
-	va = (vsid << 28) | (ea & 0x0fffffff);
+	vsid = get_kernel_vsid(ea, ssize);
+	va = hpt_va(ea, vsid, ssize);
 
-	slot = native_hpte_find(va, psize);
+	slot = native_hpte_find(va, psize, ssize);
 	if (slot == -1)
 		panic("could not find page to bolt\n");
 	hptep = htab_address + slot;
@@ -308,11 +306,11 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 		(newpp & (HPTE_R_PP | HPTE_R_N));
 
 	/* Ensure it is out of the tlb too. */
-	tlbie(va, psize, 0);
+	tlbie(va, psize, ssize, 0);
 }
 
 static void native_hpte_invalidate(unsigned long slot, unsigned long va,
-				   int psize, int local)
+				   int psize, int ssize, int local)
 {
 	struct hash_pte *hptep = htab_address + slot;
 	unsigned long hpte_v;
@@ -323,7 +321,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 
 	DBG_LOW("	invalidate(va=%016lx, hash: %x)\n", va, slot);
 
-	want_v = hpte_encode_v(va, psize);
+	want_v = hpte_encode_v(va, psize, ssize);
 	native_lock_hpte(hptep);
 	hpte_v = hptep->v;
 
@@ -335,7 +333,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 		hptep->v = 0;
 
 	/* Invalidate the TLB */
-	tlbie(va, psize, local);
+	tlbie(va, psize, ssize, local);
 
 	local_irq_restore(flags);
 }
@@ -345,7 +343,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 #define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
 
 static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
-			int *psize, unsigned long *va)
+			int *psize, int *ssize, unsigned long *va)
 {
 	unsigned long hpte_r = hpte->r;
 	unsigned long hpte_v = hpte->v;
@@ -401,6 +399,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
 
 	*va = avpn;
 	*psize = size;
+	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
 }
 
 /*
@@ -417,7 +416,7 @@ static void native_hpte_clear(void)
 	struct hash_pte *hptep = htab_address;
 	unsigned long hpte_v, va;
 	unsigned long pteg_count;
-	int psize;
+	int psize, ssize;
 
 	pteg_count = htab_hash_mask + 1;
 
@@ -443,9 +442,9 @@ static void native_hpte_clear(void)
 		 * already hold the native_tlbie_lock.
 		 */
 		if (hpte_v & HPTE_V_VALID) {
-			hpte_decode(hptep, slot, &psize, &va);
+			hpte_decode(hptep, slot, &psize, &ssize, &va);
 			hptep->v = 0;
-			__tlbie(va, psize);
+			__tlbie(va, psize, ssize);
 		}
 	}
 
@@ -468,6 +467,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 	real_pte_t pte;
 	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 	unsigned long psize = batch->psize;
+	int ssize = batch->ssize;
 	int i;
 
 	local_irq_save(flags);
@@ -477,14 +477,14 @@ static void native_flush_hash_range(unsigned long number, int local)
 		pte = batch->pte[i];
 
 		pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
-			hash = hpt_hash(va, shift);
+			hash = hpt_hash(va, shift, ssize);
 			hidx = __rpte_to_hidx(pte, index);
 			if (hidx & _PTEIDX_SECONDARY)
 				hash = ~hash;
 			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 			slot += hidx & _PTEIDX_GROUP_IX;
 			hptep = htab_address + slot;
-			want_v = hpte_encode_v(va, psize);
+			want_v = hpte_encode_v(va, psize, ssize);
 			native_lock_hpte(hptep);
 			hpte_v = hptep->v;
 			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
@@ -504,7 +504,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 
 			pte_iterate_hashed_subpages(pte, psize, va, index,
 						    shift) {
-				__tlbiel(va, psize);
+				__tlbiel(va, psize, ssize);
 			} pte_iterate_hashed_end();
 		}
 		asm volatile("ptesync":::"memory");
@@ -521,7 +521,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 
 			pte_iterate_hashed_subpages(pte, psize, va, index,
 						    shift) {
-				__tlbie(va, psize);
+				__tlbie(va, psize, ssize);
 			} pte_iterate_hashed_end();
 		}
 		asm volatile("eieio; tlbsync; ptesync":::"memory");
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index a47151e806ca..611ad084b7e7 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -49,7 +49,6 @@
 #include <asm/tlb.h>
 #include <asm/cacheflush.h>
 #include <asm/cputable.h>
-#include <asm/abs_addr.h>
 #include <asm/sections.h>
 #include <asm/spu.h>
 
@@ -94,6 +93,8 @@ int mmu_linear_psize = MMU_PAGE_4K;
 int mmu_virtual_psize = MMU_PAGE_4K;
 int mmu_vmalloc_psize = MMU_PAGE_4K;
 int mmu_io_psize = MMU_PAGE_4K;
+int mmu_kernel_ssize = MMU_SEGSIZE_256M;
+int mmu_highuser_ssize = MMU_SEGSIZE_256M;
 #ifdef CONFIG_HUGETLB_PAGE
 int mmu_huge_psize = MMU_PAGE_16M;
 unsigned int HPAGE_SHIFT;
@@ -146,7 +147,8 @@ struct mmu_psize_def mmu_psize_defaults_gp[] = {
 
 
 int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
-		      unsigned long pstart, unsigned long mode, int psize)
+		      unsigned long pstart, unsigned long mode,
+		      int psize, int ssize)
 {
 	unsigned long vaddr, paddr;
 	unsigned int step, shift;
@@ -159,8 +161,8 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 	for (vaddr = vstart, paddr = pstart; vaddr < vend;
 	     vaddr += step, paddr += step) {
 		unsigned long hash, hpteg;
-		unsigned long vsid = get_kernel_vsid(vaddr);
-		unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
+		unsigned long vsid = get_kernel_vsid(vaddr, ssize);
+		unsigned long va = hpt_va(vaddr, vsid, ssize);
 
 		tmp_mode = mode;
 
@@ -168,14 +170,14 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 		if (!in_kernel_text(vaddr))
 			tmp_mode = mode | HPTE_R_N;
 
-		hash = hpt_hash(va, shift);
+		hash = hpt_hash(va, shift, ssize);
 		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
 		DBG("htab_bolt_mapping: calling %p\n", ppc_md.hpte_insert);
 
 		BUG_ON(!ppc_md.hpte_insert);
 		ret = ppc_md.hpte_insert(hpteg, va, paddr,
-				tmp_mode, HPTE_V_BOLTED, psize);
+				tmp_mode, HPTE_V_BOLTED, psize, ssize);
 
 		if (ret < 0)
 			break;
@@ -187,6 +189,37 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 	return ret < 0 ? ret : 0;
 }
 
+static int __init htab_dt_scan_seg_sizes(unsigned long node,
+					 const char *uname, int depth,
+					 void *data)
+{
+	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+	u32 *prop;
+	unsigned long size = 0;
+
+	/* We are scanning "cpu" nodes only */
+	if (type == NULL || strcmp(type, "cpu") != 0)
+		return 0;
+
+	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes",
+					  &size);
+	if (prop == NULL)
+		return 0;
+	for (; size >= 4; size -= 4, ++prop) {
+		if (prop[0] == 40) {
+			DBG("1T segment support detected\n");
+			cur_cpu_spec->cpu_features |= CPU_FTR_1T_SEGMENT;
+		}
+		return 1;
+	}
+	return 0;
+}
+
+static void __init htab_init_seg_sizes(void)
+{
+	of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
+}
+
 static int __init htab_dt_scan_page_sizes(unsigned long node,
 					  const char *uname, int depth,
 					  void *data)
@@ -266,7 +299,6 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
 	return 0;
 }
 
-
 static void __init htab_init_page_sizes(void)
 {
 	int rc;
@@ -399,7 +431,7 @@ void create_section_mapping(unsigned long start, unsigned long end)
 {
 	BUG_ON(htab_bolt_mapping(start, end, __pa(start),
 			_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
-			mmu_linear_psize));
+			mmu_linear_psize, mmu_kernel_ssize));
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
@@ -450,9 +482,18 @@ void __init htab_initialize(void)
 
 	DBG(" -> htab_initialize()\n");
 
+	/* Initialize segment sizes */
+	htab_init_seg_sizes();
+
 	/* Initialize page sizes */
 	htab_init_page_sizes();
 
+	if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
+		mmu_kernel_ssize = MMU_SEGSIZE_1T;
+		mmu_highuser_ssize = MMU_SEGSIZE_1T;
+		printk(KERN_INFO "Using 1TB segments\n");
+	}
+
 	/*
 	 * Calculate the required size of the htab.  We want the number of
 	 * PTEGs to equal one half the number of real pages.
@@ -524,18 +565,20 @@ void __init htab_initialize(void)
 			if (base != dart_tablebase)
 				BUG_ON(htab_bolt_mapping(base, dart_tablebase,
 							__pa(base), mode_rw,
-							mmu_linear_psize));
+							mmu_linear_psize,
+							mmu_kernel_ssize));
 			if ((base + size) > dart_table_end)
 				BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
 							base + size,
 							__pa(dart_table_end),
 							mode_rw,
-							mmu_linear_psize));
+							mmu_linear_psize,
+							mmu_kernel_ssize));
 			continue;
 		}
 #endif /* CONFIG_U3_DART */
 		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
-				mode_rw, mmu_linear_psize));
+				mode_rw, mmu_linear_psize, mmu_kernel_ssize));
 	}
 
 	/*
@@ -554,7 +597,7 @@ void __init htab_initialize(void)
 
 		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
 					 __pa(tce_alloc_start), mode_rw,
-					 mmu_linear_psize));
+					 mmu_linear_psize, mmu_kernel_ssize));
 	}
 
 	htab_finish_init();
@@ -602,13 +645,7 @@ static void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
 {
 	if (mm->context.user_psize == MMU_PAGE_4K)
 		return;
-#ifdef CONFIG_PPC_MM_SLICES
 	slice_set_user_psize(mm, MMU_PAGE_4K);
-#else /* CONFIG_PPC_MM_SLICES */
-	mm->context.user_psize = MMU_PAGE_4K;
-	mm->context.sllp = SLB_VSID_USER | mmu_psize_defs[MMU_PAGE_4K].sllp;
-#endif /* CONFIG_PPC_MM_SLICES */
-
 #ifdef CONFIG_SPU_BASE
 	spu_flush_all_slbs(mm);
 #endif
@@ -628,7 +665,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	pte_t *ptep;
 	cpumask_t tmp;
 	int rc, user_region = 0, local = 0;
-	int psize;
+	int psize, ssize;
 
 	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
 		ea, access, trap);
@@ -647,20 +684,22 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 			DBG_LOW(" user region with no mm !\n");
 			return 1;
 		}
-		vsid = get_vsid(mm->context.id, ea);
 #ifdef CONFIG_PPC_MM_SLICES
 		psize = get_slice_psize(mm, ea);
 #else
 		psize = mm->context.user_psize;
 #endif
+		ssize = user_segment_size(ea);
+		vsid = get_vsid(mm->context.id, ea, ssize);
 		break;
 	case VMALLOC_REGION_ID:
 		mm = &init_mm;
-		vsid = get_kernel_vsid(ea);
+		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
 		if (ea < VMALLOC_END)
 			psize = mmu_vmalloc_psize;
 		else
 			psize = mmu_io_psize;
+		ssize = mmu_kernel_ssize;
 		break;
 	default:
 		/* Not a valid range
@@ -765,10 +804,10 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 
 #ifdef CONFIG_PPC_HAS_HASH_64K
 	if (psize == MMU_PAGE_64K)
-		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
+		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
 	else
 #endif /* CONFIG_PPC_HAS_HASH_64K */
-		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
+		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize);
 
 #ifndef CONFIG_PPC_64K_PAGES
 	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
@@ -790,6 +829,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	cpumask_t mask;
 	unsigned long flags;
 	int local = 0;
+	int ssize;
 
 	BUG_ON(REGION_ID(ea) != USER_REGION_ID);
 
@@ -822,7 +862,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 #endif /* CONFIG_PPC_64K_PAGES */
 
 	/* Get VSID */
-	vsid = get_vsid(mm->context.id, ea);
+	ssize = user_segment_size(ea);
+	vsid = get_vsid(mm->context.id, ea, ssize);
 
 	/* Hash doesn't like irqs */
 	local_irq_save(flags);
@@ -835,28 +876,29 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	/* Hash it in */
 #ifdef CONFIG_PPC_HAS_HASH_64K
 	if (mm->context.user_psize == MMU_PAGE_64K)
-		__hash_page_64K(ea, access, vsid, ptep, trap, local);
+		__hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
 	else
 #endif /* CONFIG_PPC_HAS_HASH_64K */
-		__hash_page_4K(ea, access, vsid, ptep, trap, local);
+		__hash_page_4K(ea, access, vsid, ptep, trap, local, ssize);
 
 	local_irq_restore(flags);
 }
 
-void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int local)
+void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
+		     int local)
 {
 	unsigned long hash, index, shift, hidx, slot;
 
 	DBG_LOW("flush_hash_page(va=%016x)\n", va);
 	pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
-		hash = hpt_hash(va, shift);
+		hash = hpt_hash(va, shift, ssize);
 		hidx = __rpte_to_hidx(pte, index);
 		if (hidx & _PTEIDX_SECONDARY)
 			hash = ~hash;
 		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 		slot += hidx & _PTEIDX_GROUP_IX;
 		DBG_LOW(" sub %d: hash=%x, hidx=%x\n", index, slot, hidx);
-		ppc_md.hpte_invalidate(slot, va, psize, local);
+		ppc_md.hpte_invalidate(slot, va, psize, ssize, local);
 	} pte_iterate_hashed_end();
 }
 
@@ -871,7 +913,7 @@ void flush_hash_range(unsigned long number, int local)
 
 		for (i = 0; i < number; i++)
 			flush_hash_page(batch->vaddr[i], batch->pte[i],
-					batch->psize, local);
+					batch->psize, batch->ssize, local);
 	}
 }
 
@@ -897,17 +939,19 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address)
 #ifdef CONFIG_DEBUG_PAGEALLOC
 static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 {
-	unsigned long hash, hpteg, vsid = get_kernel_vsid(vaddr);
-	unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
+	unsigned long hash, hpteg;
+	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
+	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
 	unsigned long mode = _PAGE_ACCESSED | _PAGE_DIRTY |
 		_PAGE_COHERENT | PP_RWXX | HPTE_R_N;
 	int ret;
 
-	hash = hpt_hash(va, PAGE_SHIFT);
+	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
 	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
 	ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
-				 mode, HPTE_V_BOLTED, mmu_linear_psize);
+				 mode, HPTE_V_BOLTED,
+				 mmu_linear_psize, mmu_kernel_ssize);
 	BUG_ON (ret < 0);
 	spin_lock(&linear_map_hash_lock);
 	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
@@ -917,10 +961,11 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 
 static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 {
-	unsigned long hash, hidx, slot, vsid = get_kernel_vsid(vaddr);
-	unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
+	unsigned long hash, hidx, slot;
+	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
+	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
 
-	hash = hpt_hash(va, PAGE_SHIFT);
+	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
 	spin_lock(&linear_map_hash_lock);
 	BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
 	hidx = linear_map_hash_slots[lmi] & 0x7f;
@@ -930,7 +975,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 		hash = ~hash;
 	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 	slot += hidx & _PTEIDX_GROUP_IX;
-	ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, 0);
+	ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0);
 }
 
 void kernel_map_pages(struct page *page, int numpages, int enable)
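For reference (not part of the patch): user_segment_size(), called in hash_page() and hash_preload() above, lives in the hash MMU headers rather than in this file. Based on how mmu_highuser_ssize is set during htab_initialize(), a plausible sketch is the following, where the 1UL << 40 cutoff (the 1T segment boundary) is an assumption.

static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments only for the high part of the address space */
	if (addr >= (1UL << 40))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}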
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 4835f73af304..08f0d9ff7712 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -22,11 +22,8 @@
 #include <asm/mmu_context.h>
 #include <asm/machdep.h>
 #include <asm/cputable.h>
-#include <asm/tlb.h>
 #include <asm/spu.h>
 
-#include <linux/sysctl.h>
-
 #define NUM_LOW_AREAS	(0x100000000UL >> SID_SHIFT)
 #define NUM_HIGH_AREAS	(PGTABLE_RANGE >> HTLB_AREA_SHIFT)
 
@@ -406,11 +403,12 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 	unsigned long va, rflags, pa;
 	long slot;
 	int err = 1;
+	int ssize = user_segment_size(ea);
 
 	ptep = huge_pte_offset(mm, ea);
 
 	/* Search the Linux page table for a match with va */
-	va = (vsid << 28) | (ea & 0x0fffffff);
+	va = hpt_va(ea, vsid, ssize);
 
 	/*
 	 * If no pte found or not present, send the problem up to
@@ -461,19 +459,19 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 		/* There MIGHT be an HPTE for this pte */
 		unsigned long hash, slot;
 
-		hash = hpt_hash(va, HPAGE_SHIFT);
+		hash = hpt_hash(va, HPAGE_SHIFT, ssize);
 		if (old_pte & _PAGE_F_SECOND)
 			hash = ~hash;
 		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 		slot += (old_pte & _PAGE_F_GIX) >> 12;
 
 		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
-					 local) == -1)
+					 ssize, local) == -1)
 			old_pte &= ~_PAGE_HPTEFLAGS;
 	}
 
 	if (likely(!(old_pte & _PAGE_HASHPTE))) {
-		unsigned long hash = hpt_hash(va, HPAGE_SHIFT);
+		unsigned long hash = hpt_hash(va, HPAGE_SHIFT, ssize);
 		unsigned long hpte_group;
 
 		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
@@ -492,7 +490,7 @@ repeat:
 
 		/* Insert into the hash table, primary slot */
 		slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
-					  mmu_huge_psize);
+					  mmu_huge_psize, ssize);
 
 		/* Primary is full, try the secondary */
 		if (unlikely(slot == -1)) {
@@ -500,7 +498,7 @@ repeat:
 				      HPTES_PER_GROUP) & ~0x7UL;
 			slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
 						  HPTE_V_SECONDARY,
-						  mmu_huge_psize);
+						  mmu_huge_psize, ssize);
 			if (slot == -1) {
 				if (mftb() & 0x1)
 					hpte_group = ((hash & htab_hash_mask) *
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index e1f5ded851f6..977cb1ee5e72 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -41,7 +41,6 @@
 #include <asm/machdep.h>
 #include <asm/btext.h>
 #include <asm/tlb.h>
-#include <asm/prom.h>
 #include <asm/lmb.h>
 #include <asm/sections.h>
 
@@ -133,6 +132,9 @@ void __init MMU_init(void)
 	/* 601 can only access 16MB at the moment */
 	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
 		__initial_memory_limit = 0x01000000;
+	/* 8xx can only access 8MB at the moment */
+	if (PVR_VER(mfspr(SPRN_PVR)) == 0x50)
+		__initial_memory_limit = 0x00800000;
 
 	/* parse args from command line */
 	MMU_setup();
@@ -256,3 +258,40 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 	}
 }
 #endif
+
+#ifdef CONFIG_PROC_KCORE
+static struct kcore_list kcore_vmem;
+
+static int __init setup_kcore(void)
+{
+	int i;
+
+	for (i = 0; i < lmb.memory.cnt; i++) {
+		unsigned long base;
+		unsigned long size;
+		struct kcore_list *kcore_mem;
+
+		base = lmb.memory.region[i].base;
+		size = lmb.memory.region[i].size;
+
+		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
+		if (!kcore_mem)
+			panic("%s: kmalloc failed\n", __FUNCTION__);
+
+		/* must stay under 32 bits */
+		if ( 0xfffffffful - (unsigned long)__va(base) < size) {
+			size = 0xfffffffful - (unsigned long)(__va(base));
+			printk(KERN_DEBUG "setup_kcore: restrict size=%lx\n",
+				size);
+		}
+
+		kclist_add(kcore_mem, __va(base), size);
+	}
+
+	kclist_add(&kcore_vmem, (void *)VMALLOC_START,
+		VMALLOC_END-VMALLOC_START);
+
+	return 0;
+}
+module_init(setup_kcore);
+#endif
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 9f27bb56a61d..fa90f6561b9f 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -113,6 +113,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
+#ifdef CONFIG_PROC_KCORE
 static struct kcore_list kcore_vmem;
 
 static int __init setup_kcore(void)
@@ -139,6 +140,7 @@ static int __init setup_kcore(void)
 	return 0;
 }
 module_init(setup_kcore);
+#endif
 
 static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
 {
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index f0e7eedb1ba3..32dcfc9b0082 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -42,7 +42,6 @@
 #include <asm/machdep.h>
 #include <asm/btext.h>
 #include <asm/tlb.h>
-#include <asm/prom.h>
 #include <asm/lmb.h>
 #include <asm/sections.h>
 #include <asm/vdso.h>
diff --git a/arch/powerpc/mm/mmu_context_64.c b/arch/powerpc/mm/mmu_context_64.c
index 7a78cdc0515a..1db38ba1f544 100644
--- a/arch/powerpc/mm/mmu_context_64.c
+++ b/arch/powerpc/mm/mmu_context_64.c
@@ -28,7 +28,6 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	int index;
 	int err;
-	int new_context = (mm->context.id == 0);
 
 again:
 	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
@@ -50,19 +49,13 @@ again:
 		return -ENOMEM;
 	}
 
-	mm->context.id = index;
-#ifdef CONFIG_PPC_MM_SLICES
 	/* The old code would re-promote on fork, we don't do that
 	 * when using slices as it could cause problem promoting slices
 	 * that have been forced down to 4K
 	 */
-	if (new_context)
+	if (slice_mm_new_context(mm))
 		slice_set_user_psize(mm, mmu_virtual_psize);
-#else
-	mm->context.user_psize = mmu_virtual_psize;
-	mm->context.sllp = SLB_VSID_USER |
-		mmu_psize_defs[mmu_virtual_psize].sllp;
-#endif
+	mm->context.id = index;
 
 	return 0;
 }
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 3dfd10db931a..3ef0ad2f9ca0 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -87,8 +87,8 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
 	 * entry in the hardware page table.
 	 *
 	 */
-	if (htab_bolt_mapping(ea, (unsigned long)ea + PAGE_SIZE,
-			      pa, flags, mmu_io_psize)) {
+	if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
+			      mmu_io_psize, mmu_kernel_ssize)) {
 		printk(KERN_ERR "Failed to do bolted mapping IO "
 		       "memory at %016lx !\n", pa);
 		return -ENOMEM;
@@ -228,5 +228,7 @@ void iounmap(volatile void __iomem *token)
 EXPORT_SYMBOL(ioremap);
 EXPORT_SYMBOL(ioremap_flags);
 EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(__ioremap_at);
 EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(__iounmap);
+EXPORT_SYMBOL(__iounmap_at);
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index ff1811ac6c81..6c164cec9d2c 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -43,30 +43,37 @@ static void slb_allocate(unsigned long ea)
 	slb_allocate_realmode(ea);
 }
 
-static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
+static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
+					 unsigned long slot)
 {
-	return (ea & ESID_MASK) | SLB_ESID_V | slot;
+	unsigned long mask;
+
+	mask = (ssize == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T;
+	return (ea & mask) | SLB_ESID_V | slot;
 }
 
-static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
+#define slb_vsid_shift(ssize)	\
+	((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T)
+
+static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
+					 unsigned long flags)
 {
-	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
+	return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
+		((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
 }
 
-static inline void slb_shadow_update(unsigned long ea,
+static inline void slb_shadow_update(unsigned long ea, int ssize,
 				     unsigned long flags,
 				     unsigned long entry)
 {
 	/*
 	 * Clear the ESID first so the entry is not valid while we are
-	 * updating it.
+	 * updating it.  No write barriers are needed here, provided
+	 * we only update the current CPU's SLB shadow buffer.
 	 */
 	get_slb_shadow()->save_area[entry].esid = 0;
-	smp_wmb();
-	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
-	smp_wmb();
-	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
-	smp_wmb();
+	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, ssize, flags);
+	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, ssize, entry);
 }
 
 static inline void slb_shadow_clear(unsigned long entry)
@@ -74,7 +81,8 @@ static inline void slb_shadow_clear(unsigned long entry)
 	get_slb_shadow()->save_area[entry].esid = 0;
 }
 
-static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
+static inline void create_shadowed_slbe(unsigned long ea, int ssize,
+					unsigned long flags,
 					unsigned long entry)
 {
 	/*
@@ -82,11 +90,11 @@ static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
 	 * we don't get a stale entry here if we get preempted by PHYP
 	 * between these two statements.
 	 */
-	slb_shadow_update(ea, flags, entry);
+	slb_shadow_update(ea, ssize, flags, entry);
 
 	asm volatile("slbmte %0,%1" :
-		     : "r" (mk_vsid_data(ea, flags)),
-		       "r" (mk_esid_data(ea, entry))
+		     : "r" (mk_vsid_data(ea, ssize, flags)),
+		       "r" (mk_esid_data(ea, ssize, entry))
 		     : "memory" );
 }
 
@@ -95,7 +103,7 @@ void slb_flush_and_rebolt(void)
 	/* If you change this make sure you change SLB_NUM_BOLTED
 	 * appropriately too. */
 	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
-	unsigned long ksp_esid_data;
+	unsigned long ksp_esid_data, ksp_vsid_data;
 
 	WARN_ON(!irqs_disabled());
 
@@ -104,13 +112,15 @@ void slb_flush_and_rebolt(void)
 	lflags = SLB_VSID_KERNEL | linear_llp;
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
106 114
107 ksp_esid_data = mk_esid_data(get_paca()->kstack, 2); 115 ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2);
108 if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET) { 116 if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
109 ksp_esid_data &= ~SLB_ESID_V; 117 ksp_esid_data &= ~SLB_ESID_V;
118 ksp_vsid_data = 0;
110 slb_shadow_clear(2); 119 slb_shadow_clear(2);
111 } else { 120 } else {
112 /* Update stack entry; others don't change */ 121 /* Update stack entry; others don't change */
113 slb_shadow_update(get_paca()->kstack, lflags, 2); 122 slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
123 ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
114 } 124 }
115 125
116 /* We need to do this all in asm, so we're sure we don't touch 126 /* We need to do this all in asm, so we're sure we don't touch
@@ -122,9 +132,9 @@ void slb_flush_and_rebolt(void)
122 /* Slot 2 - kernel stack */ 132 /* Slot 2 - kernel stack */
123 "slbmte %2,%3\n" 133 "slbmte %2,%3\n"
124 "isync" 134 "isync"
125 :: "r"(mk_vsid_data(VMALLOC_START, vflags)), 135 :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
126 "r"(mk_esid_data(VMALLOC_START, 1)), 136 "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, 1)),
127 "r"(mk_vsid_data(ksp_esid_data, lflags)), 137 "r"(ksp_vsid_data),
128 "r"(ksp_esid_data) 138 "r"(ksp_esid_data)
129 : "memory"); 139 : "memory");
130} 140}
@@ -134,7 +144,7 @@ void slb_vmalloc_update(void)
134 unsigned long vflags; 144 unsigned long vflags;
135 145
136 vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp; 146 vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
137 slb_shadow_update(VMALLOC_START, vflags, 1); 147 slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
138 slb_flush_and_rebolt(); 148 slb_flush_and_rebolt();
139} 149}
140 150
@@ -142,7 +152,7 @@ void slb_vmalloc_update(void)
142void switch_slb(struct task_struct *tsk, struct mm_struct *mm) 152void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
143{ 153{
144 unsigned long offset = get_paca()->slb_cache_ptr; 154 unsigned long offset = get_paca()->slb_cache_ptr;
145 unsigned long esid_data = 0; 155 unsigned long slbie_data = 0;
146 unsigned long pc = KSTK_EIP(tsk); 156 unsigned long pc = KSTK_EIP(tsk);
147 unsigned long stack = KSTK_ESP(tsk); 157 unsigned long stack = KSTK_ESP(tsk);
148 unsigned long unmapped_base; 158 unsigned long unmapped_base;
@@ -151,9 +161,12 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
151 int i; 161 int i;
152 asm volatile("isync" : : : "memory"); 162 asm volatile("isync" : : : "memory");
153 for (i = 0; i < offset; i++) { 163 for (i = 0; i < offset; i++) {
154 esid_data = ((unsigned long)get_paca()->slb_cache[i] 164 slbie_data = (unsigned long)get_paca()->slb_cache[i]
155 << SID_SHIFT) | SLBIE_C; 165 << SID_SHIFT; /* EA */
156 asm volatile("slbie %0" : : "r" (esid_data)); 166 slbie_data |= user_segment_size(slbie_data)
167 << SLBIE_SSIZE_SHIFT;
168 slbie_data |= SLBIE_C; /* C set for user addresses */
169 asm volatile("slbie %0" : : "r" (slbie_data));
157 } 170 }
158 asm volatile("isync" : : : "memory"); 171 asm volatile("isync" : : : "memory");
159 } else { 172 } else {
@@ -162,7 +175,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
162 175
163 /* Workaround POWER5 < DD2.1 issue */ 176 /* Workaround POWER5 < DD2.1 issue */
164 if (offset == 1 || offset > SLB_CACHE_ENTRIES) 177 if (offset == 1 || offset > SLB_CACHE_ENTRIES)
165 asm volatile("slbie %0" : : "r" (esid_data)); 178 asm volatile("slbie %0" : : "r" (slbie_data));
166 179
167 get_paca()->slb_cache_ptr = 0; 180 get_paca()->slb_cache_ptr = 0;
168 get_paca()->context = mm->context; 181 get_paca()->context = mm->context;
@@ -245,9 +258,9 @@ void slb_initialize(void)
245 asm volatile("isync":::"memory"); 258 asm volatile("isync":::"memory");
246 asm volatile("slbmte %0,%0"::"r" (0) : "memory"); 259 asm volatile("slbmte %0,%0"::"r" (0) : "memory");
247 asm volatile("isync; slbia; isync":::"memory"); 260 asm volatile("isync; slbia; isync":::"memory");
248 create_shadowed_slbe(PAGE_OFFSET, lflags, 0); 261 create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
249 262
250 create_shadowed_slbe(VMALLOC_START, vflags, 1); 263 create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
251 264
252 /* We don't bolt the stack for the time being - we're in boot, 265 /* We don't bolt the stack for the time being - we're in boot,
253 * so the stack is in the bolted segment. By the time it goes 266 * so the stack is in the bolted segment. By the time it goes
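
switch_slb() above derives the SSIZE field of each slbie operand from user_segment_size(), which is not part of this hunk. A sketch of the intended check, assuming SID_SHIFT_1T is 40 so that only addresses at or above 1T use 1T segments:

/* Illustrative only; the real helper is declared in the hash MMU headers. */
static inline int user_segment_size(unsigned long addr)
{
	if (addr >= (1UL << SID_SHIFT_1T))	/* 1T and above */
		return MMU_SEGSIZE_1T;
	return MMU_SEGSIZE_256M;		/* stay with 256M below 1T */
}
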
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index cd1a93d4948c..1328a81a84aa 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -57,7 +57,10 @@ _GLOBAL(slb_allocate_realmode)
57 */ 57 */
58_GLOBAL(slb_miss_kernel_load_linear) 58_GLOBAL(slb_miss_kernel_load_linear)
59 li r11,0 59 li r11,0
60BEGIN_FTR_SECTION
60 b slb_finish_load 61 b slb_finish_load
62END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
63 b slb_finish_load_1T
61 64
621: /* vmalloc/ioremap mapping encoding bits, the "li" instructions below 651: /* vmalloc/ioremap mapping encoding bits, the "li" instructions below
63 * will be patched by the kernel at boot 66 * will be patched by the kernel at boot
@@ -68,13 +71,16 @@ BEGIN_FTR_SECTION
68 cmpldi r11,(VMALLOC_SIZE >> 28) - 1 71 cmpldi r11,(VMALLOC_SIZE >> 28) - 1
69 bgt 5f 72 bgt 5f
70 lhz r11,PACAVMALLOCSLLP(r13) 73 lhz r11,PACAVMALLOCSLLP(r13)
71 b slb_finish_load 74 b 6f
725: 755:
73END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE) 76END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
74_GLOBAL(slb_miss_kernel_load_io) 77_GLOBAL(slb_miss_kernel_load_io)
75 li r11,0 78 li r11,0
796:
80BEGIN_FTR_SECTION
76 b slb_finish_load 81 b slb_finish_load
77 82END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
83 b slb_finish_load_1T
78 84
790: /* user address: proto-VSID = context << 15 | ESID. First check 850: /* user address: proto-VSID = context << 15 | ESID. First check
80 * if the address is within the boundaries of the user region 86 * if the address is within the boundaries of the user region
@@ -122,7 +128,13 @@ _GLOBAL(slb_miss_kernel_load_io)
122#endif /* CONFIG_PPC_MM_SLICES */ 128#endif /* CONFIG_PPC_MM_SLICES */
123 129
124 ld r9,PACACONTEXTID(r13) 130 ld r9,PACACONTEXTID(r13)
131BEGIN_FTR_SECTION
132 cmpldi r10,0x1000
133END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
125 rldimi r10,r9,USER_ESID_BITS,0 134 rldimi r10,r9,USER_ESID_BITS,0
135BEGIN_FTR_SECTION
136 bge slb_finish_load_1T
137END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
126 b slb_finish_load 138 b slb_finish_load
127 139
1288: /* invalid EA */ 1408: /* invalid EA */
@@ -188,7 +200,7 @@ _GLOBAL(slb_allocate_user)
188 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET 200 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
189 */ 201 */
190slb_finish_load: 202slb_finish_load:
191 ASM_VSID_SCRAMBLE(r10,r9) 203 ASM_VSID_SCRAMBLE(r10,r9,256M)
192 rldimi r11,r10,SLB_VSID_SHIFT,16 /* combine VSID and flags */ 204 rldimi r11,r10,SLB_VSID_SHIFT,16 /* combine VSID and flags */
193 205
194 /* r3 = EA, r11 = VSID data */ 206 /* r3 = EA, r11 = VSID data */
@@ -213,7 +225,7 @@ BEGIN_FW_FTR_SECTION
213END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 225END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
214#endif /* CONFIG_PPC_ISERIES */ 226#endif /* CONFIG_PPC_ISERIES */
215 227
216 ld r10,PACASTABRR(r13) 2287: ld r10,PACASTABRR(r13)
217 addi r10,r10,1 229 addi r10,r10,1
218 /* use a cpu feature mask if we ever change our slb size */ 230 /* use a cpu feature mask if we ever change our slb size */
219 cmpldi r10,SLB_NUM_ENTRIES 231 cmpldi r10,SLB_NUM_ENTRIES
@@ -259,3 +271,20 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
259 crclr 4*cr0+eq /* set result to "success" */ 271 crclr 4*cr0+eq /* set result to "success" */
260 blr 272 blr
261 273
274/*
275 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
276 * We assume legacy iSeries will never have 1T segments.
277 *
278 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
279 */
280slb_finish_load_1T:
281 srdi r10,r10,40-28 /* get 1T ESID */
282 ASM_VSID_SCRAMBLE(r10,r9,1T)
283 rldimi r11,r10,SLB_VSID_SHIFT_1T,16 /* combine VSID and flags */
284 li r10,MMU_SEGSIZE_1T
285 rldimi r11,r10,SLB_VSID_SSIZE_SHIFT,0 /* insert segment size */
286
287 /* r3 = EA, r11 = VSID data */
288 clrrdi r3,r3,SID_SHIFT_1T /* clear out non-ESID bits */
289 b 7b
290
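
The "srdi r10,r10,40-28" in slb_finish_load_1T shifts the 256M-sized ESID already in r10 down to a 1T ESID before the 1T scramble: the value arrives shifted by SID_SHIFT (28), and the extra 12-bit shift leaves ea >> 40. The same step in C, with the assumed shift values spelled out:

/* Sketch: SID_SHIFT = 28 (256M segments), SID_SHIFT_1T = 40 (1T segments). */
static inline unsigned long esid_1T(unsigned long esid_256M)
{
	return esid_256M >> (40 - 28);	/* equivalent to ea >> 40 */
}
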
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index d5fd3909d13a..319826ef1645 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -551,6 +551,7 @@ EXPORT_SYMBOL_GPL(get_slice_psize);
551 * 551 *
552 * This is also called in init_new_context() to change back the user 552 * This is also called in init_new_context() to change back the user
553 * psize from whatever the parent context had it set to 553 * psize from whatever the parent context had it set to
554 * N.B. This may be called before mm->context.id has been set.
554 * 555 *
555 * This function will only change the content of the {low,high}_slice_psize 556 * This function will only change the content of the {low,high}_slice_psize
556 * masks, it will not flush SLBs as this shall be handled lazily by the 557 * masks, it will not flush SLBs as this shall be handled lazily by the
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 28492bbdee8e..9e85bda76216 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -122,12 +122,12 @@ static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
122 122
123 /* Kernel or user address? */ 123 /* Kernel or user address? */
124 if (is_kernel_addr(ea)) { 124 if (is_kernel_addr(ea)) {
125 vsid = get_kernel_vsid(ea); 125 vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
126 } else { 126 } else {
127 if ((ea >= TASK_SIZE_USER64) || (! mm)) 127 if ((ea >= TASK_SIZE_USER64) || (! mm))
128 return 1; 128 return 1;
129 129
130 vsid = get_vsid(mm->context.id, ea); 130 vsid = get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M);
131 } 131 }
132 132
133 stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid); 133 stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);
@@ -261,7 +261,7 @@ void __init stabs_alloc(void)
261 */ 261 */
262void stab_initialize(unsigned long stab) 262void stab_initialize(unsigned long stab)
263{ 263{
264 unsigned long vsid = get_kernel_vsid(PAGE_OFFSET); 264 unsigned long vsid = get_kernel_vsid(PAGE_OFFSET, MMU_SEGSIZE_256M);
265 unsigned long stabreal; 265 unsigned long stabreal;
266 266
267 asm volatile("isync; slbia; isync":::"memory"); 267 asm volatile("isync; slbia; isync":::"memory");
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index cbd34fc813ee..eafbca52bff9 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -132,6 +132,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
132 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 132 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
133 unsigned long vsid, vaddr; 133 unsigned long vsid, vaddr;
134 unsigned int psize; 134 unsigned int psize;
135 int ssize;
135 real_pte_t rpte; 136 real_pte_t rpte;
136 int i; 137 int i;
137 138
@@ -161,11 +162,14 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
161 162
162 /* Build full vaddr */ 163 /* Build full vaddr */
163 if (!is_kernel_addr(addr)) { 164 if (!is_kernel_addr(addr)) {
164 vsid = get_vsid(mm->context.id, addr); 165 ssize = user_segment_size(addr);
166 vsid = get_vsid(mm->context.id, addr, ssize);
165 WARN_ON(vsid == 0); 167 WARN_ON(vsid == 0);
166 } else 168 } else {
167 vsid = get_kernel_vsid(addr); 169 vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
168 vaddr = (vsid << 28 ) | (addr & 0x0fffffff); 170 ssize = mmu_kernel_ssize;
171 }
172 vaddr = hpt_va(addr, vsid, ssize);
169 rpte = __real_pte(__pte(pte), ptep); 173 rpte = __real_pte(__pte(pte), ptep);
170 174
171 /* 175 /*
@@ -175,7 +179,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
175 * and decide to use local invalidates instead... 179 * and decide to use local invalidates instead...
176 */ 180 */
177 if (!batch->active) { 181 if (!batch->active) {
178 flush_hash_page(vaddr, rpte, psize, 0); 182 flush_hash_page(vaddr, rpte, psize, ssize, 0);
179 return; 183 return;
180 } 184 }
181 185
@@ -189,13 +193,15 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
189 * We also need to ensure only one page size is present in a given 193 * We also need to ensure only one page size is present in a given
190 * batch 194 * batch
191 */ 195 */
192 if (i != 0 && (mm != batch->mm || batch->psize != psize)) { 196 if (i != 0 && (mm != batch->mm || batch->psize != psize ||
197 batch->ssize != ssize)) {
193 __flush_tlb_pending(batch); 198 __flush_tlb_pending(batch);
194 i = 0; 199 i = 0;
195 } 200 }
196 if (i == 0) { 201 if (i == 0) {
197 batch->mm = mm; 202 batch->mm = mm;
198 batch->psize = psize; 203 batch->psize = psize;
204 batch->ssize = ssize;
199 } 205 }
200 batch->pte[i] = rpte; 206 batch->pte[i] = rpte;
201 batch->vaddr[i] = vaddr; 207 batch->vaddr[i] = vaddr;
@@ -222,7 +228,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
222 local = 1; 228 local = 1;
223 if (i == 1) 229 if (i == 1)
224 flush_hash_page(batch->vaddr[0], batch->pte[0], 230 flush_hash_page(batch->vaddr[0], batch->pte[0],
225 batch->psize, local); 231 batch->psize, batch->ssize, local);
226 else 232 else
227 flush_hash_range(i, local); 233 flush_hash_range(i, local);
228 batch->index = 0; 234 batch->index = 0;
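
hpte_need_flush() now builds the virtual address through hpt_va() rather than open-coding the 256M layout "(vsid << 28) | (addr & 0x0fffffff)" that this hunk removes. A sketch of what hpt_va() is expected to compute, with the 1T branch assumed to use a 40-bit segment offset:

/* Sketch only; the 256M branch reproduces the expression removed above. */
static inline unsigned long hpt_va(unsigned long ea, unsigned long vsid,
				   int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return (vsid << 28) | (ea & 0x0fffffffUL);
	return (vsid << 40) | (ea & 0xffffffffffUL);	/* 1T segment */
}
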