 arch/powerpc/kernel/asm-offsets.c |  2
 arch/powerpc/kernel/prom.c        |  3
 arch/powerpc/mm/hash_low_64.S     | 28
 arch/powerpc/mm/hash_utils_64.c   | 84
 arch/powerpc/mm/mmu_context_64.c  |  3
 arch/powerpc/mm/slb.c             | 29
 arch/powerpc/mm/slb_low.S         | 17
 arch/powerpc/mm/tlb_64.c          |  5
 include/asm-powerpc/mmu.h         | 13
 include/asm-powerpc/paca.h        |  1
 include/asm-powerpc/pgtable-4k.h  |  2
 include/asm-powerpc/pgtable-64k.h |  2
 include/asm-powerpc/pgtable.h     | 10
 13 files changed, 160 insertions(+), 39 deletions(-)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index aa0486d552a3..ff2940548929 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -122,6 +122,8 @@ int main(void)
 	DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
 	DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
 	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
+	DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp));
+	DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
 #ifdef CONFIG_HUGETLB_PAGE
 	DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
 	DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 969f4abcc0be..d77d24a89b39 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -948,7 +948,10 @@ static struct ibm_pa_feature {
 	{CPU_FTR_CTRL, 0, 0, 3, 0},
 	{CPU_FTR_NOEXECUTE, 0, 0, 6, 0},
 	{CPU_FTR_NODSISRALIGN, 0, 1, 1, 1},
+#if 0
+	/* put this back once we know how to test if firmware does 64k IO */
 	{CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
+#endif
 };
 
 static void __init check_cpu_pa_features(unsigned long node)
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 106fba391987..52e914238959 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -369,6 +369,7 @@ _GLOBAL(__hash_page_4K)
 	rlwinm	r30,r4,32-9+7,31-7,31-7	/* _PAGE_RW -> _PAGE_DIRTY */
 	or	r30,r30,r31
 	ori	r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
+	oris	r30,r30,_PAGE_COMBO@h
 	/* Write the linux PTE atomically (setting busy) */
 	stdcx.	r30,0,r6
 	bne-	1b
@@ -428,6 +429,14 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
 	andi.	r0,r31,_PAGE_HASHPTE
 	li	r26,0			/* Default hidx */
 	beq	htab_insert_pte
+
+	/*
+	 * Check if the pte was already inserted into the hash table
+	 * as a 64k HW page, and invalidate the 64k HPTE if so.
+	 */
+	andis.	r0,r31,_PAGE_COMBO@h
+	beq	htab_inval_old_hpte
+
 	ld	r6,STK_PARM(r6)(r1)
 	ori	r26,r6,0x8000		/* Load the hidx mask */
 	ld	r26,0(r26)
@@ -498,6 +507,19 @@ _GLOBAL(htab_call_hpte_remove)
 	/* Try all again */
 	b	htab_insert_pte
 
+	/*
+	 * Call out to C code to invalidate a 64k HW HPTE that is
+	 * useless now that the segment has been switched to 4k pages.
+	 */
+htab_inval_old_hpte:
+	mr	r3,r29			/* virtual addr */
+	mr	r4,r31			/* PTE.pte */
+	li	r5,0			/* PTE.hidx */
+	li	r6,MMU_PAGE_64K		/* psize */
+	ld	r7,STK_PARM(r8)(r1)	/* local */
+	bl	.flush_hash_page
+	b	htab_insert_pte
+
 htab_bail_ok:
 	li	r3,0
 	b	htab_bail
@@ -638,6 +660,12 @@ _GLOBAL(__hash_page_64K)
 	 * is changing this PTE anyway and might hash it.
 	 */
 	bne-	ht64_bail_ok
+BEGIN_FTR_SECTION
+	/* Check if PTE has the cache-inhibit bit set */
+	andi.	r0,r31,_PAGE_NO_CACHE
+	/* If so, bail out and refault as a 4k page */
+	bne-	ht64_bail_ok
+END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
 	/* Prepare new PTE value (turn access RW into DIRTY, then
 	 * add BUSY,HASHPTE and ACCESSED)
 	 */
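
The __hash_page_4K hunks above implement the demotion path: once a segment has been switched to 4k pages, a PTE that was previously hashed as a single 64k hardware page must have that stale 64k HPTE flushed (via .flush_hash_page) before 4k HPTEs are inserted, and the Linux PTE is marked _PAGE_COMBO from then on. A rough standalone C sketch of that bookkeeping follows; the bit values and the flush helper are illustrative stand-ins, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define PAGE_HASHPTE	0x1ULL	/* PTE already has a hash table entry (assumed bit) */
#define PAGE_COMBO	0x2ULL	/* PTE is hashed as 4k subpages (assumed bit) */

/* stands in for the call to .flush_hash_page with psize = MMU_PAGE_64K */
static void flush_stale_64k_hpte(uint64_t va)
{
	printf("flushing stale 64k HPTE for va 0x%llx\n", (unsigned long long)va);
}

static void hash_page_as_4k(uint64_t va, uint64_t *pte)
{
	/* previously hashed as one 64k HW page?  drop that HPTE first */
	if ((*pte & PAGE_HASHPTE) && !(*pte & PAGE_COMBO))
		flush_stale_64k_hpte(va);
	/* from here on the PTE is tracked as a set of 4k subpages */
	*pte |= PAGE_HASHPTE | PAGE_COMBO;
	/* ... insert the 4k HPTE for the faulting subpage ... */
}

int main(void)
{
	uint64_t pte = PAGE_HASHPTE;	/* hashed while the segment still used 64k pages */

	hash_page_as_4k(0x10000000ULL, &pte);
	return 0;
}

The __hash_page_64K side of the same policy is the feature section above: on CPUs without CPU_FTR_CI_LARGE_PAGE, a cache-inhibited PTE simply bails out so the fault is retried as a 4k page.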
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index b43ed92ef471..d03fd2b4445e 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -92,10 +92,15 @@ unsigned long htab_size_bytes;
 unsigned long htab_hash_mask;
 int mmu_linear_psize = MMU_PAGE_4K;
 int mmu_virtual_psize = MMU_PAGE_4K;
+int mmu_vmalloc_psize = MMU_PAGE_4K;
+int mmu_io_psize = MMU_PAGE_4K;
 #ifdef CONFIG_HUGETLB_PAGE
 int mmu_huge_psize = MMU_PAGE_16M;
 unsigned int HPAGE_SHIFT;
 #endif
+#ifdef CONFIG_PPC_64K_PAGES
+int mmu_ci_restrictions;
+#endif
 
 /* There are definitions of page sizes arrays to be used when none
  * is provided by the firmware.
@@ -308,20 +313,31 @@ static void __init htab_init_page_sizes(void)
 	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
 		mmu_linear_psize = MMU_PAGE_1M;
 
+#ifdef CONFIG_PPC_64K_PAGES
 	/*
 	 * Pick a size for the ordinary pages. Default is 4K, we support
-	 * 64K if cache inhibited large pages are supported by the
-	 * processor
+	 * 64K for user mappings and vmalloc if supported by the processor.
+	 * We only use 64k for ioremap if the processor
+	 * (and firmware) support cache-inhibited large pages.
+	 * If not, we use 4k and set mmu_ci_restrictions so that
+	 * hash_page knows to switch processes that use cache-inhibited
+	 * mappings to 4k pages.
 	 */
-#ifdef CONFIG_PPC_64K_PAGES
-	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
-	    cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
+	if (mmu_psize_defs[MMU_PAGE_64K].shift) {
 		mmu_virtual_psize = MMU_PAGE_64K;
+		mmu_vmalloc_psize = MMU_PAGE_64K;
+		if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
+			mmu_io_psize = MMU_PAGE_64K;
+		else
+			mmu_ci_restrictions = 1;
+	}
 #endif
 
-	printk(KERN_DEBUG "Page orders: linear mapping = %d, others = %d\n",
+	printk(KERN_DEBUG "Page orders: linear mapping = %d, "
+	       "virtual = %d, io = %d\n",
 	       mmu_psize_defs[mmu_linear_psize].shift,
-	       mmu_psize_defs[mmu_virtual_psize].shift);
+	       mmu_psize_defs[mmu_virtual_psize].shift,
+	       mmu_psize_defs[mmu_io_psize].shift);
 
 #ifdef CONFIG_HUGETLB_PAGE
 	/* Init large page size. Currently, we pick 16M or 1M depending
@@ -556,6 +572,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	pte_t *ptep;
 	cpumask_t tmp;
 	int rc, user_region = 0, local = 0;
+	int psize;
 
 	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
 		ea, access, trap);
@@ -575,10 +592,15 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 			return 1;
 		}
 		vsid = get_vsid(mm->context.id, ea);
+		psize = mm->context.user_psize;
 		break;
 	case VMALLOC_REGION_ID:
 		mm = &init_mm;
 		vsid = get_kernel_vsid(ea);
+		if (ea < VMALLOC_END)
+			psize = mmu_vmalloc_psize;
+		else
+			psize = mmu_io_psize;
 		break;
 	default:
 		/* Not a valid range
@@ -629,7 +651,40 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 #ifndef CONFIG_PPC_64K_PAGES
 	rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
 #else
-	if (mmu_virtual_psize == MMU_PAGE_64K)
+	if (mmu_ci_restrictions) {
+		/* If this PTE is non-cacheable, switch to 4k */
+		if (psize == MMU_PAGE_64K &&
+		    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
+			if (user_region) {
+				psize = MMU_PAGE_4K;
+				mm->context.user_psize = MMU_PAGE_4K;
+				mm->context.sllp = SLB_VSID_USER |
+					mmu_psize_defs[MMU_PAGE_4K].sllp;
+			} else if (ea < VMALLOC_END) {
+				/*
+				 * some driver did a non-cacheable mapping
+				 * in vmalloc space, so switch vmalloc
+				 * to 4k pages
+				 */
+				printk(KERN_ALERT "Reducing vmalloc segment "
+				       "to 4kB pages because of "
+				       "non-cacheable mapping\n");
+				psize = mmu_vmalloc_psize = MMU_PAGE_4K;
+			}
+		}
+		if (user_region) {
+			if (psize != get_paca()->context.user_psize) {
+				get_paca()->context = mm->context;
+				slb_flush_and_rebolt();
+			}
+		} else if (get_paca()->vmalloc_sllp !=
+			   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
+			get_paca()->vmalloc_sllp =
+				mmu_psize_defs[mmu_vmalloc_psize].sllp;
+			slb_flush_and_rebolt();
+		}
+	}
+	if (psize == MMU_PAGE_64K)
 		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
 	else
 		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
@@ -681,7 +736,18 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 #ifndef CONFIG_PPC_64K_PAGES
 	__hash_page_4K(ea, access, vsid, ptep, trap, local);
 #else
-	if (mmu_virtual_psize == MMU_PAGE_64K)
+	if (mmu_ci_restrictions) {
+		/* If this PTE is non-cacheable, switch to 4k */
+		if (mm->context.user_psize == MMU_PAGE_64K &&
+		    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
+			mm->context.user_psize = MMU_PAGE_4K;
+			mm->context.sllp = SLB_VSID_USER |
+				mmu_psize_defs[MMU_PAGE_4K].sllp;
+			get_paca()->context = mm->context;
+			slb_flush_and_rebolt();
+		}
+	}
+	if (mm->context.user_psize == MMU_PAGE_64K)
 		__hash_page_64K(ea, access, vsid, ptep, trap, local);
 	else
 		__hash_page_4K(ea, access, vsid, ptep, trap, local);
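
The hash_page() and hash_preload() hunks above carry the core policy of the patch: each fault resolves a page-size index from the faulting region (the per-process user_psize for user addresses, mmu_vmalloc_psize below VMALLOC_END, mmu_io_psize above it), and when mmu_ci_restrictions is set a non-cacheable PTE demotes the affected segment to 4k before hashing, after which slb_flush_and_rebolt() rewrites the bolted SLB entries. A compressed standalone C sketch of that selection follows, with simplified stand-ins for the kernel types and flag values.

#include <stdint.h>
#include <stdio.h>

enum { MMU_PAGE_4K, MMU_PAGE_64K };

#define VMALLOC_START	0xD000000000000000ULL
#define VMALLOC_SIZE	0x80000000000ULL
#define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)
#define PAGE_NO_CACHE	0x4ULL			/* assumed stand-in for _PAGE_NO_CACHE */

struct mm_ctx { int user_psize; };		/* simplified mm_context_t */

static int mmu_vmalloc_psize = MMU_PAGE_64K;
static int mmu_io_psize = MMU_PAGE_4K;
static int mmu_ci_restrictions = 1;		/* CPU lacks cache-inhibited 64k pages */

static int pick_psize(int user_region, uint64_t ea, uint64_t pte, struct mm_ctx *ctx)
{
	int psize = user_region ? ctx->user_psize
		  : (ea < VMALLOC_END ? mmu_vmalloc_psize : mmu_io_psize);

	if (mmu_ci_restrictions && psize == MMU_PAGE_64K && (pte & PAGE_NO_CACHE)) {
		/* demote the segment rather than hash a CI page as 64k */
		psize = MMU_PAGE_4K;
		if (user_region)
			ctx->user_psize = MMU_PAGE_4K;
		else if (ea < VMALLOC_END)
			mmu_vmalloc_psize = MMU_PAGE_4K;
		/* the kernel would also rewrite the bolted SLB entries here */
	}
	return psize;
}

int main(void)
{
	struct mm_ctx ctx = { .user_psize = MMU_PAGE_64K };

	printf("cacheable user fault -> psize %d\n", pick_psize(1, 0x10000000ULL, 0, &ctx));
	printf("CI user fault        -> psize %d\n", pick_psize(1, 0x10000000ULL, PAGE_NO_CACHE, &ctx));
	printf("ioremap region fault -> psize %d\n", pick_psize(0, VMALLOC_END + 0x1000, 0, &ctx));
	return 0;
}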
diff --git a/arch/powerpc/mm/mmu_context_64.c b/arch/powerpc/mm/mmu_context_64.c
index 714a84dd8d5d..65d18dca266f 100644
--- a/arch/powerpc/mm/mmu_context_64.c
+++ b/arch/powerpc/mm/mmu_context_64.c
@@ -49,6 +49,9 @@ again:
 	}
 
 	mm->context.id = index;
+	mm->context.user_psize = mmu_virtual_psize;
+	mm->context.sllp = SLB_VSID_USER |
+		mmu_psize_defs[mmu_virtual_psize].sllp;
 
 	return 0;
 }
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 2cc61736feee..6a8bf6c6000e 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -60,19 +60,19 @@ static inline void create_slbe(unsigned long ea, unsigned long flags,
 		     : "memory" );
 }
 
-static void slb_flush_and_rebolt(void)
+void slb_flush_and_rebolt(void)
 {
 	/* If you change this make sure you change SLB_NUM_BOLTED
 	 * appropriately too. */
-	unsigned long linear_llp, virtual_llp, lflags, vflags;
+	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
 	unsigned long ksp_esid_data;
 
 	WARN_ON(!irqs_disabled());
 
 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
-	virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
 	lflags = SLB_VSID_KERNEL | linear_llp;
-	vflags = SLB_VSID_KERNEL | virtual_llp;
+	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
 	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
 	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
@@ -164,11 +164,10 @@ static inline void patch_slb_encoding(unsigned int *insn_addr,
 
 void slb_initialize(void)
 {
-	unsigned long linear_llp, virtual_llp;
+	unsigned long linear_llp, vmalloc_llp, io_llp;
 	static int slb_encoding_inited;
 	extern unsigned int *slb_miss_kernel_load_linear;
-	extern unsigned int *slb_miss_kernel_load_virtual;
-	extern unsigned int *slb_miss_user_load_normal;
+	extern unsigned int *slb_miss_kernel_load_io;
 #ifdef CONFIG_HUGETLB_PAGE
 	extern unsigned int *slb_miss_user_load_huge;
 	unsigned long huge_llp;
@@ -178,18 +177,19 @@ void slb_initialize(void)
 
 	/* Prepare our SLB miss handler based on our page size */
 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
-	virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
+	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
+	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
+
 	if (!slb_encoding_inited) {
 		slb_encoding_inited = 1;
 		patch_slb_encoding(slb_miss_kernel_load_linear,
 				   SLB_VSID_KERNEL | linear_llp);
-		patch_slb_encoding(slb_miss_kernel_load_virtual,
-				   SLB_VSID_KERNEL | virtual_llp);
-		patch_slb_encoding(slb_miss_user_load_normal,
-				   SLB_VSID_USER | virtual_llp);
+		patch_slb_encoding(slb_miss_kernel_load_io,
+				   SLB_VSID_KERNEL | io_llp);
 
 		DBG("SLB: linear LLP = %04x\n", linear_llp);
-		DBG("SLB: virtual LLP = %04x\n", virtual_llp);
+		DBG("SLB: io LLP = %04x\n", io_llp);
 #ifdef CONFIG_HUGETLB_PAGE
 		patch_slb_encoding(slb_miss_user_load_huge,
 				   SLB_VSID_USER | huge_llp);
@@ -204,7 +204,7 @@ void slb_initialize(void)
 	unsigned long lflags, vflags;
 
 	lflags = SLB_VSID_KERNEL | linear_llp;
-	vflags = SLB_VSID_KERNEL | virtual_llp;
+	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
 	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
 	asm volatile("isync":::"memory");
@@ -212,7 +212,6 @@ void slb_initialize(void)
 	asm volatile("isync; slbia; isync":::"memory");
 	create_slbe(PAGE_OFFSET, lflags, 0);
 
-	/* VMALLOC space has 4K pages always for now */
 	create_slbe(VMALLOC_START, vflags, 1);
 
 	/* We don't bolt the stack for the time being - we're in boot,
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index abfaabf667bf..8548dcf8ef8b 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -59,10 +59,19 @@ _GLOBAL(slb_miss_kernel_load_linear)
 	li	r11,0
 	b	slb_finish_load
 
-1:	/* vmalloc/ioremap mapping encoding bits, the "li" instruction below
+1:	/* vmalloc/ioremap mapping encoding bits, the "li" instructions below
 	 * will be patched by the kernel at boot
 	 */
-_GLOBAL(slb_miss_kernel_load_virtual)
+BEGIN_FTR_SECTION
+	/* check whether this is in vmalloc or ioremap space */
+	clrldi	r11,r10,48
+	cmpldi	r11,(VMALLOC_SIZE >> 28) - 1
+	bgt	5f
+	lhz	r11,PACAVMALLOCSLLP(r13)
+	b	slb_finish_load
+5:
+END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
+_GLOBAL(slb_miss_kernel_load_io)
 	li	r11,0
 	b	slb_finish_load
 
@@ -96,9 +105,7 @@ _GLOBAL(slb_miss_user_load_huge)
 1:
 #endif /* CONFIG_HUGETLB_PAGE */
 
-_GLOBAL(slb_miss_user_load_normal)
-	li	r11,0
-
+	lhz	r11,PACACONTEXTSLLP(r13)
 2:
 	ld	r9,PACACONTEXTID(r13)
 	rldimi	r10,r9,USER_ESID_BITS,0
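
In the patched SLB miss path above, r10 holds the ESID of the faulting address (ea >> 28), so clrldi r11,r10,48 keeps its low 16 bits, the 256MB-segment offset within the kernel vmalloc/ioremap region: offsets up to (VMALLOC_SIZE >> 28) - 1 belong to vmalloc and take the SLLP cached at PACAVMALLOCSLLP, anything beyond falls through to the ioremap encoding patched into slb_miss_kernel_load_io. The whole check is only live on CPUs without CPU_FTR_CI_LARGE_PAGE (END_FTR_SECTION_IFCLR); otherwise vmalloc and ioremap share one encoding. A small C model of that classification follows; the SLLP encodings are made-up placeholders for the real mmu_psize_defs[] values.

#include <stdint.h>
#include <stdio.h>

#define SID_SHIFT	28				/* 256MB segments */
#define VMALLOC_START	0xD000000000000000ULL
#define VMALLOC_SIZE	0x80000000000ULL

static unsigned int vmalloc_sllp = 0x0100;		/* placeholder: e.g. 64k pages */
static unsigned int io_sllp = 0x0000;			/* placeholder: e.g. 4k pages */

/* classify a kernel vmalloc/ioremap address the way the patched miss handler does */
static unsigned int kernel_segment_sllp(uint64_t ea)
{
	uint64_t esid_off = (ea >> SID_SHIFT) & 0xffff;	/* clrldi r11,r10,48 */

	if (esid_off <= (VMALLOC_SIZE >> SID_SHIFT) - 1)
		return vmalloc_sllp;			/* vmalloc segment: PACAVMALLOCSLLP */
	return io_sllp;					/* ioremap segment: patched value */
}

int main(void)
{
	printf("vmalloc ea -> sllp 0x%04x\n", kernel_segment_sllp(VMALLOC_START + 0x1000));
	printf("ioremap ea -> sllp 0x%04x\n",
	       kernel_segment_sllp(VMALLOC_START + VMALLOC_SIZE + 0x1000));
	return 0;
}

User-side misses now load the per-process encoding from PACACONTEXTSLLP instead of a patched constant, which is what lets a single process be demoted to 4k pages without affecting others.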
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index f734b11566c2..e7449b068c82 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -131,7 +131,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
 {
 	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 	unsigned long vsid;
-	unsigned int psize = mmu_virtual_psize;
+	unsigned int psize;
 	int i;
 
 	i = batch->index;
@@ -148,7 +148,8 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
 #else
 		BUG();
 #endif
-	}
+	} else
+		psize = pte_pagesize_index(pte);
 
 	/*
 	 * This can happen when we are in the middle of a TLB batch and
diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h
index 885397420104..3a5ebe229af5 100644
--- a/include/asm-powerpc/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -165,6 +165,16 @@ struct mmu_psize_def
 extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
 extern int mmu_linear_psize;
 extern int mmu_virtual_psize;
+extern int mmu_vmalloc_psize;
+extern int mmu_io_psize;
+
+/*
+ * If the processor supports 64k normal pages but not 64k cache
+ * inhibited pages, we have to be prepared to switch processes
+ * to use 4k pages when they create cache-inhibited mappings.
+ * If this is the case, mmu_ci_restrictions will be set to 1.
+ */
+extern int mmu_ci_restrictions;
 
 #ifdef CONFIG_HUGETLB_PAGE
 /*
@@ -256,6 +266,7 @@ extern long iSeries_hpte_insert(unsigned long hpte_group,
 
 extern void stabs_alloc(void);
 extern void slb_initialize(void);
+extern void slb_flush_and_rebolt(void);
 extern void stab_initialize(unsigned long stab);
 
 #endif /* __ASSEMBLY__ */
@@ -359,6 +370,8 @@ typedef unsigned long mm_context_id_t;
 
 typedef struct {
 	mm_context_id_t id;
+	u16 user_psize;			/* page size index */
+	u16 sllp;			/* SLB entry page size encoding */
 #ifdef CONFIG_HUGETLB_PAGE
 	u16 low_htlb_areas, high_htlb_areas;
 #endif
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index c17fd54d995b..17406353e2ce 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -81,6 +81,7 @@ struct paca_struct {
 	 * on the linear mapping */
 
 	mm_context_t context;
+	u16 vmalloc_sllp;
 	u16 slb_cache[SLB_CACHE_ENTRIES];
 	u16 slb_cache_ptr;
 
diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h
index b2e18629932a..e7036155672e 100644
--- a/include/asm-powerpc/pgtable-4k.h
+++ b/include/asm-powerpc/pgtable-4k.h
@@ -78,6 +78,8 @@
 
 #define pte_iterate_hashed_end() } while(0)
 
+#define pte_pagesize_index(pte)	MMU_PAGE_4K
+
 /*
  * 4-level page tables related bits
  */
diff --git a/include/asm-powerpc/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h
index 653915014dcd..4b7126c53f37 100644
--- a/include/asm-powerpc/pgtable-64k.h
+++ b/include/asm-powerpc/pgtable-64k.h
@@ -90,6 +90,8 @@
 
 #define pte_iterate_hashed_end() } while(0); } } while(0)
 
+#define pte_pagesize_index(pte)	\
+	(((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)
 
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
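
pte_pagesize_index() is what lets hpte_update() in tlb_64.c above flush each PTE at the hardware page size it was actually hashed with: on 64k-page kernels a PTE carrying _PAGE_COMBO is backed by 4k HPTEs and anything else by a 64k HPTE, while the 4k-page build always answers MMU_PAGE_4K. A minimal illustration, with an assumed placeholder value for _PAGE_COMBO:

#include <stdint.h>
#include <stdio.h>

enum { MMU_PAGE_4K, MMU_PAGE_64K };

#define PAGE_COMBO	0x10000000ULL	/* assumed stand-in for _PAGE_COMBO */

/* 64k-page flavour of pte_pagesize_index() */
static int pte_pagesize_index(uint64_t pte)
{
	return (pte & PAGE_COMBO) ? MMU_PAGE_4K : MMU_PAGE_64K;
}

int main(void)
{
	uint64_t ptes[] = { 0, PAGE_COMBO };

	/* a flush batch must invalidate each entry at the size it was hashed at */
	for (int i = 0; i < 2; i++)
		printf("pte %d hashed with psize %d\n", i, pte_pagesize_index(ptes[i]));
	return 0;
}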
diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h
index e9f1f4627e6b..260a0fabe97e 100644
--- a/include/asm-powerpc/pgtable.h
+++ b/include/asm-powerpc/pgtable.h
@@ -47,8 +47,8 @@ struct mm_struct;
 /*
  * Define the address range of the vmalloc VM area.
  */
-#define VMALLOC_START (0xD000000000000000ul)
-#define VMALLOC_SIZE  (0x80000000000UL)
+#define VMALLOC_START ASM_CONST(0xD000000000000000)
+#define VMALLOC_SIZE  ASM_CONST(0x80000000000)
 #define VMALLOC_END   (VMALLOC_START + VMALLOC_SIZE)
 
 /*
@@ -413,12 +413,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 		flush_tlb_pending();
 	}
 	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
-
-#ifdef CONFIG_PPC_64K_PAGES
-	if (mmu_virtual_psize != MMU_PAGE_64K)
-		pte = __pte(pte_val(pte) | _PAGE_COMBO);
-#endif /* CONFIG_PPC_64K_PAGES */
-
 	*ptep = pte;
 }
424 418