author     Paul Mackerras <paulus@samba.org>   2006-06-14 20:45:18 -0400
committer  Paul Mackerras <paulus@samba.org>   2006-06-14 20:45:18 -0400
commit     bf72aeba2ffef599d1d386425c9e46b82be657cd
tree       ead8e5111dbcfa22e156999d1bb8a96e50f06fef /arch/powerpc
parent     31925323b1b51bb65db729e029472a8b1f635b7d
powerpc: Use 64k pages without needing cache-inhibited large pages
Some POWER5+ machines can do 64k hardware pages for normal memory but
not for cache-inhibited pages. This patch lets us use 64k hardware
pages for most user processes on such machines (assuming the kernel
has been configured with CONFIG_PPC_64K_PAGES=y). User processes
start out using 64k pages and get switched to 4k pages if they use any
non-cacheable mappings.
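
The demotion happens lazily at hash-fault time rather than by walking page
tables. As a rough C sketch of that path (condensed from the hash_page() and
hash_preload() changes below; the helper name is made up here and the locking
is omitted, so treat it as illustration only, not code from the patch):

/* Illustrative sketch only -- the patch open-codes this in hash_page() */
static void demote_segments_to_4k(struct mm_struct *mm, pte_t *ptep)
{
        if (mm->context.user_psize == MMU_PAGE_64K &&
            (pte_val(*ptep) & _PAGE_NO_CACHE)) {
                /* Record the new base page size for this address space */
                mm->context.user_psize = MMU_PAGE_4K;
                mm->context.sllp = SLB_VSID_USER |
                        mmu_psize_defs[MMU_PAGE_4K].sllp;
                /* Re-create this CPU's SLB entries with the 4k encoding */
                get_paca()->context = mm->context;
                slb_flush_and_rebolt();
        }
}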
With this, we use 64k pages for the vmalloc region and 4k pages for
the imalloc region. If anything creates a non-cacheable mapping in
the vmalloc region, the vmalloc region will get switched to 4k pages.
I don't know of any driver other than the DRM that would do this,
though, and these machines don't have AGP.
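
For reference, the page size used for a fault now depends on the region the
effective address falls in. A hypothetical helper equivalent to the switch
added in hash_page() below (the function name is invented for illustration;
the real code open-codes this and returns an error for unknown regions):

/* Hypothetical helper; hash_page() below open-codes this logic */
static int fault_page_size(struct mm_struct *mm, unsigned long ea)
{
        switch (REGION_ID(ea)) {
        case USER_REGION_ID:
                return mm->context.user_psize;    /* 64k until demoted to 4k */
        case VMALLOC_REGION_ID:
                if (ea < VMALLOC_END)
                        return mmu_vmalloc_psize; /* 64k, may drop to 4k */
                return mmu_io_psize;              /* imalloc/ioremap: 4k */
        default:
                return mmu_virtual_psize;         /* simplification only */
        }
}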
When a region gets switched from 64k pages to 4k pages, we do not have
to clear out all the 64k HPTEs from the hash table immediately. We
use the _PAGE_COMBO bit in the Linux PTE to indicate whether the page
was hashed in as a 64k page or a set of 4k pages. If hash_page is
trying to insert a 4k page for a Linux PTE and it sees that it has
already been inserted as a 64k page, it first invalidates the 64k HPTE
before inserting the 4k HPTE. The hash invalidation routines also use
the _PAGE_COMBO bit, to determine whether to look for a 64k HPTE or a
set of 4k HPTEs to remove. With those two changes, we can tolerate a
mix of 4k and 64k HPTEs in the hash table, and they will all get
removed when the address space is torn down.
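
In C terms, the check that __hash_page_4K gains (in powerpc assembly, in
hash_low_64.S below) amounts to the following sketch; the flush_hash_page()
argument order is read off the assembly call site, and va, rpte and local
stand for the values the fault handler already has in hand:

/* Sketch of the new __hash_page_4K behaviour (the real code is assembly) */
if ((pte_val(pte) & _PAGE_HASHPTE) && !(pte_val(pte) & _PAGE_COMBO)) {
        /* PTE was last hashed as a single 64k HPTE: flush it first */
        flush_hash_page(va, rpte, MMU_PAGE_64K, local);
}
/* ... then insert the 4k HPTE and set _PAGE_COMBO in the Linux PTE */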
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c   |  2
-rw-r--r--  arch/powerpc/kernel/prom.c          |  3
-rw-r--r--  arch/powerpc/mm/hash_low_64.S       | 28
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c     | 84
-rw-r--r--  arch/powerpc/mm/mmu_context_64.c    |  3
-rw-r--r--  arch/powerpc/mm/slb.c               | 29
-rw-r--r--  arch/powerpc/mm/slb_low.S           | 17
-rw-r--r--  arch/powerpc/mm/tlb_64.c            |  5
8 files changed, 140 insertions, 31 deletions
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index aa0486d552a3..ff2940548929 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -122,6 +122,8 @@ int main(void)
         DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
         DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
         DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
+        DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp));
+        DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
 #ifdef CONFIG_HUGETLB_PAGE
         DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
         DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 969f4abcc0be..d77d24a89b39 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -948,7 +948,10 @@ static struct ibm_pa_feature {
         {CPU_FTR_CTRL, 0, 0, 3, 0},
         {CPU_FTR_NOEXECUTE, 0, 0, 6, 0},
         {CPU_FTR_NODSISRALIGN, 0, 1, 1, 1},
+#if 0
+        /* put this back once we know how to test if firmware does 64k IO */
         {CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
+#endif
 };
 
 static void __init check_cpu_pa_features(unsigned long node)
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 106fba391987..52e914238959 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -369,6 +369,7 @@ _GLOBAL(__hash_page_4K)
         rlwinm  r30,r4,32-9+7,31-7,31-7 /* _PAGE_RW -> _PAGE_DIRTY */
         or      r30,r30,r31
         ori     r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
+        oris    r30,r30,_PAGE_COMBO@h
         /* Write the linux PTE atomically (setting busy) */
         stdcx.  r30,0,r6
         bne-    1b
@@ -428,6 +429,14 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
         andi.   r0,r31,_PAGE_HASHPTE
         li      r26,0                   /* Default hidx */
         beq     htab_insert_pte
+
+        /*
+         * Check if the pte was already inserted into the hash table
+         * as a 64k HW page, and invalidate the 64k HPTE if so.
+         */
+        andis.  r0,r31,_PAGE_COMBO@h
+        beq     htab_inval_old_hpte
+
         ld      r6,STK_PARM(r6)(r1)
         ori     r26,r6,0x8000           /* Load the hidx mask */
         ld      r26,0(r26)
@@ -498,6 +507,19 @@ _GLOBAL(htab_call_hpte_remove)
         /* Try all again */
         b       htab_insert_pte
 
+        /*
+         * Call out to C code to invalidate an 64k HW HPTE that is
+         * useless now that the segment has been switched to 4k pages.
+         */
+htab_inval_old_hpte:
+        mr      r3,r29                  /* virtual addr */
+        mr      r4,r31                  /* PTE.pte */
+        li      r5,0                    /* PTE.hidx */
+        li      r6,MMU_PAGE_64K         /* psize */
+        ld      r7,STK_PARM(r8)(r1)     /* local */
+        bl      .flush_hash_page
+        b       htab_insert_pte
+
 htab_bail_ok:
         li      r3,0
         b       htab_bail
@@ -638,6 +660,12 @@ _GLOBAL(__hash_page_64K)
          * is changing this PTE anyway and might hash it.
          */
         bne-    ht64_bail_ok
+BEGIN_FTR_SECTION
+        /* Check if PTE has the cache-inhibit bit set */
+        andi.   r0,r31,_PAGE_NO_CACHE
+        /* If so, bail out and refault as a 4k page */
+        bne-    ht64_bail_ok
+END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
         /* Prepare new PTE value (turn access RW into DIRTY, then
          * add BUSY,HASHPTE and ACCESSED)
          */
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index b43ed92ef471..d03fd2b4445e 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -92,10 +92,15 @@ unsigned long htab_size_bytes;
 unsigned long htab_hash_mask;
 int mmu_linear_psize = MMU_PAGE_4K;
 int mmu_virtual_psize = MMU_PAGE_4K;
+int mmu_vmalloc_psize = MMU_PAGE_4K;
+int mmu_io_psize = MMU_PAGE_4K;
 #ifdef CONFIG_HUGETLB_PAGE
 int mmu_huge_psize = MMU_PAGE_16M;
 unsigned int HPAGE_SHIFT;
 #endif
+#ifdef CONFIG_PPC_64K_PAGES
+int mmu_ci_restrictions;
+#endif
 
 /* There are definitions of page sizes arrays to be used when none
  * is provided by the firmware.
@@ -308,20 +313,31 @@ static void __init htab_init_page_sizes(void)
         else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                 mmu_linear_psize = MMU_PAGE_1M;
 
+#ifdef CONFIG_PPC_64K_PAGES
         /*
          * Pick a size for the ordinary pages. Default is 4K, we support
-         * 64K if cache inhibited large pages are supported by the
-         * processor
+         * 64K for user mappings and vmalloc if supported by the processor.
+         * We only use 64k for ioremap if the processor
+         * (and firmware) support cache-inhibited large pages.
+         * If not, we use 4k and set mmu_ci_restrictions so that
+         * hash_page knows to switch processes that use cache-inhibited
+         * mappings to 4k pages.
          */
-#ifdef CONFIG_PPC_64K_PAGES
-        if (mmu_psize_defs[MMU_PAGE_64K].shift &&
-            cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
+        if (mmu_psize_defs[MMU_PAGE_64K].shift) {
                 mmu_virtual_psize = MMU_PAGE_64K;
+                mmu_vmalloc_psize = MMU_PAGE_64K;
+                if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
+                        mmu_io_psize = MMU_PAGE_64K;
+                else
+                        mmu_ci_restrictions = 1;
+        }
 #endif
 
-        printk(KERN_DEBUG "Page orders: linear mapping = %d, others = %d\n",
+        printk(KERN_DEBUG "Page orders: linear mapping = %d, "
+               "virtual = %d, io = %d\n",
                mmu_psize_defs[mmu_linear_psize].shift,
-               mmu_psize_defs[mmu_virtual_psize].shift);
+               mmu_psize_defs[mmu_virtual_psize].shift,
+               mmu_psize_defs[mmu_io_psize].shift);
 
 #ifdef CONFIG_HUGETLB_PAGE
         /* Init large page size. Currently, we pick 16M or 1M depending
@@ -556,6 +572,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
         pte_t *ptep;
         cpumask_t tmp;
         int rc, user_region = 0, local = 0;
+        int psize;
 
         DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
                 ea, access, trap);
@@ -575,10 +592,15 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
                         return 1;
                 }
                 vsid = get_vsid(mm->context.id, ea);
+                psize = mm->context.user_psize;
                 break;
         case VMALLOC_REGION_ID:
                 mm = &init_mm;
                 vsid = get_kernel_vsid(ea);
+                if (ea < VMALLOC_END)
+                        psize = mmu_vmalloc_psize;
+                else
+                        psize = mmu_io_psize;
                 break;
         default:
                 /* Not a valid range
@@ -629,7 +651,40 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 #ifndef CONFIG_PPC_64K_PAGES
         rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
 #else
-        if (mmu_virtual_psize == MMU_PAGE_64K)
+        if (mmu_ci_restrictions) {
+                /* If this PTE is non-cacheable, switch to 4k */
+                if (psize == MMU_PAGE_64K &&
+                    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
+                        if (user_region) {
+                                psize = MMU_PAGE_4K;
+                                mm->context.user_psize = MMU_PAGE_4K;
+                                mm->context.sllp = SLB_VSID_USER |
+                                        mmu_psize_defs[MMU_PAGE_4K].sllp;
+                        } else if (ea < VMALLOC_END) {
+                                /*
+                                 * some driver did a non-cacheable mapping
+                                 * in vmalloc space, so switch vmalloc
+                                 * to 4k pages
+                                 */
+                                printk(KERN_ALERT "Reducing vmalloc segment "
+                                       "to 4kB pages because of "
+                                       "non-cacheable mapping\n");
+                                psize = mmu_vmalloc_psize = MMU_PAGE_4K;
+                        }
+                }
+                if (user_region) {
+                        if (psize != get_paca()->context.user_psize) {
+                                get_paca()->context = mm->context;
+                                slb_flush_and_rebolt();
+                        }
+                } else if (get_paca()->vmalloc_sllp !=
+                           mmu_psize_defs[mmu_vmalloc_psize].sllp) {
+                        get_paca()->vmalloc_sllp =
+                                mmu_psize_defs[mmu_vmalloc_psize].sllp;
+                        slb_flush_and_rebolt();
+                }
+        }
+        if (psize == MMU_PAGE_64K)
                 rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
         else
                 rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
@@ -681,7 +736,18 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 #ifndef CONFIG_PPC_64K_PAGES
         __hash_page_4K(ea, access, vsid, ptep, trap, local);
 #else
-        if (mmu_virtual_psize == MMU_PAGE_64K)
+        if (mmu_ci_restrictions) {
+                /* If this PTE is non-cacheable, switch to 4k */
+                if (mm->context.user_psize == MMU_PAGE_64K &&
+                    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
+                        mm->context.user_psize = MMU_PAGE_4K;
+                        mm->context.sllp = SLB_VSID_USER |
+                                mmu_psize_defs[MMU_PAGE_4K].sllp;
+                        get_paca()->context = mm->context;
+                        slb_flush_and_rebolt();
+                }
+        }
+        if (mm->context.user_psize == MMU_PAGE_64K)
                 __hash_page_64K(ea, access, vsid, ptep, trap, local);
         else
                 __hash_page_4K(ea, access, vsid, ptep, trap, local);
diff --git a/arch/powerpc/mm/mmu_context_64.c b/arch/powerpc/mm/mmu_context_64.c
index 714a84dd8d5d..65d18dca266f 100644
--- a/arch/powerpc/mm/mmu_context_64.c
+++ b/arch/powerpc/mm/mmu_context_64.c
@@ -49,6 +49,9 @@ again:
         }
 
         mm->context.id = index;
+        mm->context.user_psize = mmu_virtual_psize;
+        mm->context.sllp = SLB_VSID_USER |
+                mmu_psize_defs[mmu_virtual_psize].sllp;
 
         return 0;
 }
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 2cc61736feee..6a8bf6c6000e 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -60,19 +60,19 @@ static inline void create_slbe(unsigned long ea, unsigned long flags,
                 : "memory" );
 }
 
-static void slb_flush_and_rebolt(void)
+void slb_flush_and_rebolt(void)
 {
         /* If you change this make sure you change SLB_NUM_BOLTED
          * appropriately too. */
-        unsigned long linear_llp, virtual_llp, lflags, vflags;
+        unsigned long linear_llp, vmalloc_llp, lflags, vflags;
         unsigned long ksp_esid_data;
 
         WARN_ON(!irqs_disabled());
 
         linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
-        virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
         lflags = SLB_VSID_KERNEL | linear_llp;
-        vflags = SLB_VSID_KERNEL | virtual_llp;
+        vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
         ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
         if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
@@ -164,11 +164,10 @@ static inline void patch_slb_encoding(unsigned int *insn_addr,
 
 void slb_initialize(void)
 {
-        unsigned long linear_llp, virtual_llp;
+        unsigned long linear_llp, vmalloc_llp, io_llp;
         static int slb_encoding_inited;
         extern unsigned int *slb_miss_kernel_load_linear;
-        extern unsigned int *slb_miss_kernel_load_virtual;
-        extern unsigned int *slb_miss_user_load_normal;
+        extern unsigned int *slb_miss_kernel_load_io;
 #ifdef CONFIG_HUGETLB_PAGE
         extern unsigned int *slb_miss_user_load_huge;
         unsigned long huge_llp;
@@ -178,18 +177,19 @@ void slb_initialize(void)
 
         /* Prepare our SLB miss handler based on our page size */
         linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
-        virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+        io_llp = mmu_psize_defs[mmu_io_psize].sllp;
+        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
+        get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
+
         if (!slb_encoding_inited) {
                 slb_encoding_inited = 1;
                 patch_slb_encoding(slb_miss_kernel_load_linear,
                                    SLB_VSID_KERNEL | linear_llp);
-                patch_slb_encoding(slb_miss_kernel_load_virtual,
-                                   SLB_VSID_KERNEL | virtual_llp);
-                patch_slb_encoding(slb_miss_user_load_normal,
-                                   SLB_VSID_USER | virtual_llp);
+                patch_slb_encoding(slb_miss_kernel_load_io,
+                                   SLB_VSID_KERNEL | io_llp);
 
                 DBG("SLB: linear LLP = %04x\n", linear_llp);
-                DBG("SLB: virtual LLP = %04x\n", virtual_llp);
+                DBG("SLB: io LLP = %04x\n", io_llp);
 #ifdef CONFIG_HUGETLB_PAGE
                 patch_slb_encoding(slb_miss_user_load_huge,
                                    SLB_VSID_USER | huge_llp);
@@ -204,7 +204,7 @@ void slb_initialize(void)
         unsigned long lflags, vflags;
 
         lflags = SLB_VSID_KERNEL | linear_llp;
-        vflags = SLB_VSID_KERNEL | virtual_llp;
+        vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
         /* Invalidate the entire SLB (even slot 0) & all the ERATS */
         asm volatile("isync":::"memory");
@@ -212,7 +212,6 @@ void slb_initialize(void)
         asm volatile("isync; slbia; isync":::"memory");
         create_slbe(PAGE_OFFSET, lflags, 0);
 
-        /* VMALLOC space has 4K pages always for now */
         create_slbe(VMALLOC_START, vflags, 1);
 
         /* We don't bolt the stack for the time being - we're in boot,
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index abfaabf667bf..8548dcf8ef8b 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -59,10 +59,19 @@ _GLOBAL(slb_miss_kernel_load_linear)
         li      r11,0
         b       slb_finish_load
 
-1:      /* vmalloc/ioremap mapping encoding bits, the "li" instruction below
+1:      /* vmalloc/ioremap mapping encoding bits, the "li" instructions below
          * will be patched by the kernel at boot
          */
-_GLOBAL(slb_miss_kernel_load_virtual)
+BEGIN_FTR_SECTION
+        /* check whether this is in vmalloc or ioremap space */
+        clrldi  r11,r10,48
+        cmpldi  r11,(VMALLOC_SIZE >> 28) - 1
+        bgt     5f
+        lhz     r11,PACAVMALLOCSLLP(r13)
+        b       slb_finish_load
+5:
+END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
+_GLOBAL(slb_miss_kernel_load_io)
         li      r11,0
         b       slb_finish_load
 
@@ -96,9 +105,7 @@ _GLOBAL(slb_miss_user_load_huge)
 1:
 #endif /* CONFIG_HUGETLB_PAGE */
 
-_GLOBAL(slb_miss_user_load_normal)
-        li      r11,0
-
+        lhz     r11,PACACONTEXTSLLP(r13)
 2:
         ld      r9,PACACONTEXTID(r13)
         rldimi  r10,r9,USER_ESID_BITS,0
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index f734b11566c2..e7449b068c82 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -131,7 +131,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
 {
         struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
         unsigned long vsid;
-        unsigned int psize = mmu_virtual_psize;
+        unsigned int psize;
         int i;
 
         i = batch->index;
@@ -148,7 +148,8 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
 #else
                 BUG();
 #endif
-        }
+        } else
+                psize = pte_pagesize_index(pte);
 
         /*
          * This can happen when we are in the middle of a TLB batch and