path: root/arch/powerpc/mm/slb.c
author	Paul Mackerras <paulus@samba.org>	2006-06-14 20:45:18 -0400
committer	Paul Mackerras <paulus@samba.org>	2006-06-14 20:45:18 -0400
commit	bf72aeba2ffef599d1d386425c9e46b82be657cd (patch)
tree	ead8e5111dbcfa22e156999d1bb8a96e50f06fef /arch/powerpc/mm/slb.c
parent	31925323b1b51bb65db729e029472a8b1f635b7d (diff)
powerpc: Use 64k pages without needing cache-inhibited large pages
Some POWER5+ machines can do 64k hardware pages for normal memory but not for cache-inhibited pages. This patch lets us use 64k hardware pages for most user processes on such machines (assuming the kernel has been configured with CONFIG_PPC_64K_PAGES=y). User processes start out using 64k pages and get switched to 4k pages if they use any non-cacheable mappings.

With this, we use 64k pages for the vmalloc region and 4k pages for the imalloc region. If anything creates a non-cacheable mapping in the vmalloc region, the vmalloc region will get switched to 4k pages. I don't know of any driver other than the DRM that would do this, though, and these machines don't have AGP.

When a region gets switched from 64k pages to 4k pages, we do not have to clear out all the 64k HPTEs from the hash table immediately. We use the _PAGE_COMBO bit in the Linux PTE to indicate whether the page was hashed in as a 64k page or a set of 4k pages. If hash_page is trying to insert a 4k page for a Linux PTE and it sees that it has already been inserted as a 64k page, it first invalidates the 64k HPTE before inserting the 4k HPTE. The hash invalidation routines also use the _PAGE_COMBO bit, to determine whether to look for a 64k HPTE or a set of 4k HPTEs to remove. With those two changes, we can tolerate a mix of 4k and 64k HPTEs in the hash table, and they will all get removed when the address space is torn down.

Signed-off-by: Paul Mackerras <paulus@samba.org>
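As a rough illustration of the demotion described above, the sketch below models the decision hash_page has to make when it finds a cache-inhibited PTE in an address space that is still using 64k pages. It is a standalone, simplified model, not the patch's code: the type, field and flag names (struct context, user_psize, no_cache, combo) are invented stand-ins for the kernel's mm context, _PAGE_NO_CACHE and _PAGE_COMBO machinery, and the real code also has to invalidate any existing 64k HPTE and flush/rebolt the SLB when it demotes.

/*
 * Standalone model of the 64k -> 4k demotion decision described in the
 * commit message.  Illustrative only: names are invented stand-ins,
 * not the kernel's actual definitions.
 */
#include <stdio.h>
#include <stdbool.h>

enum psize { PSIZE_4K, PSIZE_64K };

struct context {
	enum psize user_psize;	/* page size used for this address space */
};

struct pte {
	bool no_cache;		/* models _PAGE_NO_CACHE */
	bool combo;		/* models _PAGE_COMBO: hashed as 4k pieces */
};

/* Decide which hardware page size to hash a faulting PTE with. */
static enum psize hash_psize(struct context *ctx, struct pte *pte)
{
	if (ctx->user_psize == PSIZE_64K && pte->no_cache) {
		/* First cache-inhibited mapping: demote the whole
		 * address space to 4k pages from now on.  The kernel
		 * would also flush and rebolt the SLB here. */
		ctx->user_psize = PSIZE_4K;
	}
	if (ctx->user_psize == PSIZE_4K)
		pte->combo = true;	/* now backed by a set of 4k HPTEs */
	return ctx->user_psize;
}

int main(void)
{
	struct context ctx = { .user_psize = PSIZE_64K };
	struct pte normal = { 0 }, framebuffer = { .no_cache = true };

	printf("normal mapping -> %s\n",
	       hash_psize(&ctx, &normal) == PSIZE_64K ? "64k" : "4k");
	printf("non-cacheable mapping -> %s\n",
	       hash_psize(&ctx, &framebuffer) == PSIZE_64K ? "64k" : "4k");
	printf("normal mapping afterwards -> %s\n",
	       hash_psize(&ctx, &normal) == PSIZE_64K ? "64k" : "4k");
	return 0;
}

The commit message applies the same first-non-cacheable-mapping rule to the vmalloc region, which is presumably why the diff below makes slb_flush_and_rebolt() non-static and introduces mmu_vmalloc_psize: the bolted vmalloc SLB entry has to be recreated with 4k flags when that region is demoted.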
Diffstat (limited to 'arch/powerpc/mm/slb.c')
-rw-r--r--	arch/powerpc/mm/slb.c	29
1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 2cc61736feee..6a8bf6c6000e 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -60,19 +60,19 @@ static inline void create_slbe(unsigned long ea, unsigned long flags,
60 : "memory" ); 60 : "memory" );
61} 61}
62 62
63static void slb_flush_and_rebolt(void) 63void slb_flush_and_rebolt(void)
64{ 64{
65 /* If you change this make sure you change SLB_NUM_BOLTED 65 /* If you change this make sure you change SLB_NUM_BOLTED
66 * appropriately too. */ 66 * appropriately too. */
67 unsigned long linear_llp, virtual_llp, lflags, vflags; 67 unsigned long linear_llp, vmalloc_llp, lflags, vflags;
68 unsigned long ksp_esid_data; 68 unsigned long ksp_esid_data;
69 69
70 WARN_ON(!irqs_disabled()); 70 WARN_ON(!irqs_disabled());
71 71
72 linear_llp = mmu_psize_defs[mmu_linear_psize].sllp; 72 linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
73 virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp; 73 vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
74 lflags = SLB_VSID_KERNEL | linear_llp; 74 lflags = SLB_VSID_KERNEL | linear_llp;
75 vflags = SLB_VSID_KERNEL | virtual_llp; 75 vflags = SLB_VSID_KERNEL | vmalloc_llp;
76 76
77 ksp_esid_data = mk_esid_data(get_paca()->kstack, 2); 77 ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
78 if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET) 78 if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
@@ -164,11 +164,10 @@ static inline void patch_slb_encoding(unsigned int *insn_addr,
 
 void slb_initialize(void)
 {
-	unsigned long linear_llp, virtual_llp;
+	unsigned long linear_llp, vmalloc_llp, io_llp;
 	static int slb_encoding_inited;
 	extern unsigned int *slb_miss_kernel_load_linear;
-	extern unsigned int *slb_miss_kernel_load_virtual;
-	extern unsigned int *slb_miss_user_load_normal;
+	extern unsigned int *slb_miss_kernel_load_io;
 #ifdef CONFIG_HUGETLB_PAGE
 	extern unsigned int *slb_miss_user_load_huge;
 	unsigned long huge_llp;
@@ -178,18 +177,19 @@ void slb_initialize(void)
 
 	/* Prepare our SLB miss handler based on our page size */
 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
-	virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
+	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
+	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
+
 	if (!slb_encoding_inited) {
 		slb_encoding_inited = 1;
 		patch_slb_encoding(slb_miss_kernel_load_linear,
 				   SLB_VSID_KERNEL | linear_llp);
-		patch_slb_encoding(slb_miss_kernel_load_virtual,
-				   SLB_VSID_KERNEL | virtual_llp);
-		patch_slb_encoding(slb_miss_user_load_normal,
-				   SLB_VSID_USER | virtual_llp);
+		patch_slb_encoding(slb_miss_kernel_load_io,
+				   SLB_VSID_KERNEL | io_llp);
 
 		DBG("SLB: linear LLP = %04x\n", linear_llp);
-		DBG("SLB: virtual LLP = %04x\n", virtual_llp);
+		DBG("SLB: io LLP = %04x\n", io_llp);
 #ifdef CONFIG_HUGETLB_PAGE
 		patch_slb_encoding(slb_miss_user_load_huge,
 				   SLB_VSID_USER | huge_llp);
@@ -204,7 +204,7 @@ void slb_initialize(void)
 	unsigned long lflags, vflags;
 
 	lflags = SLB_VSID_KERNEL | linear_llp;
-	vflags = SLB_VSID_KERNEL | virtual_llp;
+	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
 	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
 	asm volatile("isync":::"memory");
@@ -212,7 +212,6 @@ void slb_initialize(void)
 	asm volatile("isync; slbia; isync":::"memory");
 	create_slbe(PAGE_OFFSET, lflags, 0);
 
-	/* VMALLOC space has 4K pages always for now */
 	create_slbe(VMALLOC_START, vflags, 1);
 
 	/* We don't bolt the stack for the time being - we're in boot,