author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2005-11-06 19:06:55 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-11-06 19:56:47 -0500
commit	3c726f8dee6f55e96475574e9f645327e461884c (patch)
tree	f67c381e8f57959aa4a94bda4c68e24253cd8171 /arch/powerpc/mm/slb.c
parent	f912696ab330bf539231d1f8032320f2a08b850f (diff)
[PATCH] ppc64: support 64k pages

Adds a new CONFIG_PPC_64K_PAGES which, when enabled, changes the kernel
base page size to 64K.  The resulting kernel still boots on any
hardware.  On current machines with 4K pages support only, the kernel
will maintain 16 "subpages" for each 64K page transparently.

Note that while real 64K capable HW has been tested, the current patch
will not enable it yet as such hardware is not released yet, and I'm
still verifying with the firmware architects the proper way to get the
information from the newer hypervisors.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
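For illustration, the "16 subpages" arithmetic the message describes is just 64K / 4K = 16; a minimal sketch of that layout follows, with hypothetical names that are not part of the patch:

/* Illustration only, not from the patch: a 64K kernel base page on
 * hardware that can only hash 4K pages is covered by 16 subpages.
 */
#define PAGE_SHIFT_64K		16	/* 64K base page */
#define HW_PAGE_SHIFT		12	/* 4K hardware page */
#define SUBPAGES_PER_PAGE	(1 << (PAGE_SHIFT_64K - HW_PAGE_SHIFT))	/* 16 */

/* Subpage index of an address within its 64K base page: */
static inline unsigned int subpage_index(unsigned long addr)
{
	return (addr >> HW_PAGE_SHIFT) & (SUBPAGES_PER_PAGE - 1);
}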
Diffstat (limited to 'arch/powerpc/mm/slb.c')
-rw-r--r--	arch/powerpc/mm/slb.c | 102
 1 file changed, 87 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 0473953f6a37..60e852f2f8e5 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -14,14 +14,32 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#undef DEBUG
+
 #include <linux/config.h>
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/paca.h>
 #include <asm/cputable.h>
+#include <asm/cacheflush.h>
+
+#ifdef DEBUG
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...)
+#endif
 
-extern void slb_allocate(unsigned long ea);
+extern void slb_allocate_realmode(unsigned long ea);
+extern void slb_allocate_user(unsigned long ea);
+
+static void slb_allocate(unsigned long ea)
+{
+	/* Currently, we do real mode for all SLBs including user, but
+	 * that will change if we bring back dynamic VSIDs
+	 */
+	slb_allocate_realmode(ea);
+}
 
 static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
 {
@@ -46,13 +64,15 @@ static void slb_flush_and_rebolt(void)
 {
 	/* If you change this make sure you change SLB_NUM_BOLTED
 	 * appropriately too. */
-	unsigned long ksp_flags = SLB_VSID_KERNEL;
+	unsigned long linear_llp, virtual_llp, lflags, vflags;
 	unsigned long ksp_esid_data;
 
 	WARN_ON(!irqs_disabled());
 
-	if (cpu_has_feature(CPU_FTR_16M_PAGE))
-		ksp_flags |= SLB_VSID_L;
+	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
+	virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+	lflags = SLB_VSID_KERNEL | linear_llp;
+	vflags = SLB_VSID_KERNEL | virtual_llp;
 
 	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
 	if ((ksp_esid_data & ESID_MASK) == KERNELBASE)
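The sllp values read here come from the per-page-size table this patch introduces; a trimmed sketch of the shape this code assumes (the authoritative definition is in include/asm-ppc64/mmu.h in the full patch):

/* Sketch of the descriptor indexed above; fields trimmed to the two
 * this file uses.
 */
struct mmu_psize_def {
	unsigned int	shift;	/* page size as a shift: 12 = 4K, 16 = 64K */
	unsigned long	sllp;	/* SLB L and LP bits to feed to slbmte */
	/* ... HPTE encoding fields omitted here ... */
};

extern struct mmu_psize_def mmu_psize_defs[];
extern int mmu_linear_psize;	/* page size of the linear (1:1) mapping */
extern int mmu_virtual_psize;	/* page size of kernel virtual mappings */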
@@ -67,9 +87,9 @@ static void slb_flush_and_rebolt(void)
 		     /* Slot 2 - kernel stack */
 		     "slbmte	%2,%3\n"
 		     "isync"
-		     :: "r"(mk_vsid_data(VMALLOCBASE, SLB_VSID_KERNEL)),
+		     :: "r"(mk_vsid_data(VMALLOCBASE, vflags)),
 		        "r"(mk_esid_data(VMALLOCBASE, 1)),
-		        "r"(mk_vsid_data(ksp_esid_data, ksp_flags)),
+		        "r"(mk_vsid_data(ksp_esid_data, lflags)),
 		        "r"(ksp_esid_data)
 		     : "memory");
 }
@@ -102,6 +122,9 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 
 	get_paca()->slb_cache_ptr = 0;
 	get_paca()->context = mm->context;
+#ifdef CONFIG_PPC_64K_PAGES
+	get_paca()->pgdir = mm->pgd;
+#endif /* CONFIG_PPC_64K_PAGES */
 
 	/*
 	 * preload some userspace segments into the SLB.
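A note on the pgdir line above: presumably the low-level hash-miss paths need the page-table root without dereferencing current->mm, so switch_slb() mirrors it into the PACA when 64K pages are configured. A sketch of the consumer side under that assumption; the helper name is hypothetical:

/* Hypothetical consumer: the real users are the low-level hash miss
 * paths, which read the cached paca->pgdir instead of current->mm->pgd.
 */
static inline pgd_t *miss_path_pgdir(void)
{
	return get_paca()->pgdir;	/* mirrored in switch_slb() above */
}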
@@ -131,28 +154,77 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	slb_allocate(unmapped_base);
 }
 
+static inline void patch_slb_encoding(unsigned int *insn_addr,
+				      unsigned int immed)
+{
+	/* Assume the instruction had a "0" immediate value, just
+	 * "or" in the new value
+	 */
+	*insn_addr |= immed;
+	flush_icache_range((unsigned long)insn_addr, 4+
+			   (unsigned long)insn_addr);
+}
+
 void slb_initialize(void)
 {
+	unsigned long linear_llp, virtual_llp;
+	static int slb_encoding_inited;
+	extern unsigned int *slb_miss_kernel_load_linear;
+	extern unsigned int *slb_miss_kernel_load_virtual;
+	extern unsigned int *slb_miss_user_load_normal;
+#ifdef CONFIG_HUGETLB_PAGE
+	extern unsigned int *slb_miss_user_load_huge;
+	unsigned long huge_llp;
+
+	huge_llp = mmu_psize_defs[mmu_huge_psize].sllp;
+#endif
+
+	/* Prepare our SLB miss handler based on our page size */
+	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
+	virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+	if (!slb_encoding_inited) {
+		slb_encoding_inited = 1;
+		patch_slb_encoding(slb_miss_kernel_load_linear,
+				   SLB_VSID_KERNEL | linear_llp);
+		patch_slb_encoding(slb_miss_kernel_load_virtual,
+				   SLB_VSID_KERNEL | virtual_llp);
+		patch_slb_encoding(slb_miss_user_load_normal,
+				   SLB_VSID_USER | virtual_llp);
+
+		DBG("SLB: linear  LLP = %04x\n", linear_llp);
+		DBG("SLB: virtual LLP = %04x\n", virtual_llp);
+#ifdef CONFIG_HUGETLB_PAGE
+		patch_slb_encoding(slb_miss_user_load_huge,
+				   SLB_VSID_USER | huge_llp);
+		DBG("SLB: huge    LLP = %04x\n", huge_llp);
+#endif
+	}
+
 	/* On iSeries the bolted entries have already been set up by
 	 * the hypervisor from the lparMap data in head.S */
 #ifndef CONFIG_PPC_ISERIES
-	unsigned long flags = SLB_VSID_KERNEL;
+ {
+	unsigned long lflags, vflags;
 
-	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
-	if (cpu_has_feature(CPU_FTR_16M_PAGE))
-		flags |= SLB_VSID_L;
+	lflags = SLB_VSID_KERNEL | linear_llp;
+	vflags = SLB_VSID_KERNEL | virtual_llp;
 
-	asm volatile("isync":::"memory");
-	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
+	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
+	asm volatile("isync":::"memory");
+	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
-	create_slbe(KERNELBASE, flags, 0);
-	create_slbe(VMALLOCBASE, SLB_VSID_KERNEL, 1);
+	create_slbe(KERNELBASE, lflags, 0);
+
+	/* VMALLOC space has 4K pages always for now */
+	create_slbe(VMALLOCBASE, vflags, 1);
+
 	/* We don't bolt the stack for the time being - we're in boot,
 	 * so the stack is in the bolted segment.  By the time it goes
 	 * elsewhere, we'll call _switch() which will bolt in the new
 	 * one. */
 	asm volatile("isync":::"memory");
-#endif
+ }
+#endif /* CONFIG_PPC_ISERIES */
 
 	get_paca()->stab_rr = SLB_NUM_BOLTED;
 }
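patch_slb_encoding() relies on the patched instructions having been assembled with a zero immediate field: PowerPC D-form instructions such as ori carry an unsigned 16-bit immediate in the low 16 bits of the 32-bit instruction word, so OR-ing the flags in (followed by the icache flush) is enough as long as the encoding fits in those 16 bits. The actual patched instructions live in the SLB miss assembler, which is not in this hunk; a standalone sketch of the trick with toy values, not the kernel's code:

#include <stdint.h>
#include <stdio.h>

/* Toy demonstration: "ori r5,r5,0" assembles to 0x60a50000 with its
 * 16-bit immediate in the low half of the word, so a zero immediate
 * can be filled in later with a single OR.  (In the kernel the store
 * must be followed by flush_icache_range() on the patched word.)
 */
static void patch_immediate(uint32_t *insn, uint16_t immed)
{
	*insn |= immed;			/* assumes the field was 0 */
}

int main(void)
{
	uint32_t insn = 0x60a50000;	/* ori r5,r5,0 */

	patch_immediate(&insn, 0x0110);	/* toy flag value */
	printf("patched insn: 0x%08x\n", (unsigned int)insn);	/* 0x60a50110 */
	return 0;
}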