author:    David Gibson <david@gibson.dropbear.id.au>  2005-07-27 14:44:19 -0400
committer: Linus Torvalds <torvalds@g5.osdl.org>       2005-07-27 19:25:58 -0400
commit:    533f08172e21521a74e15cdef8a13c929596d506
tree:      60f2117ffb85f61fcdc0e17d864d0114096a4192 /arch/ppc64/mm/stab.c
parent:    6fdfb382813d66757aef4d83e369f8153a40b371
[PATCH] ppc64: dynamically allocate segment tables
PPC64 machines before Power4 need a segment table page allocated for each CPU. Currently these are allocated statically in a big array in head.S for all CPUs. The segment tables need to be in the first segment (so do_stab_bolted doesn't take a recursive fault on the stab itself), but other than that there are no constraints which require the stabs for the secondary CPUs to be statically allocated.

This patch allocates segment tables dynamically during boot, using lmb_alloc() to ensure they are within the first 256M segment. This reduces the kernel image size by 192k...

Tested on RS64 iSeries, POWER3 pSeries, and POWER5.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
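The "first 256M segment" constraint the message describes is exactly the upper bound the patch below passes to lmb_alloc_base(): on ppc64 a segment spans 1 << SID_SHIFT bytes, with SID_SHIFT being 28 (256MB). A minimal standalone sketch of that arithmetic follows; the SID_SHIFT value is taken from the ppc64 headers, while fits_in_first_segment() is an invented helper used purely for illustration and is not part of the patch.

#include <stdio.h>

/* SID_SHIFT as used on ppc64: each segment covers 1 << 28 = 256MB. */
#define SID_SHIFT 28UL

/* Illustrative helper: would a stab page at this address sit entirely
 * inside the first (bolted) segment?  lmb_alloc_base() in the patch
 * enforces this by passing 1 << SID_SHIFT as its max_addr argument. */
static int fits_in_first_segment(unsigned long addr, unsigned long size)
{
	return addr + size <= (1UL << SID_SHIFT);
}

int main(void)
{
	unsigned long limit = 1UL << SID_SHIFT;

	printf("first segment ends at 0x%lx (%lu MB)\n", limit, limit >> 20);
	printf("page at 0x0ffff000 fits: %d\n",
	       fits_in_first_segment(0x0ffff000UL, 4096));
	printf("page at 0x10000000 fits: %d\n",
	       fits_in_first_segment(0x10000000UL, 4096));
	return 0;
}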
Diffstat (limited to 'arch/ppc64/mm/stab.c')
-rw-r--r--  arch/ppc64/mm/stab.c  35
1 file changed, 35 insertions, 0 deletions
diff --git a/arch/ppc64/mm/stab.c b/arch/ppc64/mm/stab.c
index df4bbe14153c..1b83f002bf27 100644
--- a/arch/ppc64/mm/stab.c
+++ b/arch/ppc64/mm/stab.c
@@ -18,6 +18,8 @@
 #include <asm/mmu_context.h>
 #include <asm/paca.h>
 #include <asm/cputable.h>
+#include <asm/lmb.h>
+#include <asm/abs_addr.h>
 
 struct stab_entry {
 	unsigned long esid_data;
@@ -224,6 +226,39 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
 extern void slb_initialize(void);
 
 /*
+ * Allocate segment tables for secondary CPUs.  These must all go in
+ * the first (bolted) segment, so that do_stab_bolted won't get a
+ * recursive segment miss on the segment table itself.
+ */
+void stabs_alloc(void)
+{
+	int cpu;
+
+	if (cpu_has_feature(CPU_FTR_SLB))
+		return;
+
+	for_each_cpu(cpu) {
+		unsigned long newstab;
+
+		if (cpu == 0)
+			continue; /* stab for CPU 0 is statically allocated */
+
+		newstab = lmb_alloc_base(PAGE_SIZE, PAGE_SIZE, 1<<SID_SHIFT);
+		if (! newstab)
+			panic("Unable to allocate segment table for CPU %d.\n",
+			      cpu);
+
+		newstab += KERNELBASE;
+
+		memset((void *)newstab, 0, PAGE_SIZE);
+
+		paca[cpu].stab_addr = newstab;
+		paca[cpu].stab_real = virt_to_abs(newstab);
+		printk(KERN_DEBUG "Segment table for CPU %d at 0x%lx virtual, 0x%lx absolute\n", cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
+	}
+}
+
+/*
  * Build an entry for the base kernel segment and put it into
  * the segment table or SLB.  All other segment table or SLB
  * entries are faulted in.
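To make the control flow of stabs_alloc() easier to follow outside the kernel, here is a hedged userspace sketch of the same per-CPU allocation loop. NR_CPUS, the toy paca array, and toy_lmb_alloc_base() are all invented stand-ins for this illustration; the real lmb_alloc_base(), paca fields, and KERNELBASE/virt_to_abs() handling are only in the kernel code above.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define SID_SHIFT	28UL		/* one segment = 256MB */
#define NR_CPUS		4		/* toy value for the sketch */

/* Stand-in for the per-CPU paca fields the patch fills in. */
struct toy_paca {
	unsigned long stab_addr;	/* "virtual" address of the stab */
	unsigned long stab_real;	/* "absolute" address of the stab */
};

static struct toy_paca paca[NR_CPUS];
static unsigned long next_free = PAGE_SIZE;	/* toy bump-allocator cursor */

/* Toy version of lmb_alloc_base(): hand out aligned blocks but refuse
 * anything that would end past max_addr (here, the first segment). */
static unsigned long toy_lmb_alloc_base(unsigned long size,
					unsigned long align,
					unsigned long max_addr)
{
	unsigned long addr = (next_free + align - 1) & ~(align - 1);

	if (addr + size > max_addr)
		return 0;
	next_free = addr + size;
	return addr;
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		unsigned long newstab;

		if (cpu == 0)
			continue;	/* CPU 0's stab stays statically allocated */

		newstab = toy_lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
					     1UL << SID_SHIFT);
		if (!newstab) {
			fprintf(stderr, "no room for CPU %d stab\n", cpu);
			return 1;
		}

		/* The kernel code zeroes the page and records both the
		 * kernel-virtual and absolute addresses; the sketch just
		 * records the toy address twice. */
		paca[cpu].stab_addr = newstab;
		paca[cpu].stab_real = newstab;
		printf("stab for CPU %d at 0x%lx\n", cpu, newstab);
	}
	return 0;
}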