author		David Gibson <david@gibson.dropbear.id.au>	2005-07-27 14:44:19 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-07-27 19:25:58 -0400
commit		533f08172e21521a74e15cdef8a13c929596d506 (patch)
tree		60f2117ffb85f61fcdc0e17d864d0114096a4192	/include/asm-ppc64
parent		6fdfb382813d66757aef4d83e369f8153a40b371 (diff)
[PATCH] ppc64: dynamically allocate segment tables
PPC64 machines before Power4 need a segment table page allocated for each CPU.
Currently these are allocated statically in a big array in head.S for all CPUs.
The segment tables need to be in the first segment (so do_stab_bolted doesn't
take a recursive fault on the stab itself), but other than that there are no
constraints which require the stabs for the secondary CPUs to be statically
allocated.

This patch allocates segment tables dynamically during boot, using lmb_alloc()
to ensure they are within the first 256M segment. This reduces the kernel image
size by 192k...

Tested on RS64 iSeries, POWER3 pSeries, and POWER5.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-ppc64')
-rw-r--r--	include/asm-ppc64/mmu.h	2
1 file changed, 2 insertions, 0 deletions
diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
index 3d07ddd11e3..88911803680 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-ppc64/mmu.h
@@ -200,6 +200,8 @@ extern long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 			       unsigned long prpn,
 			       unsigned long vflags, unsigned long rflags);
 
+extern void stabs_alloc(void);
+
 #endif /* __ASSEMBLY__ */
 
 /*
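
The hunk above only exports the new stabs_alloc() declaration; the allocation
itself lives in the ppc64 arch code, outside this diffstat view. As a rough
illustration of the approach the commit message describes (a boot-time per-CPU
allocation kept inside the first 256M segment), a minimal sketch might look
like the code below. The use of lmb_alloc_base(), the 1 << SID_SHIFT bound,
and the paca[] stab fields are assumptions for illustration, not taken from
this diff.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/cputable.h>
#include <asm/lmb.h>
#include <asm/mmu.h>
#include <asm/paca.h>
#include <asm/page.h>

/*
 * Sketch only: allocate one segment-table page per secondary CPU at boot,
 * keeping it below 256M (one segment) so do_stab_bolted never takes a
 * recursive fault on the stab itself.  lmb_alloc_base(), SID_SHIFT and the
 * paca[] field names here are assumptions, not lifted from this patch.
 */
void __init stabs_alloc(void)
{
	int cpu;

	/* Power4 and later use the SLB, not a segment table. */
	if (cpu_has_feature(CPU_FTR_SLB))
		return;

	for_each_cpu(cpu) {
		unsigned long newstab;

		/* CPU 0 keeps its statically set up stab. */
		if (cpu == 0)
			continue;

		/* Allocate a page below the first-segment boundary. */
		newstab = lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
					 1UL << SID_SHIFT);
		if (!newstab)
			panic("Unable to allocate segment table for CPU %d\n",
			      cpu);

		memset(__va(newstab), 0, PAGE_SIZE);

		/* Record where the low-level stab fault handler can find it
		 * (per-CPU paca in the real kernel). */
		paca[cpu].stab_addr = (unsigned long)__va(newstab);
		paca[cpu].stab_real = newstab;
	}
}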