author     David Gibson <david@gibson.dropbear.id.au>   2005-07-27 14:44:19 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>        2005-07-27 19:25:58 -0400
commit     533f08172e21521a74e15cdef8a13c929596d506 (patch)
tree       60f2117ffb85f61fcdc0e17d864d0114096a4192 /arch/ppc64
parent     6fdfb382813d66757aef4d83e369f8153a40b371 (diff)
[PATCH] ppc64: dynamically allocate segment tables
PPC64 machines before POWER4 need a segment table page allocated for each CPU.
Currently these are allocated statically in a big array in head.S for all CPUs.
The segment tables need to be in the first segment (so that do_stab_bolted
doesn't take a recursive fault on the stab itself), but other than that there
are no constraints requiring the stabs for the secondary CPUs to be statically
allocated.

This patch allocates segment tables dynamically during boot, using
lmb_alloc_base() to ensure they are within the first 256MB segment. This
reduces the kernel image size by 192k (the static array was 48 pages of
4096 bytes each).

Tested on RS64 iSeries, POWER3 pSeries, and POWER5.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
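As an aside, the two quantitative claims in the message are easy to check with
a minimal user-space sketch (illustrative only, not part of the patch; the
ppc64 values SID_SHIFT = 28, i.e. 256MB segments, a 4kB page size, and the
example allocation address are all assumptions here):

/* Illustrative sketch only -- not from the patch.  Assumes the ppc64
 * values SID_SHIFT = 28 (1 << 28 = 256MB segments) and 4kB pages. */
#include <stdio.h>

#define SID_SHIFT     28
#define PAGE_SIZE     4096UL
#define MAX_STAB_CPUS 48UL    /* size of the old static stab_array */

int main(void)
{
        /* Image-size saving from dropping the static array in head.S */
        printf("static stab_array: %lu bytes (%luk)\n",
               PAGE_SIZE * MAX_STAB_CPUS,
               PAGE_SIZE * MAX_STAB_CPUS / 1024);   /* 196608 bytes = 192k */

        /* Placement constraint: any address below 1 << SID_SHIFT has
         * segment ID 0, so it lies in the bolted first segment that
         * do_stab_bolted can reach without a recursive segment miss. */
        unsigned long stab = 0x0ff0000UL;   /* hypothetical allocation */
        printf("stab at 0x%lx falls in segment %lu\n",
               stab, stab >> SID_SHIFT);    /* segment 0: OK */
        return 0;
}

The lmb_alloc_base(PAGE_SIZE, PAGE_SIZE, 1 << SID_SHIFT) call in the patch
below enforces exactly this upper bound on the allocated address.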
Diffstat (limited to 'arch/ppc64')
-rw-r--r--  arch/ppc64/kernel/head.S   |  7
-rw-r--r--  arch/ppc64/kernel/setup.c  |  2
-rw-r--r--  arch/ppc64/kernel/smp.c    | 15
-rw-r--r--  arch/ppc64/mm/stab.c       | 35
4 files changed, 37 insertions(+), 22 deletions(-)
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index 93ebcac0d5a2..3f447712e3ff 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -2131,13 +2131,6 @@ empty_zero_page:
 swapper_pg_dir:
 	.space 4096
 
-#ifdef CONFIG_SMP
-/* 1 page segment table per cpu (max 48, cpu0 allocated at STAB0_PHYS_ADDR) */
-	.globl stab_array
-stab_array:
-	.space 4096 * 48
-#endif
-
 /*
  * This space gets a copy of optional info passed to us by the bootstrap
  * Used to pass parameters into the kernel like root=/dev/sda1, etc.
diff --git a/arch/ppc64/kernel/setup.c b/arch/ppc64/kernel/setup.c
index e80f10c89824..687e85595208 100644
--- a/arch/ppc64/kernel/setup.c
+++ b/arch/ppc64/kernel/setup.c
@@ -1068,6 +1068,8 @@ void __init setup_arch(char **cmdline_p)
 	irqstack_early_init();
 	emergency_stack_init();
 
+	stabs_alloc();
+
 	/* set up the bootmem stuff with available memory */
 	do_init_bootmem();
 	sparse_init();
diff --git a/arch/ppc64/kernel/smp.c b/arch/ppc64/kernel/smp.c
index 2fcddfcb594d..793b562da653 100644
--- a/arch/ppc64/kernel/smp.c
+++ b/arch/ppc64/kernel/smp.c
@@ -65,8 +65,6 @@ struct smp_ops_t *smp_ops;
 
 static volatile unsigned int cpu_callin_map[NR_CPUS];
 
-extern unsigned char stab_array[];
-
 void smp_call_function_interrupt(void);
 
 int smt_enabled_at_boot = 1;
@@ -492,19 +490,6 @@ int __devinit __cpu_up(unsigned int cpu)
 
 	paca[cpu].default_decr = tb_ticks_per_jiffy;
 
-	if (!cpu_has_feature(CPU_FTR_SLB)) {
-		void *tmp;
-
-		/* maximum of 48 CPUs on machines with a segment table */
-		if (cpu >= 48)
-			BUG();
-
-		tmp = &stab_array[PAGE_SIZE * cpu];
-		memset(tmp, 0, PAGE_SIZE);
-		paca[cpu].stab_addr = (unsigned long)tmp;
-		paca[cpu].stab_real = virt_to_abs(tmp);
-	}
-
 	/* Make sure callin-map entry is 0 (can be leftover a CPU
 	 * hotplug
 	 */
diff --git a/arch/ppc64/mm/stab.c b/arch/ppc64/mm/stab.c
index df4bbe14153c..1b83f002bf27 100644
--- a/arch/ppc64/mm/stab.c
+++ b/arch/ppc64/mm/stab.c
@@ -18,6 +18,8 @@
 #include <asm/mmu_context.h>
 #include <asm/paca.h>
 #include <asm/cputable.h>
+#include <asm/lmb.h>
+#include <asm/abs_addr.h>
 
 struct stab_entry {
 	unsigned long esid_data;
@@ -224,6 +226,39 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
 extern void slb_initialize(void);
 
 /*
+ * Allocate segment tables for secondary CPUs.  These must all go in
+ * the first (bolted) segment, so that do_stab_bolted won't get a
+ * recursive segment miss on the segment table itself.
+ */
+void stabs_alloc(void)
+{
+	int cpu;
+
+	if (cpu_has_feature(CPU_FTR_SLB))
+		return;
+
+	for_each_cpu(cpu) {
+		unsigned long newstab;
+
+		if (cpu == 0)
+			continue; /* stab for CPU 0 is statically allocated */
+
+		newstab = lmb_alloc_base(PAGE_SIZE, PAGE_SIZE, 1<<SID_SHIFT);
+		if (! newstab)
+			panic("Unable to allocate segment table for CPU %d.\n",
+			      cpu);
+
+		newstab += KERNELBASE;
+
+		memset((void *)newstab, 0, PAGE_SIZE);
+
+		paca[cpu].stab_addr = newstab;
+		paca[cpu].stab_real = virt_to_abs(newstab);
+		printk(KERN_DEBUG "Segment table for CPU %d at 0x%lx virtual, 0x%lx absolute\n", cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
+	}
+}
+
+/*
  * Build an entry for the base kernel segment and put it into
  * the segment table or SLB.  All other segment table or SLB
  * entries are faulted in.