author    Tony Luck <tony.luck@intel.com>  2008-08-12 13:34:20 -0400
committer Tony Luck <tony.luck@intel.com>  2008-08-12 13:34:20 -0400
commit    10617bbe84628eb18ab5f723d3ba35005adde143 (patch)
tree      2d1dada5b7d8dd8cd060f54a597aaa34ccc8edb6 /arch/ia64/mm
parent    45fc3c4d9b7ab12798af43a73aea53eeecd16acf (diff)
[IA64] Ensure cpu0 can access per-cpu variables in early boot code
ia64 handles per-cpu variables a little differently from other architectures in that it maps the physical memory allocated for each cpu at a constant virtual address (0xffffffffffff0000). This mapping is not enabled until the architecture-specific cpu_init() function is run, which causes problems since some generic code is run before this point. In particular, when CONFIG_PRINTK_TIME is enabled, the boot cpu will trap on the access to per-cpu memory at the first printk() call, so the boot will fail without the kernel printing anything to the console.

Fix this by allocating percpu memory for cpu0 in the kernel data section and doing all the initialization to enable percpu access in head.S before calling any generic code. Other cpus must take care not to access per-cpu variables too early, but their code path from start_secondary() to cpu_init() is entirely within arch/ia64.

Signed-off-by: Tony Luck <tony.luck@intel.com>
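For readers unfamiliar with the ia64 scheme, here is a minimal user-space sketch, not taken from the kernel, of the idea the patch implements: the boot cpu's per-cpu area lives in static storage so it is usable before any allocator runs, while the remaining cpus' areas are copied from the linked-in template and addressed through an offset table like __per_cpu_offset[]. All names and sizes below are hypothetical stand-ins, and the pointer arithmetic between separate objects is for illustration only.

/*
 * Illustrative user-space sketch only -- not kernel code.  NR_CPUS,
 * PERCPU_AREA_SIZE, per_cpu_template and cpu0_area stand in for the
 * kernel's NR_CPUS, PERCPU_PAGE_SIZE, the __per_cpu_start template and
 * the cpu0 area placed in the kernel data section.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS          4
#define PERCPU_AREA_SIZE 64

static char per_cpu_template[PERCPU_AREA_SIZE];  /* the linked-in per-cpu image */
static char cpu0_area[PERCPU_AREA_SIZE];         /* static storage, usable at early boot */
static long per_cpu_offset[NR_CPUS];             /* plays the role of __per_cpu_offset[] */

int main(void)
{
	/* cpu0: its area is already in static storage, no allocator needed */
	memcpy(cpu0_area, per_cpu_template, PERCPU_AREA_SIZE);
	per_cpu_offset[0] = cpu0_area - per_cpu_template;

	/* cpus 1..NR_CPUS-1: copy the template into allocated areas,
	 * mirroring the "for (cpu = 1; ...)" loops in the patch */
	char *dyn = malloc(PERCPU_AREA_SIZE * (NR_CPUS - 1));
	for (int cpu = 1; cpu < NR_CPUS; cpu++) {
		char *area = dyn + (cpu - 1) * PERCPU_AREA_SIZE;
		memcpy(area, per_cpu_template, PERCPU_AREA_SIZE);
		per_cpu_offset[cpu] = area - per_cpu_template;
	}

	/* a "per-cpu variable" access: template address + that cpu's offset */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d per-cpu area at %p\n",
		       cpu, (void *)(per_cpu_template + per_cpu_offset[cpu]));

	free(dyn);
	return 0;
}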
Diffstat (limited to 'arch/ia64/mm')
-rw-r--r--  arch/ia64/mm/contig.c      10
-rw-r--r--  arch/ia64/mm/discontig.c    6
2 files changed, 13 insertions, 3 deletions
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 798bf9835a51..e566ff43884a 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -163,8 +163,14 @@ per_cpu_init (void)
 	 * get_zeroed_page().
 	 */
 	if (first_time) {
+		void *cpu0_data = __phys_per_cpu_start - PERCPU_PAGE_SIZE;
+
 		first_time=0;
-		for (cpu = 0; cpu < NR_CPUS; cpu++) {
+
+		__per_cpu_offset[0] = (char *) cpu0_data - __per_cpu_start;
+		per_cpu(local_per_cpu_offset, 0) = __per_cpu_offset[0];
+
+		for (cpu = 1; cpu < NR_CPUS; cpu++) {
 			memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
 			__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
 			cpu_data += PERCPU_PAGE_SIZE;
@@ -177,7 +183,7 @@ per_cpu_init (void)
 static inline void
 alloc_per_cpu_data(void)
 {
-	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
+	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS-1,
 				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 }
 #else
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index d83125e1ed27..78026aabaa7f 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -143,7 +143,11 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
 	int cpu;
 
 	for_each_possible_early_cpu(cpu) {
-		if (node == node_cpuid[cpu].nid) {
+		if (cpu == 0) {
+			void *cpu0_data = __phys_per_cpu_start - PERCPU_PAGE_SIZE;
+			__per_cpu_offset[cpu] = (char*)cpu0_data -
+				__per_cpu_start;
+		} else if (node == node_cpuid[cpu].nid) {
 			memcpy(__va(cpu_data), __phys_per_cpu_start,
 			       __per_cpu_end - __per_cpu_start);
 			__per_cpu_offset[cpu] = (char*)__va(cpu_data) -