author	Tony Luck <tony.luck@intel.com>	2008-08-12 13:34:20 -0400
committer	Tony Luck <tony.luck@intel.com>	2008-08-12 13:34:20 -0400
commit	10617bbe84628eb18ab5f723d3ba35005adde143 (patch)
tree	2d1dada5b7d8dd8cd060f54a597aaa34ccc8edb6 /arch/ia64/mm/contig.c
parent	45fc3c4d9b7ab12798af43a73aea53eeecd16acf (diff)
[IA64] Ensure cpu0 can access per-cpu variables in early boot code
ia64 handles per-cpu variables a little differently from other architectures in that it maps the physical memory allocated for each cpu at a constant virtual address (0xffffffffffff0000). This mapping is not enabled until the architecture-specific cpu_init() function is run, which causes problems since some generic code is run before this point. In particular, when CONFIG_PRINTK_TIME is enabled, the boot cpu will trap on the access to per-cpu memory at the first printk() call, so the boot will fail without the kernel printing anything to the console.

Fix this by allocating per-cpu memory for cpu0 in the kernel data section and doing all initialization needed to enable per-cpu access in head.S, before calling any generic code. Other cpus must take care not to access per-cpu variables too early, but their code path from start_secondary() to cpu_init() is entirely in arch/ia64.

Signed-off-by: Tony Luck <tony.luck@intel.com>
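For readers less familiar with the per-cpu scheme the commit message relies on, here is a minimal userspace C sketch of the offset-table idea: each cpu's copy of the per-cpu section sits at the link-time address plus __per_cpu_offset[cpu], and with this patch cpu0's copy comes from static kernel data while the other cpus' copies still come from the boot-time allocation. This is not kernel code; every *_demo name is invented for illustration.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS_DEMO      4
#define PERCPU_SIZE_DEMO  4096          /* stand-in for PERCPU_PAGE_SIZE */

/* Link-time image of the per-cpu section (the role of __per_cpu_start). */
static char percpu_template_demo[PERCPU_SIZE_DEMO];

/* cpu0's copy lives in static storage, mirroring "kernel data section". */
static char cpu0_area_demo[PERCPU_SIZE_DEMO];

/* One offset per cpu, the role of __per_cpu_offset[]. */
static ptrdiff_t per_cpu_offset_demo[NR_CPUS_DEMO];

/* Roughly mirrors the patched per_cpu_init(): cpu0 only records its offset,
 * the other cpus copy the template into a dynamically allocated block, one
 * PERCPU_SIZE_DEMO slot each (analogous to the __alloc_bootmem() chunk). */
static void per_cpu_init_demo(void)
{
        char *area = malloc((size_t)PERCPU_SIZE_DEMO * (NR_CPUS_DEMO - 1));
        int cpu;

        /* In the real kernel, head.S has already set up cpu0's area before
         * this point; the copy here is only to keep the demo self-contained. */
        memcpy(cpu0_area_demo, percpu_template_demo, PERCPU_SIZE_DEMO);
        per_cpu_offset_demo[0] = cpu0_area_demo - percpu_template_demo;

        for (cpu = 1; cpu < NR_CPUS_DEMO; cpu++) {
                memcpy(area, percpu_template_demo, PERCPU_SIZE_DEMO);
                per_cpu_offset_demo[cpu] = area - percpu_template_demo;
                area += PERCPU_SIZE_DEMO;
        }
}

/* per_cpu(var, cpu) boils down to: link-time address of var + cpu's offset. */
static void *per_cpu_ptr_demo(void *template_addr, int cpu)
{
        return (char *)template_addr + per_cpu_offset_demo[cpu];
}

int main(void)
{
        int *counter_demo = (int *)percpu_template_demo;  /* a "per-cpu" int */
        int cpu;

        per_cpu_init_demo();
        for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++)
                *(int *)per_cpu_ptr_demo(counter_demo, cpu) = cpu * 10;
        printf("cpu1's private copy = %d\n",
               *(int *)per_cpu_ptr_demo(counter_demo, 1));
        return 0;
}

Because cpu0's area is already usable before any generic code runs, the loop in the diff below starts at cpu 1 and the boot-time allocation no longer needs to cover cpu0.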
Diffstat (limited to 'arch/ia64/mm/contig.c')
-rw-r--r--	arch/ia64/mm/contig.c	10
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 798bf9835a51..e566ff43884a 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -163,8 +163,14 @@ per_cpu_init (void)
         * get_zeroed_page().
         */
        if (first_time) {
+               void *cpu0_data = __phys_per_cpu_start - PERCPU_PAGE_SIZE;
+
                first_time=0;
-               for (cpu = 0; cpu < NR_CPUS; cpu++) {
+
+               __per_cpu_offset[0] = (char *) cpu0_data - __per_cpu_start;
+               per_cpu(local_per_cpu_offset, 0) = __per_cpu_offset[0];
+
+               for (cpu = 1; cpu < NR_CPUS; cpu++) {
                        memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
                        __per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
                        cpu_data += PERCPU_PAGE_SIZE;
@@ -177,7 +183,7 @@ per_cpu_init (void)
 static inline void
 alloc_per_cpu_data(void)
 {
-       cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
+       cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS-1,
                           PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 }
 #else