author    Tony Luck <tony.luck@intel.com>  2008-08-12 13:34:20 -0400
committer Tony Luck <tony.luck@intel.com>  2008-08-12 13:34:20 -0400
commit    10617bbe84628eb18ab5f723d3ba35005adde143
tree      2d1dada5b7d8dd8cd060f54a597aaa34ccc8edb6 /arch/ia64/kernel
parent    45fc3c4d9b7ab12798af43a73aea53eeecd16acf
[IA64] Ensure cpu0 can access per-cpu variables in early boot code
ia64 handles per-cpu variables a little differently from other architectures in that it maps the physical memory allocated for each cpu at a constant virtual address (0xffffffffffff0000). This mapping is not enabled until the architecture specific cpu_init() function is run, which causes problems since some generic code is run before this point. In particular, when CONFIG_PRINTK_TIME is enabled, the boot cpu will trap on the access to per-cpu memory at the first printk() call, so the boot will fail without the kernel printing anything to the console.

Fix this by allocating percpu memory for cpu0 in the kernel data section and doing all initialization to enable percpu access in head.S before calling any generic code. Other cpus must take care not to access per-cpu variables too early, but their code path from start_secondary() to cpu_init() is all in arch/ia64.

Signed-off-by: Tony Luck <tony.luck@intel.com>
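For context, the ar.k3 convention the patch relies on (and that the setup.c comment below spells out) can be sketched in C roughly as follows. This is an illustration only, not code from the patch; the helper name percpu_phys_addr is hypothetical.

/*
 * Sketch of the ar.k3 (IA64_KR_PER_CPU_DATA) convention: ar.k3 holds the
 * physical base of this CPU's per-cpu area minus __per_cpu_start, so
 * low-level code such as the MCA handler can translate a per-cpu
 * variable's address with a single add: phys = ar.k3 + &per_cpu_var.
 */
#include <stdint.h>

static inline uint64_t percpu_phys_addr(uint64_t ar_k3, uint64_t per_cpu_var_addr)
{
        /* per_cpu_var_addr is &per_cpu_var in the fixed per-cpu mapping
         * at 0xffffffffffff0000; no TLB lookup is needed. */
        return ar_k3 + per_cpu_var_addr;
}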
Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/head.S          26
-rw-r--r--  arch/ia64/kernel/setup.c         18
-rw-r--r--  arch/ia64/kernel/smpboot.c        2
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S    3
4 files changed, 40 insertions, 9 deletions
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 41c712917ff7..8bdea8eb62e3 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -359,7 +359,31 @@ start_ap:
         mov ar.rsc=0           // place RSE in enforced lazy mode
         ;;
         loadrs                 // clear the dirty partition
-        mov IA64_KR(PER_CPU_DATA)=r0   // clear physical per-CPU base
+        movl r19=__phys_per_cpu_start
+        mov r18=PERCPU_PAGE_SIZE
+        ;;
+#ifndef CONFIG_SMP
+        add r19=r19,r18
+        ;;
+#else
+(isAP)   br.few 2f
+        mov r20=r19
+        sub r19=r19,r18
+        ;;
+        shr.u r18=r18,3
+1:
+        ld8 r21=[r20],8;;
+        st8 [r19]=r21,8
+        adds r18=-1,r18;;
+        cmp4.lt p7,p6=0,r18
+(p7)     br.cond.dptk.few 1b
+2:
+#endif
+        tpa r19=r19
+        ;;
+        .pred.rel.mutex isBP,isAP
+(isBP)   mov IA64_KR(PER_CPU_DATA)=r19  // per-CPU base for cpu0
+(isAP)   mov IA64_KR(PER_CPU_DATA)=r0   // clear physical per-CPU base
         ;;
         mov ar.bspstore=r2     // establish the new RSE stack
         ;;
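In rough C terms, the new boot-CPU path above amounts to the sketch below (illustration only, not part of the patch; the function name, the smp/is_boot_cpu parameters, and the PERCPU_PAGE_SIZE value are assumptions): cpu0's per-cpu page is reserved by vmlinux.lds.S immediately below __phys_per_cpu_start, the initial per-cpu image is copied into it eight bytes at a time, and its physical address is written to ar.k3; application processors clear ar.k3 until cpu_init() runs.

/*
 * Illustrative C equivalent of the start_ap change above; names and the
 * page-size constant are assumptions for the sketch, not kernel API.
 */
#include <stddef.h>
#include <string.h>

#define PERCPU_PAGE_SIZE        (64 * 1024)     /* placeholder value */

/*
 * Returns the base of the per-cpu area this CPU should use during early
 * boot; the assembly then runs tpa on it and stores the physical address
 * in ar.k3 (IA64_KR(PER_CPU_DATA)).  NULL means "leave ar.k3 clear".
 */
static void *early_percpu_base(char *phys_per_cpu_start, int smp, int is_boot_cpu)
{
        if (!smp)
                return phys_per_cpu_start + PERCPU_PAGE_SIZE;   /* !CONFIG_SMP branch */

        if (!is_boot_cpu)
                return NULL;            /* AP: ar.k3 stays zero until cpu_init() */

        /*
         * Boot CPU: the linker script reserves a PERCPU_PAGE_SIZE area just
         * below __phys_per_cpu_start; copy the initial per-cpu data into it
         * (the ld8/st8 loop above does this 8 bytes at a time).
         */
        memcpy(phys_per_cpu_start - PERCPU_PAGE_SIZE, phys_per_cpu_start,
               PERCPU_PAGE_SIZE);
        return phys_per_cpu_start - PERCPU_PAGE_SIZE;
}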
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 593279f33e96..c27d5b2c182b 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -927,17 +927,19 @@ cpu_init (void)
         if (smp_processor_id() == 0) {
                 cpu_set(0, per_cpu(cpu_sibling_map, 0));
                 cpu_set(0, cpu_core_map[0]);
+        } else {
+                /*
+                 * Set ar.k3 so that assembly code in MCA handler can compute
+                 * physical addresses of per cpu variables with a simple:
+                 *      phys = ar.k3 + &per_cpu_var
+                 * and the alt-dtlb-miss handler can set per-cpu mapping into
+                 * the TLB when needed. head.S already did this for cpu0.
+                 */
+                ia64_set_kr(IA64_KR_PER_CPU_DATA,
+                            ia64_tpa(cpu_data) - (long) __per_cpu_start);
         }
 #endif
 
-        /*
-         * We set ar.k3 so that assembly code in MCA handler can compute
-         * physical addresses of per cpu variables with a simple:
-         *      phys = ar.k3 + &per_cpu_var
-         */
-        ia64_set_kr(IA64_KR_PER_CPU_DATA,
-                    ia64_tpa(cpu_data) - (long) __per_cpu_start);
-
         get_max_cacheline_size();
 
         /*
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 03f1a9908afc..b39853a292d5 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -467,7 +467,9 @@ start_secondary (void *unused)
 {
         /* Early console may use I/O ports */
         ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
+#ifndef CONFIG_PRINTK_TIME
         Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
+#endif
         efi_map_pal_code();
         cpu_init();
         preempt_disable();
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 5a77206c2492..de71da811cd6 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -215,6 +215,9 @@ SECTIONS
   /* Per-cpu data: */
   percpu : { } :percpu
   . = ALIGN(PERCPU_PAGE_SIZE);
+#ifdef CONFIG_SMP
+  . = . + PERCPU_PAGE_SIZE;    /* cpu0 per-cpu space */
+#endif
   __phys_per_cpu_start = .;
   .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
     {