aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/ia64/kernel/mca.c55
1 files changed, 26 insertions, 29 deletions
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 846e7e036b13..6e17aed53135 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -17,7 +17,7 @@
  * Copyright (C) 2000 Intel
  * Copyright (C) Chuck Fleckenstein <cfleck@co.intel.com>
  *
- * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
+ * Copyright (C) 1999, 2004-2008 Silicon Graphics, Inc.
  * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
  *
  * Copyright (C) 2006 FUJITSU LIMITED
@@ -1762,11 +1762,8 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
 /* Caller prevents this from being called after init */
 static void * __init_refok mca_bootmem(void)
 {
-	void *p;
-
-	p = alloc_bootmem(sizeof(struct ia64_mca_cpu) * NR_CPUS +
-			  KERNEL_STACK_SIZE);
-	return (void *)ALIGN((unsigned long)p, KERNEL_STACK_SIZE);
+	return __alloc_bootmem(sizeof(struct ia64_mca_cpu),
+			       KERNEL_STACK_SIZE, 0);
 }
 
 /* Do per-CPU MCA-related initialization. */
@@ -1774,33 +1771,33 @@ void __cpuinit
 ia64_mca_cpu_init(void *cpu_data)
 {
 	void *pal_vaddr;
+	void *data;
+	long sz = sizeof(struct ia64_mca_cpu);
+	int cpu = smp_processor_id();
 	static int first_time = 1;
 
-	if (first_time) {
-		void *mca_data;
-		int cpu;
-
-		first_time = 0;
-		mca_data = mca_bootmem();
-		for (cpu = 0; cpu < NR_CPUS; cpu++) {
-			format_mca_init_stack(mca_data,
-					offsetof(struct ia64_mca_cpu, mca_stack),
-					"MCA", cpu);
-			format_mca_init_stack(mca_data,
-					offsetof(struct ia64_mca_cpu, init_stack),
-					"INIT", cpu);
-			__per_cpu_mca[cpu] = __pa(mca_data);
-			mca_data += sizeof(struct ia64_mca_cpu);
-		}
-	}
-
 	/*
-	 * The MCA info structure was allocated earlier and its
-	 * physical address saved in __per_cpu_mca[cpu].  Copy that
-	 * address * to ia64_mca_data so we can access it as a per-CPU
-	 * variable.
+	 * Structure will already be allocated if cpu has been online,
+	 * then offlined.
 	 */
-	__get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()];
+	if (__per_cpu_mca[cpu]) {
+		data = __va(__per_cpu_mca[cpu]);
+	} else {
+		if (first_time) {
+			data = mca_bootmem();
+			first_time = 0;
+		} else
+			data = page_address(alloc_pages_node(numa_node_id(),
+					GFP_KERNEL, get_order(sz)));
+		if (!data)
+			panic("Could not allocate MCA memory for cpu %d\n",
+					cpu);
+	}
+	format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, mca_stack),
+		"MCA", cpu);
+	format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, init_stack),
+		"INIT", cpu);
+	__get_cpu_var(ia64_mca_data) = __per_cpu_mca[cpu] = __pa(data);
 
 	/*
 	 * Stash away a copy of the PTE needed to map the per-CPU page.