diff options
Diffstat (limited to 'arch/ia64')
-rw-r--r-- | arch/ia64/Kconfig | 19 | ||||
-rw-r--r-- | arch/ia64/configs/tiger_defconfig | 2 | ||||
-rw-r--r-- | arch/ia64/kernel/acpi.c | 12 | ||||
-rw-r--r-- | arch/ia64/kernel/iosapic.c | 6 | ||||
-rw-r--r-- | arch/ia64/kernel/irq.c | 13 | ||||
-rw-r--r-- | arch/ia64/kernel/mca.c | 6 | ||||
-rw-r--r-- | arch/ia64/kernel/perfmon.c | 5 | ||||
-rw-r--r-- | arch/ia64/kernel/smpboot.c | 114 | ||||
-rw-r--r-- | arch/ia64/kernel/time.c | 9 | ||||
-rw-r--r-- | arch/ia64/mm/contig.c | 4 | ||||
-rw-r--r-- | arch/ia64/mm/discontig.c | 9 |
11 files changed, 181 insertions, 18 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 199eeaf0f4e3..5e0f58e37c59 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig | |||
@@ -272,6 +272,25 @@ config SCHED_SMT | |||
272 | Intel IA64 chips with MultiThreading at a cost of slightly increased | 272 | Intel IA64 chips with MultiThreading at a cost of slightly increased |
273 | overhead in some places. If unsure say N here. | 273 | overhead in some places. If unsure say N here. |
274 | 274 | ||
275 | config PERMIT_BSP_REMOVE | ||
276 | bool "Support removal of Bootstrap Processor" | ||
277 | depends on HOTPLUG_CPU | ||
278 | default n | ||
279 | ---help--- | ||
280 | Say Y here if your platform SAL will support removal of BSP with HOTPLUG_CPU | ||
281 | support. | ||
282 | |||
283 | config FORCE_CPEI_RETARGET | ||
284 | bool "Force assumption that CPEI can be re-targetted" | ||
285 | depends on PERMIT_BSP_REMOVE | ||
286 | default n | ||
287 | ---help--- | ||
288 | Say Y if you need to force the assumption that CPEI can be re-targetted to | ||
289 | any cpu in the system. This hint is available via ACPI 3.0 specifications. | ||
290 | Tiger4 systems are capable of re-directing CPEI to any CPU other than BSP. | ||
291 | This option is useful to enable this feature on older BIOS's as well. | ||
292 | You can also enable this by using boot command line option force_cpei=1. | ||
293 | |||
275 | config PREEMPT | 294 | config PREEMPT |
276 | bool "Preemptible Kernel" | 295 | bool "Preemptible Kernel" |
277 | help | 296 | help |
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig index b1e8f09e9fd5..aed034d33976 100644 --- a/arch/ia64/configs/tiger_defconfig +++ b/arch/ia64/configs/tiger_defconfig | |||
@@ -114,6 +114,8 @@ CONFIG_FORCE_MAX_ZONEORDER=17 | |||
114 | CONFIG_SMP=y | 114 | CONFIG_SMP=y |
115 | CONFIG_NR_CPUS=4 | 115 | CONFIG_NR_CPUS=4 |
116 | CONFIG_HOTPLUG_CPU=y | 116 | CONFIG_HOTPLUG_CPU=y |
117 | CONFIG_PERMIT_BSP_REMOVE=y | ||
118 | CONFIG_FORCE_CPEI_RETARGET=y | ||
117 | # CONFIG_SCHED_SMT is not set | 119 | # CONFIG_SCHED_SMT is not set |
118 | # CONFIG_PREEMPT is not set | 120 | # CONFIG_PREEMPT is not set |
119 | CONFIG_SELECT_MEMORY_MODEL=y | 121 | CONFIG_SELECT_MEMORY_MODEL=y |
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 9ad94ddf6687..fe1d90b0c6ea 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c | |||
@@ -287,16 +287,20 @@ acpi_parse_plat_int_src(acpi_table_entry_header * header, | |||
287 | unsigned int can_cpei_retarget(void) | 287 | unsigned int can_cpei_retarget(void) |
288 | { | 288 | { |
289 | extern int cpe_vector; | 289 | extern int cpe_vector; |
290 | extern unsigned int force_cpei_retarget; | ||
290 | 291 | ||
291 | /* | 292 | /* |
292 | * Only if CPEI is supported and the override flag | 293 | * Only if CPEI is supported and the override flag |
293 | * is present, otherwise return that its re-targettable | 294 | * is present, otherwise return that its re-targettable |
294 | * if we are in polling mode. | 295 | * if we are in polling mode. |
295 | */ | 296 | */ |
296 | if (cpe_vector > 0 && !acpi_cpei_override) | 297 | if (cpe_vector > 0) { |
297 | return 0; | 298 | if (acpi_cpei_override || force_cpei_retarget) |
298 | else | 299 | return 1; |
299 | return 1; | 300 | else |
301 | return 0; | ||
302 | } | ||
303 | return 1; | ||
300 | } | 304 | } |
301 | 305 | ||
302 | unsigned int is_cpu_cpei_target(unsigned int cpu) | 306 | unsigned int is_cpu_cpei_target(unsigned int cpu) |
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index 574084f343fa..37ac742da8ed 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c | |||
@@ -631,6 +631,7 @@ get_target_cpu (unsigned int gsi, int vector) | |||
631 | { | 631 | { |
632 | #ifdef CONFIG_SMP | 632 | #ifdef CONFIG_SMP |
633 | static int cpu = -1; | 633 | static int cpu = -1; |
634 | extern int cpe_vector; | ||
634 | 635 | ||
635 | /* | 636 | /* |
636 | * In case of vector shared by multiple RTEs, all RTEs that | 637 | * In case of vector shared by multiple RTEs, all RTEs that |
@@ -653,6 +654,11 @@ get_target_cpu (unsigned int gsi, int vector) | |||
653 | if (!cpu_online(smp_processor_id())) | 654 | if (!cpu_online(smp_processor_id())) |
654 | return cpu_physical_id(smp_processor_id()); | 655 | return cpu_physical_id(smp_processor_id()); |
655 | 656 | ||
657 | #ifdef CONFIG_ACPI | ||
658 | if (cpe_vector > 0 && vector == IA64_CPEP_VECTOR) | ||
659 | return get_cpei_target_cpu(); | ||
660 | #endif | ||
661 | |||
656 | #ifdef CONFIG_NUMA | 662 | #ifdef CONFIG_NUMA |
657 | { | 663 | { |
658 | int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0; | 664 | int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0; |
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index d33244c32759..5ce908ef9c95 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c | |||
@@ -163,8 +163,19 @@ void fixup_irqs(void) | |||
163 | { | 163 | { |
164 | unsigned int irq; | 164 | unsigned int irq; |
165 | extern void ia64_process_pending_intr(void); | 165 | extern void ia64_process_pending_intr(void); |
166 | extern void ia64_disable_timer(void); | ||
167 | extern volatile int time_keeper_id; | ||
168 | |||
169 | ia64_disable_timer(); | ||
170 | |||
171 | /* | ||
172 | * Find a new timesync master | ||
173 | */ | ||
174 | if (smp_processor_id() == time_keeper_id) { | ||
175 | time_keeper_id = first_cpu(cpu_online_map); | ||
176 | printk ("CPU %d is now promoted to time-keeper master\n", time_keeper_id); | ||
177 | } | ||
166 | 178 | ||
167 | ia64_set_itv(1<<16); | ||
168 | /* | 179 | /* |
169 | * Phase 1: Locate irq's bound to this cpu and | 180 | * Phase 1: Locate irq's bound to this cpu and |
170 | * relocate them for cpu removal. | 181 | * relocate them for cpu removal. |
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 355af15287c7..967571b466a2 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c | |||
@@ -289,6 +289,7 @@ ia64_mca_log_sal_error_record(int sal_info_type) | |||
289 | #ifdef CONFIG_ACPI | 289 | #ifdef CONFIG_ACPI |
290 | 290 | ||
291 | int cpe_vector = -1; | 291 | int cpe_vector = -1; |
292 | int ia64_cpe_irq = -1; | ||
292 | 293 | ||
293 | static irqreturn_t | 294 | static irqreturn_t |
294 | ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs) | 295 | ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs) |
@@ -1444,11 +1445,13 @@ void __devinit | |||
1444 | ia64_mca_cpu_init(void *cpu_data) | 1445 | ia64_mca_cpu_init(void *cpu_data) |
1445 | { | 1446 | { |
1446 | void *pal_vaddr; | 1447 | void *pal_vaddr; |
1448 | static int first_time = 1; | ||
1447 | 1449 | ||
1448 | if (smp_processor_id() == 0) { | 1450 | if (first_time) { |
1449 | void *mca_data; | 1451 | void *mca_data; |
1450 | int cpu; | 1452 | int cpu; |
1451 | 1453 | ||
1454 | first_time = 0; | ||
1452 | mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu) | 1455 | mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu) |
1453 | * NR_CPUS + KERNEL_STACK_SIZE); | 1456 | * NR_CPUS + KERNEL_STACK_SIZE); |
1454 | mca_data = (void *)(((unsigned long)mca_data + | 1457 | mca_data = (void *)(((unsigned long)mca_data + |
@@ -1704,6 +1707,7 @@ ia64_mca_late_init(void) | |||
1704 | desc = irq_descp(irq); | 1707 | desc = irq_descp(irq); |
1705 | desc->status |= IRQ_PER_CPU; | 1708 | desc->status |= IRQ_PER_CPU; |
1706 | setup_irq(irq, &mca_cpe_irqaction); | 1709 | setup_irq(irq, &mca_cpe_irqaction); |
1710 | ia64_cpe_irq = irq; | ||
1707 | } | 1711 | } |
1708 | ia64_mca_register_cpev(cpe_vector); | 1712 | ia64_mca_register_cpev(cpe_vector); |
1709 | IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__); | 1713 | IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__); |
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 410d4804fa6e..18c51c37a9a3 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c | |||
@@ -6718,6 +6718,7 @@ __initcall(pfm_init); | |||
6718 | void | 6718 | void |
6719 | pfm_init_percpu (void) | 6719 | pfm_init_percpu (void) |
6720 | { | 6720 | { |
6721 | static int first_time=1; | ||
6721 | /* | 6722 | /* |
6722 | * make sure no measurement is active | 6723 | * make sure no measurement is active |
6723 | * (may inherit programmed PMCs from EFI). | 6724 | * (may inherit programmed PMCs from EFI). |
@@ -6730,8 +6731,10 @@ pfm_init_percpu (void) | |||
6730 | */ | 6731 | */ |
6731 | pfm_unfreeze_pmu(); | 6732 | pfm_unfreeze_pmu(); |
6732 | 6733 | ||
6733 | if (smp_processor_id() == 0) | 6734 | if (first_time) { |
6734 | register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction); | 6735 | register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction); |
6736 | first_time=0; | ||
6737 | } | ||
6735 | 6738 | ||
6736 | ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR); | 6739 | ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR); |
6737 | ia64_srlz_d(); | 6740 | ia64_srlz_d(); |
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 8f44e7d2df66..e9d37bf67d69 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c | |||
@@ -70,6 +70,12 @@ | |||
70 | #endif | 70 | #endif |
71 | 71 | ||
72 | #ifdef CONFIG_HOTPLUG_CPU | 72 | #ifdef CONFIG_HOTPLUG_CPU |
73 | #ifdef CONFIG_PERMIT_BSP_REMOVE | ||
74 | #define bsp_remove_ok 1 | ||
75 | #else | ||
76 | #define bsp_remove_ok 0 | ||
77 | #endif | ||
78 | |||
73 | /* | 79 | /* |
74 | * Store all idle threads, this can be reused instead of creating | 80 | * Store all idle threads, this can be reused instead of creating |
75 | * a new thread. Also avoids complicated thread destroy functionality | 81 | * a new thread. Also avoids complicated thread destroy functionality |
@@ -104,7 +110,7 @@ struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0]; | |||
104 | /* | 110 | /* |
105 | * ITC synchronization related stuff: | 111 | * ITC synchronization related stuff: |
106 | */ | 112 | */ |
107 | #define MASTER 0 | 113 | #define MASTER (0) |
108 | #define SLAVE (SMP_CACHE_BYTES/8) | 114 | #define SLAVE (SMP_CACHE_BYTES/8) |
109 | 115 | ||
110 | #define NUM_ROUNDS 64 /* magic value */ | 116 | #define NUM_ROUNDS 64 /* magic value */ |
@@ -151,6 +157,27 @@ char __initdata no_int_routing; | |||
151 | 157 | ||
152 | unsigned char smp_int_redirect; /* are INT and IPI redirectable by the chipset? */ | 158 | unsigned char smp_int_redirect; /* are INT and IPI redirectable by the chipset? */ |
153 | 159 | ||
160 | #ifdef CONFIG_FORCE_CPEI_RETARGET | ||
161 | #define CPEI_OVERRIDE_DEFAULT (1) | ||
162 | #else | ||
163 | #define CPEI_OVERRIDE_DEFAULT (0) | ||
164 | #endif | ||
165 | |||
166 | unsigned int force_cpei_retarget = CPEI_OVERRIDE_DEFAULT; | ||
167 | |||
168 | static int __init | ||
169 | cmdl_force_cpei(char *str) | ||
170 | { | ||
171 | int value=0; | ||
172 | |||
173 | get_option (&str, &value); | ||
174 | force_cpei_retarget = value; | ||
175 | |||
176 | return 1; | ||
177 | } | ||
178 | |||
179 | __setup("force_cpei=", cmdl_force_cpei); | ||
180 | |||
154 | static int __init | 181 | static int __init |
155 | nointroute (char *str) | 182 | nointroute (char *str) |
156 | { | 183 | { |
@@ -161,6 +188,27 @@ nointroute (char *str) | |||
161 | 188 | ||
162 | __setup("nointroute", nointroute); | 189 | __setup("nointroute", nointroute); |
163 | 190 | ||
191 | static void fix_b0_for_bsp(void) | ||
192 | { | ||
193 | #ifdef CONFIG_HOTPLUG_CPU | ||
194 | int cpuid; | ||
195 | static int fix_bsp_b0 = 1; | ||
196 | |||
197 | cpuid = smp_processor_id(); | ||
198 | |||
199 | /* | ||
200 | * Cache the b0 value on the first AP that comes up | ||
201 | */ | ||
202 | if (!(fix_bsp_b0 && cpuid)) | ||
203 | return; | ||
204 | |||
205 | sal_boot_rendez_state[0].br[0] = sal_boot_rendez_state[cpuid].br[0]; | ||
206 | printk ("Fixed BSP b0 value from CPU %d\n", cpuid); | ||
207 | |||
208 | fix_bsp_b0 = 0; | ||
209 | #endif | ||
210 | } | ||
211 | |||
164 | void | 212 | void |
165 | sync_master (void *arg) | 213 | sync_master (void *arg) |
166 | { | 214 | { |
@@ -327,8 +375,9 @@ smp_setup_percpu_timer (void) | |||
327 | static void __devinit | 375 | static void __devinit |
328 | smp_callin (void) | 376 | smp_callin (void) |
329 | { | 377 | { |
330 | int cpuid, phys_id; | 378 | int cpuid, phys_id, itc_master; |
331 | extern void ia64_init_itm(void); | 379 | extern void ia64_init_itm(void); |
380 | extern volatile int time_keeper_id; | ||
332 | 381 | ||
333 | #ifdef CONFIG_PERFMON | 382 | #ifdef CONFIG_PERFMON |
334 | extern void pfm_init_percpu(void); | 383 | extern void pfm_init_percpu(void); |
@@ -336,6 +385,7 @@ smp_callin (void) | |||
336 | 385 | ||
337 | cpuid = smp_processor_id(); | 386 | cpuid = smp_processor_id(); |
338 | phys_id = hard_smp_processor_id(); | 387 | phys_id = hard_smp_processor_id(); |
388 | itc_master = time_keeper_id; | ||
339 | 389 | ||
340 | if (cpu_online(cpuid)) { | 390 | if (cpu_online(cpuid)) { |
341 | printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n", | 391 | printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n", |
@@ -343,6 +393,8 @@ smp_callin (void) | |||
343 | BUG(); | 393 | BUG(); |
344 | } | 394 | } |
345 | 395 | ||
396 | fix_b0_for_bsp(); | ||
397 | |||
346 | lock_ipi_calllock(); | 398 | lock_ipi_calllock(); |
347 | cpu_set(cpuid, cpu_online_map); | 399 | cpu_set(cpuid, cpu_online_map); |
348 | unlock_ipi_calllock(); | 400 | unlock_ipi_calllock(); |
@@ -365,8 +417,8 @@ smp_callin (void) | |||
365 | * calls spin_unlock_bh(), which calls spin_unlock_bh(), which calls | 417 | * calls spin_unlock_bh(), which calls spin_unlock_bh(), which calls |
366 | * local_bh_enable(), which bugs out if irqs are not enabled... | 418 | * local_bh_enable(), which bugs out if irqs are not enabled... |
367 | */ | 419 | */ |
368 | Dprintk("Going to syncup ITC with BP.\n"); | 420 | Dprintk("Going to syncup ITC with ITC Master.\n"); |
369 | ia64_sync_itc(0); | 421 | ia64_sync_itc(itc_master); |
370 | } | 422 | } |
371 | 423 | ||
372 | /* | 424 | /* |
@@ -638,6 +690,47 @@ remove_siblinginfo(int cpu) | |||
638 | } | 690 | } |
639 | 691 | ||
640 | extern void fixup_irqs(void); | 692 | extern void fixup_irqs(void); |
693 | |||
694 | int migrate_platform_irqs(unsigned int cpu) | ||
695 | { | ||
696 | int new_cpei_cpu; | ||
697 | irq_desc_t *desc = NULL; | ||
698 | cpumask_t mask; | ||
699 | int retval = 0; | ||
700 | |||
701 | /* | ||
702 | * dont permit CPEI target to removed. | ||
703 | */ | ||
704 | if (cpe_vector > 0 && is_cpu_cpei_target(cpu)) { | ||
705 | printk ("CPU (%d) is CPEI Target\n", cpu); | ||
706 | if (can_cpei_retarget()) { | ||
707 | /* | ||
708 | * Now re-target the CPEI to a different processor | ||
709 | */ | ||
710 | new_cpei_cpu = any_online_cpu(cpu_online_map); | ||
711 | mask = cpumask_of_cpu(new_cpei_cpu); | ||
712 | set_cpei_target_cpu(new_cpei_cpu); | ||
713 | desc = irq_descp(ia64_cpe_irq); | ||
714 | /* | ||
715 | * Switch for now, immediatly, we need to do fake intr | ||
716 | * as other interrupts, but need to study CPEI behaviour with | ||
717 | * polling before making changes. | ||
718 | */ | ||
719 | if (desc) { | ||
720 | desc->handler->disable(ia64_cpe_irq); | ||
721 | desc->handler->set_affinity(ia64_cpe_irq, mask); | ||
722 | desc->handler->enable(ia64_cpe_irq); | ||
723 | printk ("Re-targetting CPEI to cpu %d\n", new_cpei_cpu); | ||
724 | } | ||
725 | } | ||
726 | if (!desc) { | ||
727 | printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu); | ||
728 | retval = -EBUSY; | ||
729 | } | ||
730 | } | ||
731 | return retval; | ||
732 | } | ||
733 | |||
641 | /* must be called with cpucontrol mutex held */ | 734 | /* must be called with cpucontrol mutex held */ |
642 | int __cpu_disable(void) | 735 | int __cpu_disable(void) |
643 | { | 736 | { |
@@ -646,8 +739,17 @@ int __cpu_disable(void) | |||
646 | /* | 739 | /* |
647 | * dont permit boot processor for now | 740 | * dont permit boot processor for now |
648 | */ | 741 | */ |
649 | if (cpu == 0) | 742 | if (cpu == 0 && !bsp_remove_ok) { |
650 | return -EBUSY; | 743 | printk ("Your platform does not support removal of BSP\n"); |
744 | return (-EBUSY); | ||
745 | } | ||
746 | |||
747 | cpu_clear(cpu, cpu_online_map); | ||
748 | |||
749 | if (migrate_platform_irqs(cpu)) { | ||
750 | cpu_set(cpu, cpu_online_map); | ||
751 | return (-EBUSY); | ||
752 | } | ||
651 | 753 | ||
652 | remove_siblinginfo(cpu); | 754 | remove_siblinginfo(cpu); |
653 | cpu_clear(cpu, cpu_online_map); | 755 | cpu_clear(cpu, cpu_online_map); |
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 028a2b95936c..1ca130a83856 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c | |||
@@ -32,7 +32,7 @@ | |||
32 | 32 | ||
33 | extern unsigned long wall_jiffies; | 33 | extern unsigned long wall_jiffies; |
34 | 34 | ||
35 | #define TIME_KEEPER_ID 0 /* smp_processor_id() of time-keeper */ | 35 | volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */ |
36 | 36 | ||
37 | #ifdef CONFIG_IA64_DEBUG_IRQ | 37 | #ifdef CONFIG_IA64_DEBUG_IRQ |
38 | 38 | ||
@@ -71,7 +71,7 @@ timer_interrupt (int irq, void *dev_id, struct pt_regs *regs) | |||
71 | 71 | ||
72 | new_itm += local_cpu_data->itm_delta; | 72 | new_itm += local_cpu_data->itm_delta; |
73 | 73 | ||
74 | if (smp_processor_id() == TIME_KEEPER_ID) { | 74 | if (smp_processor_id() == time_keeper_id) { |
75 | /* | 75 | /* |
76 | * Here we are in the timer irq handler. We have irqs locally | 76 | * Here we are in the timer irq handler. We have irqs locally |
77 | * disabled, but we don't know if the timer_bh is running on | 77 | * disabled, but we don't know if the timer_bh is running on |
@@ -236,6 +236,11 @@ static struct irqaction timer_irqaction = { | |||
236 | .name = "timer" | 236 | .name = "timer" |
237 | }; | 237 | }; |
238 | 238 | ||
239 | void __devinit ia64_disable_timer(void) | ||
240 | { | ||
241 | ia64_set_itv(1 << 16); | ||
242 | } | ||
243 | |||
239 | void __init | 244 | void __init |
240 | time_init (void) | 245 | time_init (void) |
241 | { | 246 | { |
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index acaaec4e4681..9855ba318094 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c | |||
@@ -181,13 +181,15 @@ per_cpu_init (void) | |||
181 | { | 181 | { |
182 | void *cpu_data; | 182 | void *cpu_data; |
183 | int cpu; | 183 | int cpu; |
184 | static int first_time=1; | ||
184 | 185 | ||
185 | /* | 186 | /* |
186 | * get_free_pages() cannot be used before cpu_init() done. BSP | 187 | * get_free_pages() cannot be used before cpu_init() done. BSP |
187 | * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls | 188 | * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls |
188 | * get_zeroed_page(). | 189 | * get_zeroed_page(). |
189 | */ | 190 | */ |
190 | if (smp_processor_id() == 0) { | 191 | if (first_time) { |
192 | first_time=0; | ||
191 | cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS, | 193 | cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS, |
192 | PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); | 194 | PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); |
193 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | 195 | for (cpu = 0; cpu < NR_CPUS; cpu++) { |
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index c87d6d1d5813..573d5cc63e2b 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c | |||
@@ -528,12 +528,17 @@ void __init find_memory(void) | |||
528 | void *per_cpu_init(void) | 528 | void *per_cpu_init(void) |
529 | { | 529 | { |
530 | int cpu; | 530 | int cpu; |
531 | static int first_time = 1; | ||
532 | |||
531 | 533 | ||
532 | if (smp_processor_id() != 0) | 534 | if (smp_processor_id() != 0) |
533 | return __per_cpu_start + __per_cpu_offset[smp_processor_id()]; | 535 | return __per_cpu_start + __per_cpu_offset[smp_processor_id()]; |
534 | 536 | ||
535 | for (cpu = 0; cpu < NR_CPUS; cpu++) | 537 | if (first_time) { |
536 | per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu]; | 538 | first_time = 0; |
539 | for (cpu = 0; cpu < NR_CPUS; cpu++) | ||
540 | per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu]; | ||
541 | } | ||
537 | 542 | ||
538 | return __per_cpu_start + __per_cpu_offset[smp_processor_id()]; | 543 | return __per_cpu_start + __per_cpu_offset[smp_processor_id()]; |
539 | } | 544 | } |