 arch/ia64/Kconfig                 |  19
 arch/ia64/configs/tiger_defconfig |   2
 arch/ia64/kernel/acpi.c           |  14
 arch/ia64/kernel/iosapic.c        |   6
 arch/ia64/kernel/irq.c            |  13
 arch/ia64/kernel/mca.c            |   6
 arch/ia64/kernel/perfmon.c        |   5
 arch/ia64/kernel/smpboot.c        | 114
 arch/ia64/kernel/time.c           |   9
 arch/ia64/kernel/topology.c       |   2
 arch/ia64/mm/contig.c             |   4
 arch/ia64/mm/discontig.c          |   9
 include/asm-ia64/mca.h            |   2
 13 files changed, 186 insertions(+), 19 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index a85ea9d37f05..ff7ae6b664e8 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -271,6 +271,25 @@ config SCHED_SMT
 	  Intel IA64 chips with MultiThreading at a cost of slightly increased
 	  overhead in some places. If unsure say N here.
 
+config PERMIT_BSP_REMOVE
+	bool "Support removal of Bootstrap Processor"
+	depends on HOTPLUG_CPU
+	default n
+	---help---
+	Say Y here if your platform SAL will support removal of BSP with HOTPLUG_CPU
+	support.
+
+config FORCE_CPEI_RETARGET
+	bool "Force assumption that CPEI can be re-targetted"
+	depends on PERMIT_BSP_REMOVE
+	default n
+	---help---
+	Say Y if you need to force the assumption that CPEI can be re-targetted to
+	any cpu in the system. This hint is available via ACPI 3.0 specifications.
+	Tiger4 systems are capable of re-directing CPEI to any CPU other than BSP.
+	This option is useful to enable this feature on older BIOS's as well.
+	You can also enable this by using boot command line option force_cpei=1.
+
 config PREEMPT
 	bool "Preemptible Kernel"
 	help
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig
index 125568118b84..766bf4955432 100644
--- a/arch/ia64/configs/tiger_defconfig
+++ b/arch/ia64/configs/tiger_defconfig
@@ -116,6 +116,8 @@ CONFIG_FORCE_MAX_ZONEORDER=17
 CONFIG_SMP=y
 CONFIG_NR_CPUS=4
 CONFIG_HOTPLUG_CPU=y
+CONFIG_PERMIT_BSP_REMOVE=y
+CONFIG_FORCE_CPEI_RETARGET=y
 # CONFIG_SCHED_SMT is not set
 # CONFIG_PREEMPT is not set
 CONFIG_SELECT_MEMORY_MODEL=y
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index ecd44bdc8394..4722ec51c70c 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -284,19 +284,24 @@ acpi_parse_plat_int_src(acpi_table_entry_header * header,
 	return 0;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
 unsigned int can_cpei_retarget(void)
 {
 	extern int cpe_vector;
+	extern unsigned int force_cpei_retarget;
 
 	/*
 	 * Only if CPEI is supported and the override flag
 	 * is present, otherwise return that its re-targettable
 	 * if we are in polling mode.
 	 */
-	if (cpe_vector > 0 && !acpi_cpei_override)
-		return 0;
-	else
-		return 1;
+	if (cpe_vector > 0) {
+		if (acpi_cpei_override || force_cpei_retarget)
+			return 1;
+		else
+			return 0;
+	}
+	return 1;
 }
 
 unsigned int is_cpu_cpei_target(unsigned int cpu)
@@ -315,6 +320,7 @@ void set_cpei_target_cpu(unsigned int cpu)
 {
 	acpi_cpei_phys_cpuid = cpu_physical_id(cpu);
 }
+#endif
 
 unsigned int get_cpei_target_cpu(void)
 {
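
Reviewer note: as a reading aid only (not part of the patch), the patched can_cpei_retarget() above reduces to the condensed form below; acpi_cpei_override and force_cpei_retarget keep exactly the meanings they have in the hunk.

	/* Condensed equivalent of the patched logic above (sketch). */
	unsigned int can_cpei_retarget(void)
	{
		extern int cpe_vector;
		extern unsigned int force_cpei_retarget;

		/* No CPEI vector => CPE handled by polling => trivially re-targettable. */
		if (cpe_vector <= 0)
			return 1;
		/* Otherwise only if firmware (ACPI override) or the user forces it. */
		return (acpi_cpei_override || force_cpei_retarget) ? 1 : 0;
	}
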
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 574084f343fa..8832c553230a 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -631,6 +631,7 @@ get_target_cpu (unsigned int gsi, int vector)
 {
 #ifdef CONFIG_SMP
 	static int cpu = -1;
+	extern int cpe_vector;
 
 	/*
 	 * In case of vector shared by multiple RTEs, all RTEs that
@@ -653,6 +654,11 @@ get_target_cpu (unsigned int gsi, int vector)
 	if (!cpu_online(smp_processor_id()))
 		return cpu_physical_id(smp_processor_id());
 
+#ifdef CONFIG_ACPI
+	if (cpe_vector > 0 && vector == IA64_CPEP_VECTOR)
+		return get_cpei_target_cpu();
+#endif
+
 #ifdef CONFIG_NUMA
 	{
 		int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index d33244c32759..5ce908ef9c95 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -163,8 +163,19 @@ void fixup_irqs(void)
 {
 	unsigned int irq;
 	extern void ia64_process_pending_intr(void);
+	extern void ia64_disable_timer(void);
+	extern volatile int time_keeper_id;
+
+	ia64_disable_timer();
+
+	/*
+	 * Find a new timesync master
+	 */
+	if (smp_processor_id() == time_keeper_id) {
+		time_keeper_id = first_cpu(cpu_online_map);
+		printk ("CPU %d is now promoted to time-keeper master\n", time_keeper_id);
+	}
 
-	ia64_set_itv(1<<16);
 	/*
 	 * Phase 1: Locate irq's bound to this cpu and
 	 * relocate them for cpu removal.
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index ee7eec9ee576..87fb7cecead0 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -289,6 +289,7 @@ ia64_mca_log_sal_error_record(int sal_info_type)
 #ifdef CONFIG_ACPI
 
 int cpe_vector = -1;
+int ia64_cpe_irq = -1;
 
 static irqreturn_t
 ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
@@ -1444,11 +1445,13 @@ void __devinit
 ia64_mca_cpu_init(void *cpu_data)
 {
 	void *pal_vaddr;
+	static int first_time = 1;
 
-	if (smp_processor_id() == 0) {
+	if (first_time) {
 		void *mca_data;
 		int cpu;
 
+		first_time = 0;
 		mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu)
 					 * NR_CPUS + KERNEL_STACK_SIZE);
 		mca_data = (void *)(((unsigned long)mca_data +
@@ -1704,6 +1707,7 @@ ia64_mca_late_init(void)
 		desc = irq_descp(irq);
 		desc->status |= IRQ_PER_CPU;
 		setup_irq(irq, &mca_cpe_irqaction);
+		ia64_cpe_irq = irq;
 	}
 	ia64_mca_register_cpev(cpe_vector);
 	IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__);
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 9c5194b385da..077f21216b65 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -6722,6 +6722,7 @@ __initcall(pfm_init);
 void
 pfm_init_percpu (void)
 {
+	static int first_time=1;
 	/*
 	 * make sure no measurement is active
 	 * (may inherit programmed PMCs from EFI).
@@ -6734,8 +6735,10 @@ pfm_init_percpu (void)
 	 */
 	pfm_unfreeze_pmu();
 
-	if (smp_processor_id() == 0)
+	if (first_time) {
 		register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
+		first_time=0;
+	}
 
 	ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
 	ia64_srlz_d();
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index b681ef34a86e..c4b633b36dab 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -70,6 +70,12 @@
 #endif
 
 #ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_PERMIT_BSP_REMOVE
+#define bsp_remove_ok	1
+#else
+#define bsp_remove_ok	0
+#endif
+
 /*
  * Store all idle threads, this can be reused instead of creating
  * a new thread. Also avoids complicated thread destroy functionality
@@ -104,7 +110,7 @@ struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];
 /*
  * ITC synchronization related stuff:
  */
-#define MASTER	0
+#define MASTER	(0)
 #define SLAVE	(SMP_CACHE_BYTES/8)
 
 #define NUM_ROUNDS	64	/* magic value */
@@ -151,6 +157,27 @@ char __initdata no_int_routing;
 
 unsigned char smp_int_redirect; /* are INT and IPI redirectable by the chipset? */
 
+#ifdef CONFIG_FORCE_CPEI_RETARGET
+#define CPEI_OVERRIDE_DEFAULT	(1)
+#else
+#define CPEI_OVERRIDE_DEFAULT	(0)
+#endif
+
+unsigned int force_cpei_retarget = CPEI_OVERRIDE_DEFAULT;
+
+static int __init
+cmdl_force_cpei(char *str)
+{
+	int value=0;
+
+	get_option (&str, &value);
+	force_cpei_retarget = value;
+
+	return 1;
+}
+
+__setup("force_cpei=", cmdl_force_cpei);
+
 static int __init
 nointroute (char *str)
 {
@@ -161,6 +188,27 @@ nointroute (char *str)
 
 __setup("nointroute", nointroute);
 
+static void fix_b0_for_bsp(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+	int cpuid;
+	static int fix_bsp_b0 = 1;
+
+	cpuid = smp_processor_id();
+
+	/*
+	 * Cache the b0 value on the first AP that comes up
+	 */
+	if (!(fix_bsp_b0 && cpuid))
+		return;
+
+	sal_boot_rendez_state[0].br[0] = sal_boot_rendez_state[cpuid].br[0];
+	printk ("Fixed BSP b0 value from CPU %d\n", cpuid);
+
+	fix_bsp_b0 = 0;
+#endif
+}
+
 void
 sync_master (void *arg)
 {
@@ -327,8 +375,9 @@ smp_setup_percpu_timer (void)
 static void __devinit
 smp_callin (void)
 {
-	int cpuid, phys_id;
+	int cpuid, phys_id, itc_master;
 	extern void ia64_init_itm(void);
+	extern volatile int time_keeper_id;
 
 #ifdef CONFIG_PERFMON
 	extern void pfm_init_percpu(void);
@@ -336,6 +385,7 @@ smp_callin (void)
 
 	cpuid = smp_processor_id();
 	phys_id = hard_smp_processor_id();
+	itc_master = time_keeper_id;
 
 	if (cpu_online(cpuid)) {
 		printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
@@ -343,6 +393,8 @@ smp_callin (void)
 		BUG();
 	}
 
+	fix_b0_for_bsp();
+
 	lock_ipi_calllock();
 	cpu_set(cpuid, cpu_online_map);
 	unlock_ipi_calllock();
@@ -365,8 +417,8 @@ smp_callin (void)
 	 * calls spin_unlock_bh(), which calls spin_unlock_bh(), which calls
 	 * local_bh_enable(), which bugs out if irqs are not enabled...
 	 */
-	Dprintk("Going to syncup ITC with BP.\n");
-	ia64_sync_itc(0);
+	Dprintk("Going to syncup ITC with ITC Master.\n");
+	ia64_sync_itc(itc_master);
 }
 
 /*
@@ -635,6 +687,47 @@ remove_siblinginfo(int cpu)
 }
 
 extern void fixup_irqs(void);
+
+int migrate_platform_irqs(unsigned int cpu)
+{
+	int new_cpei_cpu;
+	irq_desc_t *desc = NULL;
+	cpumask_t mask;
+	int retval = 0;
+
+	/*
+	 * dont permit CPEI target to removed.
+	 */
+	if (cpe_vector > 0 && is_cpu_cpei_target(cpu)) {
+		printk ("CPU (%d) is CPEI Target\n", cpu);
+		if (can_cpei_retarget()) {
+			/*
+			 * Now re-target the CPEI to a different processor
+			 */
+			new_cpei_cpu = any_online_cpu(cpu_online_map);
+			mask = cpumask_of_cpu(new_cpei_cpu);
+			set_cpei_target_cpu(new_cpei_cpu);
+			desc = irq_descp(ia64_cpe_irq);
+			/*
+			 * Switch for now, immediatly, we need to do fake intr
+			 * as other interrupts, but need to study CPEI behaviour with
+			 * polling before making changes.
+			 */
+			if (desc) {
+				desc->handler->disable(ia64_cpe_irq);
+				desc->handler->set_affinity(ia64_cpe_irq, mask);
+				desc->handler->enable(ia64_cpe_irq);
+				printk ("Re-targetting CPEI to cpu %d\n", new_cpei_cpu);
+			}
+		}
+		if (!desc) {
+			printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu);
+			retval = -EBUSY;
+		}
+	}
+	return retval;
+}
+
 /* must be called with cpucontrol mutex held */
 int __cpu_disable(void)
 {
@@ -643,8 +736,17 @@ int __cpu_disable(void)
 	/*
 	 * dont permit boot processor for now
 	 */
-	if (cpu == 0)
-		return -EBUSY;
+	if (cpu == 0 && !bsp_remove_ok) {
+		printk ("Your platform does not support removal of BSP\n");
+		return (-EBUSY);
+	}
+
+	cpu_clear(cpu, cpu_online_map);
+
+	if (migrate_platform_irqs(cpu)) {
+		cpu_set(cpu, cpu_online_map);
+		return (-EBUSY);
+	}
 
 	remove_siblinginfo(cpu);
 	cpu_clear(cpu, cpu_online_map);
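
Reviewer note: a condensed sketch (not part of the patch) of the __cpu_disable() ordering introduced above; the unchanged tail of the function is elided. Clearing the dying CPU from cpu_online_map before migrating platform interrupts is what lets any_online_cpu()/first_cpu() pick a surviving CPU as the new CPEI target and time-keeper.

	int __cpu_disable(void)				/* sketch of the patched flow */
	{
		int cpu = smp_processor_id();

		if (cpu == 0 && !bsp_remove_ok)		/* CONFIG_PERMIT_BSP_REMOVE not set */
			return -EBUSY;

		cpu_clear(cpu, cpu_online_map);		/* drop from online map first ...   */

		if (migrate_platform_irqs(cpu)) {	/* ... so CPEI lands on another CPU */
			cpu_set(cpu, cpu_online_map);	/* could not retarget: undo and fail */
			return -EBUSY;
		}

		remove_siblinginfo(cpu);
		cpu_clear(cpu, cpu_online_map);
		/* ... existing teardown (fixup_irqs() etc.) continues unchanged ... */
		return 0;
	}
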
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 307d01e15b2e..ac167436e936 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -32,7 +32,7 @@
 
 extern unsigned long wall_jiffies;
 
-#define TIME_KEEPER_ID 0	/* smp_processor_id() of time-keeper */
+volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */
 
 #ifdef CONFIG_IA64_DEBUG_IRQ
 
@@ -71,7 +71,7 @@ timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
 
 	new_itm += local_cpu_data->itm_delta;
 
-	if (smp_processor_id() == TIME_KEEPER_ID) {
+	if (smp_processor_id() == time_keeper_id) {
 		/*
 		 * Here we are in the timer irq handler. We have irqs locally
 		 * disabled, but we don't know if the timer_bh is running on
@@ -236,6 +236,11 @@ static struct irqaction timer_irqaction = {
 	.name =		"timer"
 };
 
+void __devinit ia64_disable_timer(void)
+{
+	ia64_set_itv(1 << 16);
+}
+
 void __init
 time_init (void)
 {
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 6e5eea19fa67..3b6fd798c4d6 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -36,7 +36,7 @@ int arch_register_cpu(int num)
 	parent = &sysfs_nodes[cpu_to_node(num)];
 #endif /* CONFIG_NUMA */
 
-#ifdef CONFIG_ACPI
+#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
 	/*
 	 * If CPEI cannot be re-targetted, and this is
 	 * CPEI target, then dont create the control file
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index acaaec4e4681..9855ba318094 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -181,13 +181,15 @@ per_cpu_init (void)
 {
 	void *cpu_data;
 	int cpu;
+	static int first_time=1;
 
 	/*
 	 * get_free_pages() cannot be used before cpu_init() done.  BSP
 	 * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
 	 * get_zeroed_page().
 	 */
-	if (smp_processor_id() == 0) {
+	if (first_time) {
+		first_time=0;
 		cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
 					   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 		for (cpu = 0; cpu < NR_CPUS; cpu++) {
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index c87d6d1d5813..573d5cc63e2b 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -528,12 +528,17 @@ void __init find_memory(void)
 void *per_cpu_init(void)
 {
 	int cpu;
+	static int first_time = 1;
+
 
 	if (smp_processor_id() != 0)
 		return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++)
-		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
+	if (first_time) {
+		first_time = 0;
+		for (cpu = 0; cpu < NR_CPUS; cpu++)
+			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
+	}
 
 	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
 }
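
Reviewer note on the recurring first_time guard (mca.c, perfmon.c, contig.c and discontig.c above): once the BSP may be hot-removed and later brought back online, smp_processor_id() == 0 no longer implies "first boot-time call" — a re-added CPU 0 would repeat one-shot boot work such as bootmem allocation. The patch therefore keys those one-shot paths on a static flag instead. A minimal sketch of the idiom, as I read it:

	static int first_time = 1;	/* persists for the lifetime of the kernel */

	if (first_time) {
		first_time = 0;
		/* boot-time-only work (bootmem allocation, per-CPU IRQ
		 * registration, ...) runs exactly once, no matter which
		 * CPU, including a re-added CPU 0, executes this path. */
	}
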
diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h
index c7d9c9ed38ba..bfbbb8da79c7 100644
--- a/include/asm-ia64/mca.h
+++ b/include/asm-ia64/mca.h
@@ -131,6 +131,8 @@ struct ia64_mca_cpu {
 /* Array of physical addresses of each CPU's MCA area. */
 extern unsigned long __per_cpu_mca[NR_CPUS];
 
+extern int cpe_vector;
+extern int ia64_cpe_irq;
 extern void ia64_mca_init(void);
 extern void ia64_mca_cpu_init(void *);
 extern void ia64_os_mca_dispatch(void);
