author     Ashok Raj <ashok.raj@intel.com>   2005-11-11 17:32:40 -0500
committer  Tony Luck <tony.luck@intel.com>   2006-01-05 13:24:20 -0500
commit     ff741906ad3cf4b8ca1a958acb013a97a6381ca2
tree       66a4c2dbacd4c10015824a6789f9206693003092 /arch/ia64/kernel
parent     db9edfd7e339ca4113153d887e782dd05be5a9eb
[IA64] support for cpu0 removal
Here is the BSP removal support for IA64. It is pretty much the same thing that
was released a while back, but with your feedback incorporated.
- Removed CONFIG_BSP_REMOVE_WORKAROUND and the associated cmdline param
- Fixed a compile issue with sn2/zx1 due to an undefined fix_b0_for_bsp
- Some formatting nits (whitespace etc.)
This has been tested on Tiger, and a while back by Alex on HP systems as well.
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/acpi.c    |  12
-rw-r--r--  arch/ia64/kernel/iosapic.c |   6
-rw-r--r--  arch/ia64/kernel/irq.c     |  13
-rw-r--r--  arch/ia64/kernel/mca.c     |   6
-rw-r--r--  arch/ia64/kernel/perfmon.c |   5
-rw-r--r--  arch/ia64/kernel/smpboot.c | 114
-rw-r--r--  arch/ia64/kernel/time.c    |   9
7 files changed, 150 insertions, 15 deletions
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 9ad94ddf6687..fe1d90b0c6ea 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -287,16 +287,20 @@ acpi_parse_plat_int_src(acpi_table_entry_header * header,
 unsigned int can_cpei_retarget(void)
 {
         extern int cpe_vector;
+        extern unsigned int force_cpei_retarget;
 
         /*
          * Only if CPEI is supported and the override flag
          * is present, otherwise return that its re-targettable
          * if we are in polling mode.
          */
-        if (cpe_vector > 0 && !acpi_cpei_override)
-                return 0;
-        else
-                return 1;
+        if (cpe_vector > 0) {
+                if (acpi_cpei_override || force_cpei_retarget)
+                        return 1;
+                else
+                        return 0;
+        }
+        return 1;
 }
 
 unsigned int is_cpu_cpei_target(unsigned int cpu)
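For reference, the rewritten can_cpei_retarget() reduces to a three-input decision: with no CPE vector assigned (polling mode) the CPEI target can always be moved, and with a vector assigned it can be moved only when acpi_cpei_override or the new force_cpei_retarget flag is set. Below is a minimal userspace sketch of just that decision, with the kernel globals stubbed as plain variables; the vector value is arbitrary and nothing here touches real kernel API.

```c
#include <stdio.h>

/* Userspace stand-ins for the kernel globals the patch consults. */
static int cpe_vector;                   /* > 0 means a CPEI vector is assigned */
static unsigned int acpi_cpei_override;  /* existing ACPI override flag */
static unsigned int force_cpei_retarget; /* new flag added by this patch */

/* Same decision as the rewritten kernel function, nothing else. */
static unsigned int can_cpei_retarget(void)
{
        if (cpe_vector > 0) {
                if (acpi_cpei_override || force_cpei_retarget)
                        return 1;
                else
                        return 0;
        }
        return 1;
}

int main(void)
{
        int vec, ovr, frc;

        /* Walk the whole truth table of the three inputs. */
        for (vec = 0; vec <= 1; vec++)
                for (ovr = 0; ovr <= 1; ovr++)
                        for (frc = 0; frc <= 1; frc++) {
                                cpe_vector = vec ? 0x1e : -1; /* any positive value */
                                acpi_cpei_override = ovr;
                                force_cpei_retarget = frc;
                                printf("vector=%d override=%d force=%d -> retarget ok: %u\n",
                                       vec, ovr, frc, can_cpei_retarget());
                        }
        return 0;
}
```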
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 574084f343fa..37ac742da8ed 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -631,6 +631,7 @@ get_target_cpu (unsigned int gsi, int vector)
 {
 #ifdef CONFIG_SMP
         static int cpu = -1;
+        extern int cpe_vector;
 
         /*
          * In case of vector shared by multiple RTEs, all RTEs that
@@ -653,6 +654,11 @@ get_target_cpu (unsigned int gsi, int vector)
         if (!cpu_online(smp_processor_id()))
                 return cpu_physical_id(smp_processor_id());
 
+#ifdef CONFIG_ACPI
+        if (cpe_vector > 0 && vector == IA64_CPEP_VECTOR)
+                return get_cpei_target_cpu();
+#endif
+
 #ifdef CONFIG_NUMA
         {
                 int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0;
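A grossly simplified model of what this hunk does to interrupt routing: ordinary vectors keep being spread over CPUs as before, while the CPE interrupt is always steered to the current CPEI target CPU, so the target can later be changed by reprogramming that one RTE. The constants below are placeholders, not the real IA64_CPEP_VECTOR or get_cpei_target_cpu().

```c
#include <stdio.h>

/* Hypothetical stand-ins for cpe_vector, IA64_CPEP_VECTOR and
 * get_cpei_target_cpu() in the real kernel code. */
#define MODEL_CPE_VECTOR      0x1e
#define MODEL_CPEI_TARGET_CPU 2

/* Simplified routing decision mirroring the new special case. */
static int pick_target_cpu(int vector)
{
        static int rr_cpu = -1;

        if (vector == MODEL_CPE_VECTOR)
                return MODEL_CPEI_TARGET_CPU;

        rr_cpu = (rr_cpu + 1) % 4;      /* pretend 4 online CPUs */
        return rr_cpu;
}

int main(void)
{
        printf("CPE vector      -> CPU %d\n", pick_target_cpu(MODEL_CPE_VECTOR));
        printf("ordinary vector -> CPU %d\n", pick_target_cpu(0x30));
        return 0;
}
```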
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index d33244c32759..5ce908ef9c95 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -163,8 +163,19 @@ void fixup_irqs(void)
 {
         unsigned int irq;
         extern void ia64_process_pending_intr(void);
+        extern void ia64_disable_timer(void);
+        extern volatile int time_keeper_id;
+
+        ia64_disable_timer();
+
+        /*
+         * Find a new timesync master
+         */
+        if (smp_processor_id() == time_keeper_id) {
+                time_keeper_id = first_cpu(cpu_online_map);
+                printk ("CPU %d is now promoted to time-keeper master\n", time_keeper_id);
+        }
 
-        ia64_set_itv(1<<16);
         /*
          * Phase 1: Locate irq's bound to this cpu and
          * relocate them for cpu removal.
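Since the time-keeper is now a variable (see the time.c hunk below), fixup_irqs() hands the role off before the CPU's interrupts are relocated. A toy model of that handoff, not kernel code: by the time fixup_irqs() runs, __cpu_disable() has already cleared the departing CPU from the online map, so first_cpu() picks a survivor.

```c
#include <stdio.h>

#define NR_CPUS 4

/* Stand-ins for cpu_online_map and the (now variable) time_keeper_id. */
static int cpu_online[NR_CPUS] = { 1, 1, 1, 1 };
static volatile int time_keeper_id = 0;

/* Model of first_cpu(cpu_online_map): lowest-numbered online CPU. */
static int first_online_cpu(void)
{
        int i;

        for (i = 0; i < NR_CPUS; i++)
                if (cpu_online[i])
                        return i;
        return -1;
}

/* Mirrors the new step in fixup_irqs() on the CPU being removed. */
static void hand_off_time_keeper(int dying_cpu)
{
        cpu_online[dying_cpu] = 0;      /* already done by __cpu_disable() */
        if (dying_cpu == time_keeper_id) {
                time_keeper_id = first_online_cpu();
                printf("CPU %d is now promoted to time-keeper master\n",
                       time_keeper_id);
        }
}

int main(void)
{
        hand_off_time_keeper(0);        /* CPU 0 was the keeper; CPU 1 takes over */
        return 0;
}
```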
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 355af15287c7..967571b466a2 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -289,6 +289,7 @@ ia64_mca_log_sal_error_record(int sal_info_type)
 #ifdef CONFIG_ACPI
 
 int cpe_vector = -1;
+int ia64_cpe_irq = -1;
 
 static irqreturn_t
 ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
@@ -1444,11 +1445,13 @@ void __devinit
 ia64_mca_cpu_init(void *cpu_data)
 {
         void *pal_vaddr;
+        static int first_time = 1;
 
-        if (smp_processor_id() == 0) {
+        if (first_time) {
                 void *mca_data;
                 int cpu;
 
+                first_time = 0;
                 mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu)
                                          * NR_CPUS + KERNEL_STACK_SIZE);
                 mca_data = (void *)(((unsigned long)mca_data +
@@ -1704,6 +1707,7 @@ ia64_mca_late_init(void)
                 desc = irq_descp(irq);
                 desc->status |= IRQ_PER_CPU;
                 setup_irq(irq, &mca_cpe_irqaction);
+                ia64_cpe_irq = irq;
         }
         ia64_mca_register_cpev(cpe_vector);
         IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__);
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 410d4804fa6e..18c51c37a9a3 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -6718,6 +6718,7 @@ __initcall(pfm_init);
 void
 pfm_init_percpu (void)
 {
+        static int first_time=1;
         /*
          * make sure no measurement is active
          * (may inherit programmed PMCs from EFI).
@@ -6730,8 +6731,10 @@ pfm_init_percpu (void)
          */
         pfm_unfreeze_pmu();
 
-        if (smp_processor_id() == 0)
+        if (first_time) {
                 register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
+                first_time=0;
+        }
 
         ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
         ia64_srlz_d();
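The mca.c and perfmon.c hunks above follow the same pattern: global one-time setup that used to be keyed on "smp_processor_id() == 0" is now keyed on a static first_time flag, so re-onlining CPU 0 after a BSP removal does not repeat boot-only work (for example, the alloc_bootmem() call in ia64_mca_cpu_init() must not run again after boot). A toy model of the pattern, outside the kernel:

```c
#include <stdio.h>

/* One-time global setup guarded by a static flag instead of a CPU-id check. */
static void percpu_init(int cpu)
{
        static int first_time = 1;

        if (first_time) {
                first_time = 0;
                printf("CPU %d: one-time global setup\n", cpu);
        }
        printf("CPU %d: per-CPU setup\n", cpu);
}

int main(void)
{
        percpu_init(0);         /* boot CPU: global + per-CPU setup             */
        percpu_init(1);         /* secondary CPU: per-CPU setup only            */
        percpu_init(0);         /* CPU 0 brought back: no repeated global setup */
        return 0;
}
```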
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 8f44e7d2df66..e9d37bf67d69 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -70,6 +70,12 @@
 #endif
 
 #ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_PERMIT_BSP_REMOVE
+#define bsp_remove_ok   1
+#else
+#define bsp_remove_ok   0
+#endif
+
 /*
  * Store all idle threads, this can be reused instead of creating
  * a new thread. Also avoids complicated thread destroy functionality
@@ -104,7 +110,7 @@ struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];
 /*
  * ITC synchronization related stuff:
  */
-#define MASTER  0
+#define MASTER  (0)
 #define SLAVE   (SMP_CACHE_BYTES/8)
 
 #define NUM_ROUNDS      64      /* magic value */
@@ -151,6 +157,27 @@ char __initdata no_int_routing;
 
 unsigned char smp_int_redirect; /* are INT and IPI redirectable by the chipset? */
 
+#ifdef CONFIG_FORCE_CPEI_RETARGET
+#define CPEI_OVERRIDE_DEFAULT   (1)
+#else
+#define CPEI_OVERRIDE_DEFAULT   (0)
+#endif
+
+unsigned int force_cpei_retarget = CPEI_OVERRIDE_DEFAULT;
+
+static int __init
+cmdl_force_cpei(char *str)
+{
+        int value=0;
+
+        get_option (&str, &value);
+        force_cpei_retarget = value;
+
+        return 1;
+}
+
+__setup("force_cpei=", cmdl_force_cpei);
+
 static int __init
 nointroute (char *str)
 {
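If I read the new __setup() hook right, force_cpei= on the boot command line simply overwrites the default chosen by CONFIG_FORCE_CPEI_RETARGET, in either direction. A rough userspace sketch, with strtol() standing in for the kernel's get_option():

```c
#include <stdio.h>
#include <stdlib.h>

static unsigned int force_cpei_retarget;        /* stand-in for the new global */

/* Userspace approximation of the "force_cpei=" handler: parse an integer
 * from the argument and let it override CPEI_OVERRIDE_DEFAULT. */
static void parse_force_cpei(const char *arg)
{
        force_cpei_retarget = (unsigned int)strtol(arg, NULL, 0);
}

int main(void)
{
        parse_force_cpei("1");          /* e.g. booting with force_cpei=1 */
        printf("force_cpei_retarget = %u\n", force_cpei_retarget);
        return 0;
}
```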
@@ -161,6 +188,27 @@ nointroute (char *str)
 
 __setup("nointroute", nointroute);
 
+static void fix_b0_for_bsp(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+        int cpuid;
+        static int fix_bsp_b0 = 1;
+
+        cpuid = smp_processor_id();
+
+        /*
+         * Cache the b0 value on the first AP that comes up
+         */
+        if (!(fix_bsp_b0 && cpuid))
+                return;
+
+        sal_boot_rendez_state[0].br[0] = sal_boot_rendez_state[cpuid].br[0];
+        printk ("Fixed BSP b0 value from CPU %d\n", cpuid);
+
+        fix_bsp_b0 = 0;
+#endif
+}
+
 void
 sync_master (void *arg)
 {
@@ -327,8 +375,9 @@ smp_setup_percpu_timer (void)
 static void __devinit
 smp_callin (void)
 {
-        int cpuid, phys_id;
+        int cpuid, phys_id, itc_master;
         extern void ia64_init_itm(void);
+        extern volatile int time_keeper_id;
 
 #ifdef CONFIG_PERFMON
         extern void pfm_init_percpu(void);
@@ -336,6 +385,7 @@ smp_callin (void)
 
         cpuid = smp_processor_id();
         phys_id = hard_smp_processor_id();
+        itc_master = time_keeper_id;
 
         if (cpu_online(cpuid)) {
                 printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
@@ -343,6 +393,8 @@ smp_callin (void)
                 BUG();
         }
 
+        fix_b0_for_bsp();
+
         lock_ipi_calllock();
         cpu_set(cpuid, cpu_online_map);
         unlock_ipi_calllock();
@@ -365,8 +417,8 @@ smp_callin (void)
          * calls spin_unlock_bh(), which calls spin_unlock_bh(), which calls
          * local_bh_enable(), which bugs out if irqs are not enabled...
          */
-        Dprintk("Going to syncup ITC with BP.\n");
-        ia64_sync_itc(0);
+        Dprintk("Going to syncup ITC with ITC Master.\n");
+        ia64_sync_itc(itc_master);
 }
 
 /*
@@ -638,6 +690,47 @@ remove_siblinginfo(int cpu)
 }
 
 extern void fixup_irqs(void);
+
+int migrate_platform_irqs(unsigned int cpu)
+{
+        int new_cpei_cpu;
+        irq_desc_t *desc = NULL;
+        cpumask_t mask;
+        int retval = 0;
+
+        /*
+         * dont permit CPEI target to removed.
+         */
+        if (cpe_vector > 0 && is_cpu_cpei_target(cpu)) {
+                printk ("CPU (%d) is CPEI Target\n", cpu);
+                if (can_cpei_retarget()) {
+                        /*
+                         * Now re-target the CPEI to a different processor
+                         */
+                        new_cpei_cpu = any_online_cpu(cpu_online_map);
+                        mask = cpumask_of_cpu(new_cpei_cpu);
+                        set_cpei_target_cpu(new_cpei_cpu);
+                        desc = irq_descp(ia64_cpe_irq);
+                        /*
+                         * Switch for now, immediatly, we need to do fake intr
+                         * as other interrupts, but need to study CPEI behaviour with
+                         * polling before making changes.
+                         */
+                        if (desc) {
+                                desc->handler->disable(ia64_cpe_irq);
+                                desc->handler->set_affinity(ia64_cpe_irq, mask);
+                                desc->handler->enable(ia64_cpe_irq);
+                                printk ("Re-targetting CPEI to cpu %d\n", new_cpei_cpu);
+                        }
+                }
+                if (!desc) {
+                        printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu);
+                        retval = -EBUSY;
+                }
+        }
+        return retval;
+}
+
 /* must be called with cpucontrol mutex held */
 int __cpu_disable(void)
 {
@@ -646,8 +739,17 @@ int __cpu_disable(void)
         /*
          * dont permit boot processor for now
          */
-        if (cpu == 0)
-                return -EBUSY;
+        if (cpu == 0 && !bsp_remove_ok) {
+                printk ("Your platform does not support removal of BSP\n");
+                return (-EBUSY);
+        }
+
+        cpu_clear(cpu, cpu_online_map);
+
+        if (migrate_platform_irqs(cpu)) {
+                cpu_set(cpu, cpu_online_map);
+                return (-EBUSY);
+        }
 
         remove_siblinginfo(cpu);
         cpu_clear(cpu, cpu_online_map);
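The shape of the new __cpu_disable() is: refuse CPU 0 unless BSP removal is permitted, take the CPU out of the online map before retargeting platform interrupts, and put it back if the CPEI cannot be moved. A minimal sketch of just that control flow, with the kernel primitives stubbed out:

```c
#include <stdio.h>

#define NR_CPUS 4

static int bsp_remove_ok = 1;                   /* CONFIG_PERMIT_BSP_REMOVE=y stand-in */
static int cpu_online[NR_CPUS] = { 1, 1, 1, 1 };

/* Stand-in for migrate_platform_irqs(): non-zero means the CPEI target
 * could not be moved off the departing CPU. */
static int migrate_platform_irqs(int cpu)
{
        (void)cpu;
        return 0;
}

/* Mirrors the control flow the patch gives __cpu_disable(). */
static int cpu_disable(int cpu)
{
        if (cpu == 0 && !bsp_remove_ok) {
                printf("Your platform does not support removal of BSP\n");
                return -1;
        }

        cpu_online[cpu] = 0;

        if (migrate_platform_irqs(cpu)) {
                cpu_online[cpu] = 1;            /* roll back, CPU stays online */
                return -1;
        }

        /* remove_siblinginfo(), fixup_irqs(), TLB flush etc. follow here */
        return 0;
}

int main(void)
{
        printf("cpu_disable(0) -> %d\n", cpu_disable(0));
        return 0;
}
```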
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 028a2b95936c..1ca130a83856 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -32,7 +32,7 @@
 
 extern unsigned long wall_jiffies;
 
-#define TIME_KEEPER_ID  0       /* smp_processor_id() of time-keeper */
+volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */
 
 #ifdef CONFIG_IA64_DEBUG_IRQ
 
@@ -71,7 +71,7 @@ timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
 
         new_itm += local_cpu_data->itm_delta;
 
-        if (smp_processor_id() == TIME_KEEPER_ID) {
+        if (smp_processor_id() == time_keeper_id) {
                 /*
                  * Here we are in the timer irq handler. We have irqs locally
                  * disabled, but we don't know if the timer_bh is running on
@@ -236,6 +236,11 @@ static struct irqaction timer_irqaction = {
         .name =         "timer"
 };
 
+void __devinit ia64_disable_timer(void)
+{
+        ia64_set_itv(1 << 16);
+}
+
 void __init
 time_init (void)
 {
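The new ia64_disable_timer() is the ia64_set_itv(1<<16) that fixup_irqs() used to do inline, now owned by time.c. As I recall the Itanium cr.itv layout (shared with cr.pmv/cr.cmcv), the vector lives in bits 0-7 and bit 16 is the mask ("m") bit, so writing 1<<16 masks timer ticks on the CPU about to go offline. A tiny sketch of that bit layout, under that assumption:

```c
#include <stdio.h>

/* Assumed cr.itv layout: vector in bits 0-7, mask bit at bit 16. */
#define ITV_MASK_BIT    (1UL << 16)
#define ITV_VECTOR(itv) ((unsigned long)(itv) & 0xff)

int main(void)
{
        unsigned long itv = 1UL << 16;  /* what ia64_set_itv(1 << 16) writes */

        printf("itv=0x%lx masked=%s vector=%lu\n",
               itv, (itv & ITV_MASK_BIT) ? "yes" : "no", ITV_VECTOR(itv));
        return 0;
}
```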