Diffstat (limited to 'arch/ia64/kernel/smpboot.c')
 -rw-r--r--   arch/ia64/kernel/smpboot.c   221
 1 file changed, 111 insertions(+), 110 deletions(-)
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index b681ef34a86e..44e9547878ac 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -70,6 +70,12 @@
 #endif
 
 #ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_PERMIT_BSP_REMOVE
+#define bsp_remove_ok	1
+#else
+#define bsp_remove_ok	0
+#endif
+
 /*
  * Store all idle threads, this can be reused instead of creating
  * a new thread. Also avoids complicated thread destroy functionality
@@ -104,7 +110,7 @@ struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];
 /*
  * ITC synchronization related stuff:
  */
-#define MASTER	0
+#define MASTER	(0)
 #define SLAVE	(SMP_CACHE_BYTES/8)
 
 #define NUM_ROUNDS	64	/* magic value */
@@ -151,6 +157,27 @@ char __initdata no_int_routing;
 
 unsigned char smp_int_redirect; /* are INT and IPI redirectable by the chipset? */
 
+#ifdef CONFIG_FORCE_CPEI_RETARGET
+#define CPEI_OVERRIDE_DEFAULT	(1)
+#else
+#define CPEI_OVERRIDE_DEFAULT	(0)
+#endif
+
+unsigned int force_cpei_retarget = CPEI_OVERRIDE_DEFAULT;
+
+static int __init
+cmdl_force_cpei(char *str)
+{
+	int value=0;
+
+	get_option (&str, &value);
+	force_cpei_retarget = value;
+
+	return 1;
+}
+
+__setup("force_cpei=", cmdl_force_cpei);
+
 static int __init
 nointroute (char *str)
 {
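
[Annotation] The force_cpei= handler added above follows the kernel's standard __setup() boot-parameter pattern: get_option() parses the integer that follows the '=' and the handler returns 1 to mark the option as consumed. An annotated restatement of the hunk (only the comments are new; the code is unchanged from the patch):

    /* Boot with "force_cpei=1" to override CPEI_OVERRIDE_DEFAULT at runtime. */
    static int __init
    cmdl_force_cpei(char *str)
    {
        int value = 0;

        get_option(&str, &value);   /* parse the integer after "force_cpei=" */
        force_cpei_retarget = value;

        return 1;                   /* option handled */
    }
    __setup("force_cpei=", cmdl_force_cpei);

This gives platforms whose firmware does not advertise a reprogrammable CPEI target a boot-time opt-in, complementing the CONFIG_FORCE_CPEI_RETARGET build-time default.
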
@@ -161,6 +188,27 @@ nointroute (char *str)
 
 __setup("nointroute", nointroute);
 
+static void fix_b0_for_bsp(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+	int cpuid;
+	static int fix_bsp_b0 = 1;
+
+	cpuid = smp_processor_id();
+
+	/*
+	 * Cache the b0 value on the first AP that comes up
+	 */
+	if (!(fix_bsp_b0 && cpuid))
+		return;
+
+	sal_boot_rendez_state[0].br[0] = sal_boot_rendez_state[cpuid].br[0];
+	printk ("Fixed BSP b0 value from CPU %d\n", cpuid);
+
+	fix_bsp_b0 = 0;
+#endif
+}
+
 void
 sync_master (void *arg)
 {
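
[Annotation] fix_b0_for_bsp() exists because the BSP's SAL rendezvous slot, sal_boot_rendez_state[0], is filled in at cold boot and does not hold the return branch register a hot-plugged CPU needs; the first AP to reach smp_callin() therefore donates its own br[0]. That reading is inferred from the code above rather than from a changelog. Condensed, with comments:

    /* One-shot: runs on the first AP (cpuid != 0) to come up, never on the BSP. */
    if (!(fix_bsp_b0 && cpuid))
        return;

    /* Give slot 0 a b0 valid for the hotplug wakeup path, so a
     * removed-and-readded CPU 0 resumes at a usable return address. */
    sal_boot_rendez_state[0].br[0] = sal_boot_rendez_state[cpuid].br[0];
    fix_bsp_b0 = 0;
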
@@ -327,8 +375,9 @@ smp_setup_percpu_timer (void)
 static void __devinit
 smp_callin (void)
 {
-	int cpuid, phys_id;
+	int cpuid, phys_id, itc_master;
 	extern void ia64_init_itm(void);
+	extern volatile int time_keeper_id;
 
 #ifdef CONFIG_PERFMON
 	extern void pfm_init_percpu(void);
@@ -336,6 +385,7 @@ smp_callin (void)
 
 	cpuid = smp_processor_id();
 	phys_id = hard_smp_processor_id();
+	itc_master = time_keeper_id;
 
 	if (cpu_online(cpuid)) {
 		printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
@@ -343,6 +393,8 @@ smp_callin (void)
 		BUG();
 	}
 
+	fix_b0_for_bsp();
+
 	lock_ipi_calllock();
 	cpu_set(cpuid, cpu_online_map);
 	unlock_ipi_calllock();
@@ -365,8 +417,8 @@ smp_callin (void)
 	 * calls spin_unlock_bh(), which calls spin_unlock_bh(), which calls
 	 * local_bh_enable(), which bugs out if irqs are not enabled...
 	 */
-	Dprintk("Going to syncup ITC with BP.\n");
-	ia64_sync_itc(0);
+	Dprintk("Going to syncup ITC with ITC Master.\n");
+	ia64_sync_itc(itc_master);
 }
 
 /*
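
[Annotation] With the BSP removable, the ITC synchronization master can no longer be hard-coded to CPU 0; smp_callin() now syncs against time_keeper_id, i.e. whichever CPU currently owns timekeeping. The measurement itself lives in this file's sync_master()/get_delta() (untouched by the patch): a ping-pong handshake over the cache-line-separated go[] flags. A condensed sketch of one iteration, assuming the surrounding NUM_ITERS sampling loop and best-sample filtering the real code performs:

    long t0, t1, tm, delta;

    t0 = ia64_get_itc();          /* slave: timestamp the request */
    go[MASTER] = 1;               /* ask the master for its ITC */
    while (!(tm = go[SLAVE]))     /* master stores its ITC into go[SLAVE] */
        cpu_relax();
    go[SLAVE] = 0;
    t1 = ia64_get_itc();          /* slave: timestamp the reply */

    /* assume the master's sample was taken at the round-trip midpoint */
    delta = tm - (t0 + (t1 - t0) / 2);
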
@@ -572,32 +624,8 @@ void __devinit smp_prepare_boot_cpu(void)
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 }
 
-/*
- * mt_info[] is a temporary store for all info returned by
- * PAL_LOGICAL_TO_PHYSICAL, to be copied into cpuinfo_ia64 when the
- * specific cpu comes.
- */
-static struct {
-	__u32 socket_id;
-	__u16 core_id;
-	__u16 thread_id;
-	__u16 proc_fixed_addr;
-	__u8 valid;
-} mt_info[NR_CPUS] __devinitdata;
-
 #ifdef CONFIG_HOTPLUG_CPU
 static inline void
-remove_from_mtinfo(int cpu)
-{
-	int i;
-
-	for_each_cpu(i)
-		if (mt_info[i].valid && mt_info[i].socket_id ==
-					cpu_data(cpu)->socket_id)
-			mt_info[i].valid = 0;
-}
-
-static inline void
 clear_cpu_sibling_map(int cpu)
 {
 	int i;
@@ -626,15 +654,50 @@ remove_siblinginfo(int cpu)
 
 	/* remove it from all sibling map's */
 	clear_cpu_sibling_map(cpu);
+}
+
+extern void fixup_irqs(void);
 
-	/* if this cpu is the last in the core group, remove all its info
-	 * from mt_info structure
+int migrate_platform_irqs(unsigned int cpu)
+{
+	int new_cpei_cpu;
+	irq_desc_t *desc = NULL;
+	cpumask_t mask;
+	int retval = 0;
+
+	/*
+	 * dont permit CPEI target to removed.
 	 */
-	if (last)
-		remove_from_mtinfo(cpu);
+	if (cpe_vector > 0 && is_cpu_cpei_target(cpu)) {
+		printk ("CPU (%d) is CPEI Target\n", cpu);
+		if (can_cpei_retarget()) {
+			/*
+			 * Now re-target the CPEI to a different processor
+			 */
+			new_cpei_cpu = any_online_cpu(cpu_online_map);
+			mask = cpumask_of_cpu(new_cpei_cpu);
+			set_cpei_target_cpu(new_cpei_cpu);
+			desc = irq_descp(ia64_cpe_irq);
+			/*
+			 * Switch for now, immediatly, we need to do fake intr
+			 * as other interrupts, but need to study CPEI behaviour with
+			 * polling before making changes.
+			 */
+			if (desc) {
+				desc->handler->disable(ia64_cpe_irq);
+				desc->handler->set_affinity(ia64_cpe_irq, mask);
+				desc->handler->enable(ia64_cpe_irq);
+				printk ("Re-targetting CPEI to cpu %d\n", new_cpei_cpu);
+			}
+		}
+		if (!desc) {
+			printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu);
+			retval = -EBUSY;
+		}
+	}
+	return retval;
 }
 
-extern void fixup_irqs(void);
 /* must be called with cpucontrol mutex held */
 int __cpu_disable(void)
 {
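
[Annotation] migrate_platform_irqs() moves the corrected-platform-error interrupt (CPEI) off a CPU about to go down: pick any other online CPU, record it with set_cpei_target_cpu(), then switch delivery through the irq_desc handler ops of this era (disable(), set_affinity(), enable()); if the IRQ descriptor is missing or retargeting is not possible, the offline is refused with -EBUSY. The helpers can_cpei_retarget(), is_cpu_cpei_target() and set_cpei_target_cpu() are introduced elsewhere in this series; a hypothetical sketch of the first, shown only to illustrate how force_cpei_retarget feeds in (this body is a plausible reading, not the real implementation):

    int can_cpei_retarget(void)
    {
        /* Retargeting is allowed when the platform reports the CPEI
         * target as reprogrammable, or when booted with "force_cpei=1". */
        return cpei_target_is_reprogrammable() || force_cpei_retarget;
    }

Here cpei_target_is_reprogrammable() stands in for whatever firmware query the real code performs.
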
@@ -643,8 +706,17 @@ int __cpu_disable(void)
 	/*
 	 * dont permit boot processor for now
 	 */
-	if (cpu == 0)
-		return -EBUSY;
+	if (cpu == 0 && !bsp_remove_ok) {
+		printk ("Your platform does not support removal of BSP\n");
+		return (-EBUSY);
+	}
+
+	cpu_clear(cpu, cpu_online_map);
+
+	if (migrate_platform_irqs(cpu)) {
+		cpu_set(cpu, cpu_online_map);
+		return (-EBUSY);
+	}
 
 	remove_siblinginfo(cpu);
 	cpu_clear(cpu, cpu_online_map);
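
[Annotation] Note the ordering in __cpu_disable(): the dying CPU is cleared from cpu_online_map before migrate_platform_irqs() runs, so any_online_cpu() cannot hand the CPEI back to the CPU being removed, and it is re-set on failure so the CPU stays fully usable. The pre-existing cpu_clear() kept as context (old line 650) becomes a harmless repeat. Condensed flow after this hunk (a sketch, not a verbatim copy; the tail of the function is unchanged by the patch):

    int __cpu_disable(void)
    {
        int cpu = smp_processor_id();

        if (cpu == 0 && !bsp_remove_ok)
            return -EBUSY;                 /* BSP removal not configured in */

        cpu_clear(cpu, cpu_online_map);    /* hide us from any_online_cpu() */
        if (migrate_platform_irqs(cpu)) {
            cpu_set(cpu, cpu_online_map);  /* CPEI could not move: undo and bail */
            return -EBUSY;
        }

        remove_siblinginfo(cpu);
        cpu_clear(cpu, cpu_online_map);    /* now redundant, but harmless */
        /* ... remainder unchanged by this patch ... */
        return 0;
    }
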
@@ -776,40 +848,6 @@ init_smp_config(void)
 		       ia64_sal_strerror(sal_ret));
 }
 
-static inline int __devinit
-check_for_mtinfo_index(void)
-{
-	int i;
-
-	for_each_cpu(i)
-		if (!mt_info[i].valid)
-			return i;
-
-	return -1;
-}
-
-/*
- * Search the mt_info to find out if this socket's cid/tid information is
- * cached or not. If the socket exists, fill in the core_id and thread_id
- * in cpuinfo
- */
-static int __devinit
-check_for_new_socket(__u16 logical_address, struct cpuinfo_ia64 *c)
-{
-	int i;
-	__u32 sid = c->socket_id;
-
-	for_each_cpu(i) {
-		if (mt_info[i].valid && mt_info[i].proc_fixed_addr == logical_address
-		    && mt_info[i].socket_id == sid) {
-			c->core_id = mt_info[i].core_id;
-			c->thread_id = mt_info[i].thread_id;
-			return 1; /* not a new socket */
-		}
-	}
-	return 0;
-}
-
 /*
  * identify_siblings(cpu) gets called from identify_cpu. This populates the
  * information related to logical execution units in per_cpu_data structure.
@@ -819,14 +857,12 @@ identify_siblings(struct cpuinfo_ia64 *c)
 {
 	s64 status;
 	u16 pltid;
-	u64 proc_fixed_addr;
-	int count, i;
 	pal_logical_to_physical_t info;
 
 	if (smp_num_cpucores == 1 && smp_num_siblings == 1)
 		return;
 
-	if ((status = ia64_pal_logical_to_phys(0, &info)) != PAL_STATUS_SUCCESS) {
+	if ((status = ia64_pal_logical_to_phys(-1, &info)) != PAL_STATUS_SUCCESS) {
 		printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
 		       status);
 		return;
@@ -835,47 +871,12 @@ identify_siblings(struct cpuinfo_ia64 *c)
 		printk(KERN_ERR "ia64_sal_pltid failed with %ld\n", status);
 		return;
 	}
-	if ((status = ia64_pal_fixed_addr(&proc_fixed_addr)) != PAL_STATUS_SUCCESS) {
-		printk(KERN_ERR "ia64_pal_fixed_addr failed with %ld\n", status);
-		return;
-	}
 
 	c->socket_id = (pltid << 8) | info.overview_ppid;
 	c->cores_per_socket = info.overview_cpp;
 	c->threads_per_core = info.overview_tpc;
-	count = c->num_log = info.overview_num_log;
-
-	/* If the thread and core id information is already cached, then
-	 * we will simply update cpu_info and return. Otherwise, we will
-	 * do the PAL calls and cache core and thread id's of all the siblings.
-	 */
-	if (check_for_new_socket(proc_fixed_addr, c))
-		return;
-
-	for (i = 0; i < count; i++) {
-		int index;
-
-		if (i && (status = ia64_pal_logical_to_phys(i, &info))
-			!= PAL_STATUS_SUCCESS) {
-			printk(KERN_ERR "ia64_pal_logical_to_phys failed"
-					" with %ld\n", status);
-			return;
-		}
-		if (info.log2_la == proc_fixed_addr) {
-			c->core_id = info.log1_cid;
-			c->thread_id = info.log1_tid;
-		}
+	c->num_log = info.overview_num_log;
 
-		index = check_for_mtinfo_index();
-		/* We will not do the mt_info caching optimization in this case.
-		 */
-		if (index < 0)
-			continue;
-
-		mt_info[index].valid = 1;
-		mt_info[index].socket_id = c->socket_id;
-		mt_info[index].core_id = info.log1_cid;
-		mt_info[index].thread_id = info.log1_tid;
-		mt_info[index].proc_fixed_addr = info.log2_la;
-	}
+	c->core_id = info.log1_cid;
+	c->thread_id = info.log1_tid;
 }
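
[Annotation] The identify_siblings() rewrite relies on PAL_LOGICAL_TO_PHYSICAL accepting -1 as the processor number, which evidently returns the mapping for the calling processor itself. Each CPU can therefore fill in its own core_id/thread_id during identify_cpu(), and the cross-CPU mt_info[] cache, the proc_fixed_addr lookup, and the per-sibling PAL loop all become unnecessary; that is where most of this patch's 110 deletions come from. Annotated restatement of the surviving body (status, info, pltid and c are the function's own locals):

    /* One PAL call, made by the CPU being identified, now suffices. */
    if ((status = ia64_pal_logical_to_phys(-1, &info)) != PAL_STATUS_SUCCESS)
        return;                            /* -1 == "the calling processor" */

    c->socket_id        = (pltid << 8) | info.overview_ppid;
    c->cores_per_socket = info.overview_cpp;
    c->threads_per_core = info.overview_tpc;
    c->num_log          = info.overview_num_log;
    c->core_id          = info.log1_cid;   /* this CPU's own core id */
    c->thread_id        = info.log1_tid;   /* this CPU's own thread id */
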