author     Andi Kleen <ak@suse.de>                       2005-04-16 18:25:19 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>     2005-04-16 18:25:19 -0400
commit     a8ab26fe5bfeef43bdcde5182ca051ae0647607e (patch)
tree       456f92b42111f83a4568b27efd863f20ffabbe3c /include/asm-x86_64
parent     ebfcaa96fccc01301a577c5c56a5f00543cf167e (diff)
[PATCH] x86_64: Switch SMP bootup over to new CPU hotplug state machine
This will allow CPU hotplug in the future and in general cleans up a lot of
crufty code. It should also plug some races that the old hackish way
introduced. Remove one old race workaround in the NMI watchdog setup that is
no longer needed.
I removed the old total-sum-of-BogoMIPS reporting code. The brag value of
BogoMIPS has been greatly devalued in recent years on the open market.
Real CPU hotplug will need some more work, but the infrastructure for it is
there now.
One drawback: the new TSC sync algorithm is less accurate than before. The
old way of zeroing all TSCs is too intrusive to do later in boot. Instead the
TSC of the BP is now duplicated onto the other CPUs, which leaves some
residual skew.
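To make the trade-off concrete, here is a small user-space model of the
"duplicate the BP's TSC" approach. The fake_tsc array and the
read_tsc()/write_tsc() helpers are stand-ins invented for this sketch; the
real sync path would program the counter through an MSR write (via the
MSR_IA32_TSC constant added to msr.h below), not through an array.

/* sketch.c - conceptual model only, not the kernel's sync code */
#include <stdio.h>
#include <stdint.h>

static uint64_t fake_tsc[2];	/* [0] = BP, [1] = AP */

/* Stand-ins for reading the TSC and writing it via MSR_IA32_TSC. */
static uint64_t read_tsc(int cpu)              { return fake_tsc[cpu]; }
static void     write_tsc(int cpu, uint64_t v) { fake_tsc[cpu] = v; }

int main(void)
{
	fake_tsc[0] = 1000000;	/* BP has been running for a while */
	fake_tsc[1] = 0;	/* freshly started AP */

	/*
	 * Old scheme: zero every TSC during early boot - too intrusive to
	 * repeat later for hotplugged CPUs.  New scheme: copy the BP's
	 * current value onto the AP.  Any delay between the read and the
	 * write remains as skew, hence the accuracy caveat above.
	 */
	write_tsc(1, read_tsc(0));

	printf("BP=%llu AP=%llu\n",
	       (unsigned long long)read_tsc(0),
	       (unsigned long long)read_tsc(1));
	return 0;
}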
akpm:
- sync_tsc_bp_init seems to have the sense of `init' inverted.
- SPIN_LOCK_UNLOCKED is deprecated - use DEFINE_SPINLOCK.
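For reference, the spinlock idiom akpm is asking for looks like this,
shown as a before/after pair; sync_tsc_lock is a made-up name used only to
illustrate the two declaration styles:

#include <linux/spinlock.h>

/* before (deprecated): initialize with the SPIN_LOCK_UNLOCKED macro */
static spinlock_t sync_tsc_lock = SPIN_LOCK_UNLOCKED;

/* after (preferred): DEFINE_SPINLOCK() declares and initializes in one step */
static DEFINE_SPINLOCK(sync_tsc_lock);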
Cc: <rusty@rustcorp.com.au>
Cc: <mingo@elte.hu>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--  include/asm-x86_64/msr.h     1
-rw-r--r--  include/asm-x86_64/proto.h   2
-rw-r--r--  include/asm-x86_64/smp.h    29
3 files changed, 8 insertions, 24 deletions
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
index 66f0be191ab4..513e52c71821 100644
--- a/include/asm-x86_64/msr.h
+++ b/include/asm-x86_64/msr.h
@@ -163,6 +163,7 @@ extern inline unsigned int cpuid_edx(unsigned int op)
 #define EFER_NX (1<<_EFER_NX)
 
 /* Intel MSRs. Some also available on other CPUs */
+#define MSR_IA32_TSC		0x10
 #define MSR_IA32_PLATFORM_ID	0x17
 
 #define MSR_IA32_PERFCTR0	0xc1
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index 217bd9ace69b..d0f8f8b4c394 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -29,7 +29,7 @@ extern void config_acpi_tables(void);
 extern void ia32_syscall(void);
 extern void iommu_hole_init(void);
 
-extern void time_init_smp(void);
+extern void time_init_gtod(void);
 
 extern void do_softirq_thunk(void);
 
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index f5eaa1ab48ff..96844fecbde8 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -31,12 +31,16 @@ extern int disable_apic;
 
 struct pt_regs;
 
+extern cpumask_t cpu_present_mask;
+extern cpumask_t cpu_possible_map;
+extern cpumask_t cpu_online_map;
+extern cpumask_t cpu_callout_map;
+
 /*
  * Private routines/data
  */
 
 extern void smp_alloc_memory(void);
-extern cpumask_t cpu_online_map;
 extern volatile unsigned long smp_invalidate_needed;
 extern int pic_mode;
 extern int smp_num_siblings;
@@ -44,7 +48,6 @@ extern void smp_flush_tlb(void);
 extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
 extern void smp_send_reschedule(int cpu);
 extern void smp_invalidate_rcv(void);		/* Process an NMI */
-extern void (*mtrr_hook) (void);
 extern void zap_low_mappings(void);
 void smp_stop_cpu(void);
 extern cpumask_t cpu_sibling_map[NR_CPUS];
@@ -60,10 +63,6 @@ extern u8 cpu_core_id[NR_CPUS];
  * compresses data structures.
  */
 
-extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_callin_map;
-#define cpu_possible_map cpu_callout_map
-
 static inline int num_booting_cpus(void)
 {
 	return cpus_weight(cpu_callout_map);
@@ -77,7 +76,7 @@ extern __inline int hard_smp_processor_id(void)
 	return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
 }
 
-#define safe_smp_processor_id() (disable_apic ? 0 : x86_apicid_to_cpu(hard_smp_processor_id()))
+extern int safe_smp_processor_id(void);
 
 #endif /* !ASSEMBLY */
 
@@ -99,22 +98,6 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 	return cpus_addr(cpumask)[0];
 }
 
-static inline int x86_apicid_to_cpu(u8 apicid)
-{
-	int i;
-
-	for (i = 0; i < NR_CPUS; ++i)
-		if (x86_cpu_to_apicid[i] == apicid)
-			return i;
-
-	/* No entries in x86_cpu_to_apicid? Either no MPS|ACPI,
-	 * or called too early. Either way, we must be CPU 0. */
-	if (x86_cpu_to_apicid[0] == BAD_APICID)
-		return 0;
-
-	return -1;
-}
-
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
 	if (mps_cpu < NR_CPUS)
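The smp.h hunks above replace the safe_smp_processor_id() macro and the
inline x86_apicid_to_cpu() helper with a single out-of-line function. Purely
as an illustration, recombining the removed pieces gives a function of
roughly this shape; the real definition lives in the x86_64 arch C code,
which is outside this include/asm-x86_64 diffstat:

/* Illustrative recombination of the removed macro and helper only. */
int safe_smp_processor_id(void)
{
	int i, apicid;

	if (disable_apic)
		return 0;

	apicid = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; ++i)
		if (x86_cpu_to_apicid[i] == apicid)
			return i;

	/* No entries in x86_cpu_to_apicid? Either no MPS|ACPI,
	 * or called too early. Either way, we must be CPU 0. */
	if (x86_cpu_to_apicid[0] == BAD_APICID)
		return 0;

	return -1;
}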