Diffstat (limited to 'include/asm-x86/smp.h')
-rw-r--r--  include/asm-x86/smp.h  61
1 file changed, 42 insertions, 19 deletions
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 3c877f74f279..6df2615f9138 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_SMP_H_
-#define _ASM_X86_SMP_H_
+#ifndef ASM_X86__SMP_H
+#define ASM_X86__SMP_H
 #ifndef __ASSEMBLY__
 #include <linux/cpumask.h>
 #include <linux/init.h>
@@ -34,6 +34,9 @@ extern cpumask_t cpu_initialized;
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_t, cpu_core_map);
 DECLARE_PER_CPU(u16, cpu_llc_id);
+#ifdef CONFIG_X86_32
+DECLARE_PER_CPU(int, cpu_number);
+#endif
 
 DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
@@ -47,12 +50,16 @@ extern struct {
 struct smp_ops {
 	void (*smp_prepare_boot_cpu)(void);
 	void (*smp_prepare_cpus)(unsigned max_cpus);
-	int (*cpu_up)(unsigned cpu);
 	void (*smp_cpus_done)(unsigned max_cpus);
 
 	void (*smp_send_stop)(void);
 	void (*smp_send_reschedule)(int cpu);
 
+	int (*cpu_up)(unsigned cpu);
+	int (*cpu_disable)(void);
+	void (*cpu_die)(unsigned int cpu);
+	void (*play_dead)(void);
+
 	void (*send_call_func_ipi)(cpumask_t mask);
 	void (*send_call_func_single_ipi)(int cpu);
 };
@@ -91,6 +98,21 @@ static inline int __cpu_up(unsigned int cpu)
 	return smp_ops.cpu_up(cpu);
 }
 
+static inline int __cpu_disable(void)
+{
+	return smp_ops.cpu_disable();
+}
+
+static inline void __cpu_die(unsigned int cpu)
+{
+	smp_ops.cpu_die(cpu);
+}
+
+static inline void play_dead(void)
+{
+	smp_ops.play_dead();
+}
+
 static inline void smp_send_reschedule(int cpu)
 {
 	smp_ops.smp_send_reschedule(cpu);
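__cpu_disable(), __cpu_die() and play_dead() used to be plain externs (removed further down); they now dispatch through smp_ops the way __cpu_up() already did, so a paravirt backend can substitute its own teardown path. A rough sketch of the order in which the hooks are exercised, assuming the caller is the generic hotplug code (the example function name is made up):

/* Illustrative only: the real callers are the generic hotplug core and
 * arch code, not one function like this; it just shows the sequence. */
static int example_offline_cpu(unsigned int cpu)
{
	int err;

	/* normally runs on the CPU going down: smp_ops.cpu_disable() */
	err = __cpu_disable();
	if (err)
		return err;

	/* runs on another CPU: smp_ops.cpu_die(cpu) waits for the victim */
	__cpu_die(cpu);

	/* the dying CPU itself ends up in play_dead() -> smp_ops.play_dead() */
	return 0;
}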
@@ -106,16 +128,19 @@ static inline void arch_send_call_function_ipi(cpumask_t mask)
 	smp_ops.send_call_func_ipi(mask);
 }
 
+void cpu_disable_common(void);
 void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
 void native_smp_cpus_done(unsigned int max_cpus);
 int native_cpu_up(unsigned int cpunum);
+int native_cpu_disable(void);
+void native_cpu_die(unsigned int cpu);
+void native_play_dead(void);
+void play_dead_common(void);
+
 void native_send_call_func_ipi(cpumask_t mask);
 void native_send_call_func_single_ipi(int cpu);
 
-extern int __cpu_disable(void);
-extern void __cpu_die(unsigned int cpu);
-
 void smp_store_cpu_info(int id);
 #define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
 
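The cpu_disable_common() and play_dead_common() declarations expose the backend-independent parts of the native path, so an alternative backend can reuse them and only add its own teardown. A hypothetical sketch (the xen_example_* names are invented here for illustration):

/* Hypothetical paravirt backend reusing the shared helpers above. */
static int xen_example_cpu_disable(void)
{
	cpu_disable_common();	/* generic bookkeeping shared with native */
	/* backend-specific work: stop timers, unbind event channels, ... */
	return 0;
}

static void xen_example_play_dead(void)
{
	play_dead_common();	/* generic offline preparation */
	/* backend-specific work: tell the hypervisor this vCPU is down */
}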
@@ -142,7 +167,6 @@ extern unsigned disabled_cpus __cpuinitdata;
  * from the initial startup. We map APIC_BASE very early in page_setup(),
  * so this is correct in the x86 case.
  */
-DECLARE_PER_CPU(int, cpu_number);
 #define raw_smp_processor_id() (x86_read_percpu(cpu_number))
 extern int safe_smp_processor_id(void);
 
@@ -165,30 +189,33 @@ extern int safe_smp_processor_id(void);
 
 #ifdef CONFIG_X86_LOCAL_APIC
 
+#ifndef CONFIG_X86_64
 static inline int logical_smp_processor_id(void)
 {
 	/* we don't want to mark this access volatile - bad code generation */
 	return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
 }
 
-#ifndef CONFIG_X86_64
+#include <mach_apicdef.h>
 static inline unsigned int read_apic_id(void)
 {
-	return *(u32 *)(APIC_BASE + APIC_ID);
+	unsigned int reg;
+
+	reg = *(u32 *)(APIC_BASE + APIC_ID);
+
+	return GET_APIC_ID(reg);
 }
-#else
-extern unsigned int read_apic_id(void);
 #endif
 
 
-# ifdef APIC_DEFINITION
+# if defined(APIC_DEFINITION) || defined(CONFIG_X86_64)
 extern int hard_smp_processor_id(void);
 # else
-# include <mach_apicdef.h>
+#include <mach_apicdef.h>
 static inline int hard_smp_processor_id(void)
 {
 	/* we don't want to mark this access volatile - bad code generation */
-	return GET_APIC_ID(read_apic_id());
+	return read_apic_id();
 }
 # endif /* APIC_DEFINITION */
 
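On 32-bit, read_apic_id() now returns the extracted ID field rather than the raw APIC_ID register, which is why hard_smp_processor_id() above can drop its own GET_APIC_ID() call. A rough sketch of the extraction, assuming the plain xAPIC layout where the ID occupies the top byte (the real mask comes from mach_apicdef.h and varies by sub-architecture):

/* Assumption: APIC ID in bits 31:24 of the APIC_ID register (xAPIC). */
#define EXAMPLE_GET_APIC_ID(reg)	(((reg) >> 24) & 0xFF)

/* e.g. a raw register value of 0x05000000 yields APIC ID 5 */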
@@ -200,9 +227,5 @@ static inline int hard_smp_processor_id(void)
 
 #endif /* CONFIG_X86_LOCAL_APIC */
 
-#ifdef CONFIG_HOTPLUG_CPU
-extern void cpu_uninit(void);
-#endif
-
 #endif /* __ASSEMBLY__ */
-#endif
+#endif /* ASM_X86__SMP_H */