Diffstat (limited to 'include/asm-x86/smp.h')
-rw-r--r--  include/asm-x86/smp.h  |  67
1 file changed, 44 insertions(+), 23 deletions(-)
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 3c877f74f279..a6afc29f2dd9 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_SMP_H_
-#define _ASM_X86_SMP_H_
+#ifndef ASM_X86__SMP_H
+#define ASM_X86__SMP_H
 #ifndef __ASSEMBLY__
 #include <linux/cpumask.h>
 #include <linux/init.h>
@@ -34,6 +34,9 @@ extern cpumask_t cpu_initialized;
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_t, cpu_core_map);
 DECLARE_PER_CPU(u16, cpu_llc_id);
+#ifdef CONFIG_X86_32
+DECLARE_PER_CPU(int, cpu_number);
+#endif
 
 DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
@@ -47,12 +50,16 @@ extern struct {
 struct smp_ops {
 	void (*smp_prepare_boot_cpu)(void);
 	void (*smp_prepare_cpus)(unsigned max_cpus);
-	int (*cpu_up)(unsigned cpu);
 	void (*smp_cpus_done)(unsigned max_cpus);
 
 	void (*smp_send_stop)(void);
 	void (*smp_send_reschedule)(int cpu);
 
+	int (*cpu_up)(unsigned cpu);
+	int (*cpu_disable)(void);
+	void (*cpu_die)(unsigned int cpu);
+	void (*play_dead)(void);
+
 	void (*send_call_func_ipi)(cpumask_t mask);
 	void (*send_call_func_single_ipi)(int cpu);
 };
@@ -91,6 +98,21 @@ static inline int __cpu_up(unsigned int cpu)
 	return smp_ops.cpu_up(cpu);
 }
 
+static inline int __cpu_disable(void)
+{
+	return smp_ops.cpu_disable();
+}
+
+static inline void __cpu_die(unsigned int cpu)
+{
+	smp_ops.cpu_die(cpu);
+}
+
+static inline void play_dead(void)
+{
+	smp_ops.play_dead();
+}
+
 static inline void smp_send_reschedule(int cpu)
 {
 	smp_ops.smp_send_reschedule(cpu);
@@ -106,15 +128,20 @@ static inline void arch_send_call_function_ipi(cpumask_t mask)
 	smp_ops.send_call_func_ipi(mask);
 }
 
+void cpu_disable_common(void);
 void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
 void native_smp_cpus_done(unsigned int max_cpus);
 int native_cpu_up(unsigned int cpunum);
+int native_cpu_disable(void);
+void native_cpu_die(unsigned int cpu);
+void native_play_dead(void);
+void play_dead_common(void);
+
 void native_send_call_func_ipi(cpumask_t mask);
 void native_send_call_func_single_ipi(int cpu);
 
-extern int __cpu_disable(void);
-extern void __cpu_die(unsigned int cpu);
+extern void prefill_possible_map(void);
 
 void smp_store_cpu_info(int id);
 #define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
@@ -124,15 +151,11 @@ static inline int num_booting_cpus(void)
 {
 	return cpus_weight(cpu_callout_map);
 }
-#endif /* CONFIG_SMP */
-
-#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_CPU)
-extern void prefill_possible_map(void);
 #else
 static inline void prefill_possible_map(void)
 {
 }
-#endif
+#endif /* CONFIG_SMP */
 
 extern unsigned disabled_cpus __cpuinitdata;
 
@@ -142,7 +165,6 @@ extern unsigned disabled_cpus __cpuinitdata;
  * from the initial startup. We map APIC_BASE very early in page_setup(),
  * so this is correct in the x86 case.
  */
-DECLARE_PER_CPU(int, cpu_number);
 #define raw_smp_processor_id() (x86_read_percpu(cpu_number))
 extern int safe_smp_processor_id(void);
 
@@ -165,30 +187,33 @@ extern int safe_smp_processor_id(void);
 
 #ifdef CONFIG_X86_LOCAL_APIC
 
+#ifndef CONFIG_X86_64
 static inline int logical_smp_processor_id(void)
 {
 	/* we don't want to mark this access volatile - bad code generation */
 	return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
 }
 
-#ifndef CONFIG_X86_64
+#include <mach_apicdef.h>
 static inline unsigned int read_apic_id(void)
 {
-	return *(u32 *)(APIC_BASE + APIC_ID);
+	unsigned int reg;
+
+	reg = *(u32 *)(APIC_BASE + APIC_ID);
+
+	return GET_APIC_ID(reg);
 }
-#else
-extern unsigned int read_apic_id(void);
 #endif
 
 
-# ifdef APIC_DEFINITION
+# if defined(APIC_DEFINITION) || defined(CONFIG_X86_64)
 extern int hard_smp_processor_id(void);
 # else
-# include <mach_apicdef.h>
+#include <mach_apicdef.h>
 static inline int hard_smp_processor_id(void)
 {
 	/* we don't want to mark this access volatile - bad code generation */
-	return GET_APIC_ID(read_apic_id());
+	return read_apic_id();
 }
 # endif /* APIC_DEFINITION */
 
@@ -200,9 +225,5 @@ static inline int hard_smp_processor_id(void)
 
 #endif /* CONFIG_X86_LOCAL_APIC */
 
-#ifdef CONFIG_HOTPLUG_CPU
-extern void cpu_uninit(void);
-#endif
-
 #endif /* __ASSEMBLY__ */
-#endif
+#endif /* ASM_X86__SMP_H */
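
The point of routing __cpu_disable(), __cpu_die() and play_dead() through smp_ops is that a non-native (e.g. paravirtualized) backend can supply its own CPU-offline path while reusing the shared helpers cpu_disable_common() and play_dead_common() declared above. The following is only a minimal sketch of that wiring, not code from this commit; the my_* callback names and my_smp_init() are hypothetical, while struct smp_ops, smp_ops and the *_common() helpers come from this header.

/*
 * Sketch (assumption, not part of this patch): an alternative SMP backend
 * hooking the CPU-hotplug callbacks that this change adds to struct smp_ops.
 * Only the hook wiring is real API; the callback bodies are placeholders.
 */
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/smp.h>
#include <asm/processor.h>

static int my_cpu_disable(void)
{
	/* shared bookkeeping: remove this CPU from the online map, fix up IRQs */
	cpu_disable_common();
	return 0;
}

static void my_cpu_die(unsigned int cpu)
{
	/* runs on a surviving CPU: wait for @cpu to stop, then free its state */
}

static void my_play_dead(void)
{
	/* runs on the CPU going offline: common idle-exit, then park it */
	play_dead_common();
	for (;;)
		cpu_relax();	/* a real backend would halt or yield the VCPU here */
}

static void __init my_smp_init(void)
{
	/* keep the native bring-up path, override only the offline path */
	smp_ops.cpu_disable = my_cpu_disable;
	smp_ops.cpu_die     = my_cpu_die;
	smp_ops.play_dead   = my_play_dead;
}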