diff options
Diffstat (limited to 'arch/x86/include/asm/smp.h')
-rw-r--r-- | arch/x86/include/asm/smp.h | 229 |
1 files changed, 229 insertions, 0 deletions
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h new file mode 100644 index 000000000000..a6afc29f2dd9 --- /dev/null +++ b/arch/x86/include/asm/smp.h | |||
@@ -0,0 +1,229 @@ | |||
1 | #ifndef ASM_X86__SMP_H | ||
2 | #define ASM_X86__SMP_H | ||
3 | #ifndef __ASSEMBLY__ | ||
4 | #include <linux/cpumask.h> | ||
5 | #include <linux/init.h> | ||
6 | #include <asm/percpu.h> | ||
7 | |||
8 | /* | ||
9 | * We need the APIC definitions automatically as part of 'smp.h' | ||
10 | */ | ||
11 | #ifdef CONFIG_X86_LOCAL_APIC | ||
12 | # include <asm/mpspec.h> | ||
13 | # include <asm/apic.h> | ||
14 | # ifdef CONFIG_X86_IO_APIC | ||
15 | # include <asm/io_apic.h> | ||
16 | # endif | ||
17 | #endif | ||
18 | #include <asm/pda.h> | ||
19 | #include <asm/thread_info.h> | ||
20 | |||
21 | extern cpumask_t cpu_callout_map; | ||
22 | extern cpumask_t cpu_initialized; | ||
23 | extern cpumask_t cpu_callin_map; | ||
24 | |||
25 | extern void (*mtrr_hook)(void); | ||
26 | extern void zap_low_mappings(void); | ||
27 | |||
28 | extern int __cpuinit get_local_pda(int cpu); | ||
29 | |||
30 | extern int smp_num_siblings; | ||
31 | extern unsigned int num_processors; | ||
32 | extern cpumask_t cpu_initialized; | ||
33 | |||
34 | DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); | ||
35 | DECLARE_PER_CPU(cpumask_t, cpu_core_map); | ||
36 | DECLARE_PER_CPU(u16, cpu_llc_id); | ||
37 | #ifdef CONFIG_X86_32 | ||
38 | DECLARE_PER_CPU(int, cpu_number); | ||
39 | #endif | ||
40 | |||
41 | DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid); | ||
42 | DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); | ||
43 | |||
/* Static state in head.S used to set up a CPU */
extern struct {
	void *sp;		/* initial stack pointer for the booting CPU */
	unsigned short ss;	/* matching stack segment selector */
} stack_start;
49 | |||
/*
 * Dispatch table of SMP primitives, filled in by the platform (native or,
 * under CONFIG_PARAVIRT, a hypervisor backend).  The static inline
 * wrappers below route through this table.  Field order is part of the
 * initialization contract — do not reorder.
 */
struct smp_ops {
	/* Boot-time bring-up callbacks, in calling order. */
	void (*smp_prepare_boot_cpu)(void);
	void (*smp_prepare_cpus)(unsigned max_cpus);
	void (*smp_cpus_done)(unsigned max_cpus);

	/* Asynchronous cross-CPU signalling. */
	void (*smp_send_stop)(void);
	void (*smp_send_reschedule)(int cpu);

	/* CPU hotplug: bring a CPU up, or take one down and let it die. */
	int (*cpu_up)(unsigned cpu);
	int (*cpu_disable)(void);
	void (*cpu_die)(unsigned int cpu);
	void (*play_dead)(void);

	/* Function-call IPIs: multi-target (by mask) and single-target. */
	void (*send_call_func_ipi)(cpumask_t mask);
	void (*send_call_func_single_ipi)(int cpu);
};
66 | |||
/* Globals due to paravirt */
/* Recompute sibling/core topology maps for @cpu — NOTE(review): name-based; confirm against the definition. */
extern void set_cpu_sibling_map(int cpu);
69 | |||
#ifdef CONFIG_SMP
#ifndef CONFIG_PARAVIRT
/* Paravirt can interpose on the startup IPI sequence; native needs no hook. */
#define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0)
#endif
/* The live SMP primitive table the wrappers below dispatch through. */
extern struct smp_ops smp_ops;
75 | |||
76 | static inline void smp_send_stop(void) | ||
77 | { | ||
78 | smp_ops.smp_send_stop(); | ||
79 | } | ||
80 | |||
81 | static inline void smp_prepare_boot_cpu(void) | ||
82 | { | ||
83 | smp_ops.smp_prepare_boot_cpu(); | ||
84 | } | ||
85 | |||
86 | static inline void smp_prepare_cpus(unsigned int max_cpus) | ||
87 | { | ||
88 | smp_ops.smp_prepare_cpus(max_cpus); | ||
89 | } | ||
90 | |||
91 | static inline void smp_cpus_done(unsigned int max_cpus) | ||
92 | { | ||
93 | smp_ops.smp_cpus_done(max_cpus); | ||
94 | } | ||
95 | |||
96 | static inline int __cpu_up(unsigned int cpu) | ||
97 | { | ||
98 | return smp_ops.cpu_up(cpu); | ||
99 | } | ||
100 | |||
101 | static inline int __cpu_disable(void) | ||
102 | { | ||
103 | return smp_ops.cpu_disable(); | ||
104 | } | ||
105 | |||
106 | static inline void __cpu_die(unsigned int cpu) | ||
107 | { | ||
108 | smp_ops.cpu_die(cpu); | ||
109 | } | ||
110 | |||
111 | static inline void play_dead(void) | ||
112 | { | ||
113 | smp_ops.play_dead(); | ||
114 | } | ||
115 | |||
116 | static inline void smp_send_reschedule(int cpu) | ||
117 | { | ||
118 | smp_ops.smp_send_reschedule(cpu); | ||
119 | } | ||
120 | |||
121 | static inline void arch_send_call_function_single_ipi(int cpu) | ||
122 | { | ||
123 | smp_ops.send_call_func_single_ipi(cpu); | ||
124 | } | ||
125 | |||
126 | static inline void arch_send_call_function_ipi(cpumask_t mask) | ||
127 | { | ||
128 | smp_ops.send_call_func_ipi(mask); | ||
129 | } | ||
130 | |||
/* Native (bare-metal) implementations of the smp_ops callbacks above. */
void cpu_disable_common(void);
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void native_smp_cpus_done(unsigned int max_cpus);
int native_cpu_up(unsigned int cpunum);
int native_cpu_disable(void);
void native_cpu_die(unsigned int cpu);
void native_play_dead(void);
void play_dead_common(void);

void native_send_call_func_ipi(cpumask_t mask);
void native_send_call_func_single_ipi(int cpu);

/* Populate the possible-CPU map early — NOTE(review): confirm semantics at the definition. */
extern void prefill_possible_map(void);

void smp_store_cpu_info(int id);
/* Physical APIC id of @cpu, taken from the early per-CPU map. */
#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
148 | |||
149 | /* We don't mark CPUs online until __cpu_up(), so we need another measure */ | ||
150 | static inline int num_booting_cpus(void) | ||
151 | { | ||
152 | return cpus_weight(cpu_callout_map); | ||
153 | } | ||
#else
/* !CONFIG_SMP: empty stub so callers need not be made conditional. */
static inline void prefill_possible_map(void)
{
}
#endif /* CONFIG_SMP */
159 | |||
/* Count of CPUs present but not brought up — presumably disabled by firmware/cmdline; verify at the definition. */
extern unsigned disabled_cpus __cpuinitdata;
161 | |||
#ifdef CONFIG_X86_32_SMP
/*
 * This function is needed by all SMP systems. It must _always_ be valid
 * from the initial startup. We map APIC_BASE very early in page_setup(),
 * so this is correct in the x86 case.
 */
#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
extern int safe_smp_processor_id(void);

#elif defined(CONFIG_X86_64_SMP)
/* 64-bit keeps the CPU number in the PDA rather than a per-CPU variable. */
#define raw_smp_processor_id() read_pda(cpunumber)

/*
 * Derive the CPU number from the stack pointer: masking %rsp with
 * CURRENT_MASK yields the base of the current stack, where this kernel
 * keeps struct thread_info; read its ->cpu field.  Usable when the PDA
 * is not yet (or no longer) trustworthy.
 */
#define stack_smp_processor_id() \
({ \
	struct thread_info *ti; \
	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
	ti->cpu; \
})
#define safe_smp_processor_id() smp_processor_id()

#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */
/* UP build: there is only CPU 0. */
#define cpu_physical_id(cpu) boot_cpu_physical_apicid
#define safe_smp_processor_id() 0
#define stack_smp_processor_id() 0
#endif
187 | |||
#ifdef CONFIG_X86_LOCAL_APIC

#ifndef CONFIG_X86_64
/* Logical APIC id of the current CPU, read from the memory-mapped LDR. */
static inline int logical_smp_processor_id(void)
{
	/* we don't want to mark this access volatile - bad code generation */
	return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
}

#include <mach_apicdef.h>
/* Hardware APIC id from the memory-mapped ID register, decoded per subarch. */
static inline unsigned int read_apic_id(void)
{
	unsigned int reg;

	reg = *(u32 *)(APIC_BASE + APIC_ID);

	return GET_APIC_ID(reg);
}
#endif
207 | |||
208 | |||
# if defined(APIC_DEFINITION) || defined(CONFIG_X86_64)
/* Subarch (or 64-bit) supplies an out-of-line implementation. */
extern int hard_smp_processor_id(void);
# else
#include <mach_apicdef.h>
/* Default: the hard id is simply the local APIC's hardware id. */
static inline int hard_smp_processor_id(void)
{
	/* we don't want to mark this access volatile - bad code generation */
	return read_apic_id();
}
# endif /* APIC_DEFINITION */

#else /* CONFIG_X86_LOCAL_APIC */

/* No local APIC: on a UP build the only CPU is id 0. */
# ifndef CONFIG_SMP
# define hard_smp_processor_id() 0
# endif

#endif /* CONFIG_X86_LOCAL_APIC */
227 | |||
228 | #endif /* __ASSEMBLY__ */ | ||
229 | #endif /* ASM_X86__SMP_H */ | ||