Diffstat (limited to 'include/asm-x86/smp.h')
-rw-r--r--    include/asm-x86/smp.h    210
1 file changed, 207 insertions, 3 deletions
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index f2e8319a6b0b..62ebdec394b9 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -1,5 +1,209 @@
-#ifdef CONFIG_X86_32
-# include "smp_32.h"
+#ifndef _ASM_X86_SMP_H_
+#define _ASM_X86_SMP_H_
+#ifndef __ASSEMBLY__
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <asm/percpu.h>
+
+/*
+ * We need the APIC definitions automatically as part of 'smp.h'
+ */
+#ifdef CONFIG_X86_LOCAL_APIC
+# include <asm/mpspec.h>
+# include <asm/apic.h>
+# ifdef CONFIG_X86_IO_APIC
+#  include <asm/io_apic.h>
+# endif
+#endif
+#include <asm/pda.h>
+#include <asm/thread_info.h>
+
+extern cpumask_t cpu_callout_map;
+extern cpumask_t cpu_initialized;
+extern cpumask_t cpu_callin_map;
+
+extern void (*mtrr_hook)(void);
+extern void zap_low_mappings(void);
+
+extern int smp_num_siblings;
+extern unsigned int num_processors;
+extern cpumask_t cpu_initialized;
+
+#ifdef CONFIG_SMP
+extern u16 x86_cpu_to_apicid_init[];
+extern u16 x86_bios_cpu_apicid_init[];
+extern void *x86_cpu_to_apicid_early_ptr;
+extern void *x86_bios_cpu_apicid_early_ptr;
 #else
-# include "smp_64.h"
+#define x86_cpu_to_apicid_early_ptr NULL
+#define x86_bios_cpu_apicid_early_ptr NULL
+#endif
+
+DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
+DECLARE_PER_CPU(cpumask_t, cpu_core_map);
+DECLARE_PER_CPU(u16, cpu_llc_id);
+DECLARE_PER_CPU(u16, x86_cpu_to_apicid);
+DECLARE_PER_CPU(u16, x86_bios_cpu_apicid);
+
+/* Static state in head.S used to set up a CPU */
+extern struct {
+	void *sp;
+	unsigned short ss;
+} stack_start;
+
+struct smp_ops {
+	void (*smp_prepare_boot_cpu)(void);
+	void (*smp_prepare_cpus)(unsigned max_cpus);
+	int (*cpu_up)(unsigned cpu);
+	void (*smp_cpus_done)(unsigned max_cpus);
+
+	void (*smp_send_stop)(void);
+	void (*smp_send_reschedule)(int cpu);
+	int (*smp_call_function_mask)(cpumask_t mask,
+				      void (*func)(void *info), void *info,
+				      int wait);
+};
+
+/* Globals due to paravirt */
+extern void set_cpu_sibling_map(int cpu);
+
+#ifdef CONFIG_SMP
+#ifndef CONFIG_PARAVIRT
+#define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0)
+#endif
+extern struct smp_ops smp_ops;
+
+static inline void smp_send_stop(void)
+{
+	smp_ops.smp_send_stop();
+}
+
+static inline void smp_prepare_boot_cpu(void)
+{
+	smp_ops.smp_prepare_boot_cpu();
+}
+
+static inline void smp_prepare_cpus(unsigned int max_cpus)
+{
+	smp_ops.smp_prepare_cpus(max_cpus);
+}
+
+static inline void smp_cpus_done(unsigned int max_cpus)
+{
+	smp_ops.smp_cpus_done(max_cpus);
+}
+
+static inline int __cpu_up(unsigned int cpu)
+{
+	return smp_ops.cpu_up(cpu);
+}
+
+static inline void smp_send_reschedule(int cpu)
+{
+	smp_ops.smp_send_reschedule(cpu);
+}
+
+static inline int smp_call_function_mask(cpumask_t mask,
+					 void (*func) (void *info), void *info,
+					 int wait)
+{
+	return smp_ops.smp_call_function_mask(mask, func, info, wait);
+}
+
+void native_smp_prepare_boot_cpu(void);
+void native_smp_prepare_cpus(unsigned int max_cpus);
+void native_smp_cpus_done(unsigned int max_cpus);
+int native_cpu_up(unsigned int cpunum);
+
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
+
+extern void prefill_possible_map(void);
+
+void smp_store_cpu_info(int id);
+#define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
+
+/* We don't mark CPUs online until __cpu_up(), so we need another measure */
+static inline int num_booting_cpus(void)
+{
+	return cpus_weight(cpu_callout_map);
+}
+#endif /* CONFIG_SMP */
+
+extern unsigned disabled_cpus __cpuinitdata;
+
+#ifdef CONFIG_X86_32_SMP
+/*
+ * This function is needed by all SMP systems. It must _always_ be valid
+ * from the initial startup. We map APIC_BASE very early in page_setup(),
+ * so this is correct in the x86 case.
+ */
+DECLARE_PER_CPU(int, cpu_number);
+#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
+extern int safe_smp_processor_id(void);
+
+#elif defined(CONFIG_X86_64_SMP)
+#define raw_smp_processor_id()	read_pda(cpunumber)
+
+#define stack_smp_processor_id()					\
+({									\
+	struct thread_info *ti;						\
+	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
+	ti->cpu;							\
+})
+#define safe_smp_processor_id()		smp_processor_id()
+
+#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */
+#define cpu_physical_id(cpu)		boot_cpu_physical_apicid
+#define safe_smp_processor_id()		0
+#define stack_smp_processor_id()	0
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+static inline int logical_smp_processor_id(void)
+{
+	/* we don't want to mark this access volatile - bad code generation */
+	return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
+}
+
+#ifndef CONFIG_X86_64
+static inline unsigned int read_apic_id(void)
+{
+	return *(u32 *)(APIC_BASE + APIC_ID);
+}
+#else
+extern unsigned int read_apic_id(void);
+#endif
+
+
+# ifdef APIC_DEFINITION
+extern int hard_smp_processor_id(void);
+# else
+#  include <mach_apicdef.h>
+static inline int hard_smp_processor_id(void)
+{
+	/* we don't want to mark this access volatile - bad code generation */
+	return GET_APIC_ID(read_apic_id());
+}
+# endif /* APIC_DEFINITION */
+
+#else /* CONFIG_X86_LOCAL_APIC */
+
+# ifndef CONFIG_SMP
+#  define hard_smp_processor_id()	0
+# endif
+
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern void cpu_exit_clear(void);
+extern void cpu_uninit(void);
+extern void remove_siblinginfo(int cpu);
+#endif
+
+extern void smp_alloc_memory(void);
+extern void lock_ipi_call_lock(void);
+extern void unlock_ipi_call_lock(void);
+#endif /* __ASSEMBLY__ */
 #endif
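
The core of this unified header is the smp_ops indirection: a table of function pointers plus static inline wrappers (smp_send_stop(), smp_prepare_cpus(), __cpu_up(), smp_call_function_mask(), ...) that dispatch through the global struct smp_ops, so a paravirtualized backend can substitute its own implementations for the declared native_smp_*() hooks. The freestanding user-space sketch below only models that pattern; the demo_smp_ops type, the native_* stand-ins and main() are illustrative, not the kernel's actual types or wiring.

/*
 * Minimal user-space model of the smp_ops indirection in the header above.
 * All names here (demo_smp_ops, native_*, cpu_up) are illustrative stand-ins.
 */
#include <stdio.h>

struct demo_smp_ops {
	void (*smp_prepare_cpus)(unsigned int max_cpus);
	int  (*cpu_up)(unsigned int cpu);
	void (*smp_send_reschedule)(int cpu);
};

/* Default "native" implementations, analogous to the native_smp_*() hooks. */
static void native_prepare_cpus(unsigned int max_cpus)
{
	printf("native: preparing up to %u CPUs\n", max_cpus);
}

static int native_cpu_up(unsigned int cpu)
{
	printf("native: booting CPU %u\n", cpu);
	return 0;
}

static void native_send_reschedule(int cpu)
{
	printf("native: reschedule IPI to CPU %d\n", cpu);
}

/* One global ops table; an alternative backend would overwrite these pointers. */
static struct demo_smp_ops smp_ops = {
	.smp_prepare_cpus	= native_prepare_cpus,
	.cpu_up			= native_cpu_up,
	.smp_send_reschedule	= native_send_reschedule,
};

/* Thin wrappers, mirroring the static inlines in the header (e.g. __cpu_up()). */
static inline void smp_prepare_cpus(unsigned int max_cpus)
{
	smp_ops.smp_prepare_cpus(max_cpus);
}

static inline int cpu_up(unsigned int cpu)
{
	return smp_ops.cpu_up(cpu);
}

int main(void)
{
	smp_prepare_cpus(4);
	for (unsigned int cpu = 1; cpu < 4; cpu++)
		cpu_up(cpu);
	smp_ops.smp_send_reschedule(2);	/* callers always go through the table */
	return 0;
}

Swapping the three pointers in the table is all an alternative backend has to do, which is why callers go through the inline wrappers rather than calling the native functions directly.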