about summary refs log tree commit diff stats
path: root/include/asm-x86/smp_64.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/asm-x86/smp_64.h')
-rw-r--r--  include/asm-x86/smp_64.h  117
1 files changed, 117 insertions, 0 deletions
diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h
new file mode 100644
index 000000000000..3f303d2365ed
--- /dev/null
+++ b/include/asm-x86/smp_64.h
@@ -0,0 +1,117 @@
1#ifndef __ASM_SMP_H
2#define __ASM_SMP_H
3
4/*
5 * We need the APIC definitions automatically as part of 'smp.h'
6 */
7#include <linux/threads.h>
8#include <linux/cpumask.h>
9#include <linux/bitops.h>
10#include <linux/init.h>
11extern int disable_apic;
12
13#include <asm/mpspec.h>
14#include <asm/apic.h>
15#include <asm/io_apic.h>
16#include <asm/thread_info.h>
17
18#ifdef CONFIG_SMP
19
20#include <asm/pda.h>
21
22struct pt_regs;
23
24extern cpumask_t cpu_present_mask;
25extern cpumask_t cpu_possible_map;
26extern cpumask_t cpu_online_map;
27extern cpumask_t cpu_callout_map;
28extern cpumask_t cpu_initialized;
29
30/*
31 * Private routines/data
32 */
33
34extern void smp_alloc_memory(void);
35extern volatile unsigned long smp_invalidate_needed;
36extern void lock_ipi_call_lock(void);
37extern void unlock_ipi_call_lock(void);
38extern int smp_num_siblings;
39extern void smp_send_reschedule(int cpu);
40
41extern cpumask_t cpu_sibling_map[NR_CPUS];
42extern cpumask_t cpu_core_map[NR_CPUS];
43extern u8 cpu_llc_id[NR_CPUS];
44
45#define SMP_TRAMPOLINE_BASE 0x6000
46
47/*
48 * On x86 all CPUs are mapped 1:1 to the APIC space.
49 * This simplifies scheduling and IPI sending and
50 * compresses data structures.
51 */
52
53static inline int num_booting_cpus(void)
54{
55 return cpus_weight(cpu_callout_map);
56}
57
58#define raw_smp_processor_id() read_pda(cpunumber)
59
60extern int __cpu_disable(void);
61extern void __cpu_die(unsigned int cpu);
62extern void prefill_possible_map(void);
63extern unsigned num_processors;
64extern unsigned __cpuinitdata disabled_cpus;
65
66#define NO_PROC_ID 0xFF /* No processor magic marker */
67
68#endif /* CONFIG_SMP */
69
70static inline int hard_smp_processor_id(void)
71{
72 /* we don't want to mark this access volatile - bad code generation */
73 return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
74}
75
76/*
77 * Some lowlevel functions might want to know about
78 * the real APIC ID <-> CPU # mapping.
79 */
80extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */
81extern u8 x86_cpu_to_log_apicid[NR_CPUS];
82extern u8 bios_cpu_apicid[];
83
84static inline int cpu_present_to_apicid(int mps_cpu)
85{
86 if (mps_cpu < NR_CPUS)
87 return (int)bios_cpu_apicid[mps_cpu];
88 else
89 return BAD_APICID;
90}
91
#ifdef CONFIG_SMP
#include <asm/thread_info.h>
/*
 * Recover the CPU number via the thread_info block found by masking
 * the stack pointer down to the base of the current kernel stack.
 */
#define stack_smp_processor_id() \
({ \
	struct thread_info *ti; \
	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
	ti->cpu; \
})
#else
#define stack_smp_processor_id() 0
#define cpu_logical_map(x) (x)
#endif
104
105static __inline int logical_smp_processor_id(void)
106{
107 /* we don't want to mark this access volatile - bad code generation */
108 return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
109}
110
#ifndef CONFIG_SMP
#define cpu_physical_id(cpu) boot_cpu_id
#else
/* Physical (APIC) ID of the given logical CPU number. */
#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
#endif /* !CONFIG_SMP */
116#endif
117