author | Thomas Gleixner <tglx@linutronix.de> | 2008-01-30 07:30:36 -0500
---|---|---
committer | Ingo Molnar <mingo@elte.hu> | 2008-01-30 07:30:36 -0500
commit | ae9d983be1eefac4b5efad69a188e7ac89a75797 (patch) |
tree | ecdf7ad736e1fe98dff2277649b573135d1381fd /include/asm-x86/smp_64.h |
parent | c2805aa1d8ae51c7582d2ccbd736afa545cf5cc4 (diff) |
x86: cleanup smp.h variants
Bring the smp.h variants into sync to prepare for merging them and
for paravirt support.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-x86/smp_64.h')
-rw-r--r-- | include/asm-x86/smp_64.h | 133 |
1 file changed, 51 insertions(+), 82 deletions(-)
```diff
diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h
index ab612b0ff270..2feddda91e12 100644
--- a/include/asm-x86/smp_64.h
+++ b/include/asm-x86/smp_64.h
@@ -1,130 +1,99 @@
 #ifndef __ASM_SMP_H
 #define __ASM_SMP_H
 
-/*
- * We need the APIC definitions automatically as part of 'smp.h'
- */
-#include <linux/threads.h>
 #include <linux/cpumask.h>
-#include <linux/bitops.h>
 #include <linux/init.h>
-extern int disable_apic;
 
-#include <asm/mpspec.h>
+/*
+ * We need the APIC definitions automatically as part of 'smp.h'
+ */
 #include <asm/apic.h>
 #include <asm/io_apic.h>
-#include <asm/thread_info.h>
-
-#ifdef CONFIG_SMP
-
+#include <asm/mpspec.h>
 #include <asm/pda.h>
+#include <asm/thread_info.h>
 
-struct pt_regs;
-
-extern cpumask_t cpu_present_mask;
-extern cpumask_t cpu_possible_map;
-extern cpumask_t cpu_online_map;
 extern cpumask_t cpu_callout_map;
 extern cpumask_t cpu_initialized;
 
-/*
- * Private routines/data
- */
-
+extern int smp_num_siblings;
+extern unsigned int num_processors;
+
 extern void smp_alloc_memory(void);
-extern volatile unsigned long smp_invalidate_needed;
 extern void lock_ipi_call_lock(void);
 extern void unlock_ipi_call_lock(void);
-extern int smp_num_siblings;
-extern void smp_send_reschedule(int cpu);
+
 extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
 				  void *info, int wait);
 
-/*
- * cpu_sibling_map and cpu_core_map now live
- * in the per cpu area
- *
- * extern cpumask_t cpu_sibling_map[NR_CPUS];
- * extern cpumask_t cpu_core_map[NR_CPUS];
- */
+extern u8 __initdata x86_cpu_to_apicid_init[];
+extern void *x86_cpu_to_apicid_ptr;
+extern u8 bios_cpu_apicid[];
+
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_t, cpu_core_map);
 DECLARE_PER_CPU(u8, cpu_llc_id);
+DECLARE_PER_CPU(u8, x86_cpu_to_apicid);
 
-#define SMP_TRAMPOLINE_BASE 0x6000
-
-/*
- * On x86 all CPUs are mapped 1:1 to the APIC space.
- * This simplifies scheduling and IPI sending and
- * compresses data structures.
- */
-
-static inline int num_booting_cpus(void)
+static inline int cpu_present_to_apicid(int mps_cpu)
 {
-	return cpus_weight(cpu_callout_map);
+	if (mps_cpu < NR_CPUS)
+		return (int)bios_cpu_apicid[mps_cpu];
+	else
+		return BAD_APICID;
 }
 
-#define raw_smp_processor_id() read_pda(cpunumber)
+#ifdef CONFIG_SMP
+
+#define SMP_TRAMPOLINE_BASE 0x6000
 
 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
 extern void prefill_possible_map(void);
-extern unsigned num_processors;
 extern unsigned __cpuinitdata disabled_cpus;
 
-#define NO_PROC_ID 0xFF /* No processor magic marker */
-
-#endif /* CONFIG_SMP */
-
-#define safe_smp_processor_id() smp_processor_id()
+#define raw_smp_processor_id() read_pda(cpunumber)
+#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
 
-static inline int hard_smp_processor_id(void)
-{
-	/* we don't want to mark this access volatile - bad code generation */
-	return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
-}
+#define stack_smp_processor_id() \
+({ \
+	struct thread_info *ti; \
+	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
+	ti->cpu; \
+})
 
 /*
- * Some lowlevel functions might want to know about
- * the real APIC ID <-> CPU # mapping.
+ * On x86 all CPUs are mapped 1:1 to the APIC space. This simplifies
+ * scheduling and IPI sending and compresses data structures.
  */
-extern u8 __initdata x86_cpu_to_apicid_init[];
-extern void *x86_cpu_to_apicid_ptr;
-DECLARE_PER_CPU(u8, x86_cpu_to_apicid); /* physical ID */
-extern u8 bios_cpu_apicid[];
-
-static inline int cpu_present_to_apicid(int mps_cpu)
+static inline int num_booting_cpus(void)
 {
-	if (mps_cpu < NR_CPUS)
-		return (int)bios_cpu_apicid[mps_cpu];
-	else
-		return BAD_APICID;
+	return cpus_weight(cpu_callout_map);
 }
 
-#ifndef CONFIG_SMP
+extern void smp_send_reschedule(int cpu);
+
+#else /* CONFIG_SMP */
+
+extern unsigned int boot_cpu_id;
+#define cpu_physical_id(cpu) boot_cpu_id
 #define stack_smp_processor_id() 0
-#define cpu_logical_map(x) (x)
-#else
-#include <asm/thread_info.h>
-#define stack_smp_processor_id() \
-({ \
-	struct thread_info *ti; \
-	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
-	ti->cpu; \
-})
-#endif
+
+#endif /* !CONFIG_SMP */
+
+#define safe_smp_processor_id() smp_processor_id()
 
 static __inline int logical_smp_processor_id(void)
 {
 	/* we don't want to mark this access volatile - bad code generation */
-	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
+	return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
+}
+
+static inline int hard_smp_processor_id(void)
+{
+	/* we don't want to mark this access volatile - bad code generation */
+	return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID));
 }
 
-#ifdef CONFIG_SMP
-#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
-#else
-extern unsigned int boot_cpu_id;
-#define cpu_physical_id(cpu) boot_cpu_id
-#endif /* !CONFIG_SMP */
 #endif
 
```
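One of the helpers this patch moves out of the `#ifdef CONFIG_SMP` region, `cpu_present_to_apicid()`, is a bounds-checked lookup from MP-table CPU number to local APIC ID. The sketch below mirrors that logic as a standalone userspace C program; `NR_CPUS` and `BAD_APICID` are real kernel symbols, but the values chosen here and the contents of the stand-in `bios_cpu_apicid[]` table are invented purely for illustration.

```c
#include <stdio.h>

/* Stand-ins for the kernel's NR_CPUS and BAD_APICID; the names are real
 * kernel symbols, but these values are chosen only for the demo. */
#define NR_CPUS    8
#define BAD_APICID 0xFF

/* Mirrors bios_cpu_apicid[]: the BIOS-reported MP-table CPU number ->
 * local APIC ID table. The contents here are made up. */
static const unsigned char bios_cpu_apicid[NR_CPUS] = {
	0, 2, 4, 6, 1, 3, 5, 7
};

/* Same bounds-checked lookup as cpu_present_to_apicid() in the header:
 * an out-of-range MP CPU number yields BAD_APICID. */
static int cpu_present_to_apicid(int mps_cpu)
{
	if (mps_cpu < NR_CPUS)
		return (int)bios_cpu_apicid[mps_cpu];
	else
		return BAD_APICID;
}

int main(void)
{
	printf("mps_cpu 3  -> apicid 0x%02x\n", cpu_present_to_apicid(3));
	printf("mps_cpu 42 -> apicid 0x%02x\n", cpu_present_to_apicid(42));
	return 0;
}
```

The second call prints `0xff` (`BAD_APICID`), which is why callers in the kernel treat that value as "no such processor".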
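The `stack_smp_processor_id()` macro in the SMP branch relies on the kernel stack being `THREAD_SIZE`-aligned with `struct thread_info` at its base: masking `%rsp` with `CURRENT_MASK` (i.e. `~(THREAD_SIZE - 1)`) recovers the `thread_info` pointer without going through the PDA. A minimal userspace sketch of the same trick, with illustrative sizes:

```c
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Illustrative stand-ins for the kernel's values: x86-64 used an 8 KiB,
 * 8 KiB-aligned kernel stack with struct thread_info at its base, and
 * CURRENT_MASK == ~(THREAD_SIZE - 1). */
#define THREAD_SIZE  8192UL
#define CURRENT_MASK (~(THREAD_SIZE - 1))

struct thread_info {
	int cpu;	/* the field stack_smp_processor_id() reads */
};

int main(void)
{
	/* An aligned allocation stands in for a kernel stack. */
	unsigned char *stack = aligned_alloc(THREAD_SIZE, THREAD_SIZE);
	struct thread_info *ti = (struct thread_info *)stack;
	ti->cpu = 3;

	/* Take an address somewhere inside the stack (the kernel uses
	 * %rsp) and mask off the low bits: that lands back on the
	 * thread_info at the stack base, which is exactly what the
	 * "andq %rsp" inline asm in the macro computes. */
	uintptr_t sp = (uintptr_t)(stack + 5000);
	struct thread_info *found =
		(struct thread_info *)(sp & CURRENT_MASK);

	printf("cpu = %d\n", found->cpu);	/* prints: cpu = 3 */
	free(stack);
	return 0;
}
```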