| author | Glauber Costa <gcosta@redhat.com> | 2008-03-03 12:12:53 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-04-17 11:40:56 -0400 |
| commit | 8202350367ac11d571f6dd4c21c2027a4d235276 | |
| tree | 2af8ec46d8191638c43b13ca16a8df2545db5064 /arch/x86/kernel/smp_32.c | |
| parent | f9e47a126be2eaabf04a1a5c71ca7b23a473d0d8 | |
x86: create ipi.c
This patch moves all IPI- and APIC-related functions
from smp_32.c to ipi.c.
Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/smp_32.c')
-rw-r--r--  arch/x86/kernel/smp_32.c  153
1 file changed, 0 insertions, 153 deletions
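Because this view is limited to arch/x86/kernel/smp_32.c, only the deletion half of the move is shown below; the new arch/x86/kernel/ipi.c added by the same commit does not appear. As a rough sketch, reconstructed purely from the signatures visible in the deletions (the static inline helpers would stay internal to the new file), the interface ipi.c takes over looks roughly like this:

```c
/*
 * Sketch only: reconstructed from the function signatures visible in
 * the diff below, not taken from the actual ipi.c added by this commit.
 */
void __send_IPI_shortcut(unsigned int shortcut, int vector);
void send_IPI_self(int vector);
void send_IPI_mask_bitmask(cpumask_t cpumask, int vector);
void send_IPI_mask_sequence(cpumask_t mask, int vector);
int safe_smp_processor_id(void);        /* moved by the second hunk */
```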
diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c
index 61e546e85733..d80623aba9c5 100644
--- a/arch/x86/kernel/smp_32.c
+++ b/arch/x86/kernel/smp_32.c
@@ -107,132 +107,6 @@
 
 DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
 
-/*
- * the following functions deal with sending IPIs between CPUs.
- *
- * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
- */
-
-static inline int __prepare_ICR (unsigned int shortcut, int vector)
-{
-        unsigned int icr = shortcut | APIC_DEST_LOGICAL;
-
-        switch (vector) {
-        default:
-                icr |= APIC_DM_FIXED | vector;
-                break;
-        case NMI_VECTOR:
-                icr |= APIC_DM_NMI;
-                break;
-        }
-        return icr;
-}
-
-static inline int __prepare_ICR2 (unsigned int mask)
-{
-        return SET_APIC_DEST_FIELD(mask);
-}
-
-void __send_IPI_shortcut(unsigned int shortcut, int vector)
-{
-        /*
-         * Subtle. In the case of the 'never do double writes' workaround
-         * we have to lock out interrupts to be safe. As we don't care
-         * of the value read we use an atomic rmw access to avoid costly
-         * cli/sti. Otherwise we use an even cheaper single atomic write
-         * to the APIC.
-         */
-        unsigned int cfg;
-
-        /*
-         * Wait for idle.
-         */
-        apic_wait_icr_idle();
-
-        /*
-         * No need to touch the target chip field
-         */
-        cfg = __prepare_ICR(shortcut, vector);
-
-        /*
-         * Send the IPI. The write to APIC_ICR fires this off.
-         */
-        apic_write_around(APIC_ICR, cfg);
-}
-
-void send_IPI_self(int vector)
-{
-        __send_IPI_shortcut(APIC_DEST_SELF, vector);
-}
-
-/*
- * This is used to send an IPI with no shorthand notation (the destination is
- * specified in bits 56 to 63 of the ICR).
- */
-static inline void __send_IPI_dest_field(unsigned long mask, int vector)
-{
-        unsigned long cfg;
-
-        /*
-         * Wait for idle.
-         */
-        if (unlikely(vector == NMI_VECTOR))
-                safe_apic_wait_icr_idle();
-        else
-                apic_wait_icr_idle();
-
-        /*
-         * prepare target chip field
-         */
-        cfg = __prepare_ICR2(mask);
-        apic_write_around(APIC_ICR2, cfg);
-
-        /*
-         * program the ICR
-         */
-        cfg = __prepare_ICR(0, vector);
-
-        /*
-         * Send the IPI. The write to APIC_ICR fires this off.
-         */
-        apic_write_around(APIC_ICR, cfg);
-}
-
-/*
- * This is only used on smaller machines.
- */
-void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
-{
-        unsigned long mask = cpus_addr(cpumask)[0];
-        unsigned long flags;
-
-        local_irq_save(flags);
-        WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
-        __send_IPI_dest_field(mask, vector);
-        local_irq_restore(flags);
-}
-
-void send_IPI_mask_sequence(cpumask_t mask, int vector)
-{
-        unsigned long flags;
-        unsigned int query_cpu;
-
-        /*
-         * Hack. The clustered APIC addressing mode doesn't allow us to send
-         * to an arbitrary mask, so I do a unicasts to each CPU instead. This
-         * should be modified to do 1 message per cluster ID - mbligh
-         */
-
-        local_irq_save(flags);
-        for_each_possible_cpu(query_cpu) {
-                if (cpu_isset(query_cpu, mask)) {
-                        __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
-                                              vector);
-                }
-        }
-        local_irq_restore(flags);
-}
-
 #include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */
 
 /*
@@ -465,30 +339,3 @@ void flush_tlb_all(void)
 {
         on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
 }
-
-static int convert_apicid_to_cpu(int apic_id)
-{
-        int i;
-
-        for_each_possible_cpu(i) {
-                if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
-                        return i;
-        }
-        return -1;
-}
-
-int safe_smp_processor_id(void)
-{
-        int apicid, cpuid;
-
-        if (!boot_cpu_has(X86_FEATURE_APIC))
-                return 0;
-
-        apicid = hard_smp_processor_id();
-        if (apicid == BAD_APICID)
-                return 0;
-
-        cpuid = convert_apicid_to_cpu(apicid);
-
-        return cpuid >= 0 ? cpuid : 0;
-}
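A note on the sequence being moved: __send_IPI_dest_field programs the 64-bit interrupt command register in two halves, writing the destination into APIC_ICR2 (the high word, whose bits 24-31 are bits 56-63 of the full ICR) before APIC_ICR, because the write to the low word is what actually dispatches the interrupt. Below is a self-contained sketch of the bit arithmetic, with the constants copied from the era's <asm/apicdef.h> so it builds on its own; treat it as illustrative, not as the kernel's implementation:

```c
#include <stdio.h>

/* Values as in <asm/apicdef.h> of the time, copied here so the
 * example is standalone. */
#define APIC_DEST_LOGICAL       0x00800 /* logical destination mode */
#define APIC_DM_FIXED           0x00000 /* fixed delivery mode */
#define APIC_DM_NMI             0x00400 /* NMI delivery mode */
#define NMI_VECTOR              0x02
#define SET_APIC_DEST_FIELD(x)  ((x) << 24)     /* destination, high word */

/* Mirrors __prepare_ICR from the diff above. */
static unsigned int prepare_ICR(unsigned int shortcut, int vector)
{
        unsigned int icr = shortcut | APIC_DEST_LOGICAL;

        switch (vector) {
        default:
                icr |= APIC_DM_FIXED | vector;
                break;
        case NMI_VECTOR:
                icr |= APIC_DM_NMI;     /* NMIs carry no vector */
                break;
        }
        return icr;
}

int main(void)
{
        /* e.g. an IPI on vector 0xfd aimed at logical APIC mask 0x3:
         * ICR2 would be written first, then the ICR write fires it off. */
        printf("ICR2 = %#010x\n", SET_APIC_DEST_FIELD(0x3u));
        printf("ICR  = %#010x\n", prepare_ICR(0, 0xfd));
        return 0;
}
```

Run as-is this prints ICR2 = 0x03000000 and ICR = 0x000008fd: the destination mask lands in the top byte of the high word, while the low word combines logical destination mode with fixed delivery of vector 0xfd.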