author     Ingo Molnar <mingo@elte.hu>  2009-02-24 15:52:27 -0500
committer  Ingo Molnar <mingo@elte.hu>  2009-02-24 15:52:27 -0500
commit     87b203079ed949de52f0d92aeae20e5e0116c12f (patch)
tree       1878756f936963822ed2d51a15db1da5814973e7 /arch/x86/kernel/apic/apic_flat_64.c
parent     58105ef1857112a186696c9b8957020090226a28 (diff)
parent     a852cbfaaf8122827602027b1614971cfd832304 (diff)
Merge branch 'x86/core' into core/percpu
Diffstat (limited to 'arch/x86/kernel/apic/apic_flat_64.c')
-rw-r--r--  arch/x86/kernel/apic/apic_flat_64.c | 389
1 file changed, 389 insertions, 0 deletions
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
new file mode 100644
index 00000000000..3b002995e14
--- /dev/null
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -0,0 +1,389 @@
/*
 * Copyright 2004 James Cleverdon, IBM.
 * Subject to the GNU Public License, v.2
 *
 * Flat APIC subarch code.
 *
 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
 * James Cleverdon.
 */
#include <linux/errno.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/hardirq.h>
#include <asm/smp.h>
#include <asm/apic.h>
#include <asm/ipi.h>

#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif

static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return 1;
}

static const struct cpumask *flat_target_cpus(void)
{
	return cpu_online_mask;
}

static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	/* Careful. Some cpus do not strictly honor the set of cpus
	 * specified in the interrupt destination when using lowest
	 * priority interrupt delivery mode.
	 *
	 * In particular there was a hyperthreading cpu observed to
	 * deliver interrupts to the wrong hyperthread when only one
	 * hyperthread was specified in the interrupt destination.
	 */
	cpumask_clear(retmask);
	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}

/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LDR and TPR before enabling
 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116). So here it goes...
 */
static void flat_init_apic_ldr(void)
{
	unsigned long val;
	unsigned long num, id;

	num = smp_processor_id();
	id = 1UL << num;
	apic_write(APIC_DFR, APIC_DFR_FLAT);
	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	val |= SET_APIC_LOGICAL_ID(id);
	apic_write(APIC_LDR, val);
}

static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
	local_irq_restore(flags);
}

static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];

	_flat_send_IPI_mask(mask, vector);
}

static void
flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	int cpu = smp_processor_id();

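	/*
	 * mask is just the first word of the cpumask, so only clear our
	 * own bit if it actually lives in that word.
	 */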
	if (cpu < BITS_PER_LONG)
		clear_bit(cpu, &mask);

	_flat_send_IPI_mask(mask, vector);
}

static void flat_send_IPI_allbutself(int vector)
{
	int cpu = smp_processor_id();
#ifdef CONFIG_HOTPLUG_CPU
	int hotplug = 1;
#else
	int hotplug = 0;
#endif
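	/*
	 * The ALLBUT shortcut reaches every APIC regardless of the online
	 * mask, so with CPU hotplug (or for NMIs) send to the explicit
	 * online mask minus ourselves instead of using the shortcut.
	 */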
	if (hotplug || vector == NMI_VECTOR) {
		if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) {
			unsigned long mask = cpumask_bits(cpu_online_mask)[0];

			if (cpu < BITS_PER_LONG)
				clear_bit(cpu, &mask);

			_flat_send_IPI_mask(mask, vector);
		}
	} else if (num_online_cpus() > 1) {
		__default_send_IPI_shortcut(APIC_DEST_ALLBUT,
					vector, apic->dest_logical);
	}
}

static void flat_send_IPI_all(int vector)
{
	if (vector == NMI_VECTOR) {
		flat_send_IPI_mask(cpu_online_mask, vector);
	} else {
		__default_send_IPI_shortcut(APIC_DEST_ALLINC,
					vector, apic->dest_logical);
	}
}

static unsigned int flat_get_apic_id(unsigned long x)
{
	unsigned int id;

	id = (((x)>>24) & 0xFFu);

	return id;
}

static unsigned long set_apic_id(unsigned int id)
{
	unsigned long x;

	x = ((id & 0xFFu)<<24);
	return x;
}

static unsigned int read_xapic_id(void)
{
	unsigned int id;

	id = flat_get_apic_id(apic_read(APIC_ID));
	return id;
}

static int flat_apic_id_registered(void)
{
	return physid_isset(read_xapic_id(), phys_cpu_present_map);
}

static unsigned int flat_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
}

static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
						const struct cpumask *andmask)
{
	unsigned long mask1 = cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
	unsigned long mask2 = cpumask_bits(andmask)[0] & APIC_ALL_CPUS;

	return mask1 & mask2;
}

static int flat_phys_pkg_id(int initial_apic_id, int index_msb)
{
	return hard_smp_processor_id() >> index_msb;
}

struct apic apic_flat = {
	.name = "flat",
	.probe = NULL,
	.acpi_madt_oem_check = flat_acpi_madt_oem_check,
	.apic_id_registered = flat_apic_id_registered,

	.irq_delivery_mode = dest_LowestPrio,
	.irq_dest_mode = 1, /* logical */

	.target_cpus = flat_target_cpus,
	.disable_esr = 0,
	.dest_logical = APIC_DEST_LOGICAL,
	.check_apicid_used = NULL,
	.check_apicid_present = NULL,

	.vector_allocation_domain = flat_vector_allocation_domain,
	.init_apic_ldr = flat_init_apic_ldr,

	.ioapic_phys_id_map = NULL,
	.setup_apic_routing = NULL,
	.multi_timer_check = NULL,
	.apicid_to_node = NULL,
	.cpu_to_logical_apicid = NULL,
	.cpu_present_to_apicid = default_cpu_present_to_apicid,
	.apicid_to_cpu_present = NULL,
	.setup_portio_remap = NULL,
	.check_phys_apicid_present = default_check_phys_apicid_present,
	.enable_apic_mode = NULL,
	.phys_pkg_id = flat_phys_pkg_id,
	.mps_oem_check = NULL,

	.get_apic_id = flat_get_apic_id,
	.set_apic_id = set_apic_id,
	.apic_id_mask = 0xFFu << 24,

	.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,

	.send_IPI_mask = flat_send_IPI_mask,
	.send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself,
	.send_IPI_allbutself = flat_send_IPI_allbutself,
	.send_IPI_all = flat_send_IPI_all,
	.send_IPI_self = apic_send_IPI_self,

	.wakeup_cpu = NULL,
	.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert = NULL,
	.smp_callin_clear_local_apic = NULL,
	.inquire_remote_apic = NULL,

	.read = native_apic_mem_read,
	.write = native_apic_mem_write,
	.icr_read = native_apic_icr_read,
	.icr_write = native_apic_icr_write,
	.wait_icr_idle = native_apic_wait_icr_idle,
	.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
};

/*
 * Physflat mode is used when there are more than 8 CPUs on an AMD system.
 * We cannot use logical delivery in this case because the mask
 * overflows, so use physical mode.
 */
static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
#ifdef CONFIG_ACPI
	/*
	 * Quirk: some x86_64 machines can only use physical APIC mode
	 * regardless of how many processors are present (x86_64 ES7000
	 * is an example).
	 */
	if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID &&
	    (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
		printk(KERN_DEBUG "system APIC only can use physical flat");
		return 1;
	}
#endif

	return 0;
}

static const struct cpumask *physflat_target_cpus(void)
{
	return cpu_online_mask;
}

static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);
}

static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
{
	default_send_IPI_mask_sequence_phys(cpumask, vector);
}

static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
					      int vector)
{
	default_send_IPI_mask_allbutself_phys(cpumask, vector);
}

static void physflat_send_IPI_allbutself(int vector)
{
	default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
}

static void physflat_send_IPI_all(int vector)
{
	physflat_send_IPI_mask(cpu_online_mask, vector);
}

static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	cpu = cpumask_first(cpumask);
	if ((unsigned)cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_apicid, cpu);
	else
		return BAD_APICID;
}

static unsigned int
physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
				const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	}
	if (cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_apicid, cpu);

	return BAD_APICID;
}

struct apic apic_physflat = {

	.name = "physical flat",
	.probe = NULL,
	.acpi_madt_oem_check = physflat_acpi_madt_oem_check,
	.apic_id_registered = flat_apic_id_registered,

	.irq_delivery_mode = dest_Fixed,
	.irq_dest_mode = 0, /* physical */

	.target_cpus = physflat_target_cpus,
	.disable_esr = 0,
	.dest_logical = 0,
	.check_apicid_used = NULL,
	.check_apicid_present = NULL,

	.vector_allocation_domain = physflat_vector_allocation_domain,
	/* not needed, but shouldn't hurt: */
	.init_apic_ldr = flat_init_apic_ldr,

	.ioapic_phys_id_map = NULL,
	.setup_apic_routing = NULL,
	.multi_timer_check = NULL,
	.apicid_to_node = NULL,
	.cpu_to_logical_apicid = NULL,
	.cpu_present_to_apicid = default_cpu_present_to_apicid,
	.apicid_to_cpu_present = NULL,
	.setup_portio_remap = NULL,
	.check_phys_apicid_present = default_check_phys_apicid_present,
	.enable_apic_mode = NULL,
	.phys_pkg_id = flat_phys_pkg_id,
	.mps_oem_check = NULL,

	.get_apic_id = flat_get_apic_id,
	.set_apic_id = set_apic_id,
	.apic_id_mask = 0xFFu << 24,

	.cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and,

	.send_IPI_mask = physflat_send_IPI_mask,
	.send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself,
	.send_IPI_allbutself = physflat_send_IPI_allbutself,
	.send_IPI_all = physflat_send_IPI_all,
	.send_IPI_self = apic_send_IPI_self,

	.wakeup_cpu = NULL,
	.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert = NULL,
	.smp_callin_clear_local_apic = NULL,
	.inquire_remote_apic = NULL,

	.read = native_apic_mem_read,
	.write = native_apic_mem_write,
	.icr_read = native_apic_icr_read,
	.icr_write = native_apic_icr_write,
	.wait_icr_idle = native_apic_wait_icr_idle,
	.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
};
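
For readers skimming the diff: apic_physflat exists because flat mode packs one bit per CPU into an 8-bit logical destination, which overflows past 8 CPUs. Below is a minimal user-space sketch of that mapping, illustrative only and not part of this commit; the helper names are made up, and APIC_ALL_CPUS is assumed to be the 8-bit 0xff mask the kernel uses.

/* sketch.c - illustrative only; build with: gcc -o sketch sketch.c */
#include <stdio.h>

#define APIC_ALL_CPUS 0xFFu	/* assumed 8-bit flat logical destination field */

/* One bit per CPU, as flat_init_apic_ldr() programs into APIC_LDR. */
static unsigned int flat_logical_id(unsigned int cpu)
{
	return 1u << cpu;
}

/* Collapse the first cpumask word to a destination, as flat_cpu_mask_to_apicid() does. */
static unsigned int mask_to_dest(unsigned long cpumask_word)
{
	return cpumask_word & APIC_ALL_CPUS;
}

int main(void)
{
	unsigned long online = 0x2d;	/* pretend CPUs 0, 2, 3 and 5 are online */
	unsigned int cpu;

	for (cpu = 0; cpu < 8; cpu++)
		printf("cpu %u -> logical id 0x%02x\n", cpu, flat_logical_id(cpu));

	printf("IPI destination for mask 0x%lx: 0x%02x\n", online, mask_to_dest(online));

	/*
	 * CPU 8 would need bit 8, which no longer fits in the 8-bit field;
	 * that overflow is why apic_physflat falls back to physical delivery.
	 */
	return 0;
}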