diff options
Diffstat (limited to 'arch/x86/kernel/apic/apic_flat_64.c')
-rw-r--r-- | arch/x86/kernel/apic/apic_flat_64.c | 387 |
1 files changed, 387 insertions, 0 deletions
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c new file mode 100644 index 00000000000..f933822dba1 --- /dev/null +++ b/arch/x86/kernel/apic/apic_flat_64.c | |||
@@ -0,0 +1,387 @@ | |||
1 | /* | ||
2 | * Copyright 2004 James Cleverdon, IBM. | ||
3 | * Subject to the GNU Public License, v.2 | ||
4 | * | ||
5 | * Flat APIC subarch code. | ||
6 | * | ||
7 | * Hacked for x86-64 by James Cleverdon from i386 architecture code by | ||
8 | * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and | ||
9 | * James Cleverdon. | ||
10 | */ | ||
11 | #include <linux/errno.h> | ||
12 | #include <linux/threads.h> | ||
13 | #include <linux/cpumask.h> | ||
14 | #include <linux/string.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/ctype.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/hardirq.h> | ||
19 | #include <asm/smp.h> | ||
20 | #include <asm/apic.h> | ||
21 | #include <asm/ipi.h> | ||
22 | |||
23 | #ifdef CONFIG_ACPI | ||
24 | #include <acpi/acpi_bus.h> | ||
25 | #endif | ||
26 | |||
/*
 * MADT OEM check for the flat driver: accept any system.  Flat logical
 * mode is the default unless a more specific APIC driver claims the box.
 */
static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return 1;
}
31 | |||
/* Every online CPU is a valid interrupt target in flat logical mode. */
static const struct cpumask *flat_target_cpus(void)
{
	return cpu_online_mask;
}
36 | |||
/* Build the set of CPUs a vector allocated for @cpu may be delivered to. */
static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	/* Careful. Some cpus do not strictly honor the set of cpus
	 * specified in the interrupt destination when using lowest
	 * priority interrupt delivery mode.
	 *
	 * In particular there was a hyperthreading cpu observed to
	 * deliver interrupts to the wrong hyperthread when only one
	 * hyperthread was specified in the interrupt destination.
	 */
	cpumask_clear(retmask);
	/* Target all 8 flat-mode logical CPUs (low byte of the mask). */
	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}
50 | |||
/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LDR and TPR before enabling
 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116).  So here it goes...
 */
static void flat_init_apic_ldr(void)
{
	unsigned long val;
	unsigned long num, id;

	num = smp_processor_id();
	id = 1UL << num;	/* one LDR bit per CPU: flat mode supports <= 8 */
	/* Select the flat model first, then program our LDR bit. */
	apic_write(APIC_DFR, APIC_DFR_FLAT);
	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	val |= SET_APIC_LOGICAL_ID(id);
	apic_write(APIC_LDR, val);
}
70 | |||
/*
 * Low-level send: write @mask straight into the ICR destination field.
 * Interrupts are disabled so the ICR programming sequence is not
 * interleaved with another IPI sent from an interrupt on this CPU.
 */
static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
	local_irq_restore(flags);
}
79 | |||
80 | static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector) | ||
81 | { | ||
82 | unsigned long mask = cpumask_bits(cpumask)[0]; | ||
83 | |||
84 | _flat_send_IPI_mask(mask, vector); | ||
85 | } | ||
86 | |||
87 | static void | ||
88 | flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) | ||
89 | { | ||
90 | unsigned long mask = cpumask_bits(cpumask)[0]; | ||
91 | int cpu = smp_processor_id(); | ||
92 | |||
93 | if (cpu < BITS_PER_LONG) | ||
94 | clear_bit(cpu, &mask); | ||
95 | |||
96 | _flat_send_IPI_mask(mask, vector); | ||
97 | } | ||
98 | |||
99 | static void flat_send_IPI_allbutself(int vector) | ||
100 | { | ||
101 | int cpu = smp_processor_id(); | ||
102 | #ifdef CONFIG_HOTPLUG_CPU | ||
103 | int hotplug = 1; | ||
104 | #else | ||
105 | int hotplug = 0; | ||
106 | #endif | ||
107 | if (hotplug || vector == NMI_VECTOR) { | ||
108 | if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) { | ||
109 | unsigned long mask = cpumask_bits(cpu_online_mask)[0]; | ||
110 | |||
111 | if (cpu < BITS_PER_LONG) | ||
112 | clear_bit(cpu, &mask); | ||
113 | |||
114 | _flat_send_IPI_mask(mask, vector); | ||
115 | } | ||
116 | } else if (num_online_cpus() > 1) { | ||
117 | __default_send_IPI_shortcut(APIC_DEST_ALLBUT, | ||
118 | vector, apic->dest_logical); | ||
119 | } | ||
120 | } | ||
121 | |||
122 | static void flat_send_IPI_all(int vector) | ||
123 | { | ||
124 | if (vector == NMI_VECTOR) { | ||
125 | flat_send_IPI_mask(cpu_online_mask, vector); | ||
126 | } else { | ||
127 | __default_send_IPI_shortcut(APIC_DEST_ALLINC, | ||
128 | vector, apic->dest_logical); | ||
129 | } | ||
130 | } | ||
131 | |||
/* Extract the 8-bit APIC ID from bits 31:24 of an APIC_ID register value. */
static unsigned int flat_get_apic_id(unsigned long x)
{
	return (x >> 24) & 0xFFu;
}
140 | |||
/* Pack an 8-bit APIC ID into bits 31:24, the APIC_ID register layout. */
static unsigned long set_apic_id(unsigned int id)
{
	return (unsigned long)((id & 0xFFu) << 24);
}
148 | |||
149 | static unsigned int read_xapic_id(void) | ||
150 | { | ||
151 | unsigned int id; | ||
152 | |||
153 | id = flat_get_apic_id(apic_read(APIC_ID)); | ||
154 | return id; | ||
155 | } | ||
156 | |||
/* Is our own hardware APIC ID present in the boot-time physid map? */
static int flat_apic_id_registered(void)
{
	return physid_isset(read_xapic_id(), phys_cpu_present_map);
}
161 | |||
/* In flat logical mode the destination APIC ID is just the low 8
 * bits of the cpumask. */
static unsigned int flat_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
}
166 | |||
167 | static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, | ||
168 | const struct cpumask *andmask) | ||
169 | { | ||
170 | unsigned long mask1 = cpumask_bits(cpumask)[0] & APIC_ALL_CPUS; | ||
171 | unsigned long mask2 = cpumask_bits(andmask)[0] & APIC_ALL_CPUS; | ||
172 | |||
173 | return mask1 & mask2; | ||
174 | } | ||
175 | |||
/* Physical package ID: shift the core/thread bits (index_msb) off the
 * current CPU's hardware APIC ID.  Note it reads the running CPU's ID
 * rather than using the initial_apic_id argument. */
static int flat_phys_pkg_id(int initial_apic_id, int index_msb)
{
	return hard_smp_processor_id() >> index_msb;
}
180 | |||
/*
 * Flat-mode APIC driver: logical destinations with one LDR bit per CPU,
 * lowest-priority delivery.  Limited to systems with at most 8 CPUs.
 */
struct apic apic_flat =  {
	.name				= "flat",
	.probe				= NULL,
	.acpi_madt_oem_check		= flat_acpi_madt_oem_check,
	.apic_id_registered		= flat_apic_id_registered,

	.irq_delivery_mode		= dest_LowestPrio,
	.irq_dest_mode			= 1, /* logical */

	.target_cpus			= flat_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= NULL,
	.check_apicid_present		= NULL,

	.vector_allocation_domain	= flat_vector_allocation_domain,
	.init_apic_ldr			= flat_init_apic_ldr,

	/* 32-bit/subarch hooks not needed on 64-bit flat: */
	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.apicid_to_node			= NULL,
	.cpu_to_logical_apicid		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= flat_phys_pkg_id,
	.mps_oem_check			= NULL,

	/* APIC ID lives in bits 31:24 of the APIC_ID register: */
	.get_apic_id			= flat_get_apic_id,
	.set_apic_id			= set_apic_id,
	.apic_id_mask			= 0xFFu << 24,

	.cpu_mask_to_apicid		= flat_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= flat_cpu_mask_to_apicid_and,

	.send_IPI_mask			= flat_send_IPI_mask,
	.send_IPI_mask_allbutself	= flat_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= flat_send_IPI_allbutself,
	.send_IPI_all			= flat_send_IPI_all,
	.send_IPI_self			= apic_send_IPI_self,

	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= NULL,

	/* Plain memory-mapped xAPIC register accessors: */
	.read				= native_apic_mem_read,
	.write				= native_apic_mem_write,
	.icr_read			= native_apic_icr_read,
	.icr_write			= native_apic_icr_write,
	.wait_icr_idle			= native_apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_apic_wait_icr_idle,
};
238 | |||
239 | /* | ||
240 | * Physflat mode is used when there are more than 8 CPUs on a AMD system. | ||
241 | * We cannot use logical delivery in this case because the mask | ||
242 | * overflows, so use physical mode. | ||
243 | */ | ||
/*
 * MADT OEM check for physflat.  Returns 1 when the firmware demands
 * physical APIC mode; otherwise 0 (flat logical mode is fine).
 */
static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
#ifdef CONFIG_ACPI
	/*
	 * Quirk: some x86_64 machines can only use physical APIC mode
	 * regardless of how many processors are present (x86_64 ES7000
	 * is an example).
	 */
	if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID &&
	    (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
		/* Fix: terminate the log line with '\n' so the next
		 * printk is not glued onto this message in dmesg. */
		printk(KERN_DEBUG "system APIC only can use physical flat\n");
		return 1;
	}
#endif

	return 0;
}
261 | |||
/* Any online CPU may be targeted; delivery is per-CPU physical. */
static const struct cpumask *physflat_target_cpus(void)
{
	return cpu_online_mask;
}
266 | |||
/* Physical delivery targets exactly one CPU, so the allocation domain
 * for @cpu is the singleton mask containing only @cpu. */
static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_copy(retmask, cpumask_of(cpu));
}
272 | |||
/* Physical mode has no multi-CPU destination: send one IPI per CPU. */
static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
{
	default_send_IPI_mask_sequence_phys(cpumask, vector);
}
277 | |||
/* As physflat_send_IPI_mask, but the helper skips the calling CPU. */
static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
					      int vector)
{
	default_send_IPI_mask_allbutself_phys(cpumask, vector);
}
283 | |||
/* IPI every online CPU except self, one physical-mode IPI at a time. */
static void physflat_send_IPI_allbutself(int vector)
{
	default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
}
288 | |||
/* IPI every online CPU including self; no broadcast shortcut is used. */
static void physflat_send_IPI_all(int vector)
{
	physflat_send_IPI_mask(cpu_online_mask, vector);
}
293 | |||
294 | static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask) | ||
295 | { | ||
296 | int cpu; | ||
297 | |||
298 | /* | ||
299 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | ||
300 | * May as well be the first. | ||
301 | */ | ||
302 | cpu = cpumask_first(cpumask); | ||
303 | if ((unsigned)cpu < nr_cpu_ids) | ||
304 | return per_cpu(x86_cpu_to_apicid, cpu); | ||
305 | else | ||
306 | return BAD_APICID; | ||
307 | } | ||
308 | |||
309 | static unsigned int | ||
310 | physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, | ||
311 | const struct cpumask *andmask) | ||
312 | { | ||
313 | int cpu; | ||
314 | |||
315 | /* | ||
316 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | ||
317 | * May as well be the first. | ||
318 | */ | ||
319 | for_each_cpu_and(cpu, cpumask, andmask) { | ||
320 | if (cpumask_test_cpu(cpu, cpu_online_mask)) | ||
321 | break; | ||
322 | } | ||
323 | if (cpu < nr_cpu_ids) | ||
324 | return per_cpu(x86_cpu_to_apicid, cpu); | ||
325 | |||
326 | return BAD_APICID; | ||
327 | } | ||
328 | |||
/*
 * Physical-flat APIC driver: fixed delivery to physical APIC IDs.
 * Used when flat logical mode cannot address all CPUs (> 8 CPUs).
 */
struct apic apic_physflat =  {

	.name				= "physical flat",
	.probe				= NULL,
	.acpi_madt_oem_check		= physflat_acpi_madt_oem_check,
	.apic_id_registered		= flat_apic_id_registered,

	.irq_delivery_mode		= dest_Fixed,
	.irq_dest_mode			= 0, /* physical */

	.target_cpus			= physflat_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= 0,
	.check_apicid_used		= NULL,
	.check_apicid_present		= NULL,

	.vector_allocation_domain	= physflat_vector_allocation_domain,
	/* not needed, but shouldn't hurt: */
	.init_apic_ldr			= flat_init_apic_ldr,

	/* 32-bit/subarch hooks not needed on 64-bit physflat: */
	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.apicid_to_node			= NULL,
	.cpu_to_logical_apicid		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= flat_phys_pkg_id,
	.mps_oem_check			= NULL,

	/* APIC ID lives in bits 31:24 of the APIC_ID register: */
	.get_apic_id			= flat_get_apic_id,
	.set_apic_id			= set_apic_id,
	.apic_id_mask			= 0xFFu << 24,

	.cpu_mask_to_apicid		= physflat_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= physflat_cpu_mask_to_apicid_and,

	.send_IPI_mask			= physflat_send_IPI_mask,
	.send_IPI_mask_allbutself	= physflat_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= physflat_send_IPI_allbutself,
	.send_IPI_all			= physflat_send_IPI_all,
	.send_IPI_self			= apic_send_IPI_self,

	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= NULL,

	/* Plain memory-mapped xAPIC register accessors: */
	.read				= native_apic_mem_read,
	.write				= native_apic_mem_write,
	.icr_read			= native_apic_icr_read,
	.icr_write			= native_apic_icr_write,
	.wait_icr_idle			= native_apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_apic_wait_icr_idle,
};