author     Thomas Gleixner <tglx@linutronix.de>   2007-10-11 05:17:24 -0400
committer  Thomas Gleixner <tglx@linutronix.de>   2007-10-11 05:17:24 -0400
commit     250c22777fe1ccd7ac588579a6c16db4c0161cc5 (patch)
tree       55c317efb7d792ec6fdae1d1937c67a502c48dec /arch/x86/kernel/genapic_flat_64.c
parent     2db55d344e529492545cb3b755c7e9ba8e4fa94e (diff)
x86_64: move kernel
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/genapic_flat_64.c')
-rw-r--r--   arch/x86/kernel/genapic_flat_64.c   194
1 file changed, 194 insertions, 0 deletions
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
new file mode 100644
index 000000000000..ecb01eefdd27
--- /dev/null
+++ b/arch/x86/kernel/genapic_flat_64.c
@@ -0,0 +1,194 @@
/*
 * Copyright 2004 James Cleverdon, IBM.
 * Subject to the GNU Public License, v.2
 *
 * Flat APIC subarch code.
 *
 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
 * James Cleverdon.
 */
#include <linux/errno.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <asm/smp.h>
#include <asm/ipi.h>
#include <asm/genapic.h>

static cpumask_t flat_target_cpus(void)
{
	return cpu_online_map;
}

static cpumask_t flat_vector_allocation_domain(int cpu)
{
	/* Careful. Some cpus do not strictly honor the set of cpus
	 * specified in the interrupt destination when using lowest
	 * priority interrupt delivery mode.
	 *
	 * In particular there was a hyperthreading cpu observed to
	 * deliver interrupts to the wrong hyperthread when only one
	 * hyperthread was specified in the interrupt destination.
	 */
	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
	return domain;
}

/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LDR and TPR before enabling
 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116).  So here it goes...
 */
static void flat_init_apic_ldr(void)
{
	unsigned long val;
	unsigned long num, id;

	num = smp_processor_id();
	id = 1UL << num;
	x86_cpu_to_log_apicid[num] = id;
	apic_write(APIC_DFR, APIC_DFR_FLAT);
	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	val |= SET_APIC_LOGICAL_ID(id);
	apic_write(APIC_LDR, val);
}

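/*
 * Logical flat IPI: the cpumask's low word is used directly as the
 * logical destination, since flat_init_apic_ldr() gave CPU n the
 * logical ID (1 << n); every CPU whose bit is set receives the vector.
 */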
static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
{
	unsigned long mask = cpus_addr(cpumask)[0];
	unsigned long flags;

	local_irq_save(flags);
	__send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL);
	local_irq_restore(flags);
}

static void flat_send_IPI_allbutself(int vector)
{
#ifdef CONFIG_HOTPLUG_CPU
	int hotplug = 1;
#else
	int hotplug = 0;
#endif
	if (hotplug || vector == NMI_VECTOR) {
		cpumask_t allbutme = cpu_online_map;

		cpu_clear(smp_processor_id(), allbutme);

		if (!cpus_empty(allbutme))
			flat_send_IPI_mask(allbutme, vector);
	} else if (num_online_cpus() > 1) {
		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector, APIC_DEST_LOGICAL);
	}
}

static void flat_send_IPI_all(int vector)
{
	if (vector == NMI_VECTOR)
		flat_send_IPI_mask(cpu_online_map, vector);
	else
		__send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
}

static int flat_apic_id_registered(void)
{
	return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map);
}

static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask)
{
	return cpus_addr(cpumask)[0] & APIC_ALL_CPUS;
}

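/*
 * hard_smp_processor_id() is the CPU's physical APIC ID; shifting out
 * the low index_msb bits (the thread/core bits) leaves the package id.
 */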
static unsigned int phys_pkg_id(int index_msb)
{
	return hard_smp_processor_id() >> index_msb;
}

struct genapic apic_flat = {
	.name = "flat",
	.int_delivery_mode = dest_LowestPrio,
	.int_dest_mode = (APIC_DEST_LOGICAL != 0),
	.target_cpus = flat_target_cpus,
	.vector_allocation_domain = flat_vector_allocation_domain,
	.apic_id_registered = flat_apic_id_registered,
	.init_apic_ldr = flat_init_apic_ldr,
	.send_IPI_all = flat_send_IPI_all,
	.send_IPI_allbutself = flat_send_IPI_allbutself,
	.send_IPI_mask = flat_send_IPI_mask,
	.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
	.phys_pkg_id = phys_pkg_id,
};

/*
 * Physflat mode is used when there are more than 8 CPUs on an AMD system.
 * We cannot use logical delivery in this case because the mask
 * overflows, so use physical mode.
 */

static cpumask_t physflat_target_cpus(void)
{
	return cpu_online_map;
}

static cpumask_t physflat_vector_allocation_domain(int cpu)
{
	cpumask_t domain = CPU_MASK_NONE;
	cpu_set(cpu, domain);
	return domain;
}


static void physflat_send_IPI_mask(cpumask_t cpumask, int vector)
{
	send_IPI_mask_sequence(cpumask, vector);
}

static void physflat_send_IPI_allbutself(int vector)
{
	cpumask_t allbutme = cpu_online_map;

	cpu_clear(smp_processor_id(), allbutme);
	physflat_send_IPI_mask(allbutme, vector);
}

static void physflat_send_IPI_all(int vector)
{
	physflat_send_IPI_mask(cpu_online_map, vector);
}

static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	cpu = first_cpu(cpumask);
	if ((unsigned)cpu < NR_CPUS)
		return x86_cpu_to_apicid[cpu];
	else
		return BAD_APICID;
}

struct genapic apic_physflat = {
	.name = "physical flat",
	.int_delivery_mode = dest_Fixed,
	.int_dest_mode = (APIC_DEST_PHYSICAL != 0),
	.target_cpus = physflat_target_cpus,
	.vector_allocation_domain = physflat_vector_allocation_domain,
	.apic_id_registered = flat_apic_id_registered,
	.init_apic_ldr = flat_init_apic_ldr, /* not needed, but shouldn't hurt */
	.send_IPI_all = physflat_send_IPI_all,
	.send_IPI_allbutself = physflat_send_IPI_allbutself,
	.send_IPI_mask = physflat_send_IPI_mask,
	.cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
	.phys_pkg_id = phys_pkg_id,
};
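
For context on the physflat comment above: in flat logical mode each CPU owns one bit of the APIC's 8-bit logical destination field, so a destination mask is just the OR of 1 << cpu over the target CPUs and cannot describe more than eight CPUs. The listing below is a minimal standalone sketch of that limit, not part of the commit: flat_logical_dest() is a hypothetical helper invented for the sketch, and APIC_ALL_CPUS is redefined locally with the kernel's 0xFF value so it compiles on its own.

#include <stdio.h>

#define APIC_ALL_CPUS 0xFFu	/* 8-bit flat logical destination field */

/* Build a flat-mode logical destination from a list of CPU numbers. */
static unsigned int flat_logical_dest(const int *cpus, int n)
{
	unsigned int dest = 0;
	int i;

	for (i = 0; i < n; i++)
		dest |= 1u << cpus[i];	/* each CPU contributes one bit */

	return dest & APIC_ALL_CPUS;	/* only 8 bits fit in the field */
}

int main(void)
{
	int small[] = { 0, 1, 3 };	/* fits: bits 0, 1 and 3 -> 0x0b */
	int big[]   = { 0, 8 };		/* CPU 8 needs bit 8: it overflows */

	printf("dest {0,1,3} = 0x%02x\n", flat_logical_dest(small, 3));
	printf("dest {0,8}   = 0x%02x (CPU 8's bit is lost)\n",
	       flat_logical_dest(big, 2));
	return 0;
}

With nine or more CPUs the mask would silently drop targets, which is why apic_physflat switches to dest_Fixed physical delivery and sends one IPI per target CPU via send_IPI_mask_sequence().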