diff options
author | Ingo Molnar <mingo@elte.hu> | 2009-02-17 12:09:24 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-02-17 12:17:36 -0500 |
commit | f62bae5009c1ba596cd475cafbc83e0570a36e26 (patch) | |
tree | 0c5a3000c566f42a7cc25d6c03d69d20b9bd0166 /arch/x86/kernel/apic/x2apic_cluster.c | |
parent | be163a159b223e94b3180afdd47a8d468eb9a492 (diff) |
x86, apic: move APIC drivers to arch/x86/kernel/apic/*
arch/x86/kernel/ is getting a bit crowded, and the APIC
drivers are scattered into various different files.
Move them to arch/x86/kernel/apic/*, and also remove
the 'gen' prefix from those which had it.
Also move APIC related functionality: the IO-APIC driver,
the NMI and the IPI code.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/apic/x2apic_cluster.c')
-rw-r--r-- | arch/x86/kernel/apic/x2apic_cluster.c | 243 |
1 file changed, 243 insertions, 0 deletions
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c new file mode 100644 index 00000000000..4e39d9ad4d5 --- /dev/null +++ b/arch/x86/kernel/apic/x2apic_cluster.c | |||
@@ -0,0 +1,243 @@ | |||
1 | #include <linux/threads.h> | ||
2 | #include <linux/cpumask.h> | ||
3 | #include <linux/string.h> | ||
4 | #include <linux/kernel.h> | ||
5 | #include <linux/ctype.h> | ||
6 | #include <linux/init.h> | ||
7 | #include <linux/dmar.h> | ||
8 | |||
9 | #include <asm/smp.h> | ||
10 | #include <asm/apic.h> | ||
11 | #include <asm/ipi.h> | ||
12 | |||
13 | DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid); | ||
14 | |||
15 | static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | ||
16 | { | ||
17 | if (cpu_has_x2apic) | ||
18 | return 1; | ||
19 | |||
20 | return 0; | ||
21 | } | ||
22 | |||
/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */

static const struct cpumask *x2apic_target_cpus(void)
{
	/* Single-CPU mask containing only CPU 0 (the boot CPU). */
	return cpumask_of(0);
}
29 | |||
/*
 * For now each logical CPU is in its own vector allocation domain.
 */
static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	/*
	 * A single-bit mask for this CPU; equivalent to clearing retmask
	 * and then setting only 'cpu' in it.
	 */
	cpumask_copy(retmask, cpumask_of(cpu));
}
38 | |||
/*
 * Low-level IPI send: build the ICR command word for 'vector' with
 * destination mode 'dest' and write it, together with the target
 * 'apicid', via the x2APIC MSR interface.
 */
static void
__x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
{
	/* Compose the ICR value and issue the IPI in a single MSR write. */
	native_x2apic_icr_write(__prepare_ICR(0, vector, dest), apicid);
}
51 | |||
/*
 * For now, we send the IPIs one by one in the cpumask.
 * TBD: Based on the cpu mask, we can send the IPIs to the cluster group
 * at once. We have 16 cpus in a cluster. This will minimize IPI register
 * writes.
 */
static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	/* Disable interrupts so the sequence of ICR writes is not interleaved. */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		/* Target each CPU by its cached logical APIC ID. */
		__x2apic_send_IPI_dest(
			per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}
71 | |||
/*
 * Send 'vector' to every CPU in 'mask' except the calling CPU,
 * one ICR write per target CPU.
 */
static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned long this_cpu = smp_processor_id();
	unsigned long query_cpu;
	unsigned long flags;

	/* Disable interrupts so the sequence of ICR writes is not interleaved. */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		/* Skip ourselves: "all but self". */
		if (query_cpu == this_cpu)
			continue;
		__x2apic_send_IPI_dest(
			per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}
89 | |||
/*
 * Send 'vector' to every online CPU except the calling CPU,
 * one ICR write per target CPU.
 */
static void x2apic_send_IPI_allbutself(int vector)
{
	unsigned long this_cpu = smp_processor_id();
	unsigned long query_cpu;
	unsigned long flags;

	/* Disable interrupts so the sequence of ICR writes is not interleaved. */
	local_irq_save(flags);
	for_each_online_cpu(query_cpu) {
		/* Skip ourselves: "all but self". */
		if (query_cpu == this_cpu)
			continue;
		__x2apic_send_IPI_dest(
			per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}
106 | |||
/* Send 'vector' to every online CPU, including the calling CPU. */
static void x2apic_send_IPI_all(int vector)
{
	x2apic_send_IPI_mask(cpu_online_mask, vector);
}
111 | |||
static int x2apic_apic_id_registered(void)
{
	/* Always report the APIC as registered in x2APIC mode. */
	return 1;
}
116 | |||
117 | static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) | ||
118 | { | ||
119 | /* | ||
120 | * We're using fixed IRQ delivery, can only return one logical APIC ID. | ||
121 | * May as well be the first. | ||
122 | */ | ||
123 | int cpu = cpumask_first(cpumask); | ||
124 | |||
125 | if ((unsigned)cpu < nr_cpu_ids) | ||
126 | return per_cpu(x86_cpu_to_logical_apicid, cpu); | ||
127 | else | ||
128 | return BAD_APICID; | ||
129 | } | ||
130 | |||
/*
 * Like x2apic_cpu_mask_to_apicid(), but picks from the intersection of
 * two masks: returns the logical APIC ID of the first online CPU in
 * (cpumask & andmask), or BAD_APICID when the intersection holds no
 * online CPU.
 */
static unsigned int
x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			      const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one logical APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	}

	/* If the loop exhausted the masks, cpu is >= nr_cpu_ids. */
	if (cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_logical_apicid, cpu);

	return BAD_APICID;
}
151 | |||
/*
 * Extract the APIC ID from a raw APIC ID register value. In x2APIC
 * mode the full 32-bit value is the ID, so this is just a narrowing
 * conversion.
 */
static unsigned int x2apic_cluster_phys_get_apic_id(unsigned long x)
{
	return (unsigned int)x;
}
159 | |||
/*
 * Build a raw APIC ID register value from an APIC ID. In x2APIC mode
 * the ID occupies the full 32 bits, so this is just a widening
 * conversion.
 */
static unsigned long set_apic_id(unsigned int id)
{
	return id;
}
167 | |||
/*
 * Derive the physical package ID by shifting the current CPU's initial
 * APIC ID right by index_msb. The 'initial_apicid' parameter is unused;
 * the value is taken from current_cpu_data instead.
 */
static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb)
{
	return current_cpu_data.initial_apicid >> index_msb;
}
172 | |||
static void x2apic_send_IPI_self(int vector)
{
	/* Self-IPI via the dedicated x2APIC SELF_IPI register. */
	apic_write(APIC_SELF_IPI, vector);
}
177 | |||
static void init_x2apic_ldr(void)
{
	int cpu = smp_processor_id();

	/* Cache this CPU's logical APIC ID (LDR) for later IPI targeting. */
	per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR);
}
184 | |||
/*
 * APIC driver descriptor for cluster-mode x2APIC: logical destination
 * mode (irq_dest_mode = 1) with lowest-priority delivery, using the
 * MSR-based native_apic_msr_* / native_x2apic_* access functions.
 * NULL entries are callbacks this driver does not need.
 */
struct apic apic_x2apic_cluster = {

	.name				= "cluster x2apic",
	.probe				= NULL,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,
	.apic_id_registered		= x2apic_apic_id_registered,

	.irq_delivery_mode		= dest_LowestPrio,
	.irq_dest_mode			= 1, /* logical */

	.target_cpus			= x2apic_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= NULL,
	.check_apicid_present		= NULL,

	.vector_allocation_domain	= x2apic_vector_allocation_domain,
	.init_apic_ldr			= init_x2apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.apicid_to_node			= NULL,
	.cpu_to_logical_apicid		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= x2apic_cluster_phys_pkg_id,
	.mps_oem_check			= NULL,

	.get_apic_id			= x2apic_cluster_phys_get_apic_id,
	.set_apic_id			= set_apic_id,
	.apic_id_mask			= 0xFFFFFFFFu,

	.cpu_mask_to_apicid		= x2apic_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= x2apic_cpu_mask_to_apicid_and,

	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,

	.wakeup_cpu			= NULL,
	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= NULL,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};