Diffstat (limited to 'arch/x86/kernel/genapic_flat_64.c')
-rw-r--r--  arch/x86/kernel/genapic_flat_64.c | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++--------------------------
1 file changed, 50 insertions(+), 26 deletions(-)
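
This patch converts the flat and physflat genapic callbacks from taking and returning cpumask_t by value to working with const cpumask_t pointers, and wires up a new send_IPI_mask_allbutself hook in both genapic structures. A rough before/after sketch of the calling convention (illustrative only, not taken from the patch; with NR_CPUS=4096 a cpumask_t is 512 bytes, so passing it by value copies that much onto the stack on every call):

	/* before: the whole mask is copied by value */
	static cpumask_t flat_target_cpus(void);
	flat_send_IPI_mask(cpu_online_map, vector);

	/* after: only a pointer travels; the callee dereferences it */
	static const cpumask_t *flat_target_cpus(void);
	flat_send_IPI_mask(&cpu_online_map, vector);
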
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
index c0262791bda4..50eebd0328fe 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/genapic_flat_64.c
@@ -30,12 +30,12 @@ static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 1;
 }
 
-static cpumask_t flat_target_cpus(void)
+static const cpumask_t *flat_target_cpus(void)
 {
-	return cpu_online_map;
+	return &cpu_online_map;
 }
 
-static cpumask_t flat_vector_allocation_domain(int cpu)
+static void flat_vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
 	 * specified in the interrupt destination when using lowest
@@ -45,8 +45,7 @@ static cpumask_t flat_vector_allocation_domain(int cpu)
 	 * deliver interrupts to the wrong hyperthread when only one
 	 * hyperthread was specified in the interrupt desitination.
 	 */
-	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
-	return domain;
+	*retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS, } };
 }
 
 /*
@@ -69,9 +68,8 @@ static void flat_init_apic_ldr(void)
 	apic_write(APIC_LDR, val);
 }
 
-static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
+static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
 {
-	unsigned long mask = cpus_addr(cpumask)[0];
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -79,20 +77,40 @@ static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
 	local_irq_restore(flags);
 }
 
+static void flat_send_IPI_mask(const cpumask_t *cpumask, int vector)
+{
+	unsigned long mask = cpus_addr(*cpumask)[0];
+
+	_flat_send_IPI_mask(mask, vector);
+}
+
+static void flat_send_IPI_mask_allbutself(const cpumask_t *cpumask, int vector)
+{
+	unsigned long mask = cpus_addr(*cpumask)[0];
+	int cpu = smp_processor_id();
+
+	if (cpu < BITS_PER_LONG)
+		clear_bit(cpu, &mask);
+	_flat_send_IPI_mask(mask, vector);
+}
+
 static void flat_send_IPI_allbutself(int vector)
 {
+	int cpu = smp_processor_id();
 #ifdef CONFIG_HOTPLUG_CPU
 	int hotplug = 1;
 #else
 	int hotplug = 0;
 #endif
 	if (hotplug || vector == NMI_VECTOR) {
-		cpumask_t allbutme = cpu_online_map;
+		if (!cpus_equal(cpu_online_map, cpumask_of_cpu(cpu))) {
+			unsigned long mask = cpus_addr(cpu_online_map)[0];
 
-		cpu_clear(smp_processor_id(), allbutme);
+			if (cpu < BITS_PER_LONG)
+				clear_bit(cpu, &mask);
 
-		if (!cpus_empty(allbutme))
-			flat_send_IPI_mask(allbutme, vector);
+			_flat_send_IPI_mask(mask, vector);
+		}
 	} else if (num_online_cpus() > 1) {
 		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
 	}
@@ -101,7 +119,7 @@ static void flat_send_IPI_allbutself(int vector)
 static void flat_send_IPI_all(int vector)
 {
 	if (vector == NMI_VECTOR)
-		flat_send_IPI_mask(cpu_online_map, vector);
+		flat_send_IPI_mask(&cpu_online_map, vector);
 	else
 		__send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
 }
@@ -135,9 +153,9 @@ static int flat_apic_id_registered(void)
 	return physid_isset(read_xapic_id(), phys_cpu_present_map);
 }
 
-static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask)
+static unsigned int flat_cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
-	return cpus_addr(cpumask)[0] & APIC_ALL_CPUS;
+	return cpus_addr(*cpumask)[0] & APIC_ALL_CPUS;
 }
 
 static unsigned int phys_pkg_id(int index_msb)
@@ -157,6 +175,7 @@ struct genapic apic_flat = {
 	.send_IPI_all = flat_send_IPI_all,
 	.send_IPI_allbutself = flat_send_IPI_allbutself,
 	.send_IPI_mask = flat_send_IPI_mask,
+	.send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself,
 	.send_IPI_self = apic_send_IPI_self,
 	.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
 	.phys_pkg_id = phys_pkg_id,
@@ -188,35 +207,39 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 0;
 }
 
-static cpumask_t physflat_target_cpus(void)
+static const cpumask_t *physflat_target_cpus(void)
 {
-	return cpu_online_map;
+	return &cpu_online_map;
 }
 
-static cpumask_t physflat_vector_allocation_domain(int cpu)
+static void physflat_vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
-	return cpumask_of_cpu(cpu);
+	cpus_clear(*retmask);
+	cpu_set(cpu, *retmask);
 }
 
-static void physflat_send_IPI_mask(cpumask_t cpumask, int vector)
+static void physflat_send_IPI_mask(const cpumask_t *cpumask, int vector)
 {
 	send_IPI_mask_sequence(cpumask, vector);
 }
 
-static void physflat_send_IPI_allbutself(int vector)
+static void physflat_send_IPI_mask_allbutself(const cpumask_t *cpumask,
+					      int vector)
 {
-	cpumask_t allbutme = cpu_online_map;
+	send_IPI_mask_allbutself(cpumask, vector);
+}
 
-	cpu_clear(smp_processor_id(), allbutme);
-	physflat_send_IPI_mask(allbutme, vector);
+static void physflat_send_IPI_allbutself(int vector)
+{
+	send_IPI_mask_allbutself(&cpu_online_map, vector);
 }
 
 static void physflat_send_IPI_all(int vector)
 {
-	physflat_send_IPI_mask(cpu_online_map, vector);
+	physflat_send_IPI_mask(&cpu_online_map, vector);
 }
 
-static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
+static unsigned int physflat_cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int cpu;
 
@@ -224,7 +247,7 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 	 * May as well be the first.
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
 	if ((unsigned)cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_apicid, cpu);
 	else
@@ -243,6 +266,7 @@ struct genapic apic_physflat = {
 	.send_IPI_all = physflat_send_IPI_all,
 	.send_IPI_allbutself = physflat_send_IPI_allbutself,
 	.send_IPI_mask = physflat_send_IPI_mask,
+	.send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself,
 	.send_IPI_self = apic_send_IPI_self,
 	.cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
 	.phys_pkg_id = phys_pkg_id,
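
For orientation, a minimal caller-side sketch of the new conventions: vector_allocation_domain now fills a caller-provided mask instead of returning one by value, and the IPI helpers take pointers. This is hypothetical example code, not part of the patch; it assumes it lives in genapic_flat_64.c where the static helpers above are in scope:

	/* hypothetical example: exercise the new pointer-based callbacks */
	static void example_kick_domain(int cpu, int vector)
	{
		cpumask_t domain;

		/* callee fills the caller-provided mask (out-parameter style) */
		flat_vector_allocation_domain(cpu, &domain);

		/* send the IPI to every CPU in the mask except the caller */
		flat_send_IPI_mask_allbutself(&domain, vector);
	}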