diff options
author | Mike Travis <travis@sgi.com> | 2008-12-16 20:33:59 -0500 |
---|---|---|
committer | Mike Travis <travis@sgi.com> | 2008-12-16 20:40:57 -0500 |
commit | bcda016eddd7a8b374bb371473c821a91ff1d8cc (patch) | |
tree | 9335614036937765c385479d707ef7327fca7d67 /arch/x86/kernel/genapic_flat_64.c | |
parent | d7b381bb7b1ad69ff008ea063d26e988b686c8de (diff) |
x86: cosmetic changes to apic-related files.
This patch simply changes cpumask_t to struct cpumask and similar
trivial modernizations.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Diffstat (limited to 'arch/x86/kernel/genapic_flat_64.c')
-rw-r--r-- | arch/x86/kernel/genapic_flat_64.c | 50 |
1 file changed, 26 insertions, 24 deletions
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index c772bb10b173..7fa5f49c2dda 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c | |||
@@ -30,12 +30,12 @@ static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
30 | return 1; | 30 | return 1; |
31 | } | 31 | } |
32 | 32 | ||
33 | static const cpumask_t *flat_target_cpus(void) | 33 | static const struct cpumask *flat_target_cpus(void) |
34 | { | 34 | { |
35 | return &cpu_online_map; | 35 | return cpu_online_mask; |
36 | } | 36 | } |
37 | 37 | ||
38 | static void flat_vector_allocation_domain(int cpu, cpumask_t *retmask) | 38 | static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask) |
39 | { | 39 | { |
40 | /* Careful. Some cpus do not strictly honor the set of cpus | 40 | /* Careful. Some cpus do not strictly honor the set of cpus |
41 | * specified in the interrupt destination when using lowest | 41 | * specified in the interrupt destination when using lowest |
@@ -45,7 +45,8 @@ static void flat_vector_allocation_domain(int cpu, cpumask_t *retmask) | |||
45 | * deliver interrupts to the wrong hyperthread when only one | 45 | * deliver interrupts to the wrong hyperthread when only one |
46 | * hyperthread was specified in the interrupt destination. | 46 | * hyperthread was specified in the interrupt destination. |
47 | */ | 47 | */ |
48 | *retmask = (cpumask_t) { {[0] = APIC_ALL_CPUS, } }; | 48 | cpumask_clear(retmask); |
49 | cpumask_bits(retmask)[0] = APIC_ALL_CPUS; | ||
49 | } | 50 | } |
50 | 51 | ||
51 | /* | 52 | /* |
@@ -77,16 +78,17 @@ static inline void _flat_send_IPI_mask(unsigned long mask, int vector) | |||
77 | local_irq_restore(flags); | 78 | local_irq_restore(flags); |
78 | } | 79 | } |
79 | 80 | ||
80 | static void flat_send_IPI_mask(const cpumask_t *cpumask, int vector) | 81 | static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector) |
81 | { | 82 | { |
82 | unsigned long mask = cpus_addr(*cpumask)[0]; | 83 | unsigned long mask = cpumask_bits(cpumask)[0]; |
83 | 84 | ||
84 | _flat_send_IPI_mask(mask, vector); | 85 | _flat_send_IPI_mask(mask, vector); |
85 | } | 86 | } |
86 | 87 | ||
87 | static void flat_send_IPI_mask_allbutself(const cpumask_t *cpumask, int vector) | 88 | static void flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, |
89 | int vector) | ||
88 | { | 90 | { |
89 | unsigned long mask = cpus_addr(*cpumask)[0]; | 91 | unsigned long mask = cpumask_bits(cpumask)[0]; |
90 | int cpu = smp_processor_id(); | 92 | int cpu = smp_processor_id(); |
91 | 93 | ||
92 | if (cpu < BITS_PER_LONG) | 94 | if (cpu < BITS_PER_LONG) |
@@ -103,8 +105,8 @@ static void flat_send_IPI_allbutself(int vector) | |||
103 | int hotplug = 0; | 105 | int hotplug = 0; |
104 | #endif | 106 | #endif |
105 | if (hotplug || vector == NMI_VECTOR) { | 107 | if (hotplug || vector == NMI_VECTOR) { |
106 | if (!cpus_equal(cpu_online_map, cpumask_of_cpu(cpu))) { | 108 | if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) { |
107 | unsigned long mask = cpus_addr(cpu_online_map)[0]; | 109 | unsigned long mask = cpumask_bits(cpu_online_mask)[0]; |
108 | 110 | ||
109 | if (cpu < BITS_PER_LONG) | 111 | if (cpu < BITS_PER_LONG) |
110 | clear_bit(cpu, &mask); | 112 | clear_bit(cpu, &mask); |
@@ -119,7 +121,7 @@ static void flat_send_IPI_allbutself(int vector) | |||
119 | static void flat_send_IPI_all(int vector) | 121 | static void flat_send_IPI_all(int vector) |
120 | { | 122 | { |
121 | if (vector == NMI_VECTOR) | 123 | if (vector == NMI_VECTOR) |
122 | flat_send_IPI_mask(&cpu_online_map, vector); | 124 | flat_send_IPI_mask(cpu_online_mask, vector); |
123 | else | 125 | else |
124 | __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); | 126 | __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); |
125 | } | 127 | } |
@@ -153,9 +155,9 @@ static int flat_apic_id_registered(void) | |||
153 | return physid_isset(read_xapic_id(), phys_cpu_present_map); | 155 | return physid_isset(read_xapic_id(), phys_cpu_present_map); |
154 | } | 156 | } |
155 | 157 | ||
156 | static unsigned int flat_cpu_mask_to_apicid(const cpumask_t *cpumask) | 158 | static unsigned int flat_cpu_mask_to_apicid(const struct cpumask *cpumask) |
157 | { | 159 | { |
158 | return cpus_addr(*cpumask)[0] & APIC_ALL_CPUS; | 160 | return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS; |
159 | } | 161 | } |
160 | 162 | ||
161 | static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, | 163 | static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, |
@@ -217,23 +219,23 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
217 | return 0; | 219 | return 0; |
218 | } | 220 | } |
219 | 221 | ||
220 | static const cpumask_t *physflat_target_cpus(void) | 222 | static const struct cpumask *physflat_target_cpus(void) |
221 | { | 223 | { |
222 | return &cpu_online_map; | 224 | return cpu_online_mask; |
223 | } | 225 | } |
224 | 226 | ||
225 | static void physflat_vector_allocation_domain(int cpu, cpumask_t *retmask) | 227 | static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask) |
226 | { | 228 | { |
227 | cpus_clear(*retmask); | 229 | cpumask_clear(retmask); |
228 | cpu_set(cpu, *retmask); | 230 | cpumask_set_cpu(cpu, retmask); |
229 | } | 231 | } |
230 | 232 | ||
231 | static void physflat_send_IPI_mask(const cpumask_t *cpumask, int vector) | 233 | static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector) |
232 | { | 234 | { |
233 | send_IPI_mask_sequence(cpumask, vector); | 235 | send_IPI_mask_sequence(cpumask, vector); |
234 | } | 236 | } |
235 | 237 | ||
236 | static void physflat_send_IPI_mask_allbutself(const cpumask_t *cpumask, | 238 | static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask, |
237 | int vector) | 239 | int vector) |
238 | { | 240 | { |
239 | send_IPI_mask_allbutself(cpumask, vector); | 241 | send_IPI_mask_allbutself(cpumask, vector); |
@@ -241,15 +243,15 @@ static void physflat_send_IPI_mask_allbutself(const cpumask_t *cpumask, | |||
241 | 243 | ||
242 | static void physflat_send_IPI_allbutself(int vector) | 244 | static void physflat_send_IPI_allbutself(int vector) |
243 | { | 245 | { |
244 | send_IPI_mask_allbutself(&cpu_online_map, vector); | 246 | send_IPI_mask_allbutself(cpu_online_mask, vector); |
245 | } | 247 | } |
246 | 248 | ||
247 | static void physflat_send_IPI_all(int vector) | 249 | static void physflat_send_IPI_all(int vector) |
248 | { | 250 | { |
249 | physflat_send_IPI_mask(&cpu_online_map, vector); | 251 | physflat_send_IPI_mask(cpu_online_mask, vector); |
250 | } | 252 | } |
251 | 253 | ||
252 | static unsigned int physflat_cpu_mask_to_apicid(const cpumask_t *cpumask) | 254 | static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask) |
253 | { | 255 | { |
254 | int cpu; | 256 | int cpu; |
255 | 257 | ||
@@ -257,7 +259,7 @@ static unsigned int physflat_cpu_mask_to_apicid(const cpumask_t *cpumask) | |||
257 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | 259 | * We're using fixed IRQ delivery, can only return one phys APIC ID. |
258 | * May as well be the first. | 260 | * May as well be the first. |
259 | */ | 261 | */ |
260 | cpu = first_cpu(*cpumask); | 262 | cpu = cpumask_first(cpumask); |
261 | if ((unsigned)cpu < nr_cpu_ids) | 263 | if ((unsigned)cpu < nr_cpu_ids) |
262 | return per_cpu(x86_cpu_to_apicid, cpu); | 264 | return per_cpu(x86_cpu_to_apicid, cpu); |
263 | else | 265 | else |