author    Mike Travis <travis@sgi.com>  2008-12-16 20:33:59 -0500
committer Mike Travis <travis@sgi.com>  2008-12-16 20:40:57 -0500
commit    bcda016eddd7a8b374bb371473c821a91ff1d8cc
tree      9335614036937765c385479d707ef7327fca7d67 /arch/x86/kernel
parent    d7b381bb7b1ad69ff008ea063d26e988b686c8de
x86: cosmetic changes to apic-related files.
This patch simply changes cpumask_t to struct cpumask and similar trivial
modernizations.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
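The conversion follows one recurring pattern: code that manipulated a cpumask_t by value now works through a (possibly const) struct cpumask pointer, using the pointer-based accessors. A minimal illustrative sketch of the before/after idioms (not part of the patch; the function names here are made up for illustration):

static void build_domain_old(int cpu, cpumask_t *retmask)
{
	/* old API: macros that dereference the cpumask_t */
	cpus_clear(*retmask);
	cpu_set(cpu, *retmask);
}

static void build_domain_new(int cpu, struct cpumask *retmask)
{
	/* new API: functions that take the struct cpumask pointer */
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);
}

The same mapping covers the other call sites touched below: &cpu_online_map becomes cpu_online_mask, &cpumask_of_cpu(cpu) becomes cpumask_of(cpu), first_cpu(*mask) becomes cpumask_first(mask), and for_each_cpu_mask_nr(cpu, *mask) becomes for_each_cpu(cpu, mask).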
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/genapic_flat_64.c    |  50
-rw-r--r--  arch/x86/kernel/genx2apic_cluster.c  |  25
-rw-r--r--  arch/x86/kernel/genx2apic_phys.c     |  25
-rw-r--r--  arch/x86/kernel/genx2apic_uv_x.c     |  24
-rw-r--r--  arch/x86/kernel/ipi.c                |  14
-rw-r--r--  arch/x86/kernel/smp.c                |   6
6 files changed, 74 insertions(+), 70 deletions(-)
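One detail worth noting before the per-file diffs: the flat APIC and IPI paths only ever look at the first word of the bitmap, which is why cpus_addr(*mask)[0] becomes cpumask_bits(mask)[0]. A small sketch of how the two accessors relate (illustrative only, not part of the patch):

/* Illustrative only: both accessors expose the bitmap's unsigned long array;
 * word 0 covers the first BITS_PER_LONG cpus, which is all flat mode handles.
 */
static unsigned long first_word_old(cpumask_t mask)
{
	return cpus_addr(mask)[0];	/* old accessor, cpumask_t by name */
}

static unsigned long first_word_new(const struct cpumask *mask)
{
	return cpumask_bits(mask)[0];	/* new accessor, via the pointer */
}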
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
index c772bb10b173..7fa5f49c2dda 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/genapic_flat_64.c
@@ -30,12 +30,12 @@ static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 1;
 }
 
-static const cpumask_t *flat_target_cpus(void)
+static const struct cpumask *flat_target_cpus(void)
 {
-	return &cpu_online_map;
+	return cpu_online_mask;
 }
 
-static void flat_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
 	 * specified in the interrupt destination when using lowest
@@ -45,7 +45,8 @@ static void flat_vector_allocation_domain(int cpu, cpumask_t *retmask)
 	 * deliver interrupts to the wrong hyperthread when only one
 	 * hyperthread was specified in the interrupt desitination.
 	 */
-	*retmask = (cpumask_t) { {[0] = APIC_ALL_CPUS, } };
+	cpumask_clear(retmask);
+	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
 }
 
 /*
@@ -77,16 +78,17 @@ static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
 	local_irq_restore(flags);
 }
 
-static void flat_send_IPI_mask(const cpumask_t *cpumask, int vector)
+static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
 {
-	unsigned long mask = cpus_addr(*cpumask)[0];
+	unsigned long mask = cpumask_bits(cpumask)[0];
 
 	_flat_send_IPI_mask(mask, vector);
 }
 
-static void flat_send_IPI_mask_allbutself(const cpumask_t *cpumask, int vector)
+static void flat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
+					  int vector)
 {
-	unsigned long mask = cpus_addr(*cpumask)[0];
+	unsigned long mask = cpumask_bits(cpumask)[0];
 	int cpu = smp_processor_id();
 
 	if (cpu < BITS_PER_LONG)
@@ -103,8 +105,8 @@ static void flat_send_IPI_allbutself(int vector)
 	int hotplug = 0;
 #endif
 	if (hotplug || vector == NMI_VECTOR) {
-		if (!cpus_equal(cpu_online_map, cpumask_of_cpu(cpu))) {
-			unsigned long mask = cpus_addr(cpu_online_map)[0];
+		if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) {
+			unsigned long mask = cpumask_bits(cpu_online_mask)[0];
 
 			if (cpu < BITS_PER_LONG)
 				clear_bit(cpu, &mask);
@@ -119,7 +121,7 @@ static void flat_send_IPI_allbutself(int vector)
 static void flat_send_IPI_all(int vector)
 {
 	if (vector == NMI_VECTOR)
-		flat_send_IPI_mask(&cpu_online_map, vector);
+		flat_send_IPI_mask(cpu_online_mask, vector);
 	else
 		__send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
 }
@@ -153,9 +155,9 @@ static int flat_apic_id_registered(void)
 	return physid_isset(read_xapic_id(), phys_cpu_present_map);
 }
 
-static unsigned int flat_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int flat_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-	return cpus_addr(*cpumask)[0] & APIC_ALL_CPUS;
+	return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
 }
 
 static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
@@ -217,23 +219,23 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 0;
 }
 
-static const cpumask_t *physflat_target_cpus(void)
+static const struct cpumask *physflat_target_cpus(void)
 {
-	return &cpu_online_map;
+	return cpu_online_mask;
 }
 
-static void physflat_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-	cpus_clear(*retmask);
-	cpu_set(cpu, *retmask);
+	cpumask_clear(retmask);
+	cpumask_set_cpu(cpu, retmask);
 }
 
-static void physflat_send_IPI_mask(const cpumask_t *cpumask, int vector)
+static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
 {
 	send_IPI_mask_sequence(cpumask, vector);
 }
 
-static void physflat_send_IPI_mask_allbutself(const cpumask_t *cpumask,
+static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
 					      int vector)
 {
 	send_IPI_mask_allbutself(cpumask, vector);
@@ -241,15 +243,15 @@ static void physflat_send_IPI_mask_allbutself(const cpumask_t *cpumask,
 
 static void physflat_send_IPI_allbutself(int vector)
 {
-	send_IPI_mask_allbutself(&cpu_online_map, vector);
+	send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static void physflat_send_IPI_all(int vector)
 {
-	physflat_send_IPI_mask(&cpu_online_map, vector);
+	physflat_send_IPI_mask(cpu_online_mask, vector);
 }
 
-static unsigned int physflat_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	int cpu;
 
@@ -257,7 +259,7 @@ static unsigned int physflat_cpu_mask_to_apicid(const cpumask_t *cpumask)
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 	 * May as well be the first.
 	 */
-	cpu = first_cpu(*cpumask);
+	cpu = cpumask_first(cpumask);
 	if ((unsigned)cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_apicid, cpu);
 	else
diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c
index e7d16f53b9cd..4716a0c9f936 100644
--- a/arch/x86/kernel/genx2apic_cluster.c
+++ b/arch/x86/kernel/genx2apic_cluster.c
@@ -22,18 +22,18 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 
 /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
 
-static const cpumask_t *x2apic_target_cpus(void)
+static const struct cpumask *x2apic_target_cpus(void)
 {
-	return &cpumask_of_cpu(0);
+	return cpumask_of(0);
 }
 
 /*
  * for now each logical cpu is in its own vector allocation domain.
  */
-static void x2apic_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-	cpus_clear(*retmask);
-	cpu_set(cpu, *retmask);
+	cpumask_clear(retmask);
+	cpumask_set_cpu(cpu, retmask);
 }
 
 static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
@@ -55,27 +55,28 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
  * at once. We have 16 cpu's in a cluster. This will minimize IPI register
  * writes.
  */
-static void x2apic_send_IPI_mask(const cpumask_t *mask, int vector)
+static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
 
 	local_irq_save(flags);
-	for_each_cpu_mask_nr(query_cpu, *mask)
+	for_each_cpu(query_cpu, mask)
 		__x2apic_send_IPI_dest(
 			per_cpu(x86_cpu_to_logical_apicid, query_cpu),
 			vector, APIC_DEST_LOGICAL);
 	local_irq_restore(flags);
 }
 
-static void x2apic_send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
+static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
+					    int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
 	unsigned long this_cpu = smp_processor_id();
 
 	local_irq_save(flags);
-	for_each_cpu_mask_nr(query_cpu, *mask)
+	for_each_cpu(query_cpu, mask)
 		if (query_cpu != this_cpu)
 			__x2apic_send_IPI_dest(
 				per_cpu(x86_cpu_to_logical_apicid, query_cpu),
@@ -100,7 +101,7 @@ static void x2apic_send_IPI_allbutself(int vector)
 
 static void x2apic_send_IPI_all(int vector)
 {
-	x2apic_send_IPI_mask(&cpu_online_map, vector);
+	x2apic_send_IPI_mask(cpu_online_mask, vector);
 }
 
 static int x2apic_apic_id_registered(void)
@@ -108,7 +109,7 @@ static int x2apic_apic_id_registered(void)
 	return 1;
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	int cpu;
 
@@ -116,7 +117,7 @@ static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 	 * May as well be the first.
 	 */
-	cpu = first_cpu(*cpumask);
+	cpu = cpumask_first(cpumask);
 	if ((unsigned)cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_logical_apicid, cpu);
 	else
diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c
index 9d0386c7e798..b255507884f2 100644
--- a/arch/x86/kernel/genx2apic_phys.c
+++ b/arch/x86/kernel/genx2apic_phys.c
@@ -29,15 +29,15 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 
 /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
 
-static const cpumask_t *x2apic_target_cpus(void)
+static const struct cpumask *x2apic_target_cpus(void)
 {
-	return &cpumask_of_cpu(0);
+	return cpumask_of(0);
 }
 
-static void x2apic_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-	cpus_clear(*retmask);
-	cpu_set(cpu, *retmask);
+	cpumask_clear(retmask);
+	cpumask_set_cpu(cpu, retmask);
 }
 
 static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
@@ -53,27 +53,28 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
 	x2apic_icr_write(cfg, apicid);
 }
 
-static void x2apic_send_IPI_mask(const cpumask_t *mask, int vector)
+static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
 
 	local_irq_save(flags);
-	for_each_cpu_mask_nr(query_cpu, *mask) {
+	for_each_cpu(query_cpu, mask) {
 		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
 				       vector, APIC_DEST_PHYSICAL);
 	}
 	local_irq_restore(flags);
 }
 
-static void x2apic_send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
+static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
+					    int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
 	unsigned long this_cpu = smp_processor_id();
 
 	local_irq_save(flags);
-	for_each_cpu_mask_nr(query_cpu, *mask) {
+	for_each_cpu(query_cpu, mask) {
 		if (query_cpu != this_cpu)
 			__x2apic_send_IPI_dest(
 				per_cpu(x86_cpu_to_apicid, query_cpu),
@@ -99,7 +100,7 @@ static void x2apic_send_IPI_allbutself(int vector)
 
 static void x2apic_send_IPI_all(int vector)
 {
-	x2apic_send_IPI_mask(&cpu_online_map, vector);
+	x2apic_send_IPI_mask(cpu_online_mask, vector);
 }
 
 static int x2apic_apic_id_registered(void)
@@ -107,7 +108,7 @@ static int x2apic_apic_id_registered(void)
 	return 1;
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	int cpu;
 
@@ -115,7 +116,7 @@ static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 	 * May as well be the first.
 	 */
-	cpu = first_cpu(*cpumask);
+	cpu = cpumask_first(cpumask);
 	if ((unsigned)cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_apicid, cpu);
 	else
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index 22596ec94c82..3984682cd849 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -75,15 +75,15 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 
 /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
 
-static const cpumask_t *uv_target_cpus(void)
+static const struct cpumask *uv_target_cpus(void)
 {
-	return &cpumask_of_cpu(0);
+	return cpumask_of(0);
 }
 
-static void uv_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-	cpus_clear(*retmask);
-	cpu_set(cpu, *retmask);
+	cpumask_clear(retmask);
+	cpumask_set_cpu(cpu, retmask);
 }
 
 int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
@@ -122,20 +122,20 @@ static void uv_send_IPI_one(int cpu, int vector)
 	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 }
 
-static void uv_send_IPI_mask(const cpumask_t *mask, int vector)
+static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	unsigned int cpu;
 
-	for_each_cpu_mask_nr(cpu, *mask)
+	for_each_cpu(cpu, mask)
 		uv_send_IPI_one(cpu, vector);
 }
 
-static void uv_send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
+static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
 {
 	unsigned int cpu;
 	unsigned int this_cpu = smp_processor_id();
 
-	for_each_cpu_mask_nr(cpu, *mask)
+	for_each_cpu(cpu, mask)
 		if (cpu != this_cpu)
 			uv_send_IPI_one(cpu, vector);
 }
@@ -152,7 +152,7 @@ static void uv_send_IPI_allbutself(int vector)
 
 static void uv_send_IPI_all(int vector)
 {
-	uv_send_IPI_mask(&cpu_online_map, vector);
+	uv_send_IPI_mask(cpu_online_mask, vector);
 }
 
 static int uv_apic_id_registered(void)
@@ -164,7 +164,7 @@ static void uv_init_apic_ldr(void)
 {
 }
 
-static unsigned int uv_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	int cpu;
 
@@ -172,7 +172,7 @@ static unsigned int uv_cpu_mask_to_apicid(const cpumask_t *cpumask)
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 	 * May as well be the first.
 	 */
-	cpu = first_cpu(*cpumask);
+	cpu = cpumask_first(cpumask);
 	if ((unsigned)cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_apicid, cpu);
 	else
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c
index 86aa50fc65a1..285bbf8831fa 100644
--- a/arch/x86/kernel/ipi.c
+++ b/arch/x86/kernel/ipi.c
@@ -116,18 +116,18 @@ static inline void __send_IPI_dest_field(unsigned long mask, int vector)
 /*
  * This is only used on smaller machines.
  */
-void send_IPI_mask_bitmask(const cpumask_t *cpumask, int vector)
+void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector)
 {
-	unsigned long mask = cpus_addr(*cpumask)[0];
+	unsigned long mask = cpumask_bits(cpumask)[0];
 	unsigned long flags;
 
 	local_irq_save(flags);
-	WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
+	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
 	__send_IPI_dest_field(mask, vector);
 	local_irq_restore(flags);
 }
 
-void send_IPI_mask_sequence(const cpumask_t *mask, int vector)
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector)
 {
 	unsigned long flags;
 	unsigned int query_cpu;
@@ -139,12 +139,12 @@ void send_IPI_mask_sequence(const cpumask_t *mask, int vector)
 	 */
 
 	local_irq_save(flags);
-	for_each_cpu_mask_nr(query_cpu, *mask)
+	for_each_cpu(query_cpu, mask)
 		__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector);
 	local_irq_restore(flags);
 }
 
-void send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
 {
 	unsigned long flags;
 	unsigned int query_cpu;
@@ -153,7 +153,7 @@ void send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
 	/* See Hack comment above */
 
 	local_irq_save(flags);
-	for_each_cpu_mask_nr(query_cpu, *mask)
+	for_each_cpu(query_cpu, mask)
 		if (query_cpu != this_cpu)
 			__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
 				vector);
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 341df946f9a9..49ed667b06f3 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -118,15 +118,15 @@ static void native_smp_send_reschedule(int cpu)
 		WARN_ON(1);
 		return;
 	}
-	send_IPI_mask(&cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
+	send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
 }
 
 void native_send_call_func_single_ipi(int cpu)
 {
-	send_IPI_mask(&cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
+	send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
 }
 
-void native_send_call_func_ipi(const cpumask_t *mask)
+void native_send_call_func_ipi(const struct cpumask *mask)
 {
 	cpumask_t allbutself;
 