author     Mike Travis <travis@sgi.com>   2008-12-16 20:33:59 -0500
committer  Mike Travis <travis@sgi.com>   2008-12-16 20:40:57 -0500
commit     bcda016eddd7a8b374bb371473c821a91ff1d8cc (patch)
tree       9335614036937765c385479d707ef7327fca7d67
parent     d7b381bb7b1ad69ff008ea063d26e988b686c8de (diff)
x86: cosmetic changes to apic-related files.
This patch simply changes cpumask_t to struct cpumask and similar
trivial modernizations.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
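For readers new to the API, here is a minimal standalone sketch of the idiom
this patch converges on. Everything below (struct cpumask, cpumask_clear(),
cpumask_set_cpu(), cpumask_test_cpu(), send_IPI_mask()) is a simplified
stand-in for the kernel's helpers, sized down so it compiles as ordinary
userspace C; it is not the kernel implementation. The point of the conversion
is that the new helpers take const pointers, so callers no longer copy an
NR_CPUS-bit cpumask_t by value.

/*
 * Minimal standalone sketch (plain userspace C, NOT kernel code) of the
 * pointer-based cpumask idiom this patch converts to.
 */
#include <stdio.h>
#include <string.h>

#define NR_CPUS 64
#define BITS_PER_LONG (8 * sizeof(unsigned long))

struct cpumask {
	unsigned long bits[NR_CPUS / BITS_PER_LONG];
};

static void cpumask_clear(struct cpumask *m)
{
	memset(m->bits, 0, sizeof(m->bits));
}

static void cpumask_set_cpu(int cpu, struct cpumask *m)
{
	m->bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
}

static int cpumask_test_cpu(int cpu, const struct cpumask *m)
{
	return (m->bits[cpu / BITS_PER_LONG] >> (cpu % BITS_PER_LONG)) & 1;
}

/* const pointer in, nothing copied: the shape the patch converts to */
static void send_IPI_mask(const struct cpumask *mask, int vector)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpumask_test_cpu(cpu, mask))
			printf("IPI vector 0x%x -> cpu %d\n", vector, cpu);
}

int main(void)
{
	struct cpumask online;

	cpumask_clear(&online);
	cpumask_set_cpu(0, &online);
	cpumask_set_cpu(2, &online);
	send_IPI_mask(&online, 0xfd);	/* no by-value cpumask_t copy */
	return 0;
}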
-rw-r--r--  arch/x86/include/asm/bigsmp/ipi.h              14
-rw-r--r--  arch/x86/include/asm/es7000/ipi.h              13
-rw-r--r--  arch/x86/include/asm/genapic_32.h              11
-rw-r--r--  arch/x86/include/asm/genapic_64.h              11
-rw-r--r--  arch/x86/include/asm/ipi.h                     10
-rw-r--r--  arch/x86/include/asm/mach-default/mach_apic.h  12
-rw-r--r--  arch/x86/include/asm/mach-default/mach_ipi.h   12
-rw-r--r--  arch/x86/include/asm/numaq/ipi.h               14
-rw-r--r--  arch/x86/include/asm/smp.h                      4
-rw-r--r--  arch/x86/kernel/genapic_flat_64.c              50
-rw-r--r--  arch/x86/kernel/genx2apic_cluster.c            25
-rw-r--r--  arch/x86/kernel/genx2apic_phys.c               25
-rw-r--r--  arch/x86/kernel/genx2apic_uv_x.c               24
-rw-r--r--  arch/x86/kernel/ipi.c                          14
-rw-r--r--  arch/x86/kernel/smp.c                           6
-rw-r--r--  arch/x86/xen/smp.c                             11
16 files changed, 127 insertions, 129 deletions
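A pattern repeated in the bigsmp, es7000, and numaq headers in the diff below:
the open-coded "copy the online map, clear self, test for emptiness, send"
sequence collapses into a single call to an allbutself helper. As a hedged
sketch reusing the stand-ins above (again, not the kernel's real code; the
emptiness check is omitted for brevity):

/*
 * Continuation of the sketch above: old vs. new shape of the
 * send_IPI_allbutself() bodies converted by this patch.
 */
static int this_cpu_stub = 0;	/* stand-in for smp_processor_id() */

/* old shape: whole-mask copy just to drop one bit */
static void send_IPI_allbutself_old(const struct cpumask *online, int vector)
{
	struct cpumask mask = *online;		/* by-value copy */

	mask.bits[this_cpu_stub / BITS_PER_LONG] &=
		~(1UL << (this_cpu_stub % BITS_PER_LONG));
	send_IPI_mask(&mask, vector);
}

/* new shape: no copy; the helper skips the current CPU while iterating */
static void send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu != this_cpu_stub && cpumask_test_cpu(cpu, mask))
			printf("IPI vector 0x%x -> cpu %d\n", vector, cpu);
}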
diff --git a/arch/x86/include/asm/bigsmp/ipi.h b/arch/x86/include/asm/bigsmp/ipi.h
index 63553e9f22b2..27fcd01b3ae6 100644
--- a/arch/x86/include/asm/bigsmp/ipi.h
+++ b/arch/x86/include/asm/bigsmp/ipi.h
@@ -1,26 +1,22 @@
 #ifndef __ASM_MACH_IPI_H
 #define __ASM_MACH_IPI_H
 
-void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
-void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 
-static inline void send_IPI_mask(const cpumask_t *mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-
-	if (!cpus_empty(mask))
-		send_IPI_mask(&mask, vector);
+	send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(&cpu_online_map, vector);
+	send_IPI_mask(cpu_online_mask, vector);
 }
 
 #endif /* __ASM_MACH_IPI_H */
diff --git a/arch/x86/include/asm/es7000/ipi.h b/arch/x86/include/asm/es7000/ipi.h
index 1a8507265f91..7e8ed24d4b8a 100644
--- a/arch/x86/include/asm/es7000/ipi.h
+++ b/arch/x86/include/asm/es7000/ipi.h
@@ -1,25 +1,22 @@
 #ifndef __ASM_ES7000_IPI_H
 #define __ASM_ES7000_IPI_H
 
-void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
-void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 
-static inline void send_IPI_mask(const cpumask_t *mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-	if (!cpus_empty(mask))
-		send_IPI_mask(&mask, vector);
+	send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(&cpu_online_map, vector);
+	send_IPI_mask(cpu_online_mask, vector);
 }
 
 #endif /* __ASM_ES7000_IPI_H */
diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h
index eed6e305291f..746f37a7963a 100644
--- a/arch/x86/include/asm/genapic_32.h
+++ b/arch/x86/include/asm/genapic_32.h
@@ -24,7 +24,7 @@ struct genapic {
 	int (*probe)(void);
 
 	int (*apic_id_registered)(void);
-	const cpumask_t *(*target_cpus)(void);
+	const struct cpumask *(*target_cpus)(void);
 	int int_delivery_mode;
 	int int_dest_mode;
 	int ESR_DISABLE;
@@ -57,15 +57,16 @@ struct genapic {
 
 	unsigned (*get_apic_id)(unsigned long x);
 	unsigned long apic_id_mask;
-	unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask);
+	unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
 	unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
 					       const struct cpumask *andmask);
-	void (*vector_allocation_domain)(int cpu, cpumask_t *retmask);
+	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
 
 #ifdef CONFIG_SMP
 	/* ipi */
-	void (*send_IPI_mask)(const cpumask_t *mask, int vector);
-	void (*send_IPI_mask_allbutself)(const cpumask_t *mask, int vector);
+	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+	void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+					 int vector);
 	void (*send_IPI_allbutself)(int vector);
 	void (*send_IPI_all)(int vector);
 #endif
diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h
index 244b71729ecb..adf32fb56aa6 100644
--- a/arch/x86/include/asm/genapic_64.h
+++ b/arch/x86/include/asm/genapic_64.h
@@ -20,17 +20,18 @@ struct genapic {
 	u32 int_delivery_mode;
 	u32 int_dest_mode;
 	int (*apic_id_registered)(void);
-	const cpumask_t *(*target_cpus)(void);
-	void (*vector_allocation_domain)(int cpu, cpumask_t *retmask);
+	const struct cpumask *(*target_cpus)(void);
+	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
 	void (*init_apic_ldr)(void);
 	/* ipi */
-	void (*send_IPI_mask)(const cpumask_t *mask, int vector);
-	void (*send_IPI_mask_allbutself)(const cpumask_t *mask, int vector);
+	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+	void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+					 int vector);
 	void (*send_IPI_allbutself)(int vector);
 	void (*send_IPI_all)(int vector);
 	void (*send_IPI_self)(int vector);
 	/* */
-	unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask);
+	unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
 	unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
 					       const struct cpumask *andmask);
 	unsigned int (*phys_pkg_id)(int index_msb);
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index 24b6e613edfa..c745a306f7d3 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -117,7 +117,8 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
 	native_apic_mem_write(APIC_ICR, cfg);
 }
 
-static inline void send_IPI_mask_sequence(const cpumask_t *mask, int vector)
+static inline void send_IPI_mask_sequence(const struct cpumask *mask,
+					  int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
@@ -128,14 +129,15 @@ static inline void send_IPI_mask_sequence(const cpumask_t *mask, int vector)
 	 * - mbligh
 	 */
 	local_irq_save(flags);
-	for_each_cpu_mask_nr(query_cpu, *mask) {
+	for_each_cpu(query_cpu, mask) {
 		__send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
 				      vector, APIC_DEST_PHYSICAL);
 	}
 	local_irq_restore(flags);
 }
 
-static inline void send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
+static inline void send_IPI_mask_allbutself(const struct cpumask *mask,
+					    int vector)
 {
 	unsigned long flags;
 	unsigned int query_cpu;
@@ -144,7 +146,7 @@ static inline void send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
 	/* See Hack comment above */
 
 	local_irq_save(flags);
-	for_each_cpu_mask_nr(query_cpu, *mask)
+	for_each_cpu(query_cpu, mask)
 		if (query_cpu != this_cpu)
 			__send_IPI_dest_field(
 				per_cpu(x86_cpu_to_apicid, query_cpu),
diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h
index df8e024c43c5..8863d978cb96 100644
--- a/arch/x86/include/asm/mach-default/mach_apic.h
+++ b/arch/x86/include/asm/mach-default/mach_apic.h
@@ -8,12 +8,12 @@
 
 #define APIC_DFR_VALUE	(APIC_DFR_FLAT)
 
-static inline const cpumask_t *target_cpus(void)
+static inline const struct cpumask *target_cpus(void)
 {
 #ifdef CONFIG_SMP
-	return &cpu_online_map;
+	return cpu_online_mask;
 #else
-	return &cpumask_of_cpu(0);
+	return cpumask_of(0);
 #endif
 }
@@ -62,9 +62,9 @@ static inline int apic_id_registered(void)
 	return physid_isset(read_apic_id(), phys_cpu_present_map);
 }
 
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
+static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-	return cpus_addr(*cpumask)[0];
+	return cpumask_bits(cpumask)[0];
 }
 
 static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
@@ -98,7 +98,7 @@ static inline int apicid_to_node(int logical_apicid)
 #endif
 }
 
-static inline void vector_allocation_domain(int cpu, cpumask_t *retmask)
+static inline void vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
 	 * specified in the interrupt destination when using lowest
diff --git a/arch/x86/include/asm/mach-default/mach_ipi.h b/arch/x86/include/asm/mach-default/mach_ipi.h
index 9353ab854a10..191312d155da 100644
--- a/arch/x86/include/asm/mach-default/mach_ipi.h
+++ b/arch/x86/include/asm/mach-default/mach_ipi.h
@@ -4,8 +4,8 @@
 /* Avoid include hell */
 #define NMI_VECTOR 0x02
 
-void send_IPI_mask_bitmask(const cpumask_t *mask, int vector);
-void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
+void send_IPI_mask_bitmask(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 void __send_IPI_shortcut(unsigned int shortcut, int vector);
 
 extern int no_broadcast;
@@ -15,17 +15,17 @@ extern int no_broadcast;
 #define send_IPI_mask (genapic->send_IPI_mask)
 #define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself)
 #else
-static inline void send_IPI_mask(const cpumask_t *mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_bitmask(mask, vector);
 }
-void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 #endif
 
 static inline void __local_send_IPI_allbutself(int vector)
 {
 	if (no_broadcast || vector == NMI_VECTOR)
-		send_IPI_mask_allbutself(&cpu_online_map, vector);
+		send_IPI_mask_allbutself(cpu_online_mask, vector);
 	else
 		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
 }
@@ -33,7 +33,7 @@ static inline void __local_send_IPI_allbutself(int vector)
 static inline void __local_send_IPI_all(int vector)
 {
 	if (no_broadcast || vector == NMI_VECTOR)
-		send_IPI_mask(&cpu_online_map, vector);
+		send_IPI_mask(cpu_online_mask, vector);
 	else
 		__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
 }
diff --git a/arch/x86/include/asm/numaq/ipi.h b/arch/x86/include/asm/numaq/ipi.h
index c734d7acc430..a8374c652778 100644
--- a/arch/x86/include/asm/numaq/ipi.h
+++ b/arch/x86/include/asm/numaq/ipi.h
@@ -1,26 +1,22 @@
 #ifndef __ASM_NUMAQ_IPI_H
 #define __ASM_NUMAQ_IPI_H
 
-void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
-void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 
-static inline void send_IPI_mask(const cpumask_t *mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-
-	if (!cpus_empty(mask))
-		send_IPI_mask(&mask, vector);
+	send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(&cpu_online_map, vector);
+	send_IPI_mask(cpu_online_mask, vector);
 }
 
 #endif /* __ASM_NUMAQ_IPI_H */
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index c4a9aa52df6e..830b9fcb6427 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -60,7 +60,7 @@ struct smp_ops {
 	void (*cpu_die)(unsigned int cpu);
 	void (*play_dead)(void);
 
-	void (*send_call_func_ipi)(const cpumask_t *mask);
+	void (*send_call_func_ipi)(const struct cpumask *mask);
 	void (*send_call_func_single_ipi)(int cpu);
 };
 
@@ -138,7 +138,7 @@ void native_cpu_die(unsigned int cpu);
 void native_play_dead(void);
 void play_dead_common(void);
 
-void native_send_call_func_ipi(const cpumask_t *mask);
+void native_send_call_func_ipi(const struct cpumask *mask);
 void native_send_call_func_single_ipi(int cpu);
 
 extern void prefill_possible_map(void);
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
index c772bb10b173..7fa5f49c2dda 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/genapic_flat_64.c
@@ -30,12 +30,12 @@ static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 1;
 }
 
-static const cpumask_t *flat_target_cpus(void)
+static const struct cpumask *flat_target_cpus(void)
 {
-	return &cpu_online_map;
+	return cpu_online_mask;
 }
 
-static void flat_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
 	 * specified in the interrupt destination when using lowest
@@ -45,7 +45,8 @@ static void flat_vector_allocation_domain(int cpu, cpumask_t *retmask)
 	 * deliver interrupts to the wrong hyperthread when only one
 	 * hyperthread was specified in the interrupt desitination.
 	 */
-	*retmask = (cpumask_t) { {[0] = APIC_ALL_CPUS, } };
+	cpumask_clear(retmask);
+	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
 }
 
 /*
@@ -77,16 +78,17 @@ static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
 	local_irq_restore(flags);
 }
 
-static void flat_send_IPI_mask(const cpumask_t *cpumask, int vector)
+static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
 {
-	unsigned long mask = cpus_addr(*cpumask)[0];
+	unsigned long mask = cpumask_bits(cpumask)[0];
 
 	_flat_send_IPI_mask(mask, vector);
 }
 
-static void flat_send_IPI_mask_allbutself(const cpumask_t *cpumask, int vector)
+static void flat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
+					  int vector)
 {
-	unsigned long mask = cpus_addr(*cpumask)[0];
+	unsigned long mask = cpumask_bits(cpumask)[0];
 	int cpu = smp_processor_id();
 
 	if (cpu < BITS_PER_LONG)
@@ -103,8 +105,8 @@ static void flat_send_IPI_allbutself(int vector)
 	int hotplug = 0;
 #endif
 	if (hotplug || vector == NMI_VECTOR) {
-		if (!cpus_equal(cpu_online_map, cpumask_of_cpu(cpu))) {
-			unsigned long mask = cpus_addr(cpu_online_map)[0];
+		if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) {
+			unsigned long mask = cpumask_bits(cpu_online_mask)[0];
 
 			if (cpu < BITS_PER_LONG)
 				clear_bit(cpu, &mask);
@@ -119,7 +121,7 @@ static void flat_send_IPI_allbutself(int vector)
 static void flat_send_IPI_all(int vector)
 {
 	if (vector == NMI_VECTOR)
-		flat_send_IPI_mask(&cpu_online_map, vector);
+		flat_send_IPI_mask(cpu_online_mask, vector);
 	else
 		__send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
 }
@@ -153,9 +155,9 @@ static int flat_apic_id_registered(void)
 	return physid_isset(read_xapic_id(), phys_cpu_present_map);
 }
 
-static unsigned int flat_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int flat_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-	return cpus_addr(*cpumask)[0] & APIC_ALL_CPUS;
+	return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
 }
 
 static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
@@ -217,23 +219,23 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 0;
 }
 
-static const cpumask_t *physflat_target_cpus(void)
+static const struct cpumask *physflat_target_cpus(void)
 {
-	return &cpu_online_map;
+	return cpu_online_mask;
 }
 
-static void physflat_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-	cpus_clear(*retmask);
-	cpu_set(cpu, *retmask);
+	cpumask_clear(retmask);
+	cpumask_set_cpu(cpu, retmask);
 }
 
-static void physflat_send_IPI_mask(const cpumask_t *cpumask, int vector)
+static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
 {
 	send_IPI_mask_sequence(cpumask, vector);
 }
 
-static void physflat_send_IPI_mask_allbutself(const cpumask_t *cpumask,
+static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
 					      int vector)
 {
 	send_IPI_mask_allbutself(cpumask, vector);
@@ -241,15 +243,15 @@ static void physflat_send_IPI_mask_allbutself(const cpumask_t *cpumask,
 
 static void physflat_send_IPI_allbutself(int vector)
 {
-	send_IPI_mask_allbutself(&cpu_online_map, vector);
+	send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static void physflat_send_IPI_all(int vector)
 {
-	physflat_send_IPI_mask(&cpu_online_map, vector);
+	physflat_send_IPI_mask(cpu_online_mask, vector);
 }
 
-static unsigned int physflat_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	int cpu;
 
@@ -257,7 +259,7 @@ static unsigned int physflat_cpu_mask_to_apicid(const cpumask_t *cpumask)
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 	 * May as well be the first.
 	 */
-	cpu = first_cpu(*cpumask);
+	cpu = cpumask_first(cpumask);
 	if ((unsigned)cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_apicid, cpu);
 	else
diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c
index e7d16f53b9cd..4716a0c9f936 100644
--- a/arch/x86/kernel/genx2apic_cluster.c
+++ b/arch/x86/kernel/genx2apic_cluster.c
@@ -22,18 +22,18 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 
 /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
 
-static const cpumask_t *x2apic_target_cpus(void)
+static const struct cpumask *x2apic_target_cpus(void)
 {
-	return &cpumask_of_cpu(0);
+	return cpumask_of(0);
 }
 
 /*
  * for now each logical cpu is in its own vector allocation domain.
  */
-static void x2apic_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-	cpus_clear(*retmask);
-	cpu_set(cpu, *retmask);
+	cpumask_clear(retmask);
+	cpumask_set_cpu(cpu, retmask);
 }
 
 static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
@@ -55,27 +55,28 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
  * at once. We have 16 cpu's in a cluster. This will minimize IPI register
  * writes.
  */
-static void x2apic_send_IPI_mask(const cpumask_t *mask, int vector)
+static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
 
 	local_irq_save(flags);
-	for_each_cpu_mask_nr(query_cpu, *mask)
+	for_each_cpu(query_cpu, mask)
 		__x2apic_send_IPI_dest(
 			per_cpu(x86_cpu_to_logical_apicid, query_cpu),
 			vector, APIC_DEST_LOGICAL);
 	local_irq_restore(flags);
 }
 
-static void x2apic_send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
+static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
+					    int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
 	unsigned long this_cpu = smp_processor_id();
 
 	local_irq_save(flags);
-	for_each_cpu_mask_nr(query_cpu, *mask)
+	for_each_cpu(query_cpu, mask)
 		if (query_cpu != this_cpu)
 			__x2apic_send_IPI_dest(
 				per_cpu(x86_cpu_to_logical_apicid, query_cpu),
@@ -100,7 +101,7 @@ static void x2apic_send_IPI_allbutself(int vector)
 
 static void x2apic_send_IPI_all(int vector)
 {
-	x2apic_send_IPI_mask(&cpu_online_map, vector);
+	x2apic_send_IPI_mask(cpu_online_mask, vector);
 }
 
 static int x2apic_apic_id_registered(void)
@@ -108,7 +109,7 @@ static int x2apic_apic_id_registered(void)
 	return 1;
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	int cpu;
 
@@ -116,7 +117,7 @@ static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 	 * May as well be the first.
 	 */
-	cpu = first_cpu(*cpumask);
+	cpu = cpumask_first(cpumask);
 	if ((unsigned)cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_logical_apicid, cpu);
 	else
diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c
index 9d0386c7e798..b255507884f2 100644
--- a/arch/x86/kernel/genx2apic_phys.c
+++ b/arch/x86/kernel/genx2apic_phys.c
@@ -29,15 +29,15 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 
 /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
 
-static const cpumask_t *x2apic_target_cpus(void)
+static const struct cpumask *x2apic_target_cpus(void)
 {
-	return &cpumask_of_cpu(0);
+	return cpumask_of(0);
 }
 
-static void x2apic_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-	cpus_clear(*retmask);
-	cpu_set(cpu, *retmask);
+	cpumask_clear(retmask);
+	cpumask_set_cpu(cpu, retmask);
 }
 
 static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
@@ -53,27 +53,28 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
 	x2apic_icr_write(cfg, apicid);
 }
 
-static void x2apic_send_IPI_mask(const cpumask_t *mask, int vector)
+static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
 
 	local_irq_save(flags);
-	for_each_cpu_mask_nr(query_cpu, *mask) {
+	for_each_cpu(query_cpu, mask) {
 		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
 				       vector, APIC_DEST_PHYSICAL);
 	}
 	local_irq_restore(flags);
 }
 
-static void x2apic_send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
+static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
+					    int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
 	unsigned long this_cpu = smp_processor_id();
 
 	local_irq_save(flags);
-	for_each_cpu_mask_nr(query_cpu, *mask) {
+	for_each_cpu(query_cpu, mask) {
 		if (query_cpu != this_cpu)
 			__x2apic_send_IPI_dest(
 				per_cpu(x86_cpu_to_apicid, query_cpu),
@@ -99,7 +100,7 @@ static void x2apic_send_IPI_allbutself(int vector)
 
 static void x2apic_send_IPI_all(int vector)
 {
-	x2apic_send_IPI_mask(&cpu_online_map, vector);
+	x2apic_send_IPI_mask(cpu_online_mask, vector);
 }
 
 static int x2apic_apic_id_registered(void)
@@ -107,7 +108,7 @@ static int x2apic_apic_id_registered(void)
 	return 1;
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	int cpu;
 
@@ -115,7 +116,7 @@ static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 	 * May as well be the first.
 	 */
-	cpu = first_cpu(*cpumask);
+	cpu = cpumask_first(cpumask);
 	if ((unsigned)cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_apicid, cpu);
 	else
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index 22596ec94c82..3984682cd849 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -75,15 +75,15 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 
 /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
 
-static const cpumask_t *uv_target_cpus(void)
+static const struct cpumask *uv_target_cpus(void)
 {
-	return &cpumask_of_cpu(0);
+	return cpumask_of(0);
 }
 
-static void uv_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-	cpus_clear(*retmask);
-	cpu_set(cpu, *retmask);
+	cpumask_clear(retmask);
+	cpumask_set_cpu(cpu, retmask);
 }
 
 int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
@@ -122,20 +122,20 @@ static void uv_send_IPI_one(int cpu, int vector)
 	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 }
 
-static void uv_send_IPI_mask(const cpumask_t *mask, int vector)
+static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	unsigned int cpu;
 
-	for_each_cpu_mask_nr(cpu, *mask)
+	for_each_cpu(cpu, mask)
 		uv_send_IPI_one(cpu, vector);
 }
 
-static void uv_send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
+static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
 {
 	unsigned int cpu;
 	unsigned int this_cpu = smp_processor_id();
 
-	for_each_cpu_mask_nr(cpu, *mask)
+	for_each_cpu(cpu, mask)
 		if (cpu != this_cpu)
 			uv_send_IPI_one(cpu, vector);
 }
@@ -152,7 +152,7 @@ static void uv_send_IPI_allbutself(int vector)
 
 static void uv_send_IPI_all(int vector)
 {
-	uv_send_IPI_mask(&cpu_online_map, vector);
+	uv_send_IPI_mask(cpu_online_mask, vector);
 }
 
 static int uv_apic_id_registered(void)
@@ -164,7 +164,7 @@ static void uv_init_apic_ldr(void)
 {
 }
 
-static unsigned int uv_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
 	int cpu;
 
@@ -172,7 +172,7 @@ static unsigned int uv_cpu_mask_to_apicid(const cpumask_t *cpumask)
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 	 * May as well be the first.
 	 */
-	cpu = first_cpu(*cpumask);
+	cpu = cpumask_first(cpumask);
 	if ((unsigned)cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_apicid, cpu);
 	else
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c
index 86aa50fc65a1..285bbf8831fa 100644
--- a/arch/x86/kernel/ipi.c
+++ b/arch/x86/kernel/ipi.c
@@ -116,18 +116,18 @@ static inline void __send_IPI_dest_field(unsigned long mask, int vector)
 /*
  * This is only used on smaller machines.
  */
-void send_IPI_mask_bitmask(const cpumask_t *cpumask, int vector)
+void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector)
 {
-	unsigned long mask = cpus_addr(*cpumask)[0];
+	unsigned long mask = cpumask_bits(cpumask)[0];
 	unsigned long flags;
 
 	local_irq_save(flags);
-	WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
+	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
 	__send_IPI_dest_field(mask, vector);
 	local_irq_restore(flags);
 }
 
-void send_IPI_mask_sequence(const cpumask_t *mask, int vector)
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector)
 {
 	unsigned long flags;
 	unsigned int query_cpu;
@@ -139,12 +139,12 @@ void send_IPI_mask_sequence(const cpumask_t *mask, int vector)
 	 */
 
 	local_irq_save(flags);
-	for_each_cpu_mask_nr(query_cpu, *mask)
+	for_each_cpu(query_cpu, mask)
 		__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector);
 	local_irq_restore(flags);
 }
 
-void send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
 {
 	unsigned long flags;
 	unsigned int query_cpu;
@@ -153,7 +153,7 @@ void send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
 	/* See Hack comment above */
 
 	local_irq_save(flags);
-	for_each_cpu_mask_nr(query_cpu, *mask)
+	for_each_cpu(query_cpu, mask)
 		if (query_cpu != this_cpu)
 			__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
 					      vector);
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 341df946f9a9..49ed667b06f3 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -118,15 +118,15 @@ static void native_smp_send_reschedule(int cpu)
 		WARN_ON(1);
 		return;
 	}
-	send_IPI_mask(&cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
+	send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
 }
 
 void native_send_call_func_single_ipi(int cpu)
 {
-	send_IPI_mask(&cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
+	send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
 }
 
-void native_send_call_func_ipi(const cpumask_t *mask)
+void native_send_call_func_ipi(const struct cpumask *mask)
 {
 	cpumask_t allbutself;
 
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index b3a95868839b..c44e2069c7c7 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -411,22 +411,23 @@ static void xen_smp_send_reschedule(int cpu)
 	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
 }
 
-static void xen_send_IPI_mask(const cpumask_t *mask, enum ipi_vector vector)
+static void xen_send_IPI_mask(const struct cpumask *mask,
+			      enum ipi_vector vector)
 {
 	unsigned cpu;
 
-	for_each_cpu_and(cpu, mask, &cpu_online_map)
+	for_each_cpu_and(cpu, mask, cpu_online_mask)
 		xen_send_IPI_one(cpu, vector);
 }
 
-static void xen_smp_send_call_function_ipi(const cpumask_t *mask)
+static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
 {
 	int cpu;
 
 	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
 
 	/* Make sure other vcpus get a chance to run if they need to. */
-	for_each_cpu_mask_nr(cpu, *mask) {
+	for_each_cpu(cpu, mask) {
 		if (xen_vcpu_stolen(cpu)) {
 			HYPERVISOR_sched_op(SCHEDOP_yield, 0);
 			break;
@@ -436,7 +437,7 @@ static void xen_smp_send_call_function_ipi(const cpumask_t *mask)
 
 static void xen_smp_send_call_function_single_ipi(int cpu)
 {
-	xen_send_IPI_mask(&cpumask_of_cpu(cpu),
+	xen_send_IPI_mask(cpumask_of(cpu),
 			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
 }
 