diff options
author | Mike Travis <travis@sgi.com> | 2008-12-16 20:33:52 -0500 |
---|---|---|
committer | Mike Travis <travis@sgi.com> | 2008-12-16 20:40:56 -0500 |
commit | e7986739a76cde5079da08809d8bbc6878387ae0 (patch) | |
tree | dd99ed6af66d459fe164f75ded7f95262dc0fb0d /arch/x86/kernel/genx2apic_uv_x.c | |
parent | 36f5101a60de8f79c0d1ca06e50660bf5129e02c (diff) |
x86 smp: modify send_IPI_mask interface to accept cpumask_t pointers
Impact: cleanup, change parameter passing
* Change genapic interfaces to accept cpumask_t pointers where possible.
* Modify external callers to use cpumask_t pointers in function calls.
* Create new send_IPI_mask_allbutself which is the same as the
send_IPI_mask functions but removes smp_processor_id() from list.
This removes another common need for a temporary cpumask_t variable.
* Functions that used a temp cpumask_t variable for:
cpumask_t allbutme = cpu_online_map;
cpu_clear(smp_processor_id(), allbutme);
if (!cpus_empty(allbutme))
...
become:
if (!cpus_equal(cpu_online_map, cpumask_of_cpu(cpu)))
...
* Other minor code optimizations (like using cpus_clear instead of
CPU_MASK_NONE, etc.)
Applies to linux-2.6.tip/master.
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/genx2apic_uv_x.c')
-rw-r--r-- | arch/x86/kernel/genx2apic_uv_x.c | 43 |
1 file changed, 26 insertions, 17 deletions
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index 2c7dbdb98278..010659415ae4 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c | |||
@@ -75,16 +75,15 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second); | |||
75 | 75 | ||
76 | /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ | 76 | /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ |
77 | 77 | ||
78 | static cpumask_t uv_target_cpus(void) | 78 | static const cpumask_t *uv_target_cpus(void) |
79 | { | 79 | { |
80 | return cpumask_of_cpu(0); | 80 | return &cpumask_of_cpu(0); |
81 | } | 81 | } |
82 | 82 | ||
83 | static cpumask_t uv_vector_allocation_domain(int cpu) | 83 | static void uv_vector_allocation_domain(int cpu, cpumask_t *retmask) |
84 | { | 84 | { |
85 | cpumask_t domain = CPU_MASK_NONE; | 85 | cpus_clear(*retmask); |
86 | cpu_set(cpu, domain); | 86 | cpu_set(cpu, *retmask); |
87 | return domain; | ||
88 | } | 87 | } |
89 | 88 | ||
90 | int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) | 89 | int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) |
@@ -123,28 +122,37 @@ static void uv_send_IPI_one(int cpu, int vector) | |||
123 | uv_write_global_mmr64(pnode, UVH_IPI_INT, val); | 122 | uv_write_global_mmr64(pnode, UVH_IPI_INT, val); |
124 | } | 123 | } |
125 | 124 | ||
126 | static void uv_send_IPI_mask(cpumask_t mask, int vector) | 125 | static void uv_send_IPI_mask(const cpumask_t *mask, int vector) |
127 | { | 126 | { |
128 | unsigned int cpu; | 127 | unsigned int cpu; |
129 | 128 | ||
130 | for_each_possible_cpu(cpu) | 129 | for_each_cpu_mask_nr(cpu, *mask) |
131 | if (cpu_isset(cpu, mask)) | 130 | uv_send_IPI_one(cpu, vector); |
131 | } | ||
132 | |||
133 | static void uv_send_IPI_mask_allbutself(const cpumask_t *mask, int vector) | ||
134 | { | ||
135 | unsigned int cpu; | ||
136 | unsigned int this_cpu = smp_processor_id(); | ||
137 | |||
138 | for_each_cpu_mask_nr(cpu, *mask) | ||
139 | if (cpu != this_cpu) | ||
132 | uv_send_IPI_one(cpu, vector); | 140 | uv_send_IPI_one(cpu, vector); |
133 | } | 141 | } |
134 | 142 | ||
135 | static void uv_send_IPI_allbutself(int vector) | 143 | static void uv_send_IPI_allbutself(int vector) |
136 | { | 144 | { |
137 | cpumask_t mask = cpu_online_map; | 145 | unsigned int cpu; |
138 | 146 | unsigned int this_cpu = smp_processor_id(); | |
139 | cpu_clear(smp_processor_id(), mask); | ||
140 | 147 | ||
141 | if (!cpus_empty(mask)) | 148 | for_each_online_cpu(cpu) |
142 | uv_send_IPI_mask(mask, vector); | 149 | if (cpu != this_cpu) |
150 | uv_send_IPI_one(cpu, vector); | ||
143 | } | 151 | } |
144 | 152 | ||
145 | static void uv_send_IPI_all(int vector) | 153 | static void uv_send_IPI_all(int vector) |
146 | { | 154 | { |
147 | uv_send_IPI_mask(cpu_online_map, vector); | 155 | uv_send_IPI_mask(&cpu_online_map, vector); |
148 | } | 156 | } |
149 | 157 | ||
150 | static int uv_apic_id_registered(void) | 158 | static int uv_apic_id_registered(void) |
@@ -156,7 +164,7 @@ static void uv_init_apic_ldr(void) | |||
156 | { | 164 | { |
157 | } | 165 | } |
158 | 166 | ||
159 | static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) | 167 | static unsigned int uv_cpu_mask_to_apicid(const cpumask_t *cpumask) |
160 | { | 168 | { |
161 | int cpu; | 169 | int cpu; |
162 | 170 | ||
@@ -164,7 +172,7 @@ static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) | |||
164 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | 172 | * We're using fixed IRQ delivery, can only return one phys APIC ID. |
165 | * May as well be the first. | 173 | * May as well be the first. |
166 | */ | 174 | */ |
167 | cpu = first_cpu(cpumask); | 175 | cpu = first_cpu(*cpumask); |
168 | if ((unsigned)cpu < nr_cpu_ids) | 176 | if ((unsigned)cpu < nr_cpu_ids) |
169 | return per_cpu(x86_cpu_to_apicid, cpu); | 177 | return per_cpu(x86_cpu_to_apicid, cpu); |
170 | else | 178 | else |
@@ -218,6 +226,7 @@ struct genapic apic_x2apic_uv_x = { | |||
218 | .send_IPI_all = uv_send_IPI_all, | 226 | .send_IPI_all = uv_send_IPI_all, |
219 | .send_IPI_allbutself = uv_send_IPI_allbutself, | 227 | .send_IPI_allbutself = uv_send_IPI_allbutself, |
220 | .send_IPI_mask = uv_send_IPI_mask, | 228 | .send_IPI_mask = uv_send_IPI_mask, |
229 | .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself, | ||
221 | .send_IPI_self = uv_send_IPI_self, | 230 | .send_IPI_self = uv_send_IPI_self, |
222 | .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, | 231 | .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, |
223 | .phys_pkg_id = phys_pkg_id, | 232 | .phys_pkg_id = phys_pkg_id, |