author		Andrew Morton <akpm@linux-foundation.org>	2013-04-30 18:27:28 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-04-30 20:04:03 -0400
commit		e1d12f327037a59e06c66520951ab4e0bd29f9c4
tree		551fc711bf5ad115e5f239825bc00ba9632ec0f9	/kernel/smp.c
parent		74e3d1e17b2e11d175970b85acd44f5927000ba2
kernel/smp.c: cleanups
We sometimes use "struct call_single_data *data" and sometimes "struct
call_single_data *csd".  Use "csd" consistently.

We sometimes use "struct call_function_data *data" and sometimes "struct
call_function_data *cfd".  Use "cfd" consistently.

Also, avoid some 80-col layout tricks.

Cc: Ingo Molnar <mingo@elte.hu>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Shaohua Li <shli@fusionio.com>
Cc: Shaohua Li <shli@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
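For readers skimming the diff below: csd_lock()/csd_unlock() implement a tiny ownership flag on each call_single_data entry, and most of the renamed lines touch that protocol. Here is a minimal user-space sketch of the same idea, assuming C11 atomics in place of the kernel's smp_mb() and cpu_relax(); the struct and names are illustrative stand-ins, not the kernel's types.

#include <stdatomic.h>

#define CSD_FLAG_LOCK 0x01

/* Illustrative stand-in for the kernel's struct call_single_data. */
struct csd_sketch {
        _Atomic unsigned int flags;
};

/* Spin until the previous owner releases the entry. */
static void csd_lock_wait(struct csd_sketch *csd)
{
        while (atomic_load(&csd->flags) & CSD_FLAG_LOCK)
                ;       /* the kernel calls cpu_relax() here */
}

static void csd_lock(struct csd_sketch *csd)
{
        csd_lock_wait(csd);
        atomic_fetch_or(&csd->flags, CSD_FLAG_LOCK);
        /* Full fence, like the kernel's smp_mb(): the lock bit must be
         * visible before ->func/->info are published to another CPU. */
        atomic_thread_fence(memory_order_seq_cst);
}

static void csd_unlock(struct csd_sketch *csd)
{
        /* Fence first: all work on the entry completes before release. */
        atomic_thread_fence(memory_order_seq_cst);
        atomic_fetch_and(&csd->flags, ~CSD_FLAG_LOCK);
}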
Diffstat (limited to 'kernel/smp.c')
-rw-r--r--	kernel/smp.c	91
1 file changed, 46 insertions(+), 45 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index b320622543e9..4dba0f7b72ad 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -100,16 +100,16 @@ void __init call_function_init(void)
  * previous function call. For multi-cpu calls its even more interesting
  * as we'll have to ensure no other cpu is observing our csd.
  */
-static void csd_lock_wait(struct call_single_data *data)
+static void csd_lock_wait(struct call_single_data *csd)
 {
-        while (data->flags & CSD_FLAG_LOCK)
+        while (csd->flags & CSD_FLAG_LOCK)
                 cpu_relax();
 }
 
-static void csd_lock(struct call_single_data *data)
+static void csd_lock(struct call_single_data *csd)
 {
-        csd_lock_wait(data);
-        data->flags |= CSD_FLAG_LOCK;
+        csd_lock_wait(csd);
+        csd->flags |= CSD_FLAG_LOCK;
 
         /*
          * prevent CPU from reordering the above assignment
@@ -119,16 +119,16 @@ static void csd_lock(struct call_single_data *data)
         smp_mb();
 }
 
-static void csd_unlock(struct call_single_data *data)
+static void csd_unlock(struct call_single_data *csd)
 {
-        WARN_ON(!(data->flags & CSD_FLAG_LOCK));
+        WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
 
         /*
          * ensure we're all done before releasing data:
          */
         smp_mb();
 
-        data->flags &= ~CSD_FLAG_LOCK;
+        csd->flags &= ~CSD_FLAG_LOCK;
 }
 
 /*
@@ -137,7 +137,7 @@ static void csd_unlock(struct call_single_data *data)
  * ->func, ->info, and ->flags set.
  */
 static
-void generic_exec_single(int cpu, struct call_single_data *data, int wait)
+void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 {
         struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
         unsigned long flags;
@@ -145,7 +145,7 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 
         raw_spin_lock_irqsave(&dst->lock, flags);
         ipi = list_empty(&dst->list);
-        list_add_tail(&data->list, &dst->list);
+        list_add_tail(&csd->list, &dst->list);
         raw_spin_unlock_irqrestore(&dst->lock, flags);
 
         /*
@@ -163,7 +163,7 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
                 arch_send_call_function_single_ipi(cpu);
 
         if (wait)
-                csd_lock_wait(data);
+                csd_lock_wait(csd);
 }
 
 /*
@@ -173,7 +173,6 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 void generic_smp_call_function_single_interrupt(void)
 {
         struct call_single_queue *q = &__get_cpu_var(call_single_queue);
-        unsigned int data_flags;
         LIST_HEAD(list);
 
         /*
@@ -186,25 +185,26 @@ void generic_smp_call_function_single_interrupt(void)
         raw_spin_unlock(&q->lock);
 
         while (!list_empty(&list)) {
-                struct call_single_data *data;
+                struct call_single_data *csd;
+                unsigned int csd_flags;
 
-                data = list_entry(list.next, struct call_single_data, list);
-                list_del(&data->list);
+                csd = list_entry(list.next, struct call_single_data, list);
+                list_del(&csd->list);
 
                 /*
-                 * 'data' can be invalid after this call if flags == 0
+                 * 'csd' can be invalid after this call if flags == 0
                  * (when called through generic_exec_single()),
                  * so save them away before making the call:
                  */
-                data_flags = data->flags;
+                csd_flags = csd->flags;
 
-                data->func(data->info);
+                csd->func(csd->info);
 
                 /*
                  * Unlocked CSDs are valid through generic_exec_single():
                  */
-                if (data_flags & CSD_FLAG_LOCK)
-                        csd_unlock(data);
+                if (csd_flags & CSD_FLAG_LOCK)
+                        csd_unlock(csd);
         }
 }
 
@@ -249,16 +249,16 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
                 local_irq_restore(flags);
         } else {
                 if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
-                        struct call_single_data *data = &d;
+                        struct call_single_data *csd = &d;
 
                         if (!wait)
-                                data = &__get_cpu_var(csd_data);
+                                csd = &__get_cpu_var(csd_data);
 
-                        csd_lock(data);
+                        csd_lock(csd);
 
-                        data->func = func;
-                        data->info = info;
-                        generic_exec_single(cpu, data, wait);
+                        csd->func = func;
+                        csd->info = info;
+                        generic_exec_single(cpu, csd, wait);
                 } else {
                         err = -ENXIO;   /* CPU not online */
                 }
@@ -325,7 +325,7 @@ EXPORT_SYMBOL_GPL(smp_call_function_any);
  * pre-allocated data structure. Useful for embedding @data inside
  * other structures, for instance.
  */
-void __smp_call_function_single(int cpu, struct call_single_data *data,
+void __smp_call_function_single(int cpu, struct call_single_data *csd,
                                 int wait)
 {
         unsigned int this_cpu;
@@ -343,11 +343,11 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
 
         if (cpu == this_cpu) {
                 local_irq_save(flags);
-                data->func(data->info);
+                csd->func(csd->info);
                 local_irq_restore(flags);
         } else {
-                csd_lock(data);
-                generic_exec_single(cpu, data, wait);
+                csd_lock(csd);
+                generic_exec_single(cpu, csd, wait);
         }
         put_cpu();
 }
@@ -369,7 +369,7 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
 void smp_call_function_many(const struct cpumask *mask,
                             smp_call_func_t func, void *info, bool wait)
 {
-        struct call_function_data *data;
+        struct call_function_data *cfd;
         int cpu, next_cpu, this_cpu = smp_processor_id();
 
         /*
@@ -401,24 +401,24 @@ void smp_call_function_many(const struct cpumask *mask,
                 return;
         }
 
-        data = &__get_cpu_var(cfd_data);
+        cfd = &__get_cpu_var(cfd_data);
 
-        cpumask_and(data->cpumask, mask, cpu_online_mask);
-        cpumask_clear_cpu(this_cpu, data->cpumask);
+        cpumask_and(cfd->cpumask, mask, cpu_online_mask);
+        cpumask_clear_cpu(this_cpu, cfd->cpumask);
 
         /* Some callers race with other cpus changing the passed mask */
-        if (unlikely(!cpumask_weight(data->cpumask)))
+        if (unlikely(!cpumask_weight(cfd->cpumask)))
                 return;
 
         /*
-         * After we put an entry into the list, data->cpumask
-         * may be cleared again when another CPU sends another IPI for
-         * a SMP function call, so data->cpumask will be zero.
+         * After we put an entry into the list, cfd->cpumask may be cleared
+         * again when another CPU sends another IPI for a SMP function call, so
+         * cfd->cpumask will be zero.
          */
-        cpumask_copy(data->cpumask_ipi, data->cpumask);
+        cpumask_copy(cfd->cpumask_ipi, cfd->cpumask);
 
-        for_each_cpu(cpu, data->cpumask) {
-                struct call_single_data *csd = per_cpu_ptr(data->csd, cpu);
+        for_each_cpu(cpu, cfd->cpumask) {
+                struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
                 struct call_single_queue *dst =
                         &per_cpu(call_single_queue, cpu);
                 unsigned long flags;
@@ -433,12 +433,13 @@ void smp_call_function_many(const struct cpumask *mask,
         }
 
         /* Send a message to all CPUs in the map */
-        arch_send_call_function_ipi_mask(data->cpumask_ipi);
+        arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
 
         if (wait) {
-                for_each_cpu(cpu, data->cpumask) {
-                        struct call_single_data *csd =
-                                per_cpu_ptr(data->csd, cpu);
+                for_each_cpu(cpu, cfd->cpumask) {
+                        struct call_single_data *csd;
+
+                        csd = per_cpu_ptr(cfd->csd, cpu);
                         csd_lock_wait(csd);
                 }
         }
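For context on how the functions touched above are driven, a hypothetical caller might look like the sketch below. bump_counter() and example_usage() are invented for illustration, but the smp_call_function_single() and smp_call_function_many() signatures match the ones in this file.

#include <linux/smp.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/preempt.h>

static atomic_t hits = ATOMIC_INIT(0);

/* Hypothetical callback: runs on each target CPU in interrupt context. */
static void bump_counter(void *info)
{
        atomic_inc((atomic_t *)info);
}

static void example_usage(void)
{
        /* Run on CPU 1 and wait for it to finish (wait != 0). */
        smp_call_function_single(1, bump_counter, &hits, 1);

        /* Run on all other online CPUs; smp_call_function_many() must be
         * called with preemption disabled, since it reads the current
         * CPU id and excludes that CPU from the mask. */
        preempt_disable();
        smp_call_function_many(cpu_online_mask, bump_counter, &hits, true);
        preempt_enable();
}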