diff options
 kernel/smp.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index 954548906afb..7cbd0f293df4 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -194,7 +194,7 @@ void generic_smp_call_function_interrupt(void)
 	 */
 	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
 		int refs;
-		void (*func) (void *info);
+		smp_call_func_t func;
 
 		/*
 		 * Since we walk the list without any locks, we might
@@ -214,17 +214,17 @@ void generic_smp_call_function_interrupt(void)
 		if (atomic_read(&data->refs) == 0)
 			continue;
 
-		func = data->csd.func;			/* for later warn */
-		data->csd.func(data->csd.info);
+		func = data->csd.func;		/* save for later warn */
+		func(data->csd.info);
 
 		/*
-		 * If the cpu mask is not still set then it enabled interrupts,
-		 * we took another smp interrupt, and executed the function
-		 * twice on this cpu.  In theory that copy decremented refs.
+		 * If the cpu mask is not still set then func enabled
+		 * interrupts (BUG), and this cpu took another smp call
+		 * function interrupt and executed func(info) twice
+		 * on this cpu.  That nested execution decremented refs.
 		 */
 		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
-			WARN(1, "%pS enabled interrupts and double executed\n",
-			     func);
+			WARN(1, "%pf enabled interrupts and double executed\n", func);
 			continue;
 		}
 