author     Nick Piggin <nickpiggin@yahoo.com.au>   2008-08-10 23:49:30 -0400
committer  Ingo Molnar <mingo@elte.hu>             2008-08-11 09:21:28 -0400
commit     cc7a486cac78f6fc1a24e8cd63036bae8d2ab431
tree       258abdc1843d6f9ed76608412f1364178fb8c697 /kernel/smp.c
parent     796aadeb1b2db9b5d463946766c5bbfd7717158c
generic-ipi: fix stack and rcu interaction bug in smp_call_function_mask()
* Venki Pallipadi <venkatesh.pallipadi@intel.com> wrote:
> Found an OOPS on a big SMP box during an overnight reboot test with
> upstream git.
>
> Suresh and I looked at the oops, and it looks like the root cause is in
> generic_smp_call_function_interrupt() and smp_call_function_mask() with
> the wait parameter.
>
> The actual oops looked like
>
> [ 11.277260] BUG: unable to handle kernel paging request at ffff8802ffffffff
> [ 11.277815] IP: [<ffff8802ffffffff>] 0xffff8802ffffffff
> [ 11.278155] PGD 202063 PUD 0
> [ 11.278576] Oops: 0010 [1] SMP
> [ 11.279006] CPU 5
> [ 11.279336] Modules linked in:
> [ 11.279752] Pid: 0, comm: swapper Not tainted 2.6.27-rc2-00020-g685d87f #290
> [ 11.280039] RIP: 0010:[<ffff8802ffffffff>] [<ffff8802ffffffff>] 0xffff8802ffffffff
> [ 11.280692] RSP: 0018:ffff88027f1f7f70 EFLAGS: 00010086
> [ 11.280976] RAX: 00000000ffffffff RBX: 0000000000000000 RCX: 0000000000000000
> [ 11.281264] RDX: 0000000000004f4e RSI: 0000000000000001 RDI: 0000000000000000
> [ 11.281624] RBP: ffff88027f1f7f98 R08: 0000000000000001 R09: ffffffff802509af
> [ 11.281925] R10: ffff8800280c2780 R11: 0000000000000000 R12: ffff88027f097d48
> [ 11.282214] R13: ffff88027f097d70 R14: 0000000000000005 R15: ffff88027e571000
> [ 11.282502] FS: 0000000000000000(0000) GS:ffff88027f1c3340(0000) knlGS:0000000000000000
> [ 11.283096] CS: 0010 DS: 0018 ES: 0018 CR0: 000000008005003b
> [ 11.283382] CR2: ffff8802ffffffff CR3: 0000000000201000 CR4: 00000000000006e0
> [ 11.283760] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
> [ 11.284048] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
> [ 11.284337] Process swapper (pid: 0, threadinfo ffff88027f1f2000, task ffff88027f1f0640)
> [ 11.284936] Stack: ffffffff80250963 0000000000000212 0000000000ee8c78 0000000000ee8a66
> [ 11.285802] ffff88027e571550 ffff88027f1f7fa8 ffffffff8021adb5 ffff88027f1f3e40
> [ 11.286599] ffffffff8020bdd6 ffff88027f1f3e40 <EOI> ffff88027f1f3ef8 0000000000000000
> [ 11.287120] Call Trace:
> [ 11.287768] <IRQ> [<ffffffff80250963>] ? generic_smp_call_function_interrupt+0x61/0x12c
> [ 11.288354] [<ffffffff8021adb5>] smp_call_function_interrupt+0x17/0x27
> [ 11.288744] [<ffffffff8020bdd6>] call_function_interrupt+0x66/0x70
> [ 11.289030] <EOI> [<ffffffff8024ab3b>] ? clockevents_notify+0x19/0x73
> [ 11.289380] [<ffffffff803b9b75>] ? acpi_idle_enter_simple+0x18b/0x1fa
> [ 11.289760] [<ffffffff803b9b6b>] ? acpi_idle_enter_simple+0x181/0x1fa
> [ 11.290051] [<ffffffff8053aeca>] ? cpuidle_idle_call+0x70/0xa2
> [ 11.290338] [<ffffffff80209f61>] ? cpu_idle+0x5f/0x7d
> [ 11.290723] [<ffffffff8060224a>] ? start_secondary+0x14d/0x152
> [ 11.291010]
> [ 11.291287]
> [ 11.291654] Code: Bad RIP value.
> [ 11.292041] RIP [<ffff8802ffffffff>] 0xffff8802ffffffff
> [ 11.292380] RSP <ffff88027f1f7f70>
> [ 11.292741] CR2: ffff8802ffffffff
> [ 11.310951] ---[ end trace 137c54d525305f1c ]---
>
> The problem is with the following sequence of events:
>
> - CPU A calls smp_call_function_mask() for CPU B with the wait parameter
> - CPU A sets up the call_function_data on the stack and does an rcu add to
> call_function_queue
> - CPU A waits until the WAIT flag is cleared
> - CPU B gets the call function interrupt and starts going through the
> call_function_queue
> - CPU C also gets some other call function interrupt and starts going through
> the call_function_queue
> - CPU C, which is also going through the call_function_queue, starts referencing
> CPU A's stack, as that element is still in call_function_queue
> - CPU B finishes the function call that CPU A set up and, as there are no
> other references to it, RCU deletes the call_function_data (which was on
> CPU A's stack)
> - CPU B sees the wait flag and just clears the flag (no call_rcu to free)
> - CPU A which was waiting on the flag continues executing and the stack
> contents change
>
> - CPU C, still inside an rcu_read section and accessing CPU A's stack, sees
> inconsistent call_function_data and can try to execute a function through
> some random pointer, causing stack corruption for A (by clearing the bits
> in the mask field) and an oops.
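
To make the lifetime hazard concrete, here is a minimal userspace analogue
of that sequence (an illustrative sketch only, not kernel code: struct node,
queue_head, consumer() and waiter() are invented names). One thread publishes
an on-stack node on a shared queue and waits only for a done flag; the
consumer clears the flag but touches the node again afterwards, standing in
for CPU C. Whether it actually crashes depends on timing and stack reuse,
which is exactly what made the original bug intermittent.

/* Build: cc -std=c11 -pthread race-demo.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>
#include <unistd.h>

struct node {
	void (*func)(void *);
	atomic_int wait_flag;		/* 1 while the issuer must block */
};

static _Atomic(struct node *) queue_head; /* one-slot call_function_queue */

static void work(void *unused)
{
	(void)unused;
}

/* Plays CPUs B and C: runs the callback, releases the waiter by clearing
 * the flag, then dereferences the node again after the release. */
static void *consumer(void *unused)
{
	struct node *n;

	(void)unused;
	while ((n = atomic_load(&queue_head)) == NULL)
		;				/* wait for a published node */
	n->func(NULL);				/* CPU B: execute the call */
	atomic_store(&n->wait_flag, 0);		/* CPU B: release the waiter */
	usleep(1000);				/* waiter's frame dies here */
	n->func(NULL);				/* CPU C: use-after-scope */
	return NULL;
}

/* Plays CPU A: publishes an on-stack node, waits only on the flag. */
static void waiter(void)
{
	struct node n;				/* on the stack, like the csd */

	n.func = work;
	atomic_init(&n.wait_flag, 1);
	atomic_store(&queue_head, &n);
	while (atomic_load(&n.wait_flag))
		;				/* csd_flag_wait() analogue */
	/* returning invalidates &n while consumer() may still hold it */
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, consumer, NULL);
	waiter();
	pthread_join(t, NULL);
	return 0;
}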
Nice debugging work.
I'd suggest something like the attached (boot-tested) patch as a simple
fix for now.
I expect the benefits of the less synchronized, multiple-in-flight-data
global queue will still outweigh the costs of dynamic allocation. But if
worst comes to worst, we can just go back to a globally synchronous,
one-at-a-time implementation; that would be pretty sad, though!
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/smp.c')
 kernel/smp.c | 54 +++++++++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 47 insertions(+), 7 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index 96fc7c0edc59..e6084f6efb4d 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -260,6 +260,41 @@ void __smp_call_function_single(int cpu, struct call_single_data *data)
 	generic_exec_single(cpu, data);
 }
 
+/* Dummy function */
+static void quiesce_dummy(void *unused)
+{
+}
+
+/*
+ * Ensure stack based data used in call function mask is safe to free.
+ *
+ * This is needed by smp_call_function_mask when using on-stack data, because
+ * a single call function queue is shared by all CPUs, and any CPU may pick up
+ * the data item on the queue at any time before it is deleted. So we need to
+ * ensure that all CPUs have transitioned through a quiescent state after
+ * this call.
+ *
+ * This is a very slow function, implemented by sending synchronous IPIs to
+ * all possible CPUs. For this reason, we have to alloc data rather than use
+ * stack based data even in the case of synchronous calls. The stack based
+ * data is then just used for deadlock/oom fallback which will be very rare.
+ *
+ * If a faster scheme can be made, we could go back to preferring stack based
+ * data -- the data allocation/free is non-zero cost.
+ */
+static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
+{
+	struct call_single_data data;
+	int cpu;
+
+	data.func = quiesce_dummy;
+	data.info = NULL;
+	data.flags = CSD_FLAG_WAIT;
+
+	for_each_cpu_mask(cpu, mask)
+		generic_exec_single(cpu, &data);
+}
+
 /**
  * smp_call_function_mask(): Run a function on a set of other CPUs.
  * @mask: The set of cpus to run on.
@@ -285,6 +320,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 	cpumask_t allbutself;
 	unsigned long flags;
 	int cpu, num_cpus;
+	int slowpath = 0;
 
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
@@ -306,15 +342,16 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 		return smp_call_function_single(cpu, func, info, wait);
 	}
 
-	if (!wait) {
-		data = kmalloc(sizeof(*data), GFP_ATOMIC);
-		if (data)
-			data->csd.flags = CSD_FLAG_ALLOC;
-	}
-	if (!data) {
+	data = kmalloc(sizeof(*data), GFP_ATOMIC);
+	if (data) {
+		data->csd.flags = CSD_FLAG_ALLOC;
+		if (wait)
+			data->csd.flags |= CSD_FLAG_WAIT;
+	} else {
 		data = &d;
 		data->csd.flags = CSD_FLAG_WAIT;
 		wait = 1;
+		slowpath = 1;
 	}
 
 	spin_lock_init(&data->lock);
@@ -331,8 +368,11 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 	arch_send_call_function_ipi(mask);
 
 	/* optionally wait for the CPUs to complete */
-	if (wait)
+	if (wait) {
 		csd_flag_wait(&data->csd);
+		if (unlikely(slowpath))
+			smp_call_function_mask_quiesce_stack(allbutself);
+	}
 
 	return 0;
 }
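
For reference, the fix changes nothing for callers of the API: with wait == 1,
the allocation-first policy and the slow-path quiescence both happen inside
smp_call_function_mask(). A hypothetical caller against the 2.6.27-era API
(flush_remote() and flush_all_others() are invented names) might look like:

#include <linux/smp.h>
#include <linux/cpumask.h>

/* Runs on each remote CPU in the mask, in interrupt context. */
static void flush_remote(void *info)
{
	/* per-CPU work goes here */
}

void flush_all_others(void)
{
	cpumask_t mask = cpu_online_map;
	int cpu = get_cpu();	/* disable preemption, get our CPU id */

	cpu_clear(cpu, mask);
	/* wait == 1: returns only after every CPU in the mask has run
	 * flush_remote(). After this patch that is safe even when the
	 * csd had to fall back to the caller's stack, because the
	 * dummy-IPI quiescence pass runs before we return. */
	smp_call_function_mask(mask, flush_remote, NULL, 1);
	put_cpu();
}

The design point is that the dummy synchronous IPI round acts as a barrier:
only once every CPU has processed and released the dummy csd can the caller
be sure no CPU is still traversing the queue entry that lived on its stack.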