author    Tejun Heo <tj@kernel.org>            2011-06-23 14:19:28 -0400
committer H. Peter Anvin <hpa@linux.intel.com> 2011-06-27 18:17:08 -0400
commit    f740e6cd0cb5e7468e46831aeb4d9c30e03d5ebc
tree      83d81928de5d3ebc3b325f633dddd77a0e6ad662 /kernel/stop_machine.c
parent    fd7355ba1e936487f5aae6fc058c6cb300e44a64
stop_machine: implement stop_machine_from_inactive_cpu()
Currently, mtrr wants stop_machine functionality while a CPU is being
brought up. As stop_machine() requires the calling CPU to be active,
mtrr implements its own stop_machine using stop_one_cpu() on each
online CPU. This not only unnecessarily duplicates complex logic but
also introduces the possibility of deadlock when it races against the
generic stop_machine().
This patch implements stop_machine_from_inactive_cpu() to serve such
use cases. Its functionality is basically the same as stop_machine()'s;
however, it is meant to be called from a CPU which isn't active, and it
doesn't depend on working scheduling on the calling CPU.
This is achieved by using busy loops for synchronization and by
open-coding the stop_cpus queuing and waiting, with a direct invocation
of fn() on the local CPU in between.
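For illustration, a bringup-path caller might look like the minimal sketch
below. The handler and call site names are hypothetical, and the use of the
x86-specific cpu_callout_mask (which also covers the CPU being brought up)
mirrors the mtrr conversion later in this series; only
stop_machine_from_inactive_cpu() itself is introduced by this patch.

#include <linux/stop_machine.h>

/*
 * Hypothetical rendezvous handler: runs with IRQs disabled on every
 * CPU in the mask passed below while the machine is stopped; the
 * remaining online CPUs just spin in stop_machine_cpu_stop().
 */
static int example_rendezvous(void *data)
{
	/* e.g. program identical MTRR state on this CPU */
	return 0;
}

/*
 * Hypothetical call site on a CPU that is coming online: it can run
 * code but is not yet cpu_active() and has no context to sleep in,
 * so plain stop_machine() must not be used here.
 */
static int example_bringup_sync(void)
{
	return stop_machine_from_inactive_cpu(example_rendezvous,
					      NULL, cpu_callout_mask);
}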
Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/20110623182056.982526827@sbsiddha-MOBL3.sc.intel.com
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'kernel/stop_machine.c')
 kernel/stop_machine.c | 62 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 61 insertions(+), 1 deletion(-)
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 4c89ee9fc56b..e8f05b14cd43 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -439,8 +439,15 @@ static int stop_machine_cpu_stop(void *data)
 	struct stop_machine_data *smdata = data;
 	enum stopmachine_state curstate = STOPMACHINE_NONE;
 	int cpu = smp_processor_id(), err = 0;
+	unsigned long flags;
 	bool is_active;
 
+	/*
+	 * When called from stop_machine_from_inactive_cpu(), irq might
+	 * already be disabled.  Save the state and restore it on exit.
+	 */
+	local_save_flags(flags);
+
 	if (!smdata->active_cpus)
 		is_active = cpu == cpumask_first(cpu_online_mask);
 	else
@@ -468,7 +475,7 @@ static int stop_machine_cpu_stop(void *data)
 		}
 	} while (curstate != STOPMACHINE_EXIT);
 
-	local_irq_enable();
+	local_irq_restore(flags);
 	return err;
 }
 
@@ -495,4 +502,57 @@ int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
 }
 EXPORT_SYMBOL_GPL(stop_machine);
 
+/**
+ * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
+ * @fn: the function to run
+ * @data: the data ptr for the @fn()
+ * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
+ *
+ * This is identical to stop_machine() but can be called from a CPU which
+ * is not active.  The local CPU is in the process of hotplug (so no other
+ * CPU hotplug can start) and not marked active and doesn't have enough
+ * context to sleep.
+ *
+ * This function provides stop_machine() functionality for such state by
+ * using busy-wait for synchronization and executing @fn directly for local
+ * CPU.
+ *
+ * CONTEXT:
+ * Local CPU is inactive.  Temporarily stops all active CPUs.
+ *
+ * RETURNS:
+ * 0 if all executions of @fn returned 0, any non zero return value if any
+ * returned non zero.
+ */
+int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+				   const struct cpumask *cpus)
+{
+	struct stop_machine_data smdata = { .fn = fn, .data = data,
+					    .active_cpus = cpus };
+	struct cpu_stop_done done;
+	int ret;
+
+	/* Local CPU must be inactive and CPU hotplug in progress. */
+	BUG_ON(cpu_active(raw_smp_processor_id()));
+	smdata.num_threads = num_active_cpus() + 1;	/* +1 for local */
+
+	/* No proper task established and can't sleep - busy wait for lock. */
+	while (!mutex_trylock(&stop_cpus_mutex))
+		cpu_relax();
+
+	/* Schedule work on other CPUs and execute directly for local CPU */
+	set_state(&smdata, STOPMACHINE_PREPARE);
+	cpu_stop_init_done(&done, num_active_cpus());
+	queue_stop_cpus_work(cpu_active_mask, stop_machine_cpu_stop, &smdata,
+			     &done);
+	ret = stop_machine_cpu_stop(&smdata);
+
+	/* Busy wait for completion. */
+	while (!completion_done(&done.completion))
+		cpu_relax();
+
+	mutex_unlock(&stop_cpus_mutex);
+	return ret ?: done.ret;
+}
+
 #endif	/* CONFIG_STOP_MACHINE */