author    | Thomas Gleixner <tglx@linutronix.de> | 2016-08-12 13:49:38 -0400
committer | Thomas Gleixner <tglx@linutronix.de> | 2016-09-02 14:05:05 -0400
commit    | a724632ca0c84b494875e9367e07e29472c139ba (patch)
tree      | 6b488a83b33d21f660bae3b0b11589fc5f513c3f /kernel/cpu.c
parent    | 0cb7bf61b1e9f05027de58c80f9b46a714d24e35 (diff)
cpu/hotplug: Rework callback invocation logic
This is preparation for the following patch.
This rework changes the arguments of cpuhp_invoke_callback(): callers now
pass the `state' and a flag selecting whether the `startup' or the
`teardown' callback should be invoked. The function then looks up the
callback itself.
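As a condensed sketch of the reworked invocation path (tracing and the
per-cpu state pointer omitted; the full version is in the diff below):

    static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
                                     bool bringup)
    {
            struct cpuhp_step *step = cpuhp_get_step(state);
            /* The callback is resolved from the state table, not passed in */
            int (*cb)(unsigned int cpu) = bringup ? step->startup : step->teardown;

            return cb ? cb(cpu) : 0;
    }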
The callers are cleaned up as follows:
- cpuhp_issue_call() takes one argument less.
- struct cpuhp_cpu_state (which is used by the hotplug thread) loses its
  callback pointer. Whether a single callback should be invoked is now
  tracked in the `single' variable, and a new `bringup' variable
  distinguishes between the startup and the teardown callback.
- take_cpu_down() needs to start one step earlier. We always get there
  via the CPUHP_TEARDOWN_CPU callback. Before this change, cpuhp_ap_states +
  CPUHP_TEARDOWN_CPU pointed to an empty entry, because TEARDOWN is kept
  in bp_states for exactly this reason. Now that cpuhp_get_step() is used
  to look up the state, it must be skipped explicitly so that its callback
  is not invoked twice, as sketched below.
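Roughly, the resulting sequence in take_cpu_down() then looks like this
(simplified from the corresponding hunk below):

    /* We enter via the CPUHP_TEARDOWN_CPU callback; step back once so
     * cpuhp_get_step() does not hand that state back and run it twice. */
    WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
    st->state--;
    /* Invoke the former CPU_DYING callbacks */
    for (; st->state > target; st->state--)
            cpuhp_invoke_callback(cpu, st->state, false);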
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/1471024183-12666-2-git-send-email-bigeasy@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/cpu.c')
-rw-r--r-- | kernel/cpu.c | 162
1 file changed, 80 insertions(+), 82 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index ec12b726fa6f..d36d8e0abfb8 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -37,8 +37,9 @@
  * @thread: Pointer to the hotplug thread
  * @should_run: Thread should execute
  * @rollback: Perform a rollback
- * @cb_stat: The state for a single callback (install/uninstall)
- * @cb: Single callback function (install/uninstall)
+ * @single: Single callback invocation
+ * @bringup: Single callback bringup or teardown selector
+ * @cb_state: The state for a single callback (install/uninstall)
  * @result: Result of the operation
  * @done: Signal completion to the issuer of the task
  */
@@ -49,8 +50,9 @@ struct cpuhp_cpu_state {
 	struct task_struct *thread;
 	bool should_run;
 	bool rollback;
+	bool single;
+	bool bringup;
 	enum cpuhp_state cb_state;
-	int (*cb)(unsigned int cpu);
 	int result;
 	struct completion done;
 #endif
@@ -79,24 +81,43 @@ static DEFINE_MUTEX(cpuhp_state_mutex);
 static struct cpuhp_step cpuhp_bp_states[];
 static struct cpuhp_step cpuhp_ap_states[];
 
+static bool cpuhp_is_ap_state(enum cpuhp_state state)
+{
+	/*
+	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
+	 * purposes as that state is handled explicitly in cpu_down.
+	 */
+	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
+}
+
+static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
+{
+	struct cpuhp_step *sp;
+
+	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
+	return sp + state;
+}
+
 /**
  * cpuhp_invoke_callback _ Invoke the callbacks for a given state
  * @cpu: The cpu for which the callback should be invoked
  * @step: The step in the state machine
- * @cb: The callback function to invoke
+ * @bringup: True if the bringup callback should be invoked
  *
  * Called from cpu hotplug and from the state register machinery
  */
-static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state step,
-				 int (*cb)(unsigned int))
+static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
+				 bool bringup)
 {
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+	struct cpuhp_step *step = cpuhp_get_step(state);
+	int (*cb)(unsigned int cpu) = bringup ? step->startup : step->teardown;
 	int ret = 0;
 
 	if (cb) {
-		trace_cpuhp_enter(cpu, st->target, step, cb);
+		trace_cpuhp_enter(cpu, st->target, state, cb);
 		ret = cb(cpu);
-		trace_cpuhp_exit(cpu, st->state, step, ret);
+		trace_cpuhp_exit(cpu, st->state, state, ret);
 	}
 	return ret;
 }
@@ -371,62 +392,55 @@ static int bringup_cpu(unsigned int cpu)
 /*
  * Hotplug state machine related functions
  */
-static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st,
-			  struct cpuhp_step *steps)
+static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
 {
 	for (st->state++; st->state < st->target; st->state++) {
-		struct cpuhp_step *step = steps + st->state;
+		struct cpuhp_step *step = cpuhp_get_step(st->state);
 
 		if (!step->skip_onerr)
-			cpuhp_invoke_callback(cpu, st->state, step->startup);
+			cpuhp_invoke_callback(cpu, st->state, true);
 	}
 }
 
 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
-				struct cpuhp_step *steps, enum cpuhp_state target)
+				enum cpuhp_state target)
 {
 	enum cpuhp_state prev_state = st->state;
 	int ret = 0;
 
 	for (; st->state > target; st->state--) {
-		struct cpuhp_step *step = steps + st->state;
-
-		ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
+		ret = cpuhp_invoke_callback(cpu, st->state, false);
 		if (ret) {
 			st->target = prev_state;
-			undo_cpu_down(cpu, st, steps);
+			undo_cpu_down(cpu, st);
 			break;
 		}
 	}
 	return ret;
 }
 
-static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st,
-			struct cpuhp_step *steps)
+static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
 {
 	for (st->state--; st->state > st->target; st->state--) {
-		struct cpuhp_step *step = steps + st->state;
+		struct cpuhp_step *step = cpuhp_get_step(st->state);
 
 		if (!step->skip_onerr)
-			cpuhp_invoke_callback(cpu, st->state, step->teardown);
+			cpuhp_invoke_callback(cpu, st->state, false);
 	}
 }
 
 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
-			      struct cpuhp_step *steps, enum cpuhp_state target)
+			      enum cpuhp_state target)
 {
 	enum cpuhp_state prev_state = st->state;
 	int ret = 0;
 
 	while (st->state < target) {
-		struct cpuhp_step *step;
-
 		st->state++;
-		step = steps + st->state;
-		ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
+		ret = cpuhp_invoke_callback(cpu, st->state, true);
 		if (ret) {
 			st->target = prev_state;
-			undo_cpu_up(cpu, st, steps);
+			undo_cpu_up(cpu, st);
 			break;
 		}
 	}
@@ -455,13 +469,13 @@ static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
 {
 	enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);
 
-	return cpuhp_down_callbacks(cpu, st, cpuhp_ap_states, target);
+	return cpuhp_down_callbacks(cpu, st, target);
 }
 
 /* Execute the online startup callbacks. Used to be CPU_ONLINE */
 static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
 {
-	return cpuhp_up_callbacks(cpu, st, cpuhp_ap_states, st->target);
+	return cpuhp_up_callbacks(cpu, st, st->target);
 }
 
 /*
@@ -484,18 +498,20 @@ static void cpuhp_thread_fun(unsigned int cpu)
 	st->should_run = false;
 
 	/* Single callback invocation for [un]install ? */
-	if (st->cb) {
+	if (st->single) {
 		if (st->cb_state < CPUHP_AP_ONLINE) {
 			local_irq_disable();
-			ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
+			ret = cpuhp_invoke_callback(cpu, st->cb_state,
+						    st->bringup);
 			local_irq_enable();
 		} else {
-			ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
+			ret = cpuhp_invoke_callback(cpu, st->cb_state,
+						    st->bringup);
 		}
 	} else if (st->rollback) {
 		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
 
-		undo_cpu_down(cpu, st, cpuhp_ap_states);
+		undo_cpu_down(cpu, st);
 		/*
 		 * This is a momentary workaround to keep the notifier users
 		 * happy. Will go away once we got rid of the notifiers.
@@ -517,8 +533,8 @@ static void cpuhp_thread_fun(unsigned int cpu)
 }
 
 /* Invoke a single callback on a remote cpu */
-static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state,
-				    int (*cb)(unsigned int))
+static int
+cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup)
 {
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 
@@ -530,10 +546,12 @@ static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state,
 	 * we invoke the thread function directly.
 	 */
 	if (!st->thread)
-		return cpuhp_invoke_callback(cpu, state, cb);
+		return cpuhp_invoke_callback(cpu, state, bringup);
 
 	st->cb_state = state;
-	st->cb = cb;
+	st->single = true;
+	st->bringup = bringup;
+
 	/*
 	 * Make sure the above stores are visible before should_run becomes
 	 * true. Paired with the mb() above in cpuhp_thread_fun()
@@ -549,7 +567,7 @@ static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state,
 static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
 {
 	st->result = 0;
-	st->cb = NULL;
+	st->single = false;
 	/*
 	 * Make sure the above stores are visible before should_run becomes
 	 * true. Paired with the mb() above in cpuhp_thread_fun()
@@ -700,12 +718,16 @@ static int take_cpu_down(void *_param)
 	if (err < 0)
 		return err;
 
+	/*
+	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
+	 * do this step again.
+	 */
+	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
+	st->state--;
 	/* Invoke the former CPU_DYING callbacks */
-	for (; st->state > target; st->state--) {
-		struct cpuhp_step *step = cpuhp_ap_states + st->state;
+	for (; st->state > target; st->state--)
+		cpuhp_invoke_callback(cpu, st->state, false);
 
-		cpuhp_invoke_callback(cpu, st->state, step->teardown);
-	}
 	/* Give up timekeeping duties */
 	tick_handover_do_timer();
 	/* Park the stopper thread */
@@ -844,7 +866,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
 	 * to do the further cleanups.
 	 */
-	ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);
+	ret = cpuhp_down_callbacks(cpu, st, target);
 	if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
 		st->target = prev_state;
 		st->rollback = true;
@@ -898,11 +920,8 @@ void notify_cpu_starting(unsigned int cpu)
 	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
 
 	while (st->state < target) {
-		struct cpuhp_step *step;
-
 		st->state++;
-		step = cpuhp_ap_states + st->state;
-		cpuhp_invoke_callback(cpu, st->state, step->startup);
+		cpuhp_invoke_callback(cpu, st->state, true);
 	}
 }
 
@@ -987,7 +1006,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
 	 * responsible for bringing it up to the target state.
 	 */
 	target = min((int)target, CPUHP_BRINGUP_CPU);
-	ret = cpuhp_up_callbacks(cpu, st, cpuhp_bp_states, target);
+	ret = cpuhp_up_callbacks(cpu, st, target);
 out:
 	cpu_hotplug_done();
 	return ret;
@@ -1364,23 +1383,6 @@ static int cpuhp_cb_check(enum cpuhp_state state)
 	return 0;
 }
 
-static bool cpuhp_is_ap_state(enum cpuhp_state state)
-{
-	/*
-	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
-	 * purposes as that state is handled explicitely in cpu_down.
-	 */
-	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
-}
-
-static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
-{
-	struct cpuhp_step *sp;
-
-	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
-	return sp + state;
-}
-
 static void cpuhp_store_callbacks(enum cpuhp_state state,
 				  const char *name,
 				  int (*startup)(unsigned int cpu),
@@ -1406,12 +1408,12 @@ static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
  * Call the startup/teardown function for a step either on the AP or
  * on the current CPU.
  */
-static int cpuhp_issue_call(int cpu, enum cpuhp_state state,
-			    int (*cb)(unsigned int), bool bringup)
+static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup)
 {
+	struct cpuhp_step *sp = cpuhp_get_step(state);
 	int ret;
 
-	if (!cb)
+	if ((bringup && !sp->startup) || (!bringup && !sp->teardown))
 		return 0;
 	/*
 	 * The non AP bound callbacks can fail on bringup. On teardown
@@ -1419,11 +1421,11 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state,
 	 */
 #ifdef CONFIG_SMP
 	if (cpuhp_is_ap_state(state))
-		ret = cpuhp_invoke_ap_callback(cpu, state, cb);
+		ret = cpuhp_invoke_ap_callback(cpu, state, bringup);
 	else
-		ret = cpuhp_invoke_callback(cpu, state, cb);
+		ret = cpuhp_invoke_callback(cpu, state, bringup);
 #else
-	ret = cpuhp_invoke_callback(cpu, state, cb);
+	ret = cpuhp_invoke_callback(cpu, state, bringup);
 #endif
 	BUG_ON(ret && !bringup);
 	return ret;
@@ -1434,14 +1436,10 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state,
  *
  * Note: The teardown callbacks for rollback are not allowed to fail!
  */
-static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
-				   int (*teardown)(unsigned int cpu))
+static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state)
 {
 	int cpu;
 
-	if (!teardown)
-		return;
-
 	/* Roll back the already executed steps on the other cpus */
 	for_each_present_cpu(cpu) {
 		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
@@ -1452,7 +1450,7 @@ static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
 
 		/* Did we invoke the startup call on that cpu ? */
 		if (cpustate >= state)
-			cpuhp_issue_call(cpu, state, teardown, false);
+			cpuhp_issue_call(cpu, state, false);
 	}
 }
 
@@ -1527,9 +1525,10 @@ int __cpuhp_setup_state(enum cpuhp_state state,
 		if (cpustate < state)
 			continue;
 
-		ret = cpuhp_issue_call(cpu, state, startup, true);
+		ret = cpuhp_issue_call(cpu, state, true);
 		if (ret) {
-			cpuhp_rollback_install(cpu, state, teardown);
+			if (teardown)
+				cpuhp_rollback_install(cpu, state);
 			cpuhp_store_callbacks(state, NULL, NULL, NULL);
 			goto out;
 		}
@@ -1553,14 +1552,13 @@ EXPORT_SYMBOL(__cpuhp_setup_state);
  */
 void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
 {
-	int (*teardown)(unsigned int cpu) = cpuhp_get_teardown_cb(state);
 	int cpu;
 
 	BUG_ON(cpuhp_cb_check(state));
 
 	get_online_cpus();
 
-	if (!invoke || !teardown)
+	if (!invoke || !cpuhp_get_teardown_cb(state))
 		goto remove;
 
 	/*
@@ -1573,7 +1571,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
 		int cpustate = st->state;
 
 		if (cpustate >= state)
-			cpuhp_issue_call(cpu, state, teardown, false);
+			cpuhp_issue_call(cpu, state, false);
 	}
 remove:
 	cpuhp_store_callbacks(state, NULL, NULL, NULL);