diff options
author | Mukesh Ojha <mojha@codeaurora.org> | 2018-08-28 02:54:54 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2018-08-31 08:13:03 -0400 |
commit | 6fb86d97207880c1286cd4cb3a7e6a598afbc727 (patch) | |
tree | d06ab3fbcb4e4989afe7dd3adf7c779a5072f237 | |
parent | 5b394b2ddf0347bef56e50c69a58773c94343ff3 (diff) |
cpu/hotplug: Remove skip_onerr field from cpuhp_step structure
When the notifier infrastructure was still in place, `skip_onerr` was used to
avoid calling particular step startup/teardown callbacks in the CPU up/down
rollback path, which made CPU hotplug asymmetric.
As notifiers are gone now after the full state machine conversion, the
`skip_onerr` field is no longer required.
Remove it from the structure and its usage.
Signed-off-by: Mukesh Ojha <mojha@codeaurora.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/1535439294-31426-1-git-send-email-mojha@codeaurora.org
-rw-r--r-- | kernel/cpu.c | 26 |
1 file changed, 4 insertions, 22 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c index ed44d7d34c2d..aa7fe85ad62e 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
@@ -102,8 +102,6 @@ static inline void cpuhp_lock_release(bool bringup) { } | |||
102 | * @name: Name of the step | 102 | * @name: Name of the step |
103 | * @startup: Startup function of the step | 103 | * @startup: Startup function of the step |
104 | * @teardown: Teardown function of the step | 104 | * @teardown: Teardown function of the step |
105 | * @skip_onerr: Do not invoke the functions on error rollback | ||
106 | * Will go away once the notifiers are gone | ||
107 | * @cant_stop: Bringup/teardown can't be stopped at this step | 105 | * @cant_stop: Bringup/teardown can't be stopped at this step |
108 | */ | 106 | */ |
109 | struct cpuhp_step { | 107 | struct cpuhp_step { |
@@ -119,7 +117,6 @@ struct cpuhp_step { | |||
119 | struct hlist_node *node); | 117 | struct hlist_node *node); |
120 | } teardown; | 118 | } teardown; |
121 | struct hlist_head list; | 119 | struct hlist_head list; |
122 | bool skip_onerr; | ||
123 | bool cant_stop; | 120 | bool cant_stop; |
124 | bool multi_instance; | 121 | bool multi_instance; |
125 | }; | 122 | }; |
@@ -550,12 +547,8 @@ static int bringup_cpu(unsigned int cpu) | |||
550 | 547 | ||
551 | static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st) | 548 | static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st) |
552 | { | 549 | { |
553 | for (st->state--; st->state > st->target; st->state--) { | 550 | for (st->state--; st->state > st->target; st->state--) |
554 | struct cpuhp_step *step = cpuhp_get_step(st->state); | 551 | cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); |
555 | |||
556 | if (!step->skip_onerr) | ||
557 | cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); | ||
558 | } | ||
559 | } | 552 | } |
560 | 553 | ||
561 | static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, | 554 | static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, |
@@ -644,12 +637,6 @@ static void cpuhp_thread_fun(unsigned int cpu) | |||
644 | 637 | ||
645 | WARN_ON_ONCE(!cpuhp_is_ap_state(state)); | 638 | WARN_ON_ONCE(!cpuhp_is_ap_state(state)); |
646 | 639 | ||
647 | if (st->rollback) { | ||
648 | struct cpuhp_step *step = cpuhp_get_step(state); | ||
649 | if (step->skip_onerr) | ||
650 | goto next; | ||
651 | } | ||
652 | |||
653 | if (cpuhp_is_atomic_state(state)) { | 640 | if (cpuhp_is_atomic_state(state)) { |
654 | local_irq_disable(); | 641 | local_irq_disable(); |
655 | st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); | 642 | st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); |
@@ -673,7 +660,6 @@ static void cpuhp_thread_fun(unsigned int cpu) | |||
673 | st->should_run = false; | 660 | st->should_run = false; |
674 | } | 661 | } |
675 | 662 | ||
676 | next: | ||
677 | cpuhp_lock_release(bringup); | 663 | cpuhp_lock_release(bringup); |
678 | 664 | ||
679 | if (!st->should_run) | 665 | if (!st->should_run) |
@@ -916,12 +902,8 @@ void cpuhp_report_idle_dead(void) | |||
916 | 902 | ||
917 | static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st) | 903 | static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st) |
918 | { | 904 | { |
919 | for (st->state++; st->state < st->target; st->state++) { | 905 | for (st->state++; st->state < st->target; st->state++) |
920 | struct cpuhp_step *step = cpuhp_get_step(st->state); | 906 | cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); |
921 | |||
922 | if (!step->skip_onerr) | ||
923 | cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); | ||
924 | } | ||
925 | } | 907 | } |
926 | 908 | ||
927 | static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, | 909 | static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, |