Diffstat (limited to 'kernel/cpu.c')
-rw-r--r-- | kernel/cpu.c | 524 |
1 file changed, 338 insertions, 186 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 341bf80f80bd..29de1a9352c0 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -23,6 +23,8 @@
23 | #include <linux/tick.h> | 23 | #include <linux/tick.h> |
24 | #include <linux/irq.h> | 24 | #include <linux/irq.h> |
25 | #include <linux/smpboot.h> | 25 | #include <linux/smpboot.h> |
26 | #include <linux/relay.h> | ||
27 | #include <linux/slab.h> | ||
26 | 28 | ||
27 | #include <trace/events/power.h> | 29 | #include <trace/events/power.h> |
28 | #define CREATE_TRACE_POINTS | 30 | #define CREATE_TRACE_POINTS |
@@ -37,8 +39,9 @@
37 | * @thread: Pointer to the hotplug thread | 39 | * @thread: Pointer to the hotplug thread |
38 | * @should_run: Thread should execute | 40 | * @should_run: Thread should execute |
39 | * @rollback: Perform a rollback | 41 | * @rollback: Perform a rollback |
40 | * @cb_stat: The state for a single callback (install/uninstall) | 42 | * @single: Single callback invocation |
41 | * @cb: Single callback function (install/uninstall) | 43 | * @bringup: Single callback bringup or teardown selector |
44 | * @cb_state: The state for a single callback (install/uninstall) | ||
42 | * @result: Result of the operation | 45 | * @result: Result of the operation |
43 | * @done: Signal completion to the issuer of the task | 46 | * @done: Signal completion to the issuer of the task |
44 | */ | 47 | */ |
@@ -49,8 +52,10 @@ struct cpuhp_cpu_state {
49 | struct task_struct *thread; | 52 | struct task_struct *thread; |
50 | bool should_run; | 53 | bool should_run; |
51 | bool rollback; | 54 | bool rollback; |
55 | bool single; | ||
56 | bool bringup; | ||
57 | struct hlist_node *node; | ||
52 | enum cpuhp_state cb_state; | 58 | enum cpuhp_state cb_state; |
53 | int (*cb)(unsigned int cpu); | ||
54 | int result; | 59 | int result; |
55 | struct completion done; | 60 | struct completion done; |
56 | #endif | 61 | #endif |
@@ -68,35 +73,103 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
68 | * @cant_stop: Bringup/teardown can't be stopped at this step | 73 | * @cant_stop: Bringup/teardown can't be stopped at this step |
69 | */ | 74 | */ |
70 | struct cpuhp_step { | 75 | struct cpuhp_step { |
71 | const char *name; | 76 | const char *name; |
72 | int (*startup)(unsigned int cpu); | 77 | union { |
73 | int (*teardown)(unsigned int cpu); | 78 | int (*single)(unsigned int cpu); |
74 | bool skip_onerr; | 79 | int (*multi)(unsigned int cpu, |
75 | bool cant_stop; | 80 | struct hlist_node *node); |
81 | } startup; | ||
82 | union { | ||
83 | int (*single)(unsigned int cpu); | ||
84 | int (*multi)(unsigned int cpu, | ||
85 | struct hlist_node *node); | ||
86 | } teardown; | ||
87 | struct hlist_head list; | ||
88 | bool skip_onerr; | ||
89 | bool cant_stop; | ||
90 | bool multi_instance; | ||
76 | }; | 91 | }; |
77 | 92 | ||
78 | static DEFINE_MUTEX(cpuhp_state_mutex); | 93 | static DEFINE_MUTEX(cpuhp_state_mutex); |
79 | static struct cpuhp_step cpuhp_bp_states[]; | 94 | static struct cpuhp_step cpuhp_bp_states[]; |
80 | static struct cpuhp_step cpuhp_ap_states[]; | 95 | static struct cpuhp_step cpuhp_ap_states[]; |
81 | 96 | ||
97 | static bool cpuhp_is_ap_state(enum cpuhp_state state) | ||
98 | { | ||
99 | /* | ||
100 | * The extra check for CPUHP_TEARDOWN_CPU is only for documentation | ||
101 | * purposes as that state is handled explicitly in cpu_down. | ||
102 | */ | ||
103 | return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU; | ||
104 | } | ||
105 | |||
106 | static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state) | ||
107 | { | ||
108 | struct cpuhp_step *sp; | ||
109 | |||
110 | sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states; | ||
111 | return sp + state; | ||
112 | } | ||
113 | |||
82 | /** | 114 | /** |
84 | * cpuhp_invoke_callback - Invoke the callbacks for a given state | 115 | * cpuhp_invoke_callback - Invoke the callbacks for a given state |
84 | * @cpu: The cpu for which the callback should be invoked | 116 | * @cpu: The cpu for which the callback should be invoked |
85 | * @step: The step in the state machine | 117 | * @step: The step in the state machine |
86 | * @cb: The callback function to invoke | 118 | * @bringup: True if the bringup callback should be invoked |
87 | * | 119 | * |
88 | * Called from cpu hotplug and from the state register machinery | 120 | * Called from cpu hotplug and from the state register machinery. |
89 | */ | 121 | */ |
90 | static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state step, | 122 | static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state, |
91 | int (*cb)(unsigned int)) | 123 | bool bringup, struct hlist_node *node) |
92 | { | 124 | { |
93 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); | 125 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
94 | int ret = 0; | 126 | struct cpuhp_step *step = cpuhp_get_step(state); |
95 | 127 | int (*cbm)(unsigned int cpu, struct hlist_node *node); | |
96 | if (cb) { | 128 | int (*cb)(unsigned int cpu); |
97 | trace_cpuhp_enter(cpu, st->target, step, cb); | 129 | int ret, cnt; |
130 | |||
131 | if (!step->multi_instance) { | ||
132 | cb = bringup ? step->startup.single : step->teardown.single; | ||
133 | if (!cb) | ||
134 | return 0; | ||
135 | trace_cpuhp_enter(cpu, st->target, state, cb); | ||
98 | ret = cb(cpu); | 136 | ret = cb(cpu); |
99 | trace_cpuhp_exit(cpu, st->state, step, ret); | 137 | trace_cpuhp_exit(cpu, st->state, state, ret); |
138 | return ret; | ||
139 | } | ||
140 | cbm = bringup ? step->startup.multi : step->teardown.multi; | ||
141 | if (!cbm) | ||
142 | return 0; | ||
143 | |||
144 | /* Single invocation for instance add/remove */ | ||
145 | if (node) { | ||
146 | trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); | ||
147 | ret = cbm(cpu, node); | ||
148 | trace_cpuhp_exit(cpu, st->state, state, ret); | ||
149 | return ret; | ||
150 | } | ||
151 | |||
152 | /* State transition. Invoke on all instances */ | ||
153 | cnt = 0; | ||
154 | hlist_for_each(node, &step->list) { | ||
155 | trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); | ||
156 | ret = cbm(cpu, node); | ||
157 | trace_cpuhp_exit(cpu, st->state, state, ret); | ||
158 | if (ret) | ||
159 | goto err; | ||
160 | cnt++; | ||
161 | } | ||
162 | return 0; | ||
163 | err: | ||
164 | /* Rollback the instances if one failed */ | ||
165 | cbm = !bringup ? step->startup.multi : step->teardown.multi; | ||
166 | if (!cbm) | ||
167 | return ret; | ||
168 | |||
169 | hlist_for_each(node, &step->list) { | ||
170 | if (!cnt--) | ||
171 | break; | ||
172 | cbm(cpu, node); | ||
100 | } | 173 | } |
101 | return ret; | 174 | return ret; |
102 | } | 175 | } |
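The startup/teardown unions above let one step-table entry carry either a plain per-cpu callback or a per-instance callback that also receives the instance's hlist_node. A minimal sketch of the two initializer forms (my_online, my_inst_online and the step definitions are hypothetical; in-tree code installs callbacks through the registration API further down rather than defining steps by hand):

	/* Single-instance form: one callback per state and cpu. */
	static int my_online(unsigned int cpu)
	{
		return 0;	/* per-cpu bringup work */
	}

	/* Multi-instance form: invoked once per registered node. */
	static int my_inst_online(unsigned int cpu, struct hlist_node *node)
	{
		/* container_of(node, ...) recovers the owning object */
		return 0;
	}

	static struct cpuhp_step example_single = {
		.name		= "example:online",
		.startup.single	= my_online,
	};

	static struct cpuhp_step example_multi = {
		.name		= "example-multi:online",
		.startup.multi	= my_inst_online,
		.multi_instance	= true,
	};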
@@ -155,7 +228,7 @@ static struct {
155 | .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq), | 228 | .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq), |
156 | .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), | 229 | .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), |
157 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 230 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
158 | .dep_map = {.name = "cpu_hotplug.lock" }, | 231 | .dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map), |
159 | #endif | 232 | #endif |
160 | }; | 233 | }; |
161 | 234 | ||
@@ -260,10 +333,17 @@ void cpu_hotplug_disable(void)
260 | } | 333 | } |
261 | EXPORT_SYMBOL_GPL(cpu_hotplug_disable); | 334 | EXPORT_SYMBOL_GPL(cpu_hotplug_disable); |
262 | 335 | ||
336 | static void __cpu_hotplug_enable(void) | ||
337 | { | ||
338 | if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n")) | ||
339 | return; | ||
340 | cpu_hotplug_disabled--; | ||
341 | } | ||
342 | |||
263 | void cpu_hotplug_enable(void) | 343 | void cpu_hotplug_enable(void) |
264 | { | 344 | { |
265 | cpu_maps_update_begin(); | 345 | cpu_maps_update_begin(); |
266 | WARN_ON(--cpu_hotplug_disabled < 0); | 346 | __cpu_hotplug_enable(); |
267 | cpu_maps_update_done(); | 347 | cpu_maps_update_done(); |
268 | } | 348 | } |
269 | EXPORT_SYMBOL_GPL(cpu_hotplug_enable); | 349 | EXPORT_SYMBOL_GPL(cpu_hotplug_enable); |
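Factoring out __cpu_hotplug_enable() puts the WARN_ONCE check next to the decrement, so an unbalanced enable now warns instead of driving cpu_hotplug_disabled negative. The intended pairing for callers of the exported API is unchanged (a minimal sketch):

	cpu_hotplug_disable();	/* pins the hotplug state */
	/* ... work that must not race with cpu hotplug ... */
	cpu_hotplug_enable();	/* balanced decrement; an extra call only warns */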
@@ -330,12 +410,6 @@ static int notify_online(unsigned int cpu)
330 | return 0; | 410 | return 0; |
331 | } | 411 | } |
332 | 412 | ||
333 | static int notify_starting(unsigned int cpu) | ||
334 | { | ||
335 | cpu_notify(CPU_STARTING, cpu); | ||
336 | return 0; | ||
337 | } | ||
338 | |||
339 | static int bringup_wait_for_ap(unsigned int cpu) | 413 | static int bringup_wait_for_ap(unsigned int cpu) |
340 | { | 414 | { |
341 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); | 415 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
@@ -349,8 +423,16 @@ static int bringup_cpu(unsigned int cpu)
349 | struct task_struct *idle = idle_thread_get(cpu); | 423 | struct task_struct *idle = idle_thread_get(cpu); |
350 | int ret; | 424 | int ret; |
351 | 425 | ||
426 | /* | ||
427 | * Some architectures have to walk the irq descriptors to | ||
428 | * setup the vector space for the cpu which comes online. | ||
429 | * Prevent irq alloc/free across the bringup. | ||
430 | */ | ||
431 | irq_lock_sparse(); | ||
432 | |||
352 | /* Arch-specific enabling code. */ | 433 | /* Arch-specific enabling code. */ |
353 | ret = __cpu_up(cpu, idle); | 434 | ret = __cpu_up(cpu, idle); |
435 | irq_unlock_sparse(); | ||
354 | if (ret) { | 436 | if (ret) { |
355 | cpu_notify(CPU_UP_CANCELED, cpu); | 437 | cpu_notify(CPU_UP_CANCELED, cpu); |
356 | return ret; | 438 | return ret; |
@@ -363,62 +445,55 @@ static int bringup_cpu(unsigned int cpu)
363 | /* | 445 | /* |
364 | * Hotplug state machine related functions | 446 | * Hotplug state machine related functions |
365 | */ | 447 | */ |
366 | static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st, | 448 | static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st) |
367 | struct cpuhp_step *steps) | ||
368 | { | 449 | { |
369 | for (st->state++; st->state < st->target; st->state++) { | 450 | for (st->state++; st->state < st->target; st->state++) { |
370 | struct cpuhp_step *step = steps + st->state; | 451 | struct cpuhp_step *step = cpuhp_get_step(st->state); |
371 | 452 | ||
372 | if (!step->skip_onerr) | 453 | if (!step->skip_onerr) |
373 | cpuhp_invoke_callback(cpu, st->state, step->startup); | 454 | cpuhp_invoke_callback(cpu, st->state, true, NULL); |
374 | } | 455 | } |
375 | } | 456 | } |
376 | 457 | ||
377 | static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, | 458 | static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, |
378 | struct cpuhp_step *steps, enum cpuhp_state target) | 459 | enum cpuhp_state target) |
379 | { | 460 | { |
380 | enum cpuhp_state prev_state = st->state; | 461 | enum cpuhp_state prev_state = st->state; |
381 | int ret = 0; | 462 | int ret = 0; |
382 | 463 | ||
383 | for (; st->state > target; st->state--) { | 464 | for (; st->state > target; st->state--) { |
384 | struct cpuhp_step *step = steps + st->state; | 465 | ret = cpuhp_invoke_callback(cpu, st->state, false, NULL); |
385 | |||
386 | ret = cpuhp_invoke_callback(cpu, st->state, step->teardown); | ||
387 | if (ret) { | 466 | if (ret) { |
388 | st->target = prev_state; | 467 | st->target = prev_state; |
389 | undo_cpu_down(cpu, st, steps); | 468 | undo_cpu_down(cpu, st); |
390 | break; | 469 | break; |
391 | } | 470 | } |
392 | } | 471 | } |
393 | return ret; | 472 | return ret; |
394 | } | 473 | } |
395 | 474 | ||
396 | static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st, | 475 | static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st) |
397 | struct cpuhp_step *steps) | ||
398 | { | 476 | { |
399 | for (st->state--; st->state > st->target; st->state--) { | 477 | for (st->state--; st->state > st->target; st->state--) { |
400 | struct cpuhp_step *step = steps + st->state; | 478 | struct cpuhp_step *step = cpuhp_get_step(st->state); |
401 | 479 | ||
402 | if (!step->skip_onerr) | 480 | if (!step->skip_onerr) |
403 | cpuhp_invoke_callback(cpu, st->state, step->teardown); | 481 | cpuhp_invoke_callback(cpu, st->state, false, NULL); |
404 | } | 482 | } |
405 | } | 483 | } |
406 | 484 | ||
407 | static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, | 485 | static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, |
408 | struct cpuhp_step *steps, enum cpuhp_state target) | 486 | enum cpuhp_state target) |
409 | { | 487 | { |
410 | enum cpuhp_state prev_state = st->state; | 488 | enum cpuhp_state prev_state = st->state; |
411 | int ret = 0; | 489 | int ret = 0; |
412 | 490 | ||
413 | while (st->state < target) { | 491 | while (st->state < target) { |
414 | struct cpuhp_step *step; | ||
415 | |||
416 | st->state++; | 492 | st->state++; |
417 | step = steps + st->state; | 493 | ret = cpuhp_invoke_callback(cpu, st->state, true, NULL); |
418 | ret = cpuhp_invoke_callback(cpu, st->state, step->startup); | ||
419 | if (ret) { | 494 | if (ret) { |
420 | st->target = prev_state; | 495 | st->target = prev_state; |
421 | undo_cpu_up(cpu, st, steps); | 496 | undo_cpu_up(cpu, st); |
422 | break; | 497 | break; |
423 | } | 498 | } |
424 | } | 499 | } |
@@ -447,13 +522,13 @@ static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
447 | { | 522 | { |
448 | enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU); | 523 | enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU); |
449 | 524 | ||
450 | return cpuhp_down_callbacks(cpu, st, cpuhp_ap_states, target); | 525 | return cpuhp_down_callbacks(cpu, st, target); |
451 | } | 526 | } |
452 | 527 | ||
453 | /* Execute the online startup callbacks. Used to be CPU_ONLINE */ | 528 | /* Execute the online startup callbacks. Used to be CPU_ONLINE */ |
454 | static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st) | 529 | static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st) |
455 | { | 530 | { |
456 | return cpuhp_up_callbacks(cpu, st, cpuhp_ap_states, st->target); | 531 | return cpuhp_up_callbacks(cpu, st, st->target); |
457 | } | 532 | } |
458 | 533 | ||
459 | /* | 534 | /* |
@@ -476,18 +551,20 @@ static void cpuhp_thread_fun(unsigned int cpu)
476 | st->should_run = false; | 551 | st->should_run = false; |
477 | 552 | ||
478 | /* Single callback invocation for [un]install ? */ | 553 | /* Single callback invocation for [un]install ? */ |
479 | if (st->cb) { | 554 | if (st->single) { |
480 | if (st->cb_state < CPUHP_AP_ONLINE) { | 555 | if (st->cb_state < CPUHP_AP_ONLINE) { |
481 | local_irq_disable(); | 556 | local_irq_disable(); |
482 | ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb); | 557 | ret = cpuhp_invoke_callback(cpu, st->cb_state, |
558 | st->bringup, st->node); | ||
483 | local_irq_enable(); | 559 | local_irq_enable(); |
484 | } else { | 560 | } else { |
485 | ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb); | 561 | ret = cpuhp_invoke_callback(cpu, st->cb_state, |
562 | st->bringup, st->node); | ||
486 | } | 563 | } |
487 | } else if (st->rollback) { | 564 | } else if (st->rollback) { |
488 | BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE); | 565 | BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE); |
489 | 566 | ||
490 | undo_cpu_down(cpu, st, cpuhp_ap_states); | 567 | undo_cpu_down(cpu, st); |
491 | /* | 568 | /* |
492 | * This is a momentary workaround to keep the notifier users | 569 | * This is a momentary workaround to keep the notifier users |
493 | * happy. Will go away once we get rid of the notifiers. | 570 |
@@ -509,8 +586,9 @@ static void cpuhp_thread_fun(unsigned int cpu)
509 | } | 586 | } |
510 | 587 | ||
511 | /* Invoke a single callback on a remote cpu */ | 588 | /* Invoke a single callback on a remote cpu */ |
512 | static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, | 589 | static int |
513 | int (*cb)(unsigned int)) | 590 | cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup, |
591 | struct hlist_node *node) | ||
514 | { | 592 | { |
515 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); | 593 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
516 | 594 | ||
@@ -522,10 +600,13 @@ static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state,
522 | * we invoke the thread function directly. | 600 | * we invoke the thread function directly. |
523 | */ | 601 | */ |
524 | if (!st->thread) | 602 | if (!st->thread) |
525 | return cpuhp_invoke_callback(cpu, state, cb); | 603 | return cpuhp_invoke_callback(cpu, state, bringup, node); |
526 | 604 | ||
527 | st->cb_state = state; | 605 | st->cb_state = state; |
528 | st->cb = cb; | 606 | st->single = true; |
607 | st->bringup = bringup; | ||
608 | st->node = node; | ||
609 | |||
529 | /* | 610 | /* |
530 | * Make sure the above stores are visible before should_run becomes | 611 | * Make sure the above stores are visible before should_run becomes |
531 | * true. Paired with the mb() above in cpuhp_thread_fun() | 612 | * true. Paired with the mb() above in cpuhp_thread_fun() |
@@ -541,7 +622,7 @@ static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state,
541 | static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st) | 622 | static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st) |
542 | { | 623 | { |
543 | st->result = 0; | 624 | st->result = 0; |
544 | st->cb = NULL; | 625 | st->single = false; |
545 | /* | 626 | /* |
546 | * Make sure the above stores are visible before should_run becomes | 627 | * Make sure the above stores are visible before should_run becomes |
547 | * true. Paired with the mb() above in cpuhp_thread_fun() | 628 | * true. Paired with the mb() above in cpuhp_thread_fun() |
@@ -674,12 +755,6 @@ static int notify_down_prepare(unsigned int cpu)
674 | return err; | 755 | return err; |
675 | } | 756 | } |
676 | 757 | ||
677 | static int notify_dying(unsigned int cpu) | ||
678 | { | ||
679 | cpu_notify(CPU_DYING, cpu); | ||
680 | return 0; | ||
681 | } | ||
682 | |||
683 | /* Take this CPU down. */ | 758 | /* Take this CPU down. */ |
684 | static int take_cpu_down(void *_param) | 759 | static int take_cpu_down(void *_param) |
685 | { | 760 | { |
@@ -692,12 +767,16 @@ static int take_cpu_down(void *_param)
692 | if (err < 0) | 767 | if (err < 0) |
693 | return err; | 768 | return err; |
694 | 769 | ||
770 | /* | ||
771 | * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not | ||
772 | * do this step again. | ||
773 | */ | ||
774 | WARN_ON(st->state != CPUHP_TEARDOWN_CPU); | ||
775 | st->state--; | ||
695 | /* Invoke the former CPU_DYING callbacks */ | 776 | /* Invoke the former CPU_DYING callbacks */ |
696 | for (; st->state > target; st->state--) { | 777 | for (; st->state > target; st->state--) |
697 | struct cpuhp_step *step = cpuhp_ap_states + st->state; | 778 | cpuhp_invoke_callback(cpu, st->state, false, NULL); |
698 | 779 | ||
699 | cpuhp_invoke_callback(cpu, st->state, step->teardown); | ||
700 | } | ||
701 | /* Give up timekeeping duties */ | 780 | /* Give up timekeeping duties */ |
702 | tick_handover_do_timer(); | 781 | tick_handover_do_timer(); |
703 | /* Park the stopper thread */ | 782 | /* Park the stopper thread */ |
@@ -734,7 +813,7 @@ static int takedown_cpu(unsigned int cpu)
734 | BUG_ON(cpu_online(cpu)); | 813 | BUG_ON(cpu_online(cpu)); |
735 | 814 | ||
736 | /* | 815 | /* |
737 | * The migration_call() CPU_DYING callback will have removed all | 816 | * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all |
738 | * runnable tasks from the cpu, there's only the idle task left now | 817 | * runnable tasks from the cpu, there's only the idle task left now |
739 | * that the migration thread is done doing the stop_machine thing. | 818 | * that the migration thread is done doing the stop_machine thing. |
740 | * | 819 | * |
@@ -787,7 +866,6 @@ void cpuhp_report_idle_dead(void)
787 | #define notify_down_prepare NULL | 866 | #define notify_down_prepare NULL |
788 | #define takedown_cpu NULL | 867 | #define takedown_cpu NULL |
789 | #define notify_dead NULL | 868 | #define notify_dead NULL |
790 | #define notify_dying NULL | ||
791 | #endif | 869 | #endif |
792 | 870 | ||
793 | #ifdef CONFIG_HOTPLUG_CPU | 871 | #ifdef CONFIG_HOTPLUG_CPU |
@@ -836,7 +914,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
836 | * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need | 914 | * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need |
837 | * to do the further cleanups. | 915 | * to do the further cleanups. |
838 | */ | 916 | */ |
839 | ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target); | 917 | ret = cpuhp_down_callbacks(cpu, st, target); |
840 | if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) { | 918 | if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) { |
841 | st->target = prev_state; | 919 | st->target = prev_state; |
842 | st->rollback = true; | 920 | st->rollback = true; |
@@ -877,10 +955,9 @@ EXPORT_SYMBOL(cpu_down);
877 | #endif /*CONFIG_HOTPLUG_CPU*/ | 955 | #endif /*CONFIG_HOTPLUG_CPU*/ |
878 | 956 | ||
879 | /** | 957 | /** |
880 | * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers | 958 | * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU |
881 | * @cpu: cpu that just started | 959 | * @cpu: cpu that just started |
882 | * | 960 | * |
883 | * This function calls the cpu_chain notifiers with CPU_STARTING. | ||
884 | * It must be called by the arch code on the new cpu, before the new cpu | 961 | * It must be called by the arch code on the new cpu, before the new cpu |
885 | * enables interrupts and before the "boot" cpu returns from __cpu_up(). | 962 | * enables interrupts and before the "boot" cpu returns from __cpu_up(). |
886 | */ | 963 | */ |
@@ -889,12 +966,10 @@ void notify_cpu_starting(unsigned int cpu)
889 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); | 966 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
890 | enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE); | 967 | enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE); |
891 | 968 | ||
969 | rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */ | ||
892 | while (st->state < target) { | 970 | while (st->state < target) { |
893 | struct cpuhp_step *step; | ||
894 | |||
895 | st->state++; | 971 | st->state++; |
896 | step = cpuhp_ap_states + st->state; | 972 | cpuhp_invoke_callback(cpu, st->state, true, NULL); |
897 | cpuhp_invoke_callback(cpu, st->state, step->startup); | ||
898 | } | 973 | } |
899 | } | 974 | } |
900 | 975 | ||
@@ -979,7 +1054,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
979 | * responsible for bringing it up to the target state. | 1054 | * responsible for bringing it up to the target state. |
980 | */ | 1055 | */ |
981 | target = min((int)target, CPUHP_BRINGUP_CPU); | 1056 | target = min((int)target, CPUHP_BRINGUP_CPU); |
982 | ret = cpuhp_up_callbacks(cpu, st, cpuhp_bp_states, target); | 1057 | ret = cpuhp_up_callbacks(cpu, st, target); |
983 | out: | 1058 | out: |
984 | cpu_hotplug_done(); | 1059 | cpu_hotplug_done(); |
985 | return ret; | 1060 | return ret; |
@@ -1024,12 +1099,13 @@ EXPORT_SYMBOL_GPL(cpu_up);
1024 | #ifdef CONFIG_PM_SLEEP_SMP | 1099 | #ifdef CONFIG_PM_SLEEP_SMP |
1025 | static cpumask_var_t frozen_cpus; | 1100 | static cpumask_var_t frozen_cpus; |
1026 | 1101 | ||
1027 | int disable_nonboot_cpus(void) | 1102 | int freeze_secondary_cpus(int primary) |
1028 | { | 1103 | { |
1029 | int cpu, first_cpu, error = 0; | 1104 | int cpu, error = 0; |
1030 | 1105 | ||
1031 | cpu_maps_update_begin(); | 1106 | cpu_maps_update_begin(); |
1032 | first_cpu = cpumask_first(cpu_online_mask); | 1107 | if (!cpu_online(primary)) |
1108 | primary = cpumask_first(cpu_online_mask); | ||
1033 | /* | 1109 | /* |
1034 | * We take down all of the non-boot CPUs in one shot to avoid races | 1110 | * We take down all of the non-boot CPUs in one shot to avoid races |
1035 | * with the userspace trying to use the CPU hotplug at the same time | 1111 | * with the userspace trying to use the CPU hotplug at the same time |
@@ -1038,7 +1114,7 @@ int disable_nonboot_cpus(void)
1038 | 1114 | ||
1039 | pr_info("Disabling non-boot CPUs ...\n"); | 1115 | pr_info("Disabling non-boot CPUs ...\n"); |
1040 | for_each_online_cpu(cpu) { | 1116 | for_each_online_cpu(cpu) { |
1041 | if (cpu == first_cpu) | 1117 | if (cpu == primary) |
1042 | continue; | 1118 | continue; |
1043 | trace_suspend_resume(TPS("CPU_OFF"), cpu, true); | 1119 | trace_suspend_resume(TPS("CPU_OFF"), cpu, true); |
1044 | error = _cpu_down(cpu, 1, CPUHP_OFFLINE); | 1120 | error = _cpu_down(cpu, 1, CPUHP_OFFLINE); |
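disable_nonboot_cpus() is generalized into freeze_secondary_cpus(primary): the caller nominates which CPU stays online, and the first online CPU is used as a fallback when the requested one is already offline. The old entry point presumably survives as a thin header wrapper along these lines (a sketch of the assumed companion include/linux/cpu.h change, not shown in this diff):

	static inline int disable_nonboot_cpus(void)
	{
		/* historical behaviour: keep the boot CPU (CPU 0) online */
		return freeze_secondary_cpus(0);
	}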
@@ -1081,7 +1157,7 @@ void enable_nonboot_cpus(void)
1081 | 1157 | ||
1082 | /* Allow everyone to use the CPU hotplug again */ | 1158 | /* Allow everyone to use the CPU hotplug again */ |
1083 | cpu_maps_update_begin(); | 1159 | cpu_maps_update_begin(); |
1084 | WARN_ON(--cpu_hotplug_disabled < 0); | 1160 | __cpu_hotplug_enable(); |
1085 | if (cpumask_empty(frozen_cpus)) | 1161 | if (cpumask_empty(frozen_cpus)) |
1086 | goto out; | 1162 | goto out; |
1087 | 1163 | ||
@@ -1170,40 +1246,50 @@ core_initcall(cpu_hotplug_pm_sync_init);
1170 | static struct cpuhp_step cpuhp_bp_states[] = { | 1246 | static struct cpuhp_step cpuhp_bp_states[] = { |
1171 | [CPUHP_OFFLINE] = { | 1247 | [CPUHP_OFFLINE] = { |
1172 | .name = "offline", | 1248 | .name = "offline", |
1173 | .startup = NULL, | 1249 | .startup.single = NULL, |
1174 | .teardown = NULL, | 1250 | .teardown.single = NULL, |
1175 | }, | 1251 | }, |
1176 | #ifdef CONFIG_SMP | 1252 | #ifdef CONFIG_SMP |
1177 | [CPUHP_CREATE_THREADS]= { | 1253 | [CPUHP_CREATE_THREADS]= { |
1178 | .name = "threads:create", | 1254 | .name = "threads:prepare", |
1179 | .startup = smpboot_create_threads, | 1255 | .startup.single = smpboot_create_threads, |
1180 | .teardown = NULL, | 1256 | .teardown.single = NULL, |
1181 | .cant_stop = true, | 1257 | .cant_stop = true, |
1182 | }, | 1258 | }, |
1183 | [CPUHP_PERF_PREPARE] = { | 1259 | [CPUHP_PERF_PREPARE] = { |
1184 | .name = "perf prepare", | 1260 | .name = "perf:prepare", |
1185 | .startup = perf_event_init_cpu, | 1261 | .startup.single = perf_event_init_cpu, |
1186 | .teardown = perf_event_exit_cpu, | 1262 | .teardown.single = perf_event_exit_cpu, |
1187 | }, | 1263 | }, |
1188 | [CPUHP_WORKQUEUE_PREP] = { | 1264 | [CPUHP_WORKQUEUE_PREP] = { |
1189 | .name = "workqueue prepare", | 1265 | .name = "workqueue:prepare", |
1190 | .startup = workqueue_prepare_cpu, | 1266 | .startup.single = workqueue_prepare_cpu, |
1191 | .teardown = NULL, | 1267 | .teardown.single = NULL, |
1192 | }, | 1268 | }, |
1193 | [CPUHP_HRTIMERS_PREPARE] = { | 1269 | [CPUHP_HRTIMERS_PREPARE] = { |
1194 | .name = "hrtimers prepare", | 1270 | .name = "hrtimers:prepare", |
1195 | .startup = hrtimers_prepare_cpu, | 1271 | .startup.single = hrtimers_prepare_cpu, |
1196 | .teardown = hrtimers_dead_cpu, | 1272 | .teardown.single = hrtimers_dead_cpu, |
1197 | }, | 1273 | }, |
1198 | [CPUHP_SMPCFD_PREPARE] = { | 1274 | [CPUHP_SMPCFD_PREPARE] = { |
1199 | .name = "SMPCFD prepare", | 1275 | .name = "smpcfd:prepare", |
1200 | .startup = smpcfd_prepare_cpu, | 1276 | .startup.single = smpcfd_prepare_cpu, |
1201 | .teardown = smpcfd_dead_cpu, | 1277 | .teardown.single = smpcfd_dead_cpu, |
1278 | }, | ||
1279 | [CPUHP_RELAY_PREPARE] = { | ||
1280 | .name = "relay:prepare", | ||
1281 | .startup.single = relay_prepare_cpu, | ||
1282 | .teardown.single = NULL, | ||
1283 | }, | ||
1284 | [CPUHP_SLAB_PREPARE] = { | ||
1285 | .name = "slab:prepare", | ||
1286 | .startup.single = slab_prepare_cpu, | ||
1287 | .teardown.single = slab_dead_cpu, | ||
1202 | }, | 1288 | }, |
1203 | [CPUHP_RCUTREE_PREP] = { | 1289 | [CPUHP_RCUTREE_PREP] = { |
1204 | .name = "RCU-tree prepare", | 1290 | .name = "RCU/tree:prepare", |
1205 | .startup = rcutree_prepare_cpu, | 1291 | .startup.single = rcutree_prepare_cpu, |
1206 | .teardown = rcutree_dead_cpu, | 1292 | .teardown.single = rcutree_dead_cpu, |
1207 | }, | 1293 | }, |
1208 | /* | 1294 | /* |
1209 | * Preparatory and dead notifiers. Will be replaced once the notifiers | 1295 | * Preparatory and dead notifiers. Will be replaced once the notifiers |
@@ -1211,8 +1297,8 @@ static struct cpuhp_step cpuhp_bp_states[] = {
1211 | */ | 1297 | */ |
1212 | [CPUHP_NOTIFY_PREPARE] = { | 1298 | [CPUHP_NOTIFY_PREPARE] = { |
1213 | .name = "notify:prepare", | 1299 | .name = "notify:prepare", |
1214 | .startup = notify_prepare, | 1300 | .startup.single = notify_prepare, |
1215 | .teardown = notify_dead, | 1301 | .teardown.single = notify_dead, |
1216 | .skip_onerr = true, | 1302 | .skip_onerr = true, |
1217 | .cant_stop = true, | 1303 | .cant_stop = true, |
1218 | }, | 1304 | }, |
@@ -1222,20 +1308,21 @@ static struct cpuhp_step cpuhp_bp_states[] = {
1222 | * otherwise a RCU stall occurs. | 1308 | * otherwise a RCU stall occurs. |
1223 | */ | 1309 | */ |
1224 | [CPUHP_TIMERS_DEAD] = { | 1310 | [CPUHP_TIMERS_DEAD] = { |
1225 | .name = "timers dead", | 1311 | .name = "timers:dead", |
1226 | .startup = NULL, | 1312 | .startup.single = NULL, |
1227 | .teardown = timers_dead_cpu, | 1313 | .teardown.single = timers_dead_cpu, |
1228 | }, | 1314 | }, |
1229 | /* Kicks the plugged cpu into life */ | 1315 | /* Kicks the plugged cpu into life */ |
1230 | [CPUHP_BRINGUP_CPU] = { | 1316 | [CPUHP_BRINGUP_CPU] = { |
1231 | .name = "cpu:bringup", | 1317 | .name = "cpu:bringup", |
1232 | .startup = bringup_cpu, | 1318 | .startup.single = bringup_cpu, |
1233 | .teardown = NULL, | 1319 | .teardown.single = NULL, |
1234 | .cant_stop = true, | 1320 | .cant_stop = true, |
1235 | }, | 1321 | }, |
1236 | [CPUHP_AP_SMPCFD_DYING] = { | 1322 | [CPUHP_AP_SMPCFD_DYING] = { |
1237 | .startup = NULL, | 1323 | .name = "smpcfd:dying", |
1238 | .teardown = smpcfd_dying_cpu, | 1324 | .startup.single = NULL, |
1325 | .teardown.single = smpcfd_dying_cpu, | ||
1239 | }, | 1326 | }, |
1240 | /* | 1327 | /* |
1241 | * Handled on control processor until the plugged processor manages | 1328 |
@@ -1243,8 +1330,8 @@ static struct cpuhp_step cpuhp_bp_states[] = {
1243 | */ | 1330 | */ |
1244 | [CPUHP_TEARDOWN_CPU] = { | 1331 | [CPUHP_TEARDOWN_CPU] = { |
1245 | .name = "cpu:teardown", | 1332 | .name = "cpu:teardown", |
1246 | .startup = NULL, | 1333 | .startup.single = NULL, |
1247 | .teardown = takedown_cpu, | 1334 | .teardown.single = takedown_cpu, |
1248 | .cant_stop = true, | 1335 | .cant_stop = true, |
1249 | }, | 1336 | }, |
1250 | #else | 1337 | #else |
@@ -1270,24 +1357,13 @@ static struct cpuhp_step cpuhp_ap_states[] = {
1270 | /* First state is scheduler control. Interrupts are disabled */ | 1357 | /* First state is scheduler control. Interrupts are disabled */ |
1271 | [CPUHP_AP_SCHED_STARTING] = { | 1358 | [CPUHP_AP_SCHED_STARTING] = { |
1272 | .name = "sched:starting", | 1359 | .name = "sched:starting", |
1273 | .startup = sched_cpu_starting, | 1360 | .startup.single = sched_cpu_starting, |
1274 | .teardown = sched_cpu_dying, | 1361 | .teardown.single = sched_cpu_dying, |
1275 | }, | 1362 | }, |
1276 | [CPUHP_AP_RCUTREE_DYING] = { | 1363 | [CPUHP_AP_RCUTREE_DYING] = { |
1277 | .startup = NULL, | 1364 | .name = "RCU/tree:dying", |
1278 | .teardown = rcutree_dying_cpu, | 1365 | .startup.single = NULL, |
1279 | }, | 1366 | .teardown.single = rcutree_dying_cpu, |
1280 | /* | ||
1281 | * Low level startup/teardown notifiers. Run with interrupts | ||
1282 | * disabled. Will be removed once the notifiers are converted to | ||
1283 | * states. | ||
1284 | */ | ||
1285 | [CPUHP_AP_NOTIFY_STARTING] = { | ||
1286 | .name = "notify:starting", | ||
1287 | .startup = notify_starting, | ||
1288 | .teardown = notify_dying, | ||
1289 | .skip_onerr = true, | ||
1290 | .cant_stop = true, | ||
1291 | }, | 1367 | }, |
1292 | /* Entry state on starting. Interrupts enabled from here on. Transient | 1368 | /* Entry state on starting. Interrupts enabled from here on. Transient |
1293 | * state for synchronization */ | 1369 |
@@ -1296,24 +1372,24 @@ static struct cpuhp_step cpuhp_ap_states[] = {
1296 | }, | 1372 | }, |
1297 | /* Handle smpboot threads park/unpark */ | 1373 | /* Handle smpboot threads park/unpark */ |
1298 | [CPUHP_AP_SMPBOOT_THREADS] = { | 1374 | [CPUHP_AP_SMPBOOT_THREADS] = { |
1299 | .name = "smpboot:threads", | 1375 | .name = "smpboot/threads:online", |
1300 | .startup = smpboot_unpark_threads, | 1376 | .startup.single = smpboot_unpark_threads, |
1301 | .teardown = NULL, | 1377 | .teardown.single = NULL, |
1302 | }, | 1378 | }, |
1303 | [CPUHP_AP_PERF_ONLINE] = { | 1379 | [CPUHP_AP_PERF_ONLINE] = { |
1304 | .name = "perf online", | 1380 | .name = "perf:online", |
1305 | .startup = perf_event_init_cpu, | 1381 | .startup.single = perf_event_init_cpu, |
1306 | .teardown = perf_event_exit_cpu, | 1382 | .teardown.single = perf_event_exit_cpu, |
1307 | }, | 1383 | }, |
1308 | [CPUHP_AP_WORKQUEUE_ONLINE] = { | 1384 | [CPUHP_AP_WORKQUEUE_ONLINE] = { |
1309 | .name = "workqueue online", | 1385 | .name = "workqueue:online", |
1310 | .startup = workqueue_online_cpu, | 1386 | .startup.single = workqueue_online_cpu, |
1311 | .teardown = workqueue_offline_cpu, | 1387 | .teardown.single = workqueue_offline_cpu, |
1312 | }, | 1388 | }, |
1313 | [CPUHP_AP_RCUTREE_ONLINE] = { | 1389 | [CPUHP_AP_RCUTREE_ONLINE] = { |
1314 | .name = "RCU-tree online", | 1390 | .name = "RCU/tree:online", |
1315 | .startup = rcutree_online_cpu, | 1391 | .startup.single = rcutree_online_cpu, |
1316 | .teardown = rcutree_offline_cpu, | 1392 | .teardown.single = rcutree_offline_cpu, |
1317 | }, | 1393 | }, |
1318 | 1394 | ||
1319 | /* | 1395 | /* |
@@ -1322,8 +1398,8 @@ static struct cpuhp_step cpuhp_ap_states[] = {
1322 | */ | 1398 | */ |
1323 | [CPUHP_AP_NOTIFY_ONLINE] = { | 1399 | [CPUHP_AP_NOTIFY_ONLINE] = { |
1324 | .name = "notify:online", | 1400 | .name = "notify:online", |
1325 | .startup = notify_online, | 1401 | .startup.single = notify_online, |
1326 | .teardown = notify_down_prepare, | 1402 | .teardown.single = notify_down_prepare, |
1327 | .skip_onerr = true, | 1403 | .skip_onerr = true, |
1328 | }, | 1404 | }, |
1329 | #endif | 1405 | #endif |
@@ -1335,16 +1411,16 @@ static struct cpuhp_step cpuhp_ap_states[] = {
1335 | /* Last state is scheduler control setting the cpu active */ | 1411 | /* Last state is scheduler control setting the cpu active */ |
1336 | [CPUHP_AP_ACTIVE] = { | 1412 | [CPUHP_AP_ACTIVE] = { |
1337 | .name = "sched:active", | 1413 | .name = "sched:active", |
1338 | .startup = sched_cpu_activate, | 1414 | .startup.single = sched_cpu_activate, |
1339 | .teardown = sched_cpu_deactivate, | 1415 | .teardown.single = sched_cpu_deactivate, |
1340 | }, | 1416 | }, |
1341 | #endif | 1417 | #endif |
1342 | 1418 | ||
1343 | /* CPU is fully up and running. */ | 1419 | /* CPU is fully up and running. */ |
1344 | [CPUHP_ONLINE] = { | 1420 | [CPUHP_ONLINE] = { |
1345 | .name = "online", | 1421 | .name = "online", |
1346 | .startup = NULL, | 1422 | .startup.single = NULL, |
1347 | .teardown = NULL, | 1423 | .teardown.single = NULL, |
1348 | }, | 1424 | }, |
1349 | }; | 1425 | }; |
1350 | 1426 | ||
@@ -1356,54 +1432,42 @@ static int cpuhp_cb_check(enum cpuhp_state state)
1356 | return 0; | 1432 | return 0; |
1357 | } | 1433 | } |
1358 | 1434 | ||
1359 | static bool cpuhp_is_ap_state(enum cpuhp_state state) | ||
1360 | { | ||
1361 | /* | ||
1362 | * The extra check for CPUHP_TEARDOWN_CPU is only for documentation | ||
1363 | * purposes as that state is handled explicitely in cpu_down. | ||
1364 | */ | ||
1365 | return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU; | ||
1366 | } | ||
1367 | |||
1368 | static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state) | ||
1369 | { | ||
1370 | struct cpuhp_step *sp; | ||
1371 | |||
1372 | sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states; | ||
1373 | return sp + state; | ||
1374 | } | ||
1375 | |||
1376 | static void cpuhp_store_callbacks(enum cpuhp_state state, | 1435 | static void cpuhp_store_callbacks(enum cpuhp_state state, |
1377 | const char *name, | 1436 | const char *name, |
1378 | int (*startup)(unsigned int cpu), | 1437 | int (*startup)(unsigned int cpu), |
1379 | int (*teardown)(unsigned int cpu)) | 1438 | int (*teardown)(unsigned int cpu), |
1439 | bool multi_instance) | ||
1380 | { | 1440 | { |
1381 | /* (Un)Install the callbacks for further cpu hotplug operations */ | 1441 | /* (Un)Install the callbacks for further cpu hotplug operations */ |
1382 | struct cpuhp_step *sp; | 1442 | struct cpuhp_step *sp; |
1383 | 1443 | ||
1384 | mutex_lock(&cpuhp_state_mutex); | 1444 | mutex_lock(&cpuhp_state_mutex); |
1385 | sp = cpuhp_get_step(state); | 1445 | sp = cpuhp_get_step(state); |
1386 | sp->startup = startup; | 1446 | sp->startup.single = startup; |
1387 | sp->teardown = teardown; | 1447 | sp->teardown.single = teardown; |
1388 | sp->name = name; | 1448 | sp->name = name; |
1449 | sp->multi_instance = multi_instance; | ||
1450 | INIT_HLIST_HEAD(&sp->list); | ||
1389 | mutex_unlock(&cpuhp_state_mutex); | 1451 | mutex_unlock(&cpuhp_state_mutex); |
1390 | } | 1452 | } |
1391 | 1453 | ||
1392 | static void *cpuhp_get_teardown_cb(enum cpuhp_state state) | 1454 | static void *cpuhp_get_teardown_cb(enum cpuhp_state state) |
1393 | { | 1455 | { |
1394 | return cpuhp_get_step(state)->teardown; | 1456 | return cpuhp_get_step(state)->teardown.single; |
1395 | } | 1457 | } |
1396 | 1458 | ||
1397 | /* | 1459 | /* |
1398 | * Call the startup/teardown function for a step either on the AP or | 1460 | * Call the startup/teardown function for a step either on the AP or |
1399 | * on the current CPU. | 1461 | * on the current CPU. |
1400 | */ | 1462 | */ |
1401 | static int cpuhp_issue_call(int cpu, enum cpuhp_state state, | 1463 | static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup, |
1402 | int (*cb)(unsigned int), bool bringup) | 1464 | struct hlist_node *node) |
1403 | { | 1465 | { |
1466 | struct cpuhp_step *sp = cpuhp_get_step(state); | ||
1404 | int ret; | 1467 | int ret; |
1405 | 1468 | ||
1406 | if (!cb) | 1469 | if ((bringup && !sp->startup.single) || |
1470 | (!bringup && !sp->teardown.single)) | ||
1407 | return 0; | 1471 | return 0; |
1408 | /* | 1472 | /* |
1409 | * The non AP bound callbacks can fail on bringup. On teardown | 1473 | * The non AP bound callbacks can fail on bringup. On teardown |
@@ -1411,11 +1475,11 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state,
1411 | */ | 1475 | */ |
1412 | #ifdef CONFIG_SMP | 1476 | #ifdef CONFIG_SMP |
1413 | if (cpuhp_is_ap_state(state)) | 1477 | if (cpuhp_is_ap_state(state)) |
1414 | ret = cpuhp_invoke_ap_callback(cpu, state, cb); | 1478 | ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node); |
1415 | else | 1479 | else |
1416 | ret = cpuhp_invoke_callback(cpu, state, cb); | 1480 | ret = cpuhp_invoke_callback(cpu, state, bringup, node); |
1417 | #else | 1481 | #else |
1418 | ret = cpuhp_invoke_callback(cpu, state, cb); | 1482 | ret = cpuhp_invoke_callback(cpu, state, bringup, node); |
1419 | #endif | 1483 | #endif |
1420 | BUG_ON(ret && !bringup); | 1484 | BUG_ON(ret && !bringup); |
1421 | return ret; | 1485 | return ret; |
@@ -1427,13 +1491,10 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state,
1427 | * Note: The teardown callbacks for rollback are not allowed to fail! | 1491 | * Note: The teardown callbacks for rollback are not allowed to fail! |
1428 | */ | 1492 | */ |
1429 | static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state, | 1493 | static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state, |
1430 | int (*teardown)(unsigned int cpu)) | 1494 | struct hlist_node *node) |
1431 | { | 1495 | { |
1432 | int cpu; | 1496 | int cpu; |
1433 | 1497 | ||
1434 | if (!teardown) | ||
1435 | return; | ||
1436 | |||
1437 | /* Roll back the already executed steps on the other cpus */ | 1498 | /* Roll back the already executed steps on the other cpus */ |
1438 | for_each_present_cpu(cpu) { | 1499 | for_each_present_cpu(cpu) { |
1439 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); | 1500 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
@@ -1444,7 +1505,7 @@ static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
1444 | 1505 | ||
1445 | /* Did we invoke the startup call on that cpu ? */ | 1506 | /* Did we invoke the startup call on that cpu ? */ |
1446 | if (cpustate >= state) | 1507 | if (cpustate >= state) |
1447 | cpuhp_issue_call(cpu, state, teardown, false); | 1508 | cpuhp_issue_call(cpu, state, false, node); |
1448 | } | 1509 | } |
1449 | } | 1510 | } |
1450 | 1511 | ||
@@ -1471,6 +1532,52 @@ static int cpuhp_reserve_state(enum cpuhp_state state)
1471 | return -ENOSPC; | 1532 | return -ENOSPC; |
1472 | } | 1533 | } |
1473 | 1534 | ||
1535 | int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node, | ||
1536 | bool invoke) | ||
1537 | { | ||
1538 | struct cpuhp_step *sp; | ||
1539 | int cpu; | ||
1540 | int ret; | ||
1541 | |||
1542 | sp = cpuhp_get_step(state); | ||
1543 | if (sp->multi_instance == false) | ||
1544 | return -EINVAL; | ||
1545 | |||
1546 | get_online_cpus(); | ||
1547 | |||
1548 | if (!invoke || !sp->startup.multi) | ||
1549 | goto add_node; | ||
1550 | |||
1551 | /* | ||
1552 | * Try to call the startup callback for each present cpu | ||
1553 | * depending on the hotplug state of the cpu. | ||
1554 | */ | ||
1555 | for_each_present_cpu(cpu) { | ||
1556 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); | ||
1557 | int cpustate = st->state; | ||
1558 | |||
1559 | if (cpustate < state) | ||
1560 | continue; | ||
1561 | |||
1562 | ret = cpuhp_issue_call(cpu, state, true, node); | ||
1563 | if (ret) { | ||
1564 | if (sp->teardown.multi) | ||
1565 | cpuhp_rollback_install(cpu, state, node); | ||
1566 | goto err; | ||
1567 | } | ||
1568 | } | ||
1569 | add_node: | ||
1570 | ret = 0; | ||
1571 | mutex_lock(&cpuhp_state_mutex); | ||
1572 | hlist_add_head(node, &sp->list); | ||
1573 | mutex_unlock(&cpuhp_state_mutex); | ||
1574 | |||
1575 | err: | ||
1576 | put_online_cpus(); | ||
1577 | return ret; | ||
1578 | } | ||
1579 | EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance); | ||
1580 | |||
1474 | /** | 1581 | /** |
1475 | * __cpuhp_setup_state - Setup the callbacks for a hotplug machine state | 1582 |
1476 | * @state: The state to setup | 1583 | * @state: The state to setup |
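With __cpuhp_state_add_instance() in place, a driver registers a multi-instance state once and then hooks one hlist_node per device into it; the startup callback runs for that node on every present CPU that has already passed the state. A sketch of the expected usage, assuming the cpuhp_setup_state_multi()/cpuhp_state_add_instance() wrappers from the companion cpuhotplug.h change and a hypothetical my_dev driver:

	#include <linux/cpuhotplug.h>

	struct my_dev {
		struct hlist_node node;	/* instance hook for the cpuhp core */
		/* ... per-device state ... */
	};

	static enum cpuhp_state my_state;

	static int my_dev_online(unsigned int cpu, struct hlist_node *node)
	{
		struct my_dev *dev = container_of(node, struct my_dev, node);

		/* set up dev's per-cpu resources on @cpu */
		return 0;
	}

	static int my_init(void)
	{
		/* Register the state once; for CPUHP_AP_ONLINE_DYN the
		 * dynamically allocated state number is returned. */
		int ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
						  "mydrv:online",
						  my_dev_online, NULL);
		if (ret < 0)
			return ret;
		my_state = ret;
		return 0;
	}

	static int my_probe(struct my_dev *dev)
	{
		/* Runs my_dev_online(cpu, &dev->node) on all CPUs already
		 * at or past the state, then links the node in. */
		return cpuhp_state_add_instance(my_state, &dev->node);
	}

On teardown the driver would call cpuhp_state_remove_instance(my_state, &dev->node) before freeing the device, mirroring __cpuhp_state_remove_instance() further down.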
@@ -1484,7 +1591,8 @@ static int cpuhp_reserve_state(enum cpuhp_state state)
1484 | int __cpuhp_setup_state(enum cpuhp_state state, | 1591 | int __cpuhp_setup_state(enum cpuhp_state state, |
1485 | const char *name, bool invoke, | 1592 | const char *name, bool invoke, |
1486 | int (*startup)(unsigned int cpu), | 1593 | int (*startup)(unsigned int cpu), |
1487 | int (*teardown)(unsigned int cpu)) | 1594 | int (*teardown)(unsigned int cpu), |
1595 | bool multi_instance) | ||
1488 | { | 1596 | { |
1489 | int cpu, ret = 0; | 1597 | int cpu, ret = 0; |
1490 | int dyn_state = 0; | 1598 | int dyn_state = 0; |
@@ -1503,7 +1611,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
1503 | state = ret; | 1611 | state = ret; |
1504 | } | 1612 | } |
1505 | 1613 | ||
1506 | cpuhp_store_callbacks(state, name, startup, teardown); | 1614 | cpuhp_store_callbacks(state, name, startup, teardown, multi_instance); |
1507 | 1615 | ||
1508 | if (!invoke || !startup) | 1616 | if (!invoke || !startup) |
1509 | goto out; | 1617 | goto out; |
@@ -1519,10 +1627,11 @@ int __cpuhp_setup_state(enum cpuhp_state state,
1519 | if (cpustate < state) | 1627 | if (cpustate < state) |
1520 | continue; | 1628 | continue; |
1521 | 1629 | ||
1522 | ret = cpuhp_issue_call(cpu, state, startup, true); | 1630 | ret = cpuhp_issue_call(cpu, state, true, NULL); |
1523 | if (ret) { | 1631 | if (ret) { |
1524 | cpuhp_rollback_install(cpu, state, teardown); | 1632 | if (teardown) |
1525 | cpuhp_store_callbacks(state, NULL, NULL, NULL); | 1633 | cpuhp_rollback_install(cpu, state, NULL); |
1634 | cpuhp_store_callbacks(state, NULL, NULL, NULL, false); | ||
1526 | goto out; | 1635 | goto out; |
1527 | } | 1636 | } |
1528 | } | 1637 | } |
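__cpuhp_setup_state() now takes a multi_instance flag, so the existing single-callback wrapper presumably just passes false; something like the following is assumed on the cpuhotplug.h side (not part of this diff):

	static inline int cpuhp_setup_state(enum cpuhp_state state,
					    const char *name,
					    int (*startup)(unsigned int cpu),
					    int (*teardown)(unsigned int cpu))
	{
		return __cpuhp_setup_state(state, name, true, startup,
					   teardown, false);
	}

Note also the behaviour fix folded into this hunk: on a bringup failure the rollback is only attempted when a teardown callback exists, and the stored callbacks are cleared with multi_instance = false.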
@@ -1534,6 +1643,42 @@ out:
1534 | } | 1643 | } |
1535 | EXPORT_SYMBOL(__cpuhp_setup_state); | 1644 | EXPORT_SYMBOL(__cpuhp_setup_state); |
1536 | 1645 | ||
1646 | int __cpuhp_state_remove_instance(enum cpuhp_state state, | ||
1647 | struct hlist_node *node, bool invoke) | ||
1648 | { | ||
1649 | struct cpuhp_step *sp = cpuhp_get_step(state); | ||
1650 | int cpu; | ||
1651 | |||
1652 | BUG_ON(cpuhp_cb_check(state)); | ||
1653 | |||
1654 | if (!sp->multi_instance) | ||
1655 | return -EINVAL; | ||
1656 | |||
1657 | get_online_cpus(); | ||
1658 | if (!invoke || !cpuhp_get_teardown_cb(state)) | ||
1659 | goto remove; | ||
1660 | /* | ||
1661 | * Call the teardown callback for each present cpu depending | ||
1662 | * on the hotplug state of the cpu. This function is not | ||
1663 | * allowed to fail currently! | ||
1664 | */ | ||
1665 | for_each_present_cpu(cpu) { | ||
1666 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); | ||
1667 | int cpustate = st->state; | ||
1668 | |||
1669 | if (cpustate >= state) | ||
1670 | cpuhp_issue_call(cpu, state, false, node); | ||
1671 | } | ||
1672 | |||
1673 | remove: | ||
1674 | mutex_lock(&cpuhp_state_mutex); | ||
1675 | hlist_del(node); | ||
1676 | mutex_unlock(&cpuhp_state_mutex); | ||
1677 | put_online_cpus(); | ||
1678 | |||
1679 | return 0; | ||
1680 | } | ||
1681 | EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance); | ||
1537 | /** | 1682 | /** |
1538 | * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state | 1683 |
1539 | * @state: The state to remove | 1684 | * @state: The state to remove |
@@ -1545,14 +1690,21 @@ EXPORT_SYMBOL(__cpuhp_setup_state);
1545 | */ | 1690 | */ |
1546 | void __cpuhp_remove_state(enum cpuhp_state state, bool invoke) | 1691 | void __cpuhp_remove_state(enum cpuhp_state state, bool invoke) |
1547 | { | 1692 | { |
1548 | int (*teardown)(unsigned int cpu) = cpuhp_get_teardown_cb(state); | 1693 | struct cpuhp_step *sp = cpuhp_get_step(state); |
1549 | int cpu; | 1694 | int cpu; |
1550 | 1695 | ||
1551 | BUG_ON(cpuhp_cb_check(state)); | 1696 | BUG_ON(cpuhp_cb_check(state)); |
1552 | 1697 | ||
1553 | get_online_cpus(); | 1698 | get_online_cpus(); |
1554 | 1699 | ||
1555 | if (!invoke || !teardown) | 1700 | if (sp->multi_instance) { |
1701 | WARN(!hlist_empty(&sp->list), | ||
1702 | "Error: Removing state %d which has instances left.\n", | ||
1703 | state); | ||
1704 | goto remove; | ||
1705 | } | ||
1706 | |||
1707 | if (!invoke || !cpuhp_get_teardown_cb(state)) | ||
1556 | goto remove; | 1708 | goto remove; |
1557 | 1709 | ||
1558 | /* | 1710 | /* |
@@ -1565,10 +1717,10 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
1565 | int cpustate = st->state; | 1717 | int cpustate = st->state; |
1566 | 1718 | ||
1567 | if (cpustate >= state) | 1719 | if (cpustate >= state) |
1568 | cpuhp_issue_call(cpu, state, teardown, false); | 1720 | cpuhp_issue_call(cpu, state, false, NULL); |
1569 | } | 1721 | } |
1570 | remove: | 1722 | remove: |
1571 | cpuhp_store_callbacks(state, NULL, NULL, NULL); | 1723 | cpuhp_store_callbacks(state, NULL, NULL, NULL, false); |
1572 | put_online_cpus(); | 1724 | put_online_cpus(); |
1573 | } | 1725 | } |
1574 | EXPORT_SYMBOL(__cpuhp_remove_state); | 1726 | EXPORT_SYMBOL(__cpuhp_remove_state); |