author     Linus Torvalds <torvalds@linux-foundation.org>  2016-07-29 16:55:30 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-07-29 16:55:30 -0400
commit     a6408f6cb63ac0958fee7dbce7861ffb540d8a49 (patch)
tree       c94a835d343974171951e3b805e6bbbb02852ebc /drivers
parent     1a81a8f2a5918956e214bb718099a89e500e7ec5 (diff)
parent     4fae16dffb812f0e0d98a0b2b0856ca48ca63e6c (diff)
Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull smp hotplug updates from Thomas Gleixner:
 "This is the next part of the hotplug rework.

   - Convert all notifiers with a priority assigned

   - Convert all CPU_STARTING/DYING notifiers

  The final removal of the STARTING/DYING infrastructure will happen
  when the merge window closes.

  Another 700 hundred line of unpenetrable maze gone :)"

* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
  timers/core: Correct callback order during CPU hot plug
  leds/trigger/cpu: Move from CPU_STARTING to ONLINE level
  powerpc/numa: Convert to hotplug state machine
  arm/perf: Fix hotplug state machine conversion
  irqchip/armada: Avoid unused function warnings
  ARC/time: Convert to hotplug state machine
  clocksource/atlas7: Convert to hotplug state machine
  clocksource/armada-370-xp: Convert to hotplug state machine
  clocksource/exynos_mct: Convert to hotplug state machine
  clocksource/arm_global_timer: Convert to hotplug state machine
  rcu: Convert rcutree to hotplug state machine
  KVM/arm/arm64/vgic-new: Convert to hotplug state machine
  smp/cfd: Convert core to hotplug state machine
  x86/x2apic: Convert to CPU hotplug state machine
  profile: Convert to hotplug state machine
  timers/core: Convert to hotplug state machine
  hrtimer: Convert to hotplug state machine
  x86/tboot: Convert to hotplug state machine
  arm64/armv8 deprecated: Convert to hotplug state machine
  hwtracing/coresight-etm4x: Convert to hotplug state machine
  ...
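Every conversion in the diffs below follows the same shape: a notifier that demultiplexed CPU_STARTING/CPU_DYING actions becomes a pair of plain per-CPU callbacks registered against a fixed hotplug state. A minimal sketch of that pattern, assuming a hypothetical foo driver and a hypothetical CPUHP_AP_FOO_STARTING entry added to enum cpuhp_state in include/linux/cpuhotplug.h (names here are illustrative, not from this merge):

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

/* Called on the hotplugged CPU early in bringup, with interrupts disabled. */
static int foo_starting_cpu(unsigned int cpu)
{
	/* per-CPU hardware setup goes here */
	return 0;	/* plain 0 on success, instead of NOTIFY_OK */
}

/* Called on the CPU on its way down; the inverse of the starting callback. */
static int foo_dying_cpu(unsigned int cpu)
{
	/* per-CPU hardware teardown goes here */
	return 0;
}

static int __init foo_init(void)
{
	/*
	 * Installs both callbacks and invokes the startup callback on all
	 * CPUs that are already online, which replaces the old explicit
	 * "configure on the boot CPU" calls. The *_nocalls() variant
	 * registers without invoking the callbacks.
	 */
	return cpuhp_setup_state(CPUHP_AP_FOO_STARTING,
				 "AP_FOO_STARTING",
				 foo_starting_cpu, foo_dying_cpu);
}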
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/processor_driver.c                11
-rw-r--r--  drivers/bus/arm-cci.c                          53
-rw-r--r--  drivers/bus/arm-ccn.c                          57
-rw-r--r--  drivers/clocksource/arm_arch_timer.c           54
-rw-r--r--  drivers/clocksource/arm_global_timer.c         39
-rw-r--r--  drivers/clocksource/dummy_timer.c              36
-rw-r--r--  drivers/clocksource/exynos_mct.c               46
-rw-r--r--  drivers/clocksource/metag_generic.c            33
-rw-r--r--  drivers/clocksource/mips-gic-timer.c           38
-rw-r--r--  drivers/clocksource/qcom-timer.c               41
-rw-r--r--  drivers/clocksource/time-armada-370-xp.c       41
-rw-r--r--  drivers/clocksource/timer-atlas7.c             41
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm3x.c  90
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.c  87
-rw-r--r--  drivers/irqchip/irq-armada-370-xp.c            44
-rw-r--r--  drivers/irqchip/irq-bcm2836.c                  34
-rw-r--r--  drivers/irqchip/irq-gic-v3.c                   22
-rw-r--r--  drivers/irqchip/irq-gic.c                      23
-rw-r--r--  drivers/irqchip/irq-hip04.c                    25
-rw-r--r--  drivers/leds/trigger/ledtrig-cpu.c             32
-rw-r--r--  drivers/perf/arm_pmu.c                         59
21 files changed, 357 insertions, 549 deletions
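One wrinkle visible in the arm-cci and arm-ccn conversions below: a notifier_block could be embedded in each device instance and recovered with container_of(), but a hotplug-state callback is a bare function that receives only the CPU number. Those drivers therefore keep their instances on a global list that the callback walks. A rough sketch of that idiom, using a hypothetical foo_pmu type (not code from this merge):

#include <linux/cpuhotplug.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct foo_pmu {
	struct list_head entry;	/* linkage on foo_pmu_list */
	/* ... per-device state ... */
};

static DEFINE_MUTEX(foo_pmu_mutex);
static LIST_HEAD(foo_pmu_list);

static int foo_pmu_offline_cpu(unsigned int cpu)
{
	struct foo_pmu *pmu;

	/*
	 * No per-instance context arrives with the callback, so walk the
	 * registry of live devices under the same mutex that probe() takes
	 * around list_add() and remove() takes around list_del().
	 */
	mutex_lock(&foo_pmu_mutex);
	list_for_each_entry(pmu, &foo_pmu_list, entry) {
		/* per-instance handling of the dying CPU goes here, e.g.
		 * picking a new event-owning CPU as the drivers below do */
	}
	mutex_unlock(&foo_pmu_mutex);
	return 0;
}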
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 0ca14ac7bb28..0553aeebb228 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -118,12 +118,13 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
 	struct acpi_device *device;
 	action &= ~CPU_TASKS_FROZEN;
 
-	/*
-	 * CPU_STARTING and CPU_DYING must not sleep. Return here since
-	 * acpi_bus_get_device() may sleep.
-	 */
-	if (action == CPU_STARTING || action == CPU_DYING)
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_DEAD:
+		break;
+	default:
 		return NOTIFY_DONE;
+	}
 
 	if (!pr || acpi_bus_get_device(pr->handle, &device))
 		return NOTIFY_DONE;
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index a49b28378d59..5755907f836f 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -144,12 +144,15 @@ struct cci_pmu {
 	int num_cntrs;
 	atomic_t active_events;
 	struct mutex reserve_mutex;
-	struct notifier_block cpu_nb;
+	struct list_head entry;
 	cpumask_t cpus;
 };
 
 #define to_cci_pmu(c)	(container_of(c, struct cci_pmu, pmu))
 
+static DEFINE_MUTEX(cci_pmu_mutex);
+static LIST_HEAD(cci_pmu_list);
+
 enum cci_models {
 #ifdef CONFIG_ARM_CCI400_PMU
 	CCI400_R0,
@@ -1503,31 +1506,26 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
 	return perf_pmu_register(&cci_pmu->pmu, name, -1);
 }
 
-static int cci_pmu_cpu_notifier(struct notifier_block *self,
-				unsigned long action, void *hcpu)
+static int cci_pmu_offline_cpu(unsigned int cpu)
 {
-	struct cci_pmu *cci_pmu = container_of(self,
-					struct cci_pmu, cpu_nb);
-	unsigned int cpu = (long)hcpu;
+	struct cci_pmu *cci_pmu;
 	unsigned int target;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_PREPARE:
+	mutex_lock(&cci_pmu_mutex);
+	list_for_each_entry(cci_pmu, &cci_pmu_list, entry) {
 		if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
-			break;
+			continue;
 		target = cpumask_any_but(cpu_online_mask, cpu);
-		if (target >= nr_cpu_ids) // UP, last CPU
-			break;
+		if (target >= nr_cpu_ids)
+			continue;
 		/*
 		 * TODO: migrate context once core races on event->ctx have
 		 * been fixed.
 		 */
 		cpumask_set_cpu(target, &cci_pmu->cpus);
-	default:
-		break;
 	}
-
-	return NOTIFY_OK;
+	mutex_unlock(&cci_pmu_mutex);
+	return 0;
 }
 
 static struct cci_pmu_model cci_pmu_models[] = {
@@ -1766,24 +1764,13 @@ static int cci_pmu_probe(struct platform_device *pdev)
 	atomic_set(&cci_pmu->active_events, 0);
 	cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus);
 
-	cci_pmu->cpu_nb = (struct notifier_block) {
-		.notifier_call = cci_pmu_cpu_notifier,
-		/*
-		 * to migrate uncore events, our notifier should be executed
-		 * before perf core's notifier.
-		 */
-		.priority = CPU_PRI_PERF + 1,
-	};
-
-	ret = register_cpu_notifier(&cci_pmu->cpu_nb);
+	ret = cci_pmu_init(cci_pmu, pdev);
 	if (ret)
 		return ret;
 
-	ret = cci_pmu_init(cci_pmu, pdev);
-	if (ret) {
-		unregister_cpu_notifier(&cci_pmu->cpu_nb);
-		return ret;
-	}
+	mutex_lock(&cci_pmu_mutex);
+	list_add(&cci_pmu->entry, &cci_pmu_list);
+	mutex_unlock(&cci_pmu_mutex);
 
 	pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
 	return 0;
@@ -1817,6 +1804,12 @@ static int __init cci_platform_init(void)
 {
 	int ret;
 
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
+					"AP_PERF_ARM_CCI_ONLINE", NULL,
+					cci_pmu_offline_cpu);
+	if (ret)
+		return ret;
+
 	ret = platform_driver_register(&cci_pmu_driver);
 	if (ret)
 		return ret;
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index acc3eb542c74..97a9185af433 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -167,7 +167,7 @@ struct arm_ccn_dt {
 	struct hrtimer hrtimer;
 
 	cpumask_t cpu;
-	struct notifier_block cpu_nb;
+	struct list_head entry;
 
 	struct pmu pmu;
 };
@@ -189,6 +189,8 @@ struct arm_ccn {
 	struct arm_ccn_dt dt;
 };
 
+static DEFINE_MUTEX(arm_ccn_mutex);
+static LIST_HEAD(arm_ccn_list);
 
 static int arm_ccn_node_to_xp(int node)
 {
@@ -1171,30 +1173,27 @@ static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer)
 }
 
 
-static int arm_ccn_pmu_cpu_notifier(struct notifier_block *nb,
-				    unsigned long action, void *hcpu)
+static int arm_ccn_pmu_offline_cpu(unsigned int cpu)
 {
-	struct arm_ccn_dt *dt = container_of(nb, struct arm_ccn_dt, cpu_nb);
-	struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
-	unsigned int cpu = (long)hcpu; /* for (long) see kernel/cpu.c */
+	struct arm_ccn_dt *dt;
 	unsigned int target;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_PREPARE:
+	mutex_lock(&arm_ccn_mutex);
+	list_for_each_entry(dt, &arm_ccn_list, entry) {
+		struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
+
 		if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
-			break;
+			continue;
 		target = cpumask_any_but(cpu_online_mask, cpu);
 		if (target >= nr_cpu_ids)
-			break;
+			continue;
 		perf_pmu_migrate_context(&dt->pmu, cpu, target);
 		cpumask_set_cpu(target, &dt->cpu);
 		if (ccn->irq)
 			WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0);
-	default:
-		break;
 	}
-
-	return NOTIFY_OK;
+	mutex_unlock(&arm_ccn_mutex);
+	return 0;
 }
 
 
@@ -1266,16 +1265,6 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
 	/* Pick one CPU which we will use to collect data from CCN... */
 	cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu);
 
-	/*
-	 * ... and change the selection when it goes offline. Priority is
-	 * picked to have a chance to migrate events before perf is notified.
-	 */
-	ccn->dt.cpu_nb.notifier_call = arm_ccn_pmu_cpu_notifier;
-	ccn->dt.cpu_nb.priority = CPU_PRI_PERF + 1,
-	err = register_cpu_notifier(&ccn->dt.cpu_nb);
-	if (err)
-		goto error_cpu_notifier;
-
 	/* Also make sure that the overflow interrupt is handled by this CPU */
 	if (ccn->irq) {
 		err = irq_set_affinity_hint(ccn->irq, &ccn->dt.cpu);
@@ -1289,12 +1278,13 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
 	if (err)
 		goto error_pmu_register;
 
+	mutex_lock(&arm_ccn_mutex);
+	list_add(&ccn->dt.entry, &arm_ccn_list);
+	mutex_unlock(&arm_ccn_mutex);
 	return 0;
 
 error_pmu_register:
 error_set_affinity:
-	unregister_cpu_notifier(&ccn->dt.cpu_nb);
-error_cpu_notifier:
 	ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
 	for (i = 0; i < ccn->num_xps; i++)
 		writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
@@ -1306,9 +1296,12 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
 {
 	int i;
 
+	mutex_lock(&arm_ccn_mutex);
+	list_del(&ccn->dt.entry);
+	mutex_unlock(&arm_ccn_mutex);
+
 	if (ccn->irq)
 		irq_set_affinity_hint(ccn->irq, NULL);
-	unregister_cpu_notifier(&ccn->dt.cpu_nb);
 	for (i = 0; i < ccn->num_xps; i++)
 		writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
 	writel(0, ccn->dt.base + CCN_DT_PMCR);
@@ -1316,7 +1309,6 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
 	ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
 }
 
-
 static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn,
 		int (*callback)(struct arm_ccn *ccn, int region,
 			void __iomem *base, u32 type, u32 id))
@@ -1533,7 +1525,13 @@ static struct platform_driver arm_ccn_driver = {
 
 static int __init arm_ccn_init(void)
 {
-	int i;
+	int i, ret;
+
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
+					"AP_PERF_ARM_CCN_ONLINE", NULL,
+					arm_ccn_pmu_offline_cpu);
+	if (ret)
+		return ret;
 
 	for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++)
 		arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr;
@@ -1543,6 +1541,7 @@ static int __init arm_ccn_init(void)
 
 static void __exit arm_ccn_exit(void)
 {
+	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE);
 	platform_driver_unregister(&arm_ccn_driver);
 }
 
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 5effd3027319..28bce3f4f81d 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -370,8 +370,10 @@ static bool arch_timer_has_nonsecure_ppi(void)
 		arch_timer_ppi[PHYS_NONSECURE_PPI]);
 }
 
-static int arch_timer_setup(struct clock_event_device *clk)
+static int arch_timer_starting_cpu(unsigned int cpu)
 {
+	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
+
 	__arch_timer_setup(ARCH_CP15_TIMER, clk);
 
 	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], 0);
@@ -527,29 +529,14 @@ static void arch_timer_stop(struct clock_event_device *clk)
 	clk->set_state_shutdown(clk);
 }
 
-static int arch_timer_cpu_notify(struct notifier_block *self,
-				 unsigned long action, void *hcpu)
+static int arch_timer_dying_cpu(unsigned int cpu)
 {
-	/*
-	 * Grab cpu pointer in each case to avoid spurious
-	 * preemptible warnings
-	 */
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		arch_timer_setup(this_cpu_ptr(arch_timer_evt));
-		break;
-	case CPU_DYING:
-		arch_timer_stop(this_cpu_ptr(arch_timer_evt));
-		break;
-	}
+	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
 
-	return NOTIFY_OK;
+	arch_timer_stop(clk);
+	return 0;
 }
 
-static struct notifier_block arch_timer_cpu_nb = {
-	.notifier_call = arch_timer_cpu_notify,
-};
-
 #ifdef CONFIG_CPU_PM
 static unsigned int saved_cntkctl;
 static int arch_timer_cpu_pm_notify(struct notifier_block *self,
@@ -570,11 +557,21 @@ static int __init arch_timer_cpu_pm_init(void)
 {
 	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
 }
+
+static void __init arch_timer_cpu_pm_deinit(void)
+{
+	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
+}
+
 #else
 static int __init arch_timer_cpu_pm_init(void)
 {
 	return 0;
 }
+
+static void __init arch_timer_cpu_pm_deinit(void)
+{
+}
 #endif
 
 static int __init arch_timer_register(void)
@@ -621,22 +618,23 @@ static int __init arch_timer_register(void)
 		goto out_free;
 	}
 
-	err = register_cpu_notifier(&arch_timer_cpu_nb);
-	if (err)
-		goto out_free_irq;
-
 	err = arch_timer_cpu_pm_init();
 	if (err)
 		goto out_unreg_notify;
 
-	/* Immediately configure the timer on the boot CPU */
-	arch_timer_setup(this_cpu_ptr(arch_timer_evt));
 
+	/* Register and immediately configure the timer on the boot CPU */
+	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
+				"AP_ARM_ARCH_TIMER_STARTING",
+				arch_timer_starting_cpu, arch_timer_dying_cpu);
+	if (err)
+		goto out_unreg_cpupm;
 	return 0;
 
+out_unreg_cpupm:
+	arch_timer_cpu_pm_deinit();
+
 out_unreg_notify:
-	unregister_cpu_notifier(&arch_timer_cpu_nb);
-out_free_irq:
 	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
 	if (arch_timer_has_nonsecure_ppi())
 		free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 2a9ceb6e93f9..8da03298f844 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -165,9 +165,9 @@ static irqreturn_t gt_clockevent_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int gt_clockevents_init(struct clock_event_device *clk)
+static int gt_starting_cpu(unsigned int cpu)
 {
-	int cpu = smp_processor_id();
+	struct clock_event_device *clk = this_cpu_ptr(gt_evt);
 
 	clk->name = "arm_global_timer";
 	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
@@ -186,10 +186,13 @@ static int gt_clockevents_init(struct clock_event_device *clk)
 	return 0;
 }
 
-static void gt_clockevents_stop(struct clock_event_device *clk)
+static int gt_dying_cpu(unsigned int cpu)
 {
+	struct clock_event_device *clk = this_cpu_ptr(gt_evt);
+
 	gt_clockevent_shutdown(clk);
 	disable_percpu_irq(clk->irq);
+	return 0;
 }
 
 static cycle_t gt_clocksource_read(struct clocksource *cs)
@@ -252,24 +255,6 @@ static int __init gt_clocksource_init(void)
 	return clocksource_register_hz(&gt_clocksource, gt_clk_rate);
 }
 
-static int gt_cpu_notify(struct notifier_block *self, unsigned long action,
-			 void *hcpu)
-{
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		gt_clockevents_init(this_cpu_ptr(gt_evt));
-		break;
-	case CPU_DYING:
-		gt_clockevents_stop(this_cpu_ptr(gt_evt));
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-static struct notifier_block gt_cpu_nb = {
-	.notifier_call = gt_cpu_notify,
-};
-
 static int __init global_timer_of_register(struct device_node *np)
 {
 	struct clk *gt_clk;
@@ -325,18 +310,14 @@ static int __init global_timer_of_register(struct device_node *np)
 		goto out_free;
 	}
 
-	err = register_cpu_notifier(&gt_cpu_nb);
-	if (err) {
-		pr_warn("global-timer: unable to register cpu notifier.\n");
-		goto out_irq;
-	}
-
-	/* Immediately configure the timer on the boot CPU */
+	/* Register and immediately configure the timer on the boot CPU */
 	err = gt_clocksource_init();
 	if (err)
 		goto out_irq;
 
-	err = gt_clockevents_init(this_cpu_ptr(gt_evt));
+	err = cpuhp_setup_state(CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
+				"AP_ARM_GLOBAL_TIMER_STARTING",
+				gt_starting_cpu, gt_dying_cpu);
 	if (err)
 		goto out_irq;
 
diff --git a/drivers/clocksource/dummy_timer.c b/drivers/clocksource/dummy_timer.c
index 776b6c86dcd5..89f1c2edbe02 100644
--- a/drivers/clocksource/dummy_timer.c
+++ b/drivers/clocksource/dummy_timer.c
@@ -16,10 +16,9 @@
 
 static DEFINE_PER_CPU(struct clock_event_device, dummy_timer_evt);
 
-static void dummy_timer_setup(void)
+static int dummy_timer_starting_cpu(unsigned int cpu)
 {
-	int cpu = smp_processor_id();
-	struct clock_event_device *evt = raw_cpu_ptr(&dummy_timer_evt);
+	struct clock_event_device *evt = per_cpu_ptr(&dummy_timer_evt, cpu);
 
 	evt->name	= "dummy_timer";
 	evt->features	= CLOCK_EVT_FEAT_PERIODIC |
@@ -29,36 +28,13 @@ static void dummy_timer_setup(void)
 	evt->cpumask	= cpumask_of(cpu);
 
 	clockevents_register_device(evt);
+	return 0;
 }
 
-static int dummy_timer_cpu_notify(struct notifier_block *self,
-				  unsigned long action, void *hcpu)
-{
-	if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
-		dummy_timer_setup();
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block dummy_timer_cpu_nb = {
-	.notifier_call = dummy_timer_cpu_notify,
-};
-
 static int __init dummy_timer_register(void)
 {
-	int err = 0;
-
-	cpu_notifier_register_begin();
-	err = __register_cpu_notifier(&dummy_timer_cpu_nb);
-	if (err)
-		goto out;
-
-	/* We won't get a call on the boot CPU, so register immediately */
-	if (num_possible_cpus() > 1)
-		dummy_timer_setup();
-
-out:
-	cpu_notifier_register_done();
-	return err;
+	return cpuhp_setup_state(CPUHP_AP_DUMMY_TIMER_STARTING,
+				 "AP_DUMMY_TIMER_STARTING",
+				 dummy_timer_starting_cpu, NULL);
 }
 early_initcall(dummy_timer_register);
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 0d18dd4b3bd2..41840d02c331 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -443,10 +443,11 @@ static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt)
+static int exynos4_mct_starting_cpu(unsigned int cpu)
 {
+	struct mct_clock_event_device *mevt =
+		per_cpu_ptr(&percpu_mct_tick, cpu);
 	struct clock_event_device *evt = &mevt->evt;
-	unsigned int cpu = smp_processor_id();
 
 	mevt->base = EXYNOS4_MCT_L_BASE(cpu);
 	snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);
@@ -480,8 +481,10 @@ static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt)
 	return 0;
 }
 
-static void exynos4_local_timer_stop(struct mct_clock_event_device *mevt)
+static int exynos4_mct_dying_cpu(unsigned int cpu)
 {
+	struct mct_clock_event_device *mevt =
+		per_cpu_ptr(&percpu_mct_tick, cpu);
 	struct clock_event_device *evt = &mevt->evt;
 
 	evt->set_state_shutdown(evt);
@@ -491,39 +494,12 @@ static void exynos4_local_timer_stop(struct mct_clock_event_device *mevt)
 	} else {
 		disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
 	}
+	return 0;
 }
 
-static int exynos4_mct_cpu_notify(struct notifier_block *self,
-				  unsigned long action, void *hcpu)
-{
-	struct mct_clock_event_device *mevt;
-
-	/*
-	 * Grab cpu pointer in each case to avoid spurious
-	 * preemptible warnings
-	 */
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		mevt = this_cpu_ptr(&percpu_mct_tick);
-		exynos4_local_timer_setup(mevt);
-		break;
-	case CPU_DYING:
-		mevt = this_cpu_ptr(&percpu_mct_tick);
-		exynos4_local_timer_stop(mevt);
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block exynos4_mct_cpu_nb = {
-	.notifier_call = exynos4_mct_cpu_notify,
-};
-
 static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
 {
 	int err, cpu;
-	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
 	struct clk *mct_clk, *tick_clk;
 
 	tick_clk = np ? of_clk_get_by_name(np, "fin_pll") :
@@ -570,12 +546,14 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem *
 		}
 	}
 
-	err = register_cpu_notifier(&exynos4_mct_cpu_nb);
+	/* Install hotplug callbacks which configure the timer on this CPU */
+	err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
+				"AP_EXYNOS4_MCT_TIMER_STARTING",
+				exynos4_mct_starting_cpu,
+				exynos4_mct_dying_cpu);
 	if (err)
 		goto out_irq;
 
-	/* Immediately configure the timer on the boot CPU */
-	exynos4_local_timer_setup(mevt);
 	return 0;
 
 out_irq:
diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
index bcd5c0d602a0..a80ab3e446b7 100644
--- a/drivers/clocksource/metag_generic.c
+++ b/drivers/clocksource/metag_generic.c
@@ -90,7 +90,7 @@ unsigned long long sched_clock(void)
 	return ticks << HARDWARE_TO_NS_SHIFT;
 }
 
-static void arch_timer_setup(unsigned int cpu)
+static int arch_timer_starting_cpu(unsigned int cpu)
 {
 	unsigned int txdivtime;
 	struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);
@@ -132,27 +132,9 @@ static void arch_timer_setup(unsigned int cpu)
 		val = core_reg_read(TXUCT_ID, TXTIMER_REGNUM, thread0);
 		__core_reg_set(TXTIMER, val);
 	}
+	return 0;
 }
 
-static int arch_timer_cpu_notify(struct notifier_block *self,
-				 unsigned long action, void *hcpu)
-{
-	int cpu = (long)hcpu;
-
-	switch (action) {
-	case CPU_STARTING:
-	case CPU_STARTING_FROZEN:
-		arch_timer_setup(cpu);
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block arch_timer_cpu_nb = {
-	.notifier_call = arch_timer_cpu_notify,
-};
-
 int __init metag_generic_timer_init(void)
 {
 	/*
@@ -170,11 +152,8 @@ int __init metag_generic_timer_init(void)
 
 	setup_irq(tbisig_map(TBID_SIGNUM_TRT), &metag_timer_irq);
 
-	/* Configure timer on boot CPU */
-	arch_timer_setup(smp_processor_id());
-
-	/* Hook cpu boot to configure other CPU's timers */
-	register_cpu_notifier(&arch_timer_cpu_nb);
-
-	return 0;
+	/* Hook cpu boot to configure the CPU's timers */
+	return cpuhp_setup_state(CPUHP_AP_METAG_TIMER_STARTING,
+				 "AP_METAG_TIMER_STARTING",
+				 arch_timer_starting_cpu, NULL);
 }
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 1572c7a778ab..d91e8725917c 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -49,10 +49,9 @@ struct irqaction gic_compare_irqaction = {
 	.name = "timer",
 };
 
-static void gic_clockevent_cpu_init(struct clock_event_device *cd)
+static void gic_clockevent_cpu_init(unsigned int cpu,
+				    struct clock_event_device *cd)
 {
-	unsigned int cpu = smp_processor_id();
-
 	cd->name		= "MIPS GIC";
 	cd->features		= CLOCK_EVT_FEAT_ONESHOT |
 				  CLOCK_EVT_FEAT_C3STOP;
@@ -79,19 +78,10 @@ static void gic_update_frequency(void *data)
 	clockevents_update_freq(this_cpu_ptr(&gic_clockevent_device), rate);
 }
 
-static int gic_cpu_notifier(struct notifier_block *nb, unsigned long action,
-			    void *data)
+static int gic_starting_cpu(unsigned int cpu)
 {
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device));
-		break;
-	case CPU_DYING:
-		gic_clockevent_cpu_exit(this_cpu_ptr(&gic_clockevent_device));
-		break;
-	}
-
-	return NOTIFY_OK;
+	gic_clockevent_cpu_init(cpu, this_cpu_ptr(&gic_clockevent_device));
+	return 0;
 }
 
 static int gic_clk_notifier(struct notifier_block *nb, unsigned long action,
@@ -105,10 +95,11 @@ static int gic_clk_notifier(struct notifier_block *nb, unsigned long action,
 	return NOTIFY_OK;
 }
 
-
-static struct notifier_block gic_cpu_nb = {
-	.notifier_call = gic_cpu_notifier,
-};
+static int gic_dying_cpu(unsigned int cpu)
+{
+	gic_clockevent_cpu_exit(this_cpu_ptr(&gic_clockevent_device));
+	return 0;
+}
 
 static struct notifier_block gic_clk_nb = {
 	.notifier_call = gic_clk_notifier,
@@ -125,12 +116,9 @@ static int gic_clockevent_init(void)
 	if (ret < 0)
 		return ret;
 
-	ret = register_cpu_notifier(&gic_cpu_nb);
-	if (ret < 0)
-		pr_warn("GIC: Unable to register CPU notifier\n");
-
-	gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device));
-
+	cpuhp_setup_state(CPUHP_AP_MIPS_GIC_TIMER_STARTING,
+			  "AP_MIPS_GIC_TIMER_STARTING", gic_starting_cpu,
+			  gic_dying_cpu);
 	return 0;
 }
 
diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c
index 662576339049..3283cfa2aa52 100644
--- a/drivers/clocksource/qcom-timer.c
+++ b/drivers/clocksource/qcom-timer.c
@@ -105,9 +105,9 @@ static struct clocksource msm_clocksource = {
 static int msm_timer_irq;
 static int msm_timer_has_ppi;
 
-static int msm_local_timer_setup(struct clock_event_device *evt)
+static int msm_local_timer_starting_cpu(unsigned int cpu)
 {
-	int cpu = smp_processor_id();
+	struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);
 	int err;
 
 	evt->irq = msm_timer_irq;
@@ -135,35 +135,15 @@ static int msm_local_timer_setup(struct clock_event_device *evt)
 	return 0;
 }
 
-static void msm_local_timer_stop(struct clock_event_device *evt)
+static int msm_local_timer_dying_cpu(unsigned int cpu)
 {
+	struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);
+
 	evt->set_state_shutdown(evt);
 	disable_percpu_irq(evt->irq);
+	return 0;
 }
 
-static int msm_timer_cpu_notify(struct notifier_block *self,
-				unsigned long action, void *hcpu)
-{
-	/*
-	 * Grab cpu pointer in each case to avoid spurious
-	 * preemptible warnings
-	 */
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		msm_local_timer_setup(this_cpu_ptr(msm_evt));
-		break;
-	case CPU_DYING:
-		msm_local_timer_stop(this_cpu_ptr(msm_evt));
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block msm_timer_cpu_nb = {
-	.notifier_call = msm_timer_cpu_notify,
-};
-
 static u64 notrace msm_sched_clock_read(void)
 {
 	return msm_clocksource.read(&msm_clocksource);
@@ -200,14 +180,15 @@ static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
 	if (res) {
 		pr_err("request_percpu_irq failed\n");
 	} else {
-		res = register_cpu_notifier(&msm_timer_cpu_nb);
+		/* Install and invoke hotplug callbacks */
+		res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING,
+					"AP_QCOM_TIMER_STARTING",
+					msm_local_timer_starting_cpu,
+					msm_local_timer_dying_cpu);
 		if (res) {
 			free_percpu_irq(irq, msm_evt);
 			goto err;
 		}
-
-		/* Immediately configure the timer on the boot CPU */
-		msm_local_timer_setup(raw_cpu_ptr(msm_evt));
 	}
 
 err:
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 20ec066481fe..719b478d136e 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -170,10 +170,10 @@ static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id)
 /*
  * Setup the local clock events for a CPU.
  */
-static int armada_370_xp_timer_setup(struct clock_event_device *evt)
+static int armada_370_xp_timer_starting_cpu(unsigned int cpu)
 {
+	struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);
 	u32 clr = 0, set = 0;
-	int cpu = smp_processor_id();
 
 	if (timer25Mhz)
 		set = TIMER0_25MHZ;
@@ -200,35 +200,15 @@ static int armada_370_xp_timer_setup(struct clock_event_device *evt)
 	return 0;
 }
 
-static void armada_370_xp_timer_stop(struct clock_event_device *evt)
+static int armada_370_xp_timer_dying_cpu(unsigned int cpu)
 {
+	struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);
+
 	evt->set_state_shutdown(evt);
 	disable_percpu_irq(evt->irq);
+	return 0;
 }
 
-static int armada_370_xp_timer_cpu_notify(struct notifier_block *self,
-					  unsigned long action, void *hcpu)
-{
-	/*
-	 * Grab cpu pointer in each case to avoid spurious
-	 * preemptible warnings
-	 */
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
-		break;
-	case CPU_DYING:
-		armada_370_xp_timer_stop(this_cpu_ptr(armada_370_xp_evt));
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block armada_370_xp_timer_cpu_nb = {
-	.notifier_call = armada_370_xp_timer_cpu_notify,
-};
-
 static u32 timer0_ctrl_reg, timer0_local_ctrl_reg;
 
 static int armada_370_xp_timer_suspend(void)
@@ -322,8 +302,6 @@ static int __init armada_370_xp_timer_common_init(struct device_node *np)
 		return res;
 	}
 
-	register_cpu_notifier(&armada_370_xp_timer_cpu_nb);
-
 	armada_370_xp_evt = alloc_percpu(struct clock_event_device);
 	if (!armada_370_xp_evt)
 		return -ENOMEM;
@@ -341,9 +319,12 @@ static int __init armada_370_xp_timer_common_init(struct device_node *np)
 		return res;
 	}
 
-	res = armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
+	res = cpuhp_setup_state(CPUHP_AP_ARMADA_TIMER_STARTING,
+				"AP_ARMADA_TIMER_STARTING",
+				armada_370_xp_timer_starting_cpu,
+				armada_370_xp_timer_dying_cpu);
 	if (res) {
-		pr_err("Failed to setup timer");
+		pr_err("Failed to setup hotplug state and timer");
 		return res;
 	}
 
diff --git a/drivers/clocksource/timer-atlas7.c b/drivers/clocksource/timer-atlas7.c
index 90f8fbc154a4..4334e0330ada 100644
--- a/drivers/clocksource/timer-atlas7.c
+++ b/drivers/clocksource/timer-atlas7.c
@@ -172,9 +172,9 @@ static struct irqaction sirfsoc_timer1_irq = {
 	.handler = sirfsoc_timer_interrupt,
 };
 
-static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
+static int sirfsoc_local_timer_starting_cpu(unsigned int cpu)
 {
-	int cpu = smp_processor_id();
+	struct clock_event_device *ce = per_cpu_ptr(sirfsoc_clockevent, cpu);
 	struct irqaction *action;
 
 	if (cpu == 0)
@@ -203,50 +203,27 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
 	return 0;
 }
 
-static void sirfsoc_local_timer_stop(struct clock_event_device *ce)
+static int sirfsoc_local_timer_dying_cpu(unsigned int cpu)
 {
-	int cpu = smp_processor_id();
-
 	sirfsoc_timer_count_disable(1);
 
 	if (cpu == 0)
 		remove_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq);
 	else
 		remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq);
+	return 0;
 }
 
-static int sirfsoc_cpu_notify(struct notifier_block *self,
-			      unsigned long action, void *hcpu)
-{
-	/*
-	 * Grab cpu pointer in each case to avoid spurious
-	 * preemptible warnings
-	 */
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
-		break;
-	case CPU_DYING:
-		sirfsoc_local_timer_stop(this_cpu_ptr(sirfsoc_clockevent));
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block sirfsoc_cpu_nb = {
-	.notifier_call = sirfsoc_cpu_notify,
-};
-
 static int __init sirfsoc_clockevent_init(void)
 {
 	sirfsoc_clockevent = alloc_percpu(struct clock_event_device);
 	BUG_ON(!sirfsoc_clockevent);
 
-	BUG_ON(register_cpu_notifier(&sirfsoc_cpu_nb));
-
-	/* Immediately configure the timer on the boot CPU */
-	return sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
+	/* Install and invoke hotplug callbacks */
+	return cpuhp_setup_state(CPUHP_AP_MARCO_TIMER_STARTING,
+				 "AP_MARCO_TIMER_STARTING",
+				 sirfsoc_local_timer_starting_cpu,
+				 sirfsoc_local_timer_dying_cpu);
 }
 
 /* initialize the kernel jiffy timer source */
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index d83ab82672e4..2de4cad9c5ed 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -51,6 +51,8 @@ module_param_named(boot_enable, boot_enable, int, S_IRUGO);
 static int etm_count;
 static struct etm_drvdata *etmdrvdata[NR_CPUS];
 
+static enum cpuhp_state hp_online;
+
 /*
  * Memory mapped writes to clear os lock are not supported on some processors
  * and OS lock must be unlocked before any memory mapped access on such
@@ -481,8 +483,7 @@ static int etm_enable_sysfs(struct coresight_device *csdev)
 
 	/*
 	 * Configure the ETM only if the CPU is online. If it isn't online
-	 * hw configuration will take place when 'CPU_STARTING' is received
-	 * in @etm_cpu_callback.
+	 * hw configuration will take place on the local CPU during bring up.
 	 */
 	if (cpu_online(drvdata->cpu)) {
 		ret = smp_call_function_single(drvdata->cpu,
@@ -641,47 +642,44 @@ static const struct coresight_ops etm_cs_ops = {
 	.source_ops = &etm_source_ops,
 };
 
-static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
-			    void *hcpu)
+static int etm_online_cpu(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
-
 	if (!etmdrvdata[cpu])
-		goto out;
+		return 0;
 
-	switch (action & (~CPU_TASKS_FROZEN)) {
-	case CPU_STARTING:
-		spin_lock(&etmdrvdata[cpu]->spinlock);
-		if (!etmdrvdata[cpu]->os_unlock) {
-			etm_os_unlock(etmdrvdata[cpu]);
-			etmdrvdata[cpu]->os_unlock = true;
-		}
-
-		if (local_read(&etmdrvdata[cpu]->mode))
-			etm_enable_hw(etmdrvdata[cpu]);
-		spin_unlock(&etmdrvdata[cpu]->spinlock);
-		break;
-
-	case CPU_ONLINE:
-		if (etmdrvdata[cpu]->boot_enable &&
-		    !etmdrvdata[cpu]->sticky_enable)
-			coresight_enable(etmdrvdata[cpu]->csdev);
-		break;
-
-	case CPU_DYING:
-		spin_lock(&etmdrvdata[cpu]->spinlock);
-		if (local_read(&etmdrvdata[cpu]->mode))
-			etm_disable_hw(etmdrvdata[cpu]);
-		spin_unlock(&etmdrvdata[cpu]->spinlock);
-		break;
-	}
-out:
-	return NOTIFY_OK;
+	if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
+		coresight_enable(etmdrvdata[cpu]->csdev);
+	return 0;
 }
 
-static struct notifier_block etm_cpu_notifier = {
-	.notifier_call = etm_cpu_callback,
-};
+static int etm_starting_cpu(unsigned int cpu)
+{
+	if (!etmdrvdata[cpu])
+		return 0;
+
+	spin_lock(&etmdrvdata[cpu]->spinlock);
+	if (!etmdrvdata[cpu]->os_unlock) {
+		etm_os_unlock(etmdrvdata[cpu]);
+		etmdrvdata[cpu]->os_unlock = true;
+	}
+
+	if (local_read(&etmdrvdata[cpu]->mode))
+		etm_enable_hw(etmdrvdata[cpu]);
+	spin_unlock(&etmdrvdata[cpu]->spinlock);
+	return 0;
+}
+
+static int etm_dying_cpu(unsigned int cpu)
+{
+	if (!etmdrvdata[cpu])
+		return 0;
+
+	spin_lock(&etmdrvdata[cpu]->spinlock);
+	if (local_read(&etmdrvdata[cpu]->mode))
+		etm_disable_hw(etmdrvdata[cpu]);
+	spin_unlock(&etmdrvdata[cpu]->spinlock);
+	return 0;
+}
 
 static bool etm_arch_supported(u8 arch)
 {
@@ -806,9 +804,17 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
 				etm_init_arch_data, drvdata, 1))
 		dev_err(dev, "ETM arch init failed\n");
 
-	if (!etm_count++)
-		register_hotcpu_notifier(&etm_cpu_notifier);
-
+	if (!etm_count++) {
+		cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
+					  "AP_ARM_CORESIGHT_STARTING",
+					  etm_starting_cpu, etm_dying_cpu);
+		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+						"AP_ARM_CORESIGHT_ONLINE",
+						etm_online_cpu, NULL);
+		if (ret < 0)
+			goto err_arch_supported;
+		hp_online = ret;
+	}
 	put_online_cpus();
 
 	if (etm_arch_supported(drvdata->arch) == false) {
@@ -839,7 +845,6 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
 
 	pm_runtime_put(&adev->dev);
 	dev_info(dev, "%s initialized\n", (char *)id->data);
-
 	if (boot_enable) {
 		coresight_enable(drvdata->csdev);
 		drvdata->boot_enable = true;
@@ -848,8 +853,11 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
 	return 0;
 
 err_arch_supported:
-	if (--etm_count == 0)
-		unregister_hotcpu_notifier(&etm_cpu_notifier);
+	if (--etm_count == 0) {
+		cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
+		if (hp_online)
+			cpuhp_remove_state_nocalls(hp_online);
+	}
 	return ret;
 }
 
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 462f0dc15757..1a5e0d14c1dd 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -48,6 +48,8 @@ static int etm4_count;
 static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
 static void etm4_set_default(struct etmv4_config *config);
 
+static enum cpuhp_state hp_online;
+
 static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
 {
 	/* Writing any value to ETMOSLAR unlocks the trace registers */
@@ -673,47 +675,44 @@ void etm4_config_trace_mode(struct etmv4_config *config)
 	config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc;
 }
 
-static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
-			    void *hcpu)
+static int etm4_online_cpu(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
-
 	if (!etmdrvdata[cpu])
-		goto out;
+		return 0;
 
-	switch (action & (~CPU_TASKS_FROZEN)) {
-	case CPU_STARTING:
-		spin_lock(&etmdrvdata[cpu]->spinlock);
-		if (!etmdrvdata[cpu]->os_unlock) {
-			etm4_os_unlock(etmdrvdata[cpu]);
-			etmdrvdata[cpu]->os_unlock = true;
-		}
-
-		if (local_read(&etmdrvdata[cpu]->mode))
-			etm4_enable_hw(etmdrvdata[cpu]);
-		spin_unlock(&etmdrvdata[cpu]->spinlock);
-		break;
-
-	case CPU_ONLINE:
-		if (etmdrvdata[cpu]->boot_enable &&
-		    !etmdrvdata[cpu]->sticky_enable)
-			coresight_enable(etmdrvdata[cpu]->csdev);
-		break;
-
-	case CPU_DYING:
-		spin_lock(&etmdrvdata[cpu]->spinlock);
-		if (local_read(&etmdrvdata[cpu]->mode))
-			etm4_disable_hw(etmdrvdata[cpu]);
-		spin_unlock(&etmdrvdata[cpu]->spinlock);
-		break;
-	}
-out:
-	return NOTIFY_OK;
+	if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
+		coresight_enable(etmdrvdata[cpu]->csdev);
+	return 0;
 }
 
-static struct notifier_block etm4_cpu_notifier = {
-	.notifier_call = etm4_cpu_callback,
-};
+static int etm4_starting_cpu(unsigned int cpu)
+{
+	if (!etmdrvdata[cpu])
+		return 0;
+
+	spin_lock(&etmdrvdata[cpu]->spinlock);
+	if (!etmdrvdata[cpu]->os_unlock) {
+		etm4_os_unlock(etmdrvdata[cpu]);
+		etmdrvdata[cpu]->os_unlock = true;
+	}
+
+	if (local_read(&etmdrvdata[cpu]->mode))
+		etm4_enable_hw(etmdrvdata[cpu]);
+	spin_unlock(&etmdrvdata[cpu]->spinlock);
+	return 0;
+}
+
+static int etm4_dying_cpu(unsigned int cpu)
+{
+	if (!etmdrvdata[cpu])
+		return 0;
+
+	spin_lock(&etmdrvdata[cpu]->spinlock);
+	if (local_read(&etmdrvdata[cpu]->mode))
+		etm4_disable_hw(etmdrvdata[cpu]);
+	spin_unlock(&etmdrvdata[cpu]->spinlock);
+	return 0;
+}
 
 static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
 {
@@ -767,8 +766,17 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
 				etm4_init_arch_data, drvdata, 1))
 		dev_err(dev, "ETM arch init failed\n");
 
-	if (!etm4_count++)
-		register_hotcpu_notifier(&etm4_cpu_notifier);
+	if (!etm4_count++) {
+		cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING,
+					  "AP_ARM_CORESIGHT4_STARTING",
+					  etm4_starting_cpu, etm4_dying_cpu);
+		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+						"AP_ARM_CORESIGHT4_ONLINE",
+						etm4_online_cpu, NULL);
+		if (ret < 0)
+			goto err_arch_supported;
+		hp_online = ret;
+	}
 
 	put_online_cpus();
 
@@ -809,8 +817,11 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
 	return 0;
 
 err_arch_supported:
-	if (--etm4_count == 0)
-		unregister_hotcpu_notifier(&etm4_cpu_notifier);
+	if (--etm4_count == 0) {
+		cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING);
+		if (hp_online)
+			cpuhp_remove_state_nocalls(hp_online);
+	}
 	return ret;
 }
 
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 7c42b1d13faf..8bcee65a0b8c 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -345,38 +345,20 @@ static void armada_mpic_send_doorbell(const struct cpumask *mask,
 		ARMADA_370_XP_SW_TRIG_INT_OFFS);
 }
 
-static int armada_xp_mpic_secondary_init(struct notifier_block *nfb,
-					 unsigned long action, void *hcpu)
+static int armada_xp_mpic_starting_cpu(unsigned int cpu)
 {
-	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) {
-		armada_xp_mpic_perf_init();
-		armada_xp_mpic_smp_cpu_init();
-	}
-
-	return NOTIFY_OK;
+	armada_xp_mpic_perf_init();
+	armada_xp_mpic_smp_cpu_init();
+	return 0;
 }
 
-static struct notifier_block armada_370_xp_mpic_cpu_notifier = {
-	.notifier_call = armada_xp_mpic_secondary_init,
-	.priority = 100,
-};
-
-static int mpic_cascaded_secondary_init(struct notifier_block *nfb,
-					unsigned long action, void *hcpu)
+static int mpic_cascaded_starting_cpu(unsigned int cpu)
 {
-	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) {
-		armada_xp_mpic_perf_init();
-		enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
-	}
-
-	return NOTIFY_OK;
+	armada_xp_mpic_perf_init();
+	enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
+	return 0;
 }
-
-static struct notifier_block mpic_cascaded_cpu_notifier = {
-	.notifier_call = mpic_cascaded_secondary_init,
-	.priority = 100,
-};
-#endif /* CONFIG_SMP */
+#endif
 
 static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
 	.map = armada_370_xp_mpic_irq_map,
@@ -595,11 +577,15 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
 		set_handle_irq(armada_370_xp_handle_irq);
 #ifdef CONFIG_SMP
 		set_smp_cross_call(armada_mpic_send_doorbell);
-		register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier);
+		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
+					  "AP_IRQ_ARMADA_XP_STARTING",
+					  armada_xp_mpic_starting_cpu, NULL);
 #endif
 	} else {
 #ifdef CONFIG_SMP
-		register_cpu_notifier(&mpic_cascaded_cpu_notifier);
+		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_CASC_STARTING,
+					  "AP_IRQ_ARMADA_CASC_STARTING",
+					  mpic_cascaded_starting_cpu, NULL);
 #endif
 		irq_set_chained_handler(parent_irq,
 					armada_370_xp_mpic_handle_cascade_irq);
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index df1949c0aa23..d96b2c947e74 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -202,26 +202,19 @@ static void bcm2836_arm_irqchip_send_ipi(const struct cpumask *mask,
 	}
 }
 
-/* Unmasks the IPI on the CPU when it's online. */
-static int bcm2836_arm_irqchip_cpu_notify(struct notifier_block *nfb,
-					  unsigned long action, void *hcpu)
+static int bcm2836_cpu_starting(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
-	unsigned int int_reg = LOCAL_MAILBOX_INT_CONTROL0;
-	unsigned int mailbox = 0;
-
-	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
-		bcm2836_arm_irqchip_unmask_per_cpu_irq(int_reg, mailbox, cpu);
-	else if (action == CPU_DYING)
-		bcm2836_arm_irqchip_mask_per_cpu_irq(int_reg, mailbox, cpu);
-
-	return NOTIFY_OK;
+	bcm2836_arm_irqchip_unmask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0,
+					       cpu);
+	return 0;
 }
 
-static struct notifier_block bcm2836_arm_irqchip_cpu_notifier = {
-	.notifier_call = bcm2836_arm_irqchip_cpu_notify,
-	.priority = 100,
-};
+static int bcm2836_cpu_dying(unsigned int cpu)
+{
+	bcm2836_arm_irqchip_mask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0,
+					     cpu);
+	return 0;
+}
 
 #ifdef CONFIG_ARM
 static int __init bcm2836_smp_boot_secondary(unsigned int cpu,
@@ -251,10 +244,9 @@ bcm2836_arm_irqchip_smp_init(void)
 {
 #ifdef CONFIG_SMP
 	/* Unmask IPIs to the boot CPU. */
-	bcm2836_arm_irqchip_cpu_notify(&bcm2836_arm_irqchip_cpu_notifier,
-				       CPU_STARTING,
-				       (void *)(uintptr_t)smp_processor_id());
-	register_cpu_notifier(&bcm2836_arm_irqchip_cpu_notifier);
+	cpuhp_setup_state(CPUHP_AP_IRQ_BCM2836_STARTING,
+			  "AP_IRQ_BCM2836_STARTING", bcm2836_cpu_starting,
+			  bcm2836_cpu_dying);
 
 	set_smp_cross_call(bcm2836_arm_irqchip_send_ipi);
 
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 2c5ba0e704bf..6fc56c3466b0 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -538,23 +538,13 @@ static void gic_cpu_init(void)
 }
 
 #ifdef CONFIG_SMP
-static int gic_secondary_init(struct notifier_block *nfb,
-			      unsigned long action, void *hcpu)
+
+static int gic_starting_cpu(unsigned int cpu)
 {
-	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
-		gic_cpu_init();
-	return NOTIFY_OK;
+	gic_cpu_init();
+	return 0;
 }
 
-/*
- * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
- * priority because the GIC needs to be up before the ARM generic timers.
- */
-static struct notifier_block gic_cpu_notifier = {
-	.notifier_call = gic_secondary_init,
-	.priority = 100,
-};
-
 static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
 				   unsigned long cluster_id)
 {
@@ -634,7 +624,9 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 static void gic_smp_init(void)
 {
 	set_smp_cross_call(gic_raise_softirq);
-	register_cpu_notifier(&gic_cpu_notifier);
+	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GICV3_STARTING,
+				  "AP_IRQ_GICV3_STARTING", gic_starting_cpu,
+				  NULL);
 }
 
 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
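Two details are worth flagging in the GICv3 conversion. gic_smp_init() uses cpuhp_setup_state_nocalls(), which installs the callbacks without invoking them on already-online CPUs; the boot CPU is handled by a direct gic_cpu_init() call in the init path. And the deleted "arbitrarily high priority" comment is replaced by something less arbitrary: states execute in the order of their cpuhp_state enum slots during bring-up (and in reverse on take-down), so "GIC before ARM generic timers" is now encoded structurally. An abridged, illustrative sketch of the relevant slots (not the verbatim enum from include/linux/cpuhotplug.h):

enum cpuhp_state {
	/* ... */
	CPUHP_AP_IRQ_GIC_STARTING,	/* irqchips come up first ... */
	CPUHP_AP_IRQ_GICV3_STARTING,
	CPUHP_AP_IRQ_HIP04_STARTING,
	CPUHP_AP_IRQ_BCM2836_STARTING,
	/* ... */
	CPUHP_AP_ARM_ARCH_TIMER_STARTING, /* ... then the timers they gate */
	/* ... */
};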
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 1de07eb5839c..c2cab572c511 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -984,25 +984,12 @@ static int gic_irq_domain_translate(struct irq_domain *d,
 	return -EINVAL;
 }
 
-#ifdef CONFIG_SMP
-static int gic_secondary_init(struct notifier_block *nfb, unsigned long action,
-			      void *hcpu)
+static int gic_starting_cpu(unsigned int cpu)
 {
-	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
-		gic_cpu_init(&gic_data[0]);
-	return NOTIFY_OK;
+	gic_cpu_init(&gic_data[0]);
+	return 0;
 }
 
-/*
- * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
- * priority because the GIC needs to be up before the ARM generic timers.
- */
-static struct notifier_block gic_cpu_notifier = {
-	.notifier_call = gic_secondary_init,
-	.priority = 100,
-};
-#endif
-
 static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 				unsigned int nr_irqs, void *arg)
 {
@@ -1177,8 +1164,10 @@ static int __init __gic_init_bases(struct gic_chip_data *gic,
 		gic_cpu_map[i] = 0xff;
 #ifdef CONFIG_SMP
 	set_smp_cross_call(gic_raise_softirq);
-	register_cpu_notifier(&gic_cpu_notifier);
 #endif
+	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
+				  "AP_IRQ_GIC_STARTING",
+				  gic_starting_cpu, NULL);
 	set_handle_irq(gic_handle_irq);
 	if (static_key_true(&supports_deactivate))
 		pr_info("GIC: Using split EOI/Deactivate mode\n");
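The legacy GIC gets the same treatment, with one structural change: registration moves outside the #ifdef CONFIG_SMP block, so the callback is installed unconditionally (on UP kernels it simply has no secondary CPUs to fire for). The recurring judgment call in this series is which setup variant to use; summarizing, with startup/teardown standing in for any driver callbacks:

/*
 * cpuhp_setup_state(state, name, startup, teardown)
 *	installs the callbacks AND invokes startup() on every CPU that
 *	is already online, replacing an explicit boot-CPU init call.
 *
 * cpuhp_setup_state_nocalls(state, name, startup, teardown)
 *	only installs the callbacks; the caller stays responsible for
 *	CPUs that are already up (here __gic_init_bases() invokes
 *	gic_cpu_init() directly for the boot CPU).
 */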
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 9e25d8ce08e5..021b0e0833c1 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -342,26 +342,12 @@ static int hip04_irq_domain_xlate(struct irq_domain *d,
 	return ret;
 }
 
-#ifdef CONFIG_SMP
-static int hip04_irq_secondary_init(struct notifier_block *nfb,
-				    unsigned long action,
-				    void *hcpu)
+static int hip04_irq_starting_cpu(unsigned int cpu)
 {
-	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
-		hip04_irq_cpu_init(&hip04_data);
-	return NOTIFY_OK;
+	hip04_irq_cpu_init(&hip04_data);
+	return 0;
 }
 
-/*
- * Notifier for enabling the INTC CPU interface. Set an arbitrarily high
- * priority because the GIC needs to be up before the ARM generic timers.
- */
-static struct notifier_block hip04_irq_cpu_notifier = {
-	.notifier_call = hip04_irq_secondary_init,
-	.priority = 100,
-};
-#endif
-
 static const struct irq_domain_ops hip04_irq_domain_ops = {
 	.map	= hip04_irq_domain_map,
 	.xlate	= hip04_irq_domain_xlate,
@@ -417,13 +403,12 @@ hip04_of_init(struct device_node *node, struct device_node *parent)
 
 #ifdef CONFIG_SMP
 	set_smp_cross_call(hip04_raise_softirq);
-	register_cpu_notifier(&hip04_irq_cpu_notifier);
 #endif
 	set_handle_irq(hip04_handle_irq);
 
 	hip04_irq_dist_init(&hip04_data);
-	hip04_irq_cpu_init(&hip04_data);
-
+	cpuhp_setup_state(CPUHP_AP_IRQ_HIP04_STARTING, "AP_IRQ_HIP04_STARTING",
+			  hip04_irq_starting_cpu, NULL);
 	return 0;
 }
 IRQCHIP_DECLARE(hip04_intc, "hisilicon,hip04-intc", hip04_of_init);
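hip04 takes the other branch of that choice: the direct hip04_irq_cpu_init(&hip04_data) call for the boot CPU is dropped, and the plain cpuhp_setup_state() variant reinstates it by invoking hip04_irq_starting_cpu() on every already-online CPU. Restating the hunk above as before/after:

/* Before: explicit boot-CPU init, plus a notifier for secondaries. */
hip04_irq_cpu_init(&hip04_data);
register_cpu_notifier(&hip04_irq_cpu_notifier);

/* After: one registration covers both, because cpuhp_setup_state()
 * runs the startup callback on every CPU already online and on each
 * CPU that comes up later. */
cpuhp_setup_state(CPUHP_AP_IRQ_HIP04_STARTING, "AP_IRQ_HIP04_STARTING",
		  hip04_irq_starting_cpu, NULL);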
diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c
index 938467fb82be..22f0634dd3fa 100644
--- a/drivers/leds/trigger/ledtrig-cpu.c
+++ b/drivers/leds/trigger/ledtrig-cpu.c
@@ -92,29 +92,22 @@ static struct syscore_ops ledtrig_cpu_syscore_ops = {
 	.resume		= ledtrig_cpu_syscore_resume,
 };
 
-static int ledtrig_cpu_notify(struct notifier_block *self,
-			      unsigned long action, void *hcpu)
+static int ledtrig_online_cpu(unsigned int cpu)
 {
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		ledtrig_cpu(CPU_LED_START);
-		break;
-	case CPU_DYING:
-		ledtrig_cpu(CPU_LED_STOP);
-		break;
-	}
-
-	return NOTIFY_OK;
+	ledtrig_cpu(CPU_LED_START);
+	return 0;
 }
 
-
-static struct notifier_block ledtrig_cpu_nb = {
-	.notifier_call = ledtrig_cpu_notify,
-};
+static int ledtrig_prepare_down_cpu(unsigned int cpu)
+{
+	ledtrig_cpu(CPU_LED_STOP);
+	return 0;
+}
 
 static int __init ledtrig_cpu_init(void)
 {
 	int cpu;
+	int ret;
 
 	/* Supports up to 9999 cpu cores */
 	BUILD_BUG_ON(CONFIG_NR_CPUS > 9999);
@@ -133,7 +126,12 @@ static int __init ledtrig_cpu_init(void)
 	}
 
 	register_syscore_ops(&ledtrig_cpu_syscore_ops);
-	register_cpu_notifier(&ledtrig_cpu_nb);
+
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_LEDTRIG_STARTING",
+				ledtrig_online_cpu, ledtrig_prepare_down_cpu);
+	if (ret < 0)
+		pr_err("CPU hotplug notifier for ledtrig-cpu could not be registered: %d\n",
+		       ret);
 
 	pr_info("ledtrig-cpu: registered to indicate activity on CPUs\n");
 
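ledtrig-cpu is the one conversion here without a fixed enum slot: CPUHP_AP_ONLINE_DYN asks the core to allocate a state dynamically, and, per the commit title, the work moves from the atomic STARTING phase to the ONLINE level, where callbacks run in process context and may sleep (the "AP_LEDTRIG_STARTING" name string is a cosmetic leftover). In the dynamic range a successful cpuhp_setup_state() returns the allocated state number, hence the 'ret < 0' check rather than 'ret'. The patch discards that number; a driver that could be unloaded would keep it, roughly like this (hypothetical foo_* wrappers, not from the patch):

static enum cpuhp_state foo_hp_state;

static int __init foo_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_LEDTRIG_STARTING",
				ledtrig_online_cpu, ledtrig_prepare_down_cpu);
	if (ret < 0)
		return ret;
	foo_hp_state = ret;	/* dynamically allocated state number */
	return 0;
}

static void __exit foo_exit(void)
{
	/* Invokes the teardown callback on each online CPU, then
	 * releases the dynamic state. */
	cpuhp_remove_state(foo_hp_state);
}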
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 8e4d7f590b06..6ccb994bdfcb 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -688,30 +688,29 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 	return 0;
 }
 
+static DEFINE_MUTEX(arm_pmu_mutex);
+static LIST_HEAD(arm_pmu_list);
+
 /*
  * PMU hardware loses all context when a CPU goes offline.
  * When a CPU is hotplugged back in, since some hardware registers are
  * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
  * junk values out of them.
  */
-static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
-			  void *hcpu)
+static int arm_perf_starting_cpu(unsigned int cpu)
 {
-	int cpu = (unsigned long)hcpu;
-	struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
-
-	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
-		return NOTIFY_DONE;
-
-	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
-		return NOTIFY_DONE;
+	struct arm_pmu *pmu;
 
-	if (pmu->reset)
-		pmu->reset(pmu);
-	else
-		return NOTIFY_DONE;
-
-	return NOTIFY_OK;
+	mutex_lock(&arm_pmu_mutex);
+	list_for_each_entry(pmu, &arm_pmu_list, entry) {
+
+		if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
+			continue;
+		if (pmu->reset)
+			pmu->reset(pmu);
+	}
+	mutex_unlock(&arm_pmu_mutex);
+	return 0;
 }
 
 #ifdef CONFIG_CPU_PM
@@ -822,10 +821,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	if (!cpu_hw_events)
 		return -ENOMEM;
 
-	cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
-	err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
-	if (err)
-		goto out_hw_events;
+	mutex_lock(&arm_pmu_mutex);
+	list_add_tail(&cpu_pmu->entry, &arm_pmu_list);
+	mutex_unlock(&arm_pmu_mutex);
 
 	err = cpu_pm_pmu_register(cpu_pmu);
 	if (err)
@@ -861,8 +859,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	return 0;
 
 out_unregister:
-	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
-out_hw_events:
+	mutex_lock(&arm_pmu_mutex);
+	list_del(&cpu_pmu->entry);
+	mutex_unlock(&arm_pmu_mutex);
 	free_percpu(cpu_hw_events);
 	return err;
 }
@@ -870,7 +869,9 @@ out_hw_events:
 static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 {
 	cpu_pm_pmu_unregister(cpu_pmu);
-	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
+	mutex_lock(&arm_pmu_mutex);
+	list_del(&cpu_pmu->entry);
+	mutex_unlock(&arm_pmu_mutex);
 	free_percpu(cpu_pmu->hw_events);
 }
 
@@ -1061,3 +1062,17 @@ out_free:
 	kfree(pmu);
 	return ret;
 }
+
+static int arm_pmu_hp_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+					"AP_PERF_ARM_STARTING",
+					arm_perf_starting_cpu, NULL);
+	if (ret)
+		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
+		       ret);
+	return ret;
+}
+subsys_initcall(arm_pmu_hp_init);
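arm_pmu differs from the rest in that several PMU instances can coexist while a cpuhp state carries exactly one callback pair. The old code recovered per-instance context via container_of() on the instance's notifier_block; the new code uses a single shared callback that walks a mutex-protected global list, registered once via subsys_initcall() so the state exists before any PMU instance probes and adds itself. The skeleton of that idiom, reduced to its moving parts (foo_* names hypothetical):

static DEFINE_MUTEX(foo_mutex);
static LIST_HEAD(foo_list);		/* all registered instances */

struct foo_pmu {
	struct list_head entry;		/* linked into foo_list */
	void (*reset)(struct foo_pmu *p);
};

/* One shared callback, fanned out to every registered instance. */
static int foo_starting_cpu(unsigned int cpu)
{
	struct foo_pmu *p;

	mutex_lock(&foo_mutex);
	list_for_each_entry(p, &foo_list, entry)
		if (p->reset)
			p->reset(p);
	mutex_unlock(&foo_mutex);
	return 0;
}

cpu_pmu_init() and cpu_pmu_destroy() above then only link and unlink instances under the same mutex.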