author     Thomas Gleixner <tglx@linutronix.de>    2015-04-02 20:38:05 -0400
committer  Ingo Molnar <mingo@kernel.org>          2015-04-03 02:44:37 -0400
commit     a49b116dcb1265f238f3169507424257b0519069 (patch)
tree       948a98d4adcc47a89d1148033c7c130619eb2b06 /kernel/time
parent     52c063d1adbc16c76e70fffa20727fcd4e9343b3 (diff)
clockevents: Cleanup dead cpu explicitely
clockevents_notify() is a leftover from the early design of the
clockevents facility. It's really not a notification mechanism,
it's a multiplex call. We are way better off to have explicit
calls instead of this monstrosity.

Split out the cleanup function for a dead cpu and invoke it
directly from the cpu down code. Make it conditional on
CPU_HOTPLUG as well.

Temporary change, will be refined in the future.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[ Rebased, added clockevents_notify() removal ]
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1735025.raBZdQHM3m@vostro.rjw.lan
Signed-off-by: Ingo Molnar <mingo@kernel.org>
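The caller side of this change lives outside kernel/time, so it does not
appear in the diffstat below. As an illustration only, a minimal sketch of
how the new entry point is expected to be wired up, based on the commit
message ("invoke it directly from the cpu down code", conditional on CPU
hotplug); the header placement, the surrounding function and the exact
call site are assumptions, not part of this patch:

/* Assumed declaration side (e.g. a tick header), stubbed out when
 * CPU hotplug is disabled: */
#ifdef CONFIG_HOTPLUG_CPU
extern void tick_cleanup_dead_cpu(int cpu);
#else
static inline void tick_cleanup_dead_cpu(int cpu) { }
#endif

/* Assumed caller in the cpu down path: once the CPU is fully dead,
 * one explicit call replaces the old multiplexed
 * clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu). */
static int example_cpu_down(unsigned int cpu)
{
	/* ... stop the CPU and wait for it to die ... */
	__cpu_die(cpu);

	/* The CPU is completely dead: release its per-cpu tick devices. */
	tick_cleanup_dead_cpu(cpu);
	return 0;
}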
Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/clockevents.c    | 51
-rw-r--r--  kernel/time/hrtimer.c        |  3
-rw-r--r--  kernel/time/tick-broadcast.c | 39
-rw-r--r--  kernel/time/tick-common.c    |  6
-rw-r--r--  kernel/time/tick-internal.h  | 10
5 files changed, 49 insertions, 60 deletions
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 88fb3b96c7cc..25d942d1da27 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -642,49 +642,40 @@ void clockevents_resume(void)
 			dev->resume(dev);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
 /**
- * clockevents_notify - notification about relevant events
- * Returns 0 on success, any other value on error
+ * tick_cleanup_dead_cpu - Cleanup the tick and clockevents of a dead cpu
  */
-int clockevents_notify(unsigned long reason, void *arg)
+void tick_cleanup_dead_cpu(int cpu)
 {
 	struct clock_event_device *dev, *tmp;
 	unsigned long flags;
-	int cpu, ret = 0;
 
 	raw_spin_lock_irqsave(&clockevents_lock, flags);
 
-	switch (reason) {
-	case CLOCK_EVT_NOTIFY_CPU_DEAD:
-		tick_shutdown_broadcast_oneshot(arg);
-		tick_shutdown_broadcast(arg);
-		tick_shutdown(arg);
-		/*
-		 * Unregister the clock event devices which were
-		 * released from the users in the notify chain.
-		 */
-		list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
+	tick_shutdown_broadcast_oneshot(cpu);
+	tick_shutdown_broadcast(cpu);
+	tick_shutdown(cpu);
+	/*
+	 * Unregister the clock event devices which were
+	 * released from the users in the notify chain.
+	 */
+	list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
+		list_del(&dev->list);
+	/*
+	 * Now check whether the CPU has left unused per cpu devices
+	 */
+	list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
+		if (cpumask_test_cpu(cpu, dev->cpumask) &&
+		    cpumask_weight(dev->cpumask) == 1 &&
+		    !tick_is_broadcast_device(dev)) {
+			BUG_ON(dev->state != CLOCK_EVT_STATE_DETACHED);
 			list_del(&dev->list);
-		/*
-		 * Now check whether the CPU has left unused per cpu devices
-		 */
-		cpu = *((int *)arg);
-		list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
-			if (cpumask_test_cpu(cpu, dev->cpumask) &&
-			    cpumask_weight(dev->cpumask) == 1 &&
-			    !tick_is_broadcast_device(dev)) {
-				BUG_ON(dev->state != CLOCK_EVT_STATE_DETACHED);
-				list_del(&dev->list);
-			}
 		}
-		break;
-	default:
-		break;
 	}
 	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
-	return ret;
 }
-EXPORT_SYMBOL_GPL(clockevents_notify);
+#endif
 
 #ifdef CONFIG_SYSFS
 struct bus_type clockevents_subsys = {
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 6a7a64ec7d1b..76d4bd962b19 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1709,11 +1709,8 @@ static int hrtimer_cpu_notify(struct notifier_block *self,
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-	{
-		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
 		migrate_hrtimers(scpu);
 		break;
-	}
 #endif
 
 	default:
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 55e43f20987a..7e8ca4f448a8 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -410,14 +410,14 @@ void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
 		dev->event_handler = tick_handle_periodic_broadcast;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
 /*
  * Remove a CPU from broadcasting
  */
-void tick_shutdown_broadcast(unsigned int *cpup)
+void tick_shutdown_broadcast(unsigned int cpu)
 {
 	struct clock_event_device *bc;
 	unsigned long flags;
-	unsigned int cpu = *cpup;
 
 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
 
@@ -432,6 +432,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)
 
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
+#endif
 
 void tick_suspend_broadcast(void)
 {
@@ -672,21 +673,6 @@ static void broadcast_shutdown_local(struct clock_event_device *bc,
 	clockevents_set_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
 }
 
-void hotplug_cpu__broadcast_tick_pull(int deadcpu)
-{
-	struct clock_event_device *bc;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
-	bc = tick_broadcast_device.evtdev;
-
-	if (bc && broadcast_needs_cpu(bc, deadcpu)) {
-		/* This moves the broadcast assignment to this CPU: */
-		clockevents_program_event(bc, bc->next_event, 1);
-	}
-	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
-}
-
 /**
  * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
  * @state:	The target state (enter/exit)
@@ -908,14 +894,28 @@ void tick_broadcast_switch_to_oneshot(void)
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+void hotplug_cpu__broadcast_tick_pull(int deadcpu)
+{
+	struct clock_event_device *bc;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
+	bc = tick_broadcast_device.evtdev;
+
+	if (bc && broadcast_needs_cpu(bc, deadcpu)) {
+		/* This moves the broadcast assignment to this CPU: */
+		clockevents_program_event(bc, bc->next_event, 1);
+	}
+	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+}
 
 /*
  * Remove a dead CPU from broadcasting
  */
-void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
+void tick_shutdown_broadcast_oneshot(unsigned int cpu)
 {
 	unsigned long flags;
-	unsigned int cpu = *cpup;
 
 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
 
@@ -929,6 +929,7 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
 
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
+#endif
 
 /*
  * Check, whether the broadcast device is in one shot mode
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 055c868f3ec9..fac3e98fec49 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -348,7 +348,6 @@ void tick_handover_do_timer(void)
 			TICK_DO_TIMER_NONE;
 	}
 }
-#endif
 
 /*
  * Shutdown an event device on a given cpu:
@@ -357,9 +356,9 @@ void tick_handover_do_timer(void)
  * access the hardware device itself.
  * We just set the mode and remove it from the lists.
  */
-void tick_shutdown(unsigned int *cpup)
+void tick_shutdown(unsigned int cpu)
 {
-	struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
+	struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
 	struct clock_event_device *dev = td->evtdev;
 
 	td->mode = TICKDEV_MODE_PERIODIC;
@@ -375,6 +374,7 @@ void tick_shutdown(unsigned int *cpup)
 		td->evtdev = NULL;
 	}
 }
+#endif
 
 /**
  * tick_suspend_local - Suspend the local tick device
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index aabcb5d00cf2..b64fdd8054c5 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -20,7 +20,7 @@ extern int tick_do_timer_cpu __read_mostly;
 extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
 extern void tick_handle_periodic(struct clock_event_device *dev);
 extern void tick_check_new_device(struct clock_event_device *dev);
-extern void tick_shutdown(unsigned int *cpup);
+extern void tick_shutdown(unsigned int cpu);
 extern void tick_suspend(void);
 extern void tick_resume(void);
 extern bool tick_check_replacement(struct clock_event_device *curdev,
@@ -52,7 +52,7 @@ extern ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt);
 extern int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu);
 extern void tick_install_broadcast_device(struct clock_event_device *dev);
 extern int tick_is_broadcast_device(struct clock_event_device *dev);
-extern void tick_shutdown_broadcast(unsigned int *cpup);
+extern void tick_shutdown_broadcast(unsigned int cpu);
 extern void tick_suspend_broadcast(void);
 extern void tick_resume_broadcast(void);
 extern bool tick_resume_check_broadcast(void);
@@ -66,7 +66,7 @@ static inline void tick_install_broadcast_device(struct clock_event_device *dev)
 static inline int tick_is_broadcast_device(struct clock_event_device *dev) { return 0; }
 static inline int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) { return 0; }
 static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { }
-static inline void tick_shutdown_broadcast(unsigned int *cpup) { }
+static inline void tick_shutdown_broadcast(unsigned int cpu) { }
 static inline void tick_suspend_broadcast(void) { }
 static inline void tick_resume_broadcast(void) { }
 static inline bool tick_resume_check_broadcast(void) { return false; }
@@ -117,7 +117,7 @@ static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
 #if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
 extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
 extern void tick_broadcast_switch_to_oneshot(void);
-extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
+extern void tick_shutdown_broadcast_oneshot(unsigned int cpu);
 extern int tick_broadcast_oneshot_active(void);
 extern void tick_check_oneshot_broadcast_this_cpu(void);
 bool tick_broadcast_oneshot_available(void);
@@ -125,7 +125,7 @@ extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
 #else /* !(BROADCAST && ONESHOT): */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
-static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
+static inline void tick_shutdown_broadcast_oneshot(unsigned int cpu) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
 static inline void tick_check_oneshot_broadcast_this_cpu(void) { }
 static inline bool tick_broadcast_oneshot_available(void) { return tick_oneshot_possible(); }