Diffstat (limited to 'kernel/timer.c')
-rw-r--r--  kernel/timer.c | 111
1 file changed, 93 insertions(+), 18 deletions(-)
diff --git a/kernel/timer.c b/kernel/timer.c
index c61a7949387f..c850d06bd19e 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -34,6 +34,7 @@
 #include <linux/posix-timers.h>
 #include <linux/cpu.h>
 #include <linux/syscalls.h>
+#include <linux/kallsyms.h>
 #include <linux/delay.h>
 #include <linux/tick.h>
 #include <linux/kallsyms.h>
@@ -74,6 +75,7 @@ struct tvec_root {
 struct tvec_base {
         spinlock_t lock;
         struct timer_list *running_timer;
+        wait_queue_head_t wait_for_running_timer;
         unsigned long timer_jiffies;
         unsigned long next_timer;
         struct tvec_root tv1;
@@ -322,9 +324,7 @@ EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
 static inline void set_running_timer(struct tvec_base *base,
                                      struct timer_list *timer)
 {
-#ifdef CONFIG_SMP
         base->running_timer = timer;
-#endif
 }
 
 static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
@@ -656,6 +656,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
 
         debug_activate(timer, expires);
 
+        preempt_disable();
         cpu = smp_processor_id();
 
 #if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
@@ -666,6 +667,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
                 cpu = preferred_cpu;
         }
 #endif
+        preempt_enable();
+
         new_base = per_cpu(tvec_bases, cpu);
 
         if (base != new_base) {
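The preempt_disable()/preempt_enable() pair above pins __mod_timer() to one CPU while it samples smp_processor_id() and picks a target base; with preemptible softirqs this path can otherwise migrate mid-selection. A minimal sketch of the equivalent get_cpu()/put_cpu() idiom (pick_timer_cpu() is a hypothetical name, not part of the patch); the result is only a placement hint, so staleness after put_cpu() is harmless, exactly as in __mod_timer():

#include <linux/smp.h>

static int pick_timer_cpu(void)
{
        /* get_cpu() = preempt_disable() + smp_processor_id() */
        int cpu = get_cpu();

        /* ... a preferred-CPU heuristic could run here ... */

        put_cpu();      /* preempt_enable() */
        return cpu;     /* only a hint; may be stale, which is fine */
}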
@@ -825,6 +828,18 @@ void add_timer_on(struct timer_list *timer, int cpu)
 }
 EXPORT_SYMBOL_GPL(add_timer_on);
 
+/*
+ * Wait for a running timer
+ */
+void wait_for_running_timer(struct timer_list *timer)
+{
+        struct tvec_base *base = timer->base;
+
+        if (base->running_timer == timer)
+                wait_event(base->wait_for_running_timer,
+                           base->running_timer != timer);
+}
+
 /**
  * del_timer - deactive a timer.
  * @timer: the timer to be deactivated
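wait_for_running_timer() is the sleeping replacement for the cpu_relax() busy-wait in del_timer_sync() (changed further down): a task racing with a live handler now blocks on the per-base waitqueue until __run_timers() clears ->running_timer and calls wake_up(). A minimal sketch of the underlying wait_event()/wake_up() pairing, with a hypothetical 'done' flag standing in for the ->running_timer check:

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(wq);
static int done;

static void waiter(void)
{
        /* sleeps until the condition is true; the condition is
         * re-evaluated after every wakeup, so spurious wakeups
         * are harmless */
        wait_event(wq, done != 0);
}

static void waker(void)
{
        done = 1;       /* make the condition true first ... */
        wake_up(&wq);   /* ... then wake any sleepers */
}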
@@ -859,7 +874,34 @@ int del_timer(struct timer_list *timer)
 }
 EXPORT_SYMBOL(del_timer);
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_SOFTIRQS)
+/*
+ * This function checks whether a timer is active and not running on any
+ * CPU. Upon successful (ret >= 0) exit the timer is not queued and the
+ * handler is not running on any CPU.
+ *
+ * It must not be called from interrupt contexts.
+ */
+int timer_pending_sync(struct timer_list *timer)
+{
+        struct tvec_base *base;
+        unsigned long flags;
+        int ret = -1;
+
+        base = lock_timer_base(timer, &flags);
+
+        if (base->running_timer == timer)
+                goto out;
+
+        ret = 0;
+        if (timer_pending(timer))
+                ret = 1;
+out:
+        spin_unlock_irqrestore(&base->lock, flags);
+
+        return ret;
+}
+
 /**
  * try_to_del_timer_sync - Try to deactivate a timer
  * @timer: timer do del
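timer_pending_sync() returns -1 while the handler is executing, 1 when the timer is queued, and 0 when it is neither. A hypothetical caller (cancel_if_idle() is not part of the patch) might use it to avoid blocking on a live handler; note the answer is only a snapshot once the base lock is dropped, unless the caller otherwise prevents the timer from being re-armed:

#include <linux/errno.h>
#include <linux/timer.h>

static int cancel_if_idle(struct timer_list *t)
{
        int ret = timer_pending_sync(t);

        if (ret < 0)            /* handler is running on some CPU */
                return -EBUSY;
        if (ret == 1)           /* queued but not running: safe to dequeue */
                del_timer(t);
        return 0;
}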
@@ -927,7 +969,7 @@ int del_timer_sync(struct timer_list *timer)
                 int ret = try_to_del_timer_sync(timer);
                 if (ret >= 0)
                         return ret;
-                cpu_relax();
+                wait_for_running_timer(timer);
         }
 }
 EXPORT_SYMBOL(del_timer_sync);
@@ -972,6 +1014,20 @@ static inline void __run_timers(struct tvec_base *base)
                 struct list_head *head = &work_list;
                 int index = base->timer_jiffies & TVR_MASK;
 
+                if (softirq_need_resched()) {
+                        spin_unlock_irq(&base->lock);
+                        wake_up(&base->wait_for_running_timer);
+                        cond_resched_softirq_context();
+                        cpu_relax();
+                        spin_lock_irq(&base->lock);
+                        /*
+                         * We can simply continue after preemption, nobody
+                         * else can touch timer_jiffies so 'index' is still
+                         * valid. Any new jiffy will be taken care of in
+                         * subsequent loops:
+                         */
+                }
+
                 /*
                  * Cascade timers:
                  */
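The block above opens a preemption point at the top of each jiffy iteration: when a higher-priority task needs the CPU, the softirq drops the base lock, wakes any del_timer_sync() waiters, yields via the -rt primitive cond_resched_softirq_context() (plain cond_resched() is not valid in softirq context), then resumes where it left off. The generic shape of such a lock-dropping resched point, sketched for process context with hypothetical work_left()/do_one_unit() helpers:

#include <linux/sched.h>
#include <linux/spinlock.h>

extern bool work_left(void);    /* hypothetical */
extern void do_one_unit(void);  /* hypothetical */

static void drain_work(spinlock_t *lock)
{
        spin_lock_irq(lock);
        while (work_left()) {
                if (need_resched()) {
                        /* drop the lock, yield, re-take it and
                         * continue where we left off */
                        spin_unlock_irq(lock);
                        cond_resched();
                        spin_lock_irq(lock);
                }
                do_one_unit();
        }
        spin_unlock_irq(lock);
}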
@@ -1027,18 +1083,17 @@ static inline void __run_timers(struct tvec_base *base)
                                 lock_map_release(&lockdep_map);
 
                                 if (preempt_count != preempt_count()) {
-                                        printk(KERN_ERR "huh, entered %p "
-                                               "with preempt_count %08x, exited"
-                                               " with %08x?\n",
-                                               fn, preempt_count,
-                                               preempt_count());
-                                        BUG();
+                                        print_symbol("BUG: unbalanced timer-handler preempt count in %s!\n", (unsigned long) fn);
+                                        printk("entered with %08x, exited with %08x.\n", preempt_count, preempt_count());
+                                        preempt_count() = preempt_count;
                                 }
                         }
+                        set_running_timer(base, NULL);
+                        cond_resched_softirq_context();
                         spin_lock_irq(&base->lock);
                 }
         }
-        set_running_timer(base, NULL);
+        wake_up(&base->wait_for_running_timer);
         spin_unlock_irq(&base->lock);
 }
 
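With the timer softirq now preemptible, a timer handler that leaks a preempt_disable() no longer warrants BUG(): the hunk reports the imbalance (print_symbol() resolves fn to a symbol name, which is why linux/kallsyms.h is added at the top of the patch) and repairs the count so the loop can continue. A condensed sketch of that pattern with a hypothetical handler fn; the fixup relies on preempt_count() expanding to an lvalue (current_thread_info()->preempt_count) in kernels of this vintage:

#include <linux/kallsyms.h>
#include <linux/preempt.h>

static void run_timer_fn(void (*fn)(void))
{
        int count = preempt_count();    /* snapshot before the handler */

        fn();

        if (count != preempt_count()) {
                print_symbol("BUG: unbalanced preempt count in %s!\n",
                             (unsigned long)fn);
                /* repair instead of BUG(): preempt_count() is
                 * writable here, per the assumption above */
                preempt_count() = count;
        }
}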
@@ -1171,6 +1226,18 @@ unsigned long get_next_timer_interrupt(unsigned long now)
         struct tvec_base *base = __get_cpu_var(tvec_bases);
         unsigned long expires;
 
+#ifdef CONFIG_PREEMPT_RT
+        /*
+         * On PREEMPT_RT we cannot sleep here. If the trylock does not
+         * succeed then we return the worst-case 'expires in 1 tick'
+         * value:
+         */
+        if (spin_trylock(&base->lock)) {
+                expires = __next_timer_interrupt(base);
+                spin_unlock(&base->lock);
+        } else
+                expires = now + 1;
+#else
         spin_lock(&base->lock);
         if (time_before_eq(base->next_timer, base->timer_jiffies))
                 base->next_timer = __next_timer_interrupt(base);
@@ -1179,7 +1246,7 @@ unsigned long get_next_timer_interrupt(unsigned long now)
 
         if (time_before_eq(expires, now))
                 return now;
-
+#endif
         return cmp_next_hrtimer_event(now, expires);
 }
 #endif
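On PREEMPT_RT spinlocks become sleeping locks, and get_next_timer_interrupt() runs on the idle path, which must not sleep; spin_trylock() never blocks, so the code either computes the real next expiry or falls back to the pessimistic "next tick" answer. Programming the timer interrupt early is merely wasteful, while late would miss a deadline. The same trylock-or-worst-case pattern in generic form (next_event() and compute_next_expiry() are hypothetical names):

#include <linux/spinlock.h>

extern unsigned long compute_next_expiry(void); /* hypothetical */

static unsigned long next_event(spinlock_t *lock, unsigned long now)
{
        unsigned long expires = now + 1;        /* pessimistic: next tick */

        if (spin_trylock(lock)) {               /* never sleeps, even on RT */
                expires = compute_next_expiry();
                spin_unlock(lock);
        }
        return expires;
}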
@@ -1195,11 +1262,10 @@ void update_process_times(int user_tick)
 
         /* Note: this timer irq context must be accounted for as well. */
         account_process_tick(p, user_tick);
+        scheduler_tick();
         run_local_timers();
         rcu_check_callbacks(cpu, user_tick);
-        printk_tick();
         perf_event_do_pending();
-        scheduler_tick();
         run_posix_cpu_timers(p);
 }
 
@@ -1208,9 +1274,11 @@ void update_process_times(int user_tick)
  */
 static void run_timer_softirq(struct softirq_action *h)
 {
-        struct tvec_base *base = __get_cpu_var(tvec_bases);
+        struct tvec_base *base = per_cpu(tvec_bases, raw_smp_processor_id());
 
+        printk_tick();
         hrtimer_run_pending();
+        perf_event_do_pending_softirq();
 
         if (time_after_eq(jiffies, base->timer_jiffies))
                 __run_timers(base);
@@ -1550,6 +1618,7 @@ static int __cpuinit init_timers_cpu(int cpu)
         }
 
         spin_lock_init(&base->lock);
+        init_waitqueue_head(&base->wait_for_running_timer);
 
         for (j = 0; j < TVN_SIZE; j++) {
                 INIT_LIST_HEAD(base->tv5.vec + j);
@@ -1585,6 +1654,7 @@ static void __cpuinit migrate_timers(int cpu)
 {
         struct tvec_base *old_base;
         struct tvec_base *new_base;
+        unsigned long flags;
         int i;
 
         BUG_ON(cpu_online(cpu));
@@ -1594,8 +1664,11 @@ static void __cpuinit migrate_timers(int cpu)
          * The caller is globally serialized and nobody else
          * takes two locks at once, deadlock is not possible.
          */
-        spin_lock_irq(&new_base->lock);
-        spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+        local_irq_save(flags);
+        while (!spin_trylock(&new_base->lock))
+                cpu_relax();
+        while (!spin_trylock(&old_base->lock))
+                cpu_relax();
 
         BUG_ON(old_base->running_timer);
 
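The trylock loops replace spin_lock_irq() plus spin_lock_nested(): the _nested lockdep annotation does not map cleanly onto RT's rt_mutex-based spinlocks, and since CPU hotplug serializes callers the trylocks cannot livelock, as the comment above notes. For comparison, the conventional way to take two locks of the same class is address ordering, sketched here with a hypothetical lock_pair() helper:

#include <linux/spinlock.h>

static void lock_pair(spinlock_t *a, spinlock_t *b)
{
        /* taking same-class locks in a fixed (address) order
         * prevents AB-BA deadlock between concurrent callers */
        if (a < b) {
                spin_lock(a);
                spin_lock_nested(b, SINGLE_DEPTH_NESTING);
        } else {
                spin_lock(b);
                spin_lock_nested(a, SINGLE_DEPTH_NESTING);
        }
}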
@@ -1609,7 +1682,9 @@ static void __cpuinit migrate_timers(int cpu)
         }
 
         spin_unlock(&old_base->lock);
-        spin_unlock_irq(&new_base->lock);
+        spin_unlock(&new_base->lock);
+        local_irq_restore(flags);
+
         put_cpu_var(tvec_bases);
 }
 #endif /* CONFIG_HOTPLUG_CPU */