Diffstat (limited to 'kernel/timer.c')
 kernel/timer.c | 151
 1 file changed, 109 insertions(+), 42 deletions(-)
diff --git a/kernel/timer.c b/kernel/timer.c
index aeb6a54f2771..ee305c8d4e18 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -319,6 +319,24 @@ unsigned long round_jiffies_up_relative(unsigned long j)
 }
 EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
 
+/**
+ * set_timer_slack - set the allowed slack for a timer
+ * @slack_hz: the amount of time (in jiffies) allowed for rounding
+ *
+ * Set the amount of time, in jiffies, that a certain timer has
+ * in terms of slack. By setting this value, the timer subsystem
+ * will schedule the actual timer somewhere between
+ * the time mod_timer() asks for, and that time plus the slack.
+ *
+ * By setting the slack to -1, a percentage of the delay is used
+ * instead.
+ */
+void set_timer_slack(struct timer_list *timer, int slack_hz)
+{
+	timer->slack = slack_hz;
+}
+EXPORT_SYMBOL_GPL(set_timer_slack);
+
 
 static inline void set_running_timer(struct tvec_base *base,
 				     struct timer_list *timer)
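The exported helper gives individual timer users control over how much coalescing they will tolerate. As a minimal sketch of the intended calling pattern (the my_timer name, callback, and 100 ms period are hypothetical, not part of this patch):

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list my_timer;	/* hypothetical example timer */

static void my_timer_fn(unsigned long data)
{
	/* ... do the periodic work, then re-arm ... */
	mod_timer(&my_timer, jiffies + msecs_to_jiffies(100));
}

static void my_timer_start(void)
{
	setup_timer(&my_timer, my_timer_fn, 0);
	/*
	 * Allow this timer to fire up to HZ/10 jiffies late so the
	 * subsystem can coalesce it with other wakeups.
	 */
	set_timer_slack(&my_timer, HZ / 10);
	mod_timer(&my_timer, jiffies + msecs_to_jiffies(100));
}

Timers initialized through __init_timer() get slack = -1 by default (see the next hunk), so only callers that want an explicit bound need to call set_timer_slack().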
@@ -550,6 +568,7 @@ static void __init_timer(struct timer_list *timer,
 {
 	timer->entry.next = NULL;
 	timer->base = __raw_get_cpu_var(tvec_bases);
+	timer->slack = -1;
 #ifdef CONFIG_TIMER_STATS
 	timer->start_site = NULL;
 	timer->start_pid = -1;
@@ -715,6 +734,46 @@ int mod_timer_pending(struct timer_list *timer, unsigned long expires)
 }
 EXPORT_SYMBOL(mod_timer_pending);
 
+/*
+ * Decide where to put the timer while taking the slack into account
+ *
+ * Algorithm:
+ *   1) calculate the maximum (absolute) time
+ *   2) calculate the highest bit where the expires and new max are different
+ *   3) use this bit to make a mask
+ *   4) use the bitmask to round down the maximum time, so that all last
+ *      bits are zeros
+ */
+static inline
+unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
+{
+	unsigned long expires_limit, mask;
+	int bit;
+
+	expires_limit = expires;
+
+	if (timer->slack >= 0) {
+		expires_limit = expires + timer->slack;
+	} else {
+		unsigned long now = jiffies;
+
+		/* No slack, if already expired else auto slack 0.4% */
+		if (time_after(expires, now))
+			expires_limit = expires + (expires - now)/256;
+	}
+	mask = expires ^ expires_limit;
+	if (mask == 0)
+		return expires;
+
+	bit = find_last_bit(&mask, BITS_PER_LONG);
+
+	mask = (1 << bit) - 1;
+
+	expires_limit = expires_limit & ~(mask);
+
+	return expires_limit;
+}
+
 /**
  * mod_timer - modify a timer's timeout
  * @timer: the timer to be modified
@@ -745,6 +804,8 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
 	if (timer_pending(timer) && timer->expires == expires)
 		return 1;
 
+	expires = apply_slack(timer, expires);
+
 	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
 }
 EXPORT_SYMBOL(mod_timer);
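The effect of the rounding is easiest to see with concrete numbers. Below is a hedged userspace re-implementation of the slack == -1 path (plain C; the kernel's find_last_bit() is replaced by an explicit loop, and all names and values are invented for illustration):

#include <stdio.h>

/* Userspace re-implementation of the apply_slack() rounding step. */
static unsigned long round_with_slack(unsigned long now, unsigned long expires)
{
	unsigned long expires_limit, mask;
	int bit;

	/* Auto slack: 0.4% of the remaining delay (the slack == -1 case). */
	expires_limit = expires + (expires - now) / 256;

	mask = expires ^ expires_limit;
	if (mask == 0)
		return expires;

	/* Highest bit where expires and the limit differ... */
	bit = 8 * sizeof(mask) - 1;
	while (!(mask & (1UL << bit)))
		bit--;

	/* ...then clear all lower bits of the limit, rounding it down. */
	return expires_limit & ~((1UL << bit) - 1);
}

int main(void)
{
	unsigned long now = 1000000;
	unsigned long expires = now + 10000;	/* 10 s away at HZ=1000 */

	/*
	 * 10000/256 = 39 jiffies of auto slack (~0.4%); the result is
	 * expires_limit rounded down so its low bits are zero, here
	 * 1010016, i.e. 16 jiffies after the requested expiry.
	 */
	printf("expires=%lu rounded=%lu\n",
	       expires, round_with_slack(now, expires));
	return 0;
}

Because nearby timers round down to the same low-bits-cleared value, they tend to land on the same jiffy and fire in one wakeup.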
@@ -955,6 +1016,47 @@ static int cascade(struct tvec_base *base, struct tvec *tv, int index)
 	return index;
 }
 
+static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
+			  unsigned long data)
+{
+	int preempt_count = preempt_count();
+
+#ifdef CONFIG_LOCKDEP
+	/*
+	 * It is permissible to free the timer from inside the
+	 * function that is called from it, this we need to take into
+	 * account for lockdep too. To avoid bogus "held lock freed"
+	 * warnings as well as problems when looking into
+	 * timer->lockdep_map, make a copy and use that here.
+	 */
+	struct lockdep_map lockdep_map = timer->lockdep_map;
+#endif
+	/*
+	 * Couple the lock chain with the lock chain at
+	 * del_timer_sync() by acquiring the lock_map around the fn()
+	 * call here and in del_timer_sync().
+	 */
+	lock_map_acquire(&lockdep_map);
+
+	trace_timer_expire_entry(timer);
+	fn(data);
+	trace_timer_expire_exit(timer);
+
+	lock_map_release(&lockdep_map);
+
+	if (preempt_count != preempt_count()) {
+		WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
+			  fn, preempt_count, preempt_count());
+		/*
+		 * Restore the preempt count. That gives us a decent
+		 * chance to survive and extract information. If the
+		 * callback kept a lock held, bad luck, but not worse
+		 * than the BUG() we had.
+		 */
+		preempt_count() = preempt_count;
+	}
+}
+
 #define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
 
 /**
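The preempt-count comparison at the end of call_timer_fn() catches handlers that return with a lock, and therefore an elevated preempt count, still held. A hypothetical buggy callback of the kind the new WARN_ONCE() would flag (names invented for illustration):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);	/* hypothetical lock */

static void buggy_timer_fn(unsigned long data)
{
	spin_lock(&my_lock);
	if (!data)
		return;	/* BUG: returns with my_lock held, so the
			 * preempt count no longer matches on exit */
	spin_unlock(&my_lock);
}

Before this patch, __run_timers() responded to such a mismatch with BUG(); now the mismatch is reported once, the preempt count is restored, and the system gets a chance to stay up long enough to extract information.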
@@ -998,45 +1100,7 @@ static inline void __run_timers(struct tvec_base *base)
 			detach_timer(timer, 1);
 
 			spin_unlock_irq(&base->lock);
-			{
-				int preempt_count = preempt_count();
-
-#ifdef CONFIG_LOCKDEP
-				/*
-				 * It is permissible to free the timer from
-				 * inside the function that is called from
-				 * it, this we need to take into account for
-				 * lockdep too. To avoid bogus "held lock
-				 * freed" warnings as well as problems when
-				 * looking into timer->lockdep_map, make a
-				 * copy and use that here.
-				 */
-				struct lockdep_map lockdep_map =
-					timer->lockdep_map;
-#endif
-				/*
-				 * Couple the lock chain with the lock chain at
-				 * del_timer_sync() by acquiring the lock_map
-				 * around the fn() call here and in
-				 * del_timer_sync().
-				 */
-				lock_map_acquire(&lockdep_map);
-
-				trace_timer_expire_entry(timer);
-				fn(data);
-				trace_timer_expire_exit(timer);
-
-				lock_map_release(&lockdep_map);
-
-				if (preempt_count != preempt_count()) {
-					printk(KERN_ERR "huh, entered %p "
-					       "with preempt_count %08x, exited"
-					       " with %08x?\n",
-					       fn, preempt_count,
-					       preempt_count());
-					BUG();
-				}
-			}
+			call_timer_fn(timer, fn, data);
 			spin_lock_irq(&base->lock);
 		}
 	}
@@ -1620,11 +1684,14 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
 				unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
+	int err;
+
 	switch(action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		if (init_timers_cpu(cpu) < 0)
-			return NOTIFY_BAD;
+		err = init_timers_cpu(cpu);
+		if (err < 0)
+			return notifier_from_errno(err);
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DEAD:
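notifier_from_errno() lets the notifier carry the actual error code back to the caller instead of the lossy NOTIFY_BAD. A rough userspace sketch of the encoding and its inverse, mirroring the helpers in include/linux/notifier.h (constants match mainline of this era, but treat the details as illustrative):

#include <stdio.h>

#define NOTIFY_DONE		0x0000
#define NOTIFY_OK		0x0001
#define NOTIFY_STOP_MASK	0x8000
#define NOTIFY_BAD		(NOTIFY_STOP_MASK | 0x0002)

/* Mirror of notifier_from_errno(): stash the -errno in the low bits. */
static int notifier_from_errno(int err)
{
	if (err)
		return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
	return NOTIFY_OK;
}

/* Mirror of notifier_to_errno(): recover the errno, or 0 on success. */
static int notifier_to_errno(int ret)
{
	ret &= ~NOTIFY_STOP_MASK;
	return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
}

int main(void)
{
	int ret = notifier_from_errno(-12);	/* -ENOMEM */

	printf("encoded: 0x%04x, decoded: %d\n", ret, notifier_to_errno(ret));
	return 0;
}

This is also why the final hunk below tightens init_timers(): once failures are errno-encoded, err == NOTIFY_BAD no longer matches most of them, so the assertion becomes err != NOTIFY_OK.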
@@ -1650,7 +1717,7 @@ void __init init_timers(void)
 
 	init_timer_stats();
 
-	BUG_ON(err == NOTIFY_BAD);
+	BUG_ON(err != NOTIFY_OK);
 	register_cpu_notifier(&timers_nb);
 	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
 }