about summary refs log tree commit diff stats
path: root/kernel/timer.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/timer.c')
-rw-r--r--  kernel/timer.c | 53
1 files changed, 44 insertions, 9 deletions
diff --git a/kernel/timer.c b/kernel/timer.c
index ee305c8d4e18..f1b8afe1ad86 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -90,8 +90,13 @@ static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;
90 90
91/* 91/*
92 * Note that all tvec_bases are 2 byte aligned and lower bit of 92 * Note that all tvec_bases are 2 byte aligned and lower bit of
93 * base in timer_list is guaranteed to be zero. Use the LSB for 93 * base in timer_list is guaranteed to be zero. Use the LSB to
94 * the new flag to indicate whether the timer is deferrable 94 * indicate whether the timer is deferrable.
95 *
96 * A deferrable timer will work normally when the system is busy, but
97 * will not cause a CPU to come out of idle just to service it; instead,
98 * the timer will be serviced when the CPU eventually wakes up with a
99 * subsequent non-deferrable timer.
95 */ 100 */
96#define TBASE_DEFERRABLE_FLAG (0x1) 101#define TBASE_DEFERRABLE_FLAG (0x1)
97 102
@@ -577,6 +582,19 @@ static void __init_timer(struct timer_list *timer,
577 lockdep_init_map(&timer->lockdep_map, name, key, 0); 582 lockdep_init_map(&timer->lockdep_map, name, key, 0);
578} 583}
579 584
585void setup_deferrable_timer_on_stack_key(struct timer_list *timer,
586 const char *name,
587 struct lock_class_key *key,
588 void (*function)(unsigned long),
589 unsigned long data)
590{
591 timer->function = function;
592 timer->data = data;
593 init_timer_on_stack_key(timer, name, key);
594 timer_set_deferrable(timer);
595}
596EXPORT_SYMBOL_GPL(setup_deferrable_timer_on_stack_key);
597
580/** 598/**
581 * init_timer_key - initialize a timer 599 * init_timer_key - initialize a timer
582 * @timer: the timer to be initialized 600 * @timer: the timer to be initialized
@@ -679,12 +697,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
679 cpu = smp_processor_id(); 697 cpu = smp_processor_id();
680 698
681#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP) 699#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
682 if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) { 700 if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
683 int preferred_cpu = get_nohz_load_balancer(); 701 cpu = get_nohz_timer_target();
684
685 if (preferred_cpu >= 0)
686 cpu = preferred_cpu;
687 }
688#endif 702#endif
689 new_base = per_cpu(tvec_bases, cpu); 703 new_base = per_cpu(tvec_bases, cpu);
690 704
@@ -1289,7 +1303,6 @@ void run_local_timers(void)
1289{ 1303{
1290 hrtimer_run_queues(); 1304 hrtimer_run_queues();
1291 raise_softirq(TIMER_SOFTIRQ); 1305 raise_softirq(TIMER_SOFTIRQ);
1292 softlockup_tick();
1293} 1306}
1294 1307
1295/* 1308/*
@@ -1750,3 +1763,25 @@ unsigned long msleep_interruptible(unsigned int msecs)
1750} 1763}
1751 1764
1752EXPORT_SYMBOL(msleep_interruptible); 1765EXPORT_SYMBOL(msleep_interruptible);
1766
1767static int __sched do_usleep_range(unsigned long min, unsigned long max)
1768{
1769 ktime_t kmin;
1770 unsigned long delta;
1771
1772 kmin = ktime_set(0, min * NSEC_PER_USEC);
1773 delta = (max - min) * NSEC_PER_USEC;
1774 return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
1775}
1776
1777/**
1778 * usleep_range - Drop in replacement for udelay where wakeup is flexible
1779 * @min: Minimum time in usecs to sleep
1780 * @max: Maximum time in usecs to sleep
1781 */
1782void usleep_range(unsigned long min, unsigned long max)
1783{
1784 __set_current_state(TASK_UNINTERRUPTIBLE);
1785 do_usleep_range(min, max);
1786}
1787EXPORT_SYMBOL(usleep_range);