Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--  kernel/hrtimer.c  86
1 file changed, 48 insertions(+), 38 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 0086628b6e97..ce669174f355 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -89,8 +89,8 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
 
         do {
                 seq = read_seqbegin(&xtime_lock);
-                xts = current_kernel_time();
-                tom = wall_to_monotonic;
+                xts = __current_kernel_time();
+                tom = __get_wall_to_monotonic();
         } while (read_seqretry(&xtime_lock, seq));
 
         xtim = timespec_to_ktime(xts);
@@ -144,12 +144,8 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
 static int hrtimer_get_target(int this_cpu, int pinned)
 {
 #ifdef CONFIG_NO_HZ
-        if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) {
-                int preferred_cpu = get_nohz_load_balancer();
-
-                if (preferred_cpu >= 0)
-                        return preferred_cpu;
-        }
+        if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu))
+                return get_nohz_timer_target();
 #endif
         return this_cpu;
 }
@@ -612,7 +608,7 @@ static int hrtimer_reprogram(struct hrtimer *timer,
 static void retrigger_next_event(void *arg)
 {
         struct hrtimer_cpu_base *base;
-        struct timespec realtime_offset;
+        struct timespec realtime_offset, wtm;
         unsigned long seq;
 
         if (!hrtimer_hres_active())
@@ -620,10 +616,9 @@ static void retrigger_next_event(void *arg)
 
         do {
                 seq = read_seqbegin(&xtime_lock);
-                set_normalized_timespec(&realtime_offset,
-                                        -wall_to_monotonic.tv_sec,
-                                        -wall_to_monotonic.tv_nsec);
+                wtm = __get_wall_to_monotonic();
         } while (read_seqretry(&xtime_lock, seq));
+        set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
 
         base = &__get_cpu_var(hrtimer_bases);
 
@@ -1749,35 +1744,15 @@ void __init hrtimers_init(void)
 }
 
 /**
- * schedule_hrtimeout_range - sleep until timeout
+ * schedule_hrtimeout_range_clock - sleep until timeout
  * @expires: timeout value (ktime_t)
  * @delta: slack in expires timeout (ktime_t)
  * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
- *
- * Make the current task sleep until the given expiry time has
- * elapsed. The routine will return immediately unless
- * the current task state has been set (see set_current_state()).
- *
- * The @delta argument gives the kernel the freedom to schedule the
- * actual wakeup to a time that is both power and performance friendly.
- * The kernel give the normal best effort behavior for "@expires+@delta",
- * but may decide to fire the timer earlier, but no earlier than @expires.
- *
- * You can set the task state as follows -
- *
- * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
- * pass before the routine returns.
- *
- * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
- * delivered to the current task.
- *
- * The current task state is guaranteed to be TASK_RUNNING when this
- * routine returns.
- *
- * Returns 0 when the timer has expired otherwise -EINTR
+ * @clock: timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
  */
-int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
-                                     const enum hrtimer_mode mode)
+int __sched
+schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
+                               const enum hrtimer_mode mode, int clock)
 {
         struct hrtimer_sleeper t;
 
@@ -1799,7 +1774,7 @@ int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
                 return -EINTR;
         }
 
-        hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode);
+        hrtimer_init_on_stack(&t.timer, clock, mode);
         hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
 
         hrtimer_init_sleeper(&t, current);
@@ -1818,6 +1793,41 @@ int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
 
         return !t.task ? 0 : -EINTR;
 }
+
+/**
+ * schedule_hrtimeout_range - sleep until timeout
+ * @expires: timeout value (ktime_t)
+ * @delta: slack in expires timeout (ktime_t)
+ * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
+ *
+ * Make the current task sleep until the given expiry time has
+ * elapsed. The routine will return immediately unless
+ * the current task state has been set (see set_current_state()).
+ *
+ * The @delta argument gives the kernel the freedom to schedule the
+ * actual wakeup to a time that is both power and performance friendly.
+ * The kernel give the normal best effort behavior for "@expires+@delta",
+ * but may decide to fire the timer earlier, but no earlier than @expires.
+ *
+ * You can set the task state as follows -
+ *
+ * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
+ * pass before the routine returns.
+ *
+ * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
+ * delivered to the current task.
+ *
+ * The current task state is guaranteed to be TASK_RUNNING when this
+ * routine returns.
+ *
+ * Returns 0 when the timer has expired otherwise -EINTR
+ */
+int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
+                                     const enum hrtimer_mode mode)
+{
+        return schedule_hrtimeout_range_clock(expires, delta, mode,
+                                              CLOCK_MONOTONIC);
+}
 EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
 
 /**
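
The diff above introduces schedule_hrtimeout_range_clock(), which behaves like schedule_hrtimeout_range() but takes a clock id, so callers can sleep against CLOCK_REALTIME as well as CLOCK_MONOTONIC. A minimal caller sketch follows; the helper name wait_until_realtime is hypothetical and not part of this diff, and it assumes the new function's declaration is visible via <linux/hrtimer.h>:

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/time.h>

/*
 * Illustration only: sleep until an absolute CLOCK_REALTIME expiry,
 * allowing the wakeup to land anywhere in [expires, expires + slack_ns].
 */
static int wait_until_realtime(ktime_t expires, unsigned long slack_ns)
{
        /*
         * Per the kernel-doc above, the task state must be set first;
         * otherwise schedule_hrtimeout_range_clock() returns immediately.
         */
        set_current_state(TASK_INTERRUPTIBLE);

        /* Returns 0 once the timer expired, -EINTR if a signal woke us. */
        return schedule_hrtimeout_range_clock(&expires, slack_ns,
                                              HRTIMER_MODE_ABS,
                                              CLOCK_REALTIME);
}

As the last hunk shows, schedule_hrtimeout_range() itself is now just a wrapper that forwards to schedule_hrtimeout_range_clock() with CLOCK_MONOTONIC, so existing callers keep their behavior.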