aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2008-10-23 13:53:02 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2008-10-23 13:53:02 -0400
commit1f6d6e8ebe73ba9d9d4c693f7f6f50f661dbd6e4 (patch)
treebe7a2d20b1728da5a0d844a6f4cd382b2c2569fb /kernel
parentdb563fc2e80534f98c7f9121a6f7dfe41f177a79 (diff)
parent268a3dcfea2077fca60d3715caa5c96f9b5e6ea7 (diff)
Merge branch 'v28-range-hrtimers-for-linus-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'v28-range-hrtimers-for-linus-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (37 commits) hrtimers: add missing docbook comments to struct hrtimer hrtimers: simplify hrtimer_peek_ahead_timers() hrtimers: fix docbook comments DECLARE_PER_CPU needs linux/percpu.h hrtimers: fix typo rangetimers: fix the bug reported by Ingo for real rangetimer: fix BUG_ON reported by Ingo rangetimer: fix x86 build failure for the !HRTIMERS case select: fix alpha OSF wrapper select: fix alpha OSF wrapper hrtimer: peek at the timer queue just before going idle hrtimer: make the futex() system call use the per process slack value hrtimer: make the nanosleep() syscall use the per process slack hrtimer: fix signed/unsigned bug in slack estimator hrtimer: show the timer ranges in /proc/timer_list hrtimer: incorporate feedback from Peter Zijlstra hrtimer: add a hrtimer_start_range() function hrtimer: another build fix hrtimer: fix build bug found by Ingo hrtimer: make select() and poll() use the hrtimer range feature ...
Diffstat (limited to 'kernel')
-rw-r--r--kernel/fork.c2
-rw-r--r--kernel/futex.c11
-rw-r--r--kernel/hrtimer.c206
-rw-r--r--kernel/posix-timers.c10
-rw-r--r--kernel/rtmutex.c3
-rw-r--r--kernel/sched.c7
-rw-r--r--kernel/sys.c10
-rw-r--r--kernel/time.c18
-rw-r--r--kernel/time/ntp.c3
-rw-r--r--kernel/time/tick-sched.c25
-rw-r--r--kernel/time/timer_list.c8
11 files changed, 249 insertions, 54 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 4d093552dd6e..f6083561dfe0 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1018,6 +1018,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1018 p->prev_utime = cputime_zero; 1018 p->prev_utime = cputime_zero;
1019 p->prev_stime = cputime_zero; 1019 p->prev_stime = cputime_zero;
1020 1020
1021 p->default_timer_slack_ns = current->timer_slack_ns;
1022
1021#ifdef CONFIG_DETECT_SOFTLOCKUP 1023#ifdef CONFIG_DETECT_SOFTLOCKUP
1022 p->last_switch_count = 0; 1024 p->last_switch_count = 0;
1023 p->last_switch_timestamp = 0; 1025 p->last_switch_timestamp = 0;
diff --git a/kernel/futex.c b/kernel/futex.c
index 7d1136e97c14..8af10027514b 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1296,13 +1296,16 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
1296 if (!abs_time) 1296 if (!abs_time)
1297 schedule(); 1297 schedule();
1298 else { 1298 else {
1299 unsigned long slack;
1300 slack = current->timer_slack_ns;
1301 if (rt_task(current))
1302 slack = 0;
1299 hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, 1303 hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC,
1300 HRTIMER_MODE_ABS); 1304 HRTIMER_MODE_ABS);
1301 hrtimer_init_sleeper(&t, current); 1305 hrtimer_init_sleeper(&t, current);
1302 t.timer.expires = *abs_time; 1306 hrtimer_set_expires_range_ns(&t.timer, *abs_time, slack);
1303 1307
1304 hrtimer_start(&t.timer, t.timer.expires, 1308 hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
1305 HRTIMER_MODE_ABS);
1306 if (!hrtimer_active(&t.timer)) 1309 if (!hrtimer_active(&t.timer))
1307 t.task = NULL; 1310 t.task = NULL;
1308 1311
@@ -1404,7 +1407,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
1404 hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME, 1407 hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
1405 HRTIMER_MODE_ABS); 1408 HRTIMER_MODE_ABS);
1406 hrtimer_init_sleeper(to, current); 1409 hrtimer_init_sleeper(to, current);
1407 to->timer.expires = *time; 1410 hrtimer_set_expires(&to->timer, *time);
1408 } 1411 }
1409 1412
1410 q.pi_state = NULL; 1413 q.pi_state = NULL;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 95978f48e039..2b465dfde426 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -517,7 +517,7 @@ static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
517 if (!base->first) 517 if (!base->first)
518 continue; 518 continue;
519 timer = rb_entry(base->first, struct hrtimer, node); 519 timer = rb_entry(base->first, struct hrtimer, node);
520 expires = ktime_sub(timer->expires, base->offset); 520 expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
521 if (expires.tv64 < cpu_base->expires_next.tv64) 521 if (expires.tv64 < cpu_base->expires_next.tv64)
522 cpu_base->expires_next = expires; 522 cpu_base->expires_next = expires;
523 } 523 }
@@ -539,10 +539,10 @@ static int hrtimer_reprogram(struct hrtimer *timer,
539 struct hrtimer_clock_base *base) 539 struct hrtimer_clock_base *base)
540{ 540{
541 ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next; 541 ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
542 ktime_t expires = ktime_sub(timer->expires, base->offset); 542 ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
543 int res; 543 int res;
544 544
545 WARN_ON_ONCE(timer->expires.tv64 < 0); 545 WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
546 546
547 /* 547 /*
548 * When the callback is running, we do not reprogram the clock event 548 * When the callback is running, we do not reprogram the clock event
@@ -795,7 +795,7 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
795 u64 orun = 1; 795 u64 orun = 1;
796 ktime_t delta; 796 ktime_t delta;
797 797
798 delta = ktime_sub(now, timer->expires); 798 delta = ktime_sub(now, hrtimer_get_expires(timer));
799 799
800 if (delta.tv64 < 0) 800 if (delta.tv64 < 0)
801 return 0; 801 return 0;
@@ -807,8 +807,8 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
807 s64 incr = ktime_to_ns(interval); 807 s64 incr = ktime_to_ns(interval);
808 808
809 orun = ktime_divns(delta, incr); 809 orun = ktime_divns(delta, incr);
810 timer->expires = ktime_add_ns(timer->expires, incr * orun); 810 hrtimer_add_expires_ns(timer, incr * orun);
811 if (timer->expires.tv64 > now.tv64) 811 if (hrtimer_get_expires_tv64(timer) > now.tv64)
812 return orun; 812 return orun;
813 /* 813 /*
814 * This (and the ktime_add() below) is the 814 * This (and the ktime_add() below) is the
@@ -816,7 +816,7 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
816 */ 816 */
817 orun++; 817 orun++;
818 } 818 }
819 timer->expires = ktime_add_safe(timer->expires, interval); 819 hrtimer_add_expires(timer, interval);
820 820
821 return orun; 821 return orun;
822} 822}
@@ -848,7 +848,8 @@ static void enqueue_hrtimer(struct hrtimer *timer,
848 * We dont care about collisions. Nodes with 848 * We dont care about collisions. Nodes with
849 * the same expiry time stay together. 849 * the same expiry time stay together.
850 */ 850 */
851 if (timer->expires.tv64 < entry->expires.tv64) { 851 if (hrtimer_get_expires_tv64(timer) <
852 hrtimer_get_expires_tv64(entry)) {
852 link = &(*link)->rb_left; 853 link = &(*link)->rb_left;
853 } else { 854 } else {
854 link = &(*link)->rb_right; 855 link = &(*link)->rb_right;
@@ -945,9 +946,10 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
945} 946}
946 947
947/** 948/**
948 * hrtimer_start - (re)start an relative timer on the current CPU 949 * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
949 * @timer: the timer to be added 950 * @timer: the timer to be added
950 * @tim: expiry time 951 * @tim: expiry time
952 * @delta_ns: "slack" range for the timer
951 * @mode: expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL) 953 * @mode: expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
952 * 954 *
953 * Returns: 955 * Returns:
@@ -955,7 +957,8 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
955 * 1 when the timer was active 957 * 1 when the timer was active
956 */ 958 */
957int 959int
958hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode) 960hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_ns,
961 const enum hrtimer_mode mode)
959{ 962{
960 struct hrtimer_clock_base *base, *new_base; 963 struct hrtimer_clock_base *base, *new_base;
961 unsigned long flags; 964 unsigned long flags;
@@ -983,7 +986,7 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
983#endif 986#endif
984 } 987 }
985 988
986 timer->expires = tim; 989 hrtimer_set_expires_range_ns(timer, tim, delta_ns);
987 990
988 timer_stats_hrtimer_set_start_info(timer); 991 timer_stats_hrtimer_set_start_info(timer);
989 992
@@ -1016,8 +1019,26 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
1016 1019
1017 return ret; 1020 return ret;
1018} 1021}
1022EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
1023
1024/**
1025 * hrtimer_start - (re)start an hrtimer on the current CPU
1026 * @timer: the timer to be added
1027 * @tim: expiry time
1028 * @mode: expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
1029 *
1030 * Returns:
1031 * 0 on success
1032 * 1 when the timer was active
1033 */
1034int
1035hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
1036{
1037 return hrtimer_start_range_ns(timer, tim, 0, mode);
1038}
1019EXPORT_SYMBOL_GPL(hrtimer_start); 1039EXPORT_SYMBOL_GPL(hrtimer_start);
1020 1040
1041
1021/** 1042/**
1022 * hrtimer_try_to_cancel - try to deactivate a timer 1043 * hrtimer_try_to_cancel - try to deactivate a timer
1023 * @timer: hrtimer to stop 1044 * @timer: hrtimer to stop
@@ -1077,7 +1098,7 @@ ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
1077 ktime_t rem; 1098 ktime_t rem;
1078 1099
1079 base = lock_hrtimer_base(timer, &flags); 1100 base = lock_hrtimer_base(timer, &flags);
1080 rem = ktime_sub(timer->expires, base->get_time()); 1101 rem = hrtimer_expires_remaining(timer);
1081 unlock_hrtimer_base(timer, &flags); 1102 unlock_hrtimer_base(timer, &flags);
1082 1103
1083 return rem; 1104 return rem;
@@ -1109,7 +1130,7 @@ ktime_t hrtimer_get_next_event(void)
1109 continue; 1130 continue;
1110 1131
1111 timer = rb_entry(base->first, struct hrtimer, node); 1132 timer = rb_entry(base->first, struct hrtimer, node);
1112 delta.tv64 = timer->expires.tv64; 1133 delta.tv64 = hrtimer_get_expires_tv64(timer);
1113 delta = ktime_sub(delta, base->get_time()); 1134 delta = ktime_sub(delta, base->get_time());
1114 if (delta.tv64 < mindelta.tv64) 1135 if (delta.tv64 < mindelta.tv64)
1115 mindelta.tv64 = delta.tv64; 1136 mindelta.tv64 = delta.tv64;
@@ -1310,10 +1331,23 @@ void hrtimer_interrupt(struct clock_event_device *dev)
1310 1331
1311 timer = rb_entry(node, struct hrtimer, node); 1332 timer = rb_entry(node, struct hrtimer, node);
1312 1333
1313 if (basenow.tv64 < timer->expires.tv64) { 1334 /*
1335 * The immediate goal for using the softexpires is
1336 * minimizing wakeups, not running timers at the
1337 * earliest interrupt after their soft expiration.
1338 * This allows us to avoid using a Priority Search
1339 * Tree, which can answer a stabbing query for
1340 * overlapping intervals and instead use the simple
1341 * BST we already have.
1342 * We don't add extra wakeups by delaying timers that
1343 * are right-of a not yet expired timer, because that
1344 * timer will have to trigger a wakeup anyway.
1345 */
1346
1347 if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
1314 ktime_t expires; 1348 ktime_t expires;
1315 1349
1316 expires = ktime_sub(timer->expires, 1350 expires = ktime_sub(hrtimer_get_expires(timer),
1317 base->offset); 1351 base->offset);
1318 if (expires.tv64 < expires_next.tv64) 1352 if (expires.tv64 < expires_next.tv64)
1319 expires_next = expires; 1353 expires_next = expires;
@@ -1349,6 +1383,30 @@ void hrtimer_interrupt(struct clock_event_device *dev)
1349 raise_softirq(HRTIMER_SOFTIRQ); 1383 raise_softirq(HRTIMER_SOFTIRQ);
1350} 1384}
1351 1385
1386/**
1387 * hrtimer_peek_ahead_timers -- run soft-expired timers now
1388 *
1389 * hrtimer_peek_ahead_timers will peek at the timer queue of
1390 * the current cpu and check if there are any timers for which
1391 * the soft expires time has passed. If any such timers exist,
1392 * they are run immediately and then removed from the timer queue.
1393 *
1394 */
1395void hrtimer_peek_ahead_timers(void)
1396{
1397 struct tick_device *td;
1398 unsigned long flags;
1399
1400 if (!hrtimer_hres_active())
1401 return;
1402
1403 local_irq_save(flags);
1404 td = &__get_cpu_var(tick_cpu_device);
1405 if (td && td->evtdev)
1406 hrtimer_interrupt(td->evtdev);
1407 local_irq_restore(flags);
1408}
1409
1352static void run_hrtimer_softirq(struct softirq_action *h) 1410static void run_hrtimer_softirq(struct softirq_action *h)
1353{ 1411{
1354 run_hrtimer_pending(&__get_cpu_var(hrtimer_bases)); 1412 run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));
@@ -1414,7 +1472,8 @@ void hrtimer_run_queues(void)
1414 struct hrtimer *timer; 1472 struct hrtimer *timer;
1415 1473
1416 timer = rb_entry(node, struct hrtimer, node); 1474 timer = rb_entry(node, struct hrtimer, node);
1417 if (base->softirq_time.tv64 <= timer->expires.tv64) 1475 if (base->softirq_time.tv64 <=
1476 hrtimer_get_expires_tv64(timer))
1418 break; 1477 break;
1419 1478
1420 if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) { 1479 if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
@@ -1462,7 +1521,7 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod
1462 1521
1463 do { 1522 do {
1464 set_current_state(TASK_INTERRUPTIBLE); 1523 set_current_state(TASK_INTERRUPTIBLE);
1465 hrtimer_start(&t->timer, t->timer.expires, mode); 1524 hrtimer_start_expires(&t->timer, mode);
1466 if (!hrtimer_active(&t->timer)) 1525 if (!hrtimer_active(&t->timer))
1467 t->task = NULL; 1526 t->task = NULL;
1468 1527
@@ -1484,7 +1543,7 @@ static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
1484 struct timespec rmt; 1543 struct timespec rmt;
1485 ktime_t rem; 1544 ktime_t rem;
1486 1545
1487 rem = ktime_sub(timer->expires, timer->base->get_time()); 1546 rem = hrtimer_expires_remaining(timer);
1488 if (rem.tv64 <= 0) 1547 if (rem.tv64 <= 0)
1489 return 0; 1548 return 0;
1490 rmt = ktime_to_timespec(rem); 1549 rmt = ktime_to_timespec(rem);
@@ -1503,7 +1562,7 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
1503 1562
1504 hrtimer_init_on_stack(&t.timer, restart->nanosleep.index, 1563 hrtimer_init_on_stack(&t.timer, restart->nanosleep.index,
1505 HRTIMER_MODE_ABS); 1564 HRTIMER_MODE_ABS);
1506 t.timer.expires.tv64 = restart->nanosleep.expires; 1565 hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
1507 1566
1508 if (do_nanosleep(&t, HRTIMER_MODE_ABS)) 1567 if (do_nanosleep(&t, HRTIMER_MODE_ABS))
1509 goto out; 1568 goto out;
@@ -1528,9 +1587,14 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
1528 struct restart_block *restart; 1587 struct restart_block *restart;
1529 struct hrtimer_sleeper t; 1588 struct hrtimer_sleeper t;
1530 int ret = 0; 1589 int ret = 0;
1590 unsigned long slack;
1591
1592 slack = current->timer_slack_ns;
1593 if (rt_task(current))
1594 slack = 0;
1531 1595
1532 hrtimer_init_on_stack(&t.timer, clockid, mode); 1596 hrtimer_init_on_stack(&t.timer, clockid, mode);
1533 t.timer.expires = timespec_to_ktime(*rqtp); 1597 hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
1534 if (do_nanosleep(&t, mode)) 1598 if (do_nanosleep(&t, mode))
1535 goto out; 1599 goto out;
1536 1600
@@ -1550,7 +1614,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
1550 restart->fn = hrtimer_nanosleep_restart; 1614 restart->fn = hrtimer_nanosleep_restart;
1551 restart->nanosleep.index = t.timer.base->index; 1615 restart->nanosleep.index = t.timer.base->index;
1552 restart->nanosleep.rmtp = rmtp; 1616 restart->nanosleep.rmtp = rmtp;
1553 restart->nanosleep.expires = t.timer.expires.tv64; 1617 restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
1554 1618
1555 ret = -ERESTART_RESTARTBLOCK; 1619 ret = -ERESTART_RESTARTBLOCK;
1556out: 1620out:
@@ -1752,3 +1816,103 @@ void __init hrtimers_init(void)
1752#endif 1816#endif
1753} 1817}
1754 1818
1819/**
1820 * schedule_hrtimeout_range - sleep until timeout
1821 * @expires: timeout value (ktime_t)
1822 * @delta: slack in expires timeout (ktime_t)
1823 * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
1824 *
1825 * Make the current task sleep until the given expiry time has
1826 * elapsed. The routine will return immediately unless
1827 * the current task state has been set (see set_current_state()).
1828 *
1829 * The @delta argument gives the kernel the freedom to schedule the
1830 * actual wakeup to a time that is both power and performance friendly.
1831 * The kernel gives the normal best effort behavior for "@expires+@delta",
1832 * but may decide to fire the timer earlier, but no earlier than @expires.
1833 *
1834 * You can set the task state as follows -
1835 *
1836 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
1837 * pass before the routine returns.
1838 *
1839 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1840 * delivered to the current task.
1841 *
1842 * The current task state is guaranteed to be TASK_RUNNING when this
1843 * routine returns.
1844 *
1845 * Returns 0 when the timer has expired otherwise -EINTR
1846 */
1847int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
1848 const enum hrtimer_mode mode)
1849{
1850 struct hrtimer_sleeper t;
1851
1852 /*
1853 * Optimize when a zero timeout value is given. It does not
1854 * matter whether this is an absolute or a relative time.
1855 */
1856 if (expires && !expires->tv64) {
1857 __set_current_state(TASK_RUNNING);
1858 return 0;
1859 }
1860
1861 /*
1862 * A NULL parameter means "infinite"
1863 */
1864 if (!expires) {
1865 schedule();
1866 __set_current_state(TASK_RUNNING);
1867 return -EINTR;
1868 }
1869
1870 hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode);
1871 hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
1872
1873 hrtimer_init_sleeper(&t, current);
1874
1875 hrtimer_start_expires(&t.timer, mode);
1876 if (!hrtimer_active(&t.timer))
1877 t.task = NULL;
1878
1879 if (likely(t.task))
1880 schedule();
1881
1882 hrtimer_cancel(&t.timer);
1883 destroy_hrtimer_on_stack(&t.timer);
1884
1885 __set_current_state(TASK_RUNNING);
1886
1887 return !t.task ? 0 : -EINTR;
1888}
1889EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
1890
1891/**
1892 * schedule_hrtimeout - sleep until timeout
1893 * @expires: timeout value (ktime_t)
1894 * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
1895 *
1896 * Make the current task sleep until the given expiry time has
1897 * elapsed. The routine will return immediately unless
1898 * the current task state has been set (see set_current_state()).
1899 *
1900 * You can set the task state as follows -
1901 *
1902 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
1903 * pass before the routine returns.
1904 *
1905 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1906 * delivered to the current task.
1907 *
1908 * The current task state is guaranteed to be TASK_RUNNING when this
1909 * routine returns.
1910 *
1911 * Returns 0 when the timer has expired otherwise -EINTR
1912 */
1913int __sched schedule_hrtimeout(ktime_t *expires,
1914 const enum hrtimer_mode mode)
1915{
1916 return schedule_hrtimeout_range(expires, 0, mode);
1917}
1918EXPORT_SYMBOL_GPL(schedule_hrtimeout);
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index b931d7cedbfa..5e79c662294b 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -639,7 +639,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
639 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) 639 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
640 timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv); 640 timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
641 641
642 remaining = ktime_sub(timer->expires, now); 642 remaining = ktime_sub(hrtimer_get_expires(timer), now);
643 /* Return 0 only, when the timer is expired and not pending */ 643 /* Return 0 only, when the timer is expired and not pending */
644 if (remaining.tv64 <= 0) { 644 if (remaining.tv64 <= 0) {
645 /* 645 /*
@@ -733,7 +733,7 @@ common_timer_set(struct k_itimer *timr, int flags,
733 hrtimer_init(&timr->it.real.timer, timr->it_clock, mode); 733 hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
734 timr->it.real.timer.function = posix_timer_fn; 734 timr->it.real.timer.function = posix_timer_fn;
735 735
736 timer->expires = timespec_to_ktime(new_setting->it_value); 736 hrtimer_set_expires(timer, timespec_to_ktime(new_setting->it_value));
737 737
738 /* Convert interval */ 738 /* Convert interval */
739 timr->it.real.interval = timespec_to_ktime(new_setting->it_interval); 739 timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);
@@ -742,14 +742,12 @@ common_timer_set(struct k_itimer *timr, int flags,
742 if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) { 742 if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
743 /* Setup correct expiry time for relative timers */ 743 /* Setup correct expiry time for relative timers */
744 if (mode == HRTIMER_MODE_REL) { 744 if (mode == HRTIMER_MODE_REL) {
745 timer->expires = 745 hrtimer_add_expires(timer, timer->base->get_time());
746 ktime_add_safe(timer->expires,
747 timer->base->get_time());
748 } 746 }
749 return 0; 747 return 0;
750 } 748 }
751 749
752 hrtimer_start(timer, timer->expires, mode); 750 hrtimer_start_expires(timer, mode);
753 return 0; 751 return 0;
754} 752}
755 753
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 6522ae5b14a2..69d9cb921ffa 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -631,8 +631,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
631 631
632 /* Setup the timer, when timeout != NULL */ 632 /* Setup the timer, when timeout != NULL */
633 if (unlikely(timeout)) { 633 if (unlikely(timeout)) {
634 hrtimer_start(&timeout->timer, timeout->timer.expires, 634 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
635 HRTIMER_MODE_ABS);
636 if (!hrtimer_active(&timeout->timer)) 635 if (!hrtimer_active(&timeout->timer))
637 timeout->task = NULL; 636 timeout->task = NULL;
638 } 637 }
diff --git a/kernel/sched.c b/kernel/sched.c
index 945a97b9600d..1645c7211944 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -227,9 +227,8 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
227 227
228 now = hrtimer_cb_get_time(&rt_b->rt_period_timer); 228 now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
229 hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period); 229 hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
230 hrtimer_start(&rt_b->rt_period_timer, 230 hrtimer_start_expires(&rt_b->rt_period_timer,
231 rt_b->rt_period_timer.expires, 231 HRTIMER_MODE_ABS);
232 HRTIMER_MODE_ABS);
233 } 232 }
234 spin_unlock(&rt_b->rt_runtime_lock); 233 spin_unlock(&rt_b->rt_runtime_lock);
235} 234}
@@ -1071,7 +1070,7 @@ static void hrtick_start(struct rq *rq, u64 delay)
1071 struct hrtimer *timer = &rq->hrtick_timer; 1070 struct hrtimer *timer = &rq->hrtick_timer;
1072 ktime_t time = ktime_add_ns(timer->base->get_time(), delay); 1071 ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
1073 1072
1074 timer->expires = time; 1073 hrtimer_set_expires(timer, time);
1075 1074
1076 if (rq == this_rq()) { 1075 if (rq == this_rq()) {
1077 hrtimer_restart(timer); 1076 hrtimer_restart(timer);
diff --git a/kernel/sys.c b/kernel/sys.c
index 53879cdae483..31deba8f7d16 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1716,6 +1716,16 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
1716 case PR_SET_TSC: 1716 case PR_SET_TSC:
1717 error = SET_TSC_CTL(arg2); 1717 error = SET_TSC_CTL(arg2);
1718 break; 1718 break;
1719 case PR_GET_TIMERSLACK:
1720 error = current->timer_slack_ns;
1721 break;
1722 case PR_SET_TIMERSLACK:
1723 if (arg2 <= 0)
1724 current->timer_slack_ns =
1725 current->default_timer_slack_ns;
1726 else
1727 current->timer_slack_ns = arg2;
1728 break;
1719 default: 1729 default:
1720 error = -EINVAL; 1730 error = -EINVAL;
1721 break; 1731 break;
diff --git a/kernel/time.c b/kernel/time.c
index 6a08660b4fac..d63a4336fad6 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -669,3 +669,21 @@ EXPORT_SYMBOL(get_jiffies_64);
669#endif 669#endif
670 670
671EXPORT_SYMBOL(jiffies); 671EXPORT_SYMBOL(jiffies);
672
673/*
674 * Add two timespec values and do a safety check for overflow.
675 * It's assumed that both values are valid (>= 0)
676 */
677struct timespec timespec_add_safe(const struct timespec lhs,
678 const struct timespec rhs)
679{
680 struct timespec res;
681
682 set_normalized_timespec(&res, lhs.tv_sec + rhs.tv_sec,
683 lhs.tv_nsec + rhs.tv_nsec);
684
685 if (res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)
686 res.tv_sec = TIME_T_MAX;
687
688 return res;
689}
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 1a20715bfd6e..8ff15e5d486b 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -142,8 +142,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
142 time_state = TIME_OOP; 142 time_state = TIME_OOP;
143 printk(KERN_NOTICE "Clock: " 143 printk(KERN_NOTICE "Clock: "
144 "inserting leap second 23:59:60 UTC\n"); 144 "inserting leap second 23:59:60 UTC\n");
145 leap_timer.expires = ktime_add_ns(leap_timer.expires, 145 hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
146 NSEC_PER_SEC);
147 res = HRTIMER_RESTART; 146 res = HRTIMER_RESTART;
148 break; 147 break;
149 case TIME_DEL: 148 case TIME_DEL:
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 727c1ae0517a..5bbb1044f847 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -300,7 +300,7 @@ void tick_nohz_stop_sched_tick(int inidle)
300 goto out; 300 goto out;
301 } 301 }
302 302
303 ts->idle_tick = ts->sched_timer.expires; 303 ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
304 ts->tick_stopped = 1; 304 ts->tick_stopped = 1;
305 ts->idle_jiffies = last_jiffies; 305 ts->idle_jiffies = last_jiffies;
306 rcu_enter_nohz(); 306 rcu_enter_nohz();
@@ -380,21 +380,21 @@ ktime_t tick_nohz_get_sleep_length(void)
380static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) 380static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
381{ 381{
382 hrtimer_cancel(&ts->sched_timer); 382 hrtimer_cancel(&ts->sched_timer);
383 ts->sched_timer.expires = ts->idle_tick; 383 hrtimer_set_expires(&ts->sched_timer, ts->idle_tick);
384 384
385 while (1) { 385 while (1) {
386 /* Forward the time to expire in the future */ 386 /* Forward the time to expire in the future */
387 hrtimer_forward(&ts->sched_timer, now, tick_period); 387 hrtimer_forward(&ts->sched_timer, now, tick_period);
388 388
389 if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { 389 if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
390 hrtimer_start(&ts->sched_timer, 390 hrtimer_start_expires(&ts->sched_timer,
391 ts->sched_timer.expires,
392 HRTIMER_MODE_ABS); 391 HRTIMER_MODE_ABS);
393 /* Check, if the timer was already in the past */ 392 /* Check, if the timer was already in the past */
394 if (hrtimer_active(&ts->sched_timer)) 393 if (hrtimer_active(&ts->sched_timer))
395 break; 394 break;
396 } else { 395 } else {
397 if (!tick_program_event(ts->sched_timer.expires, 0)) 396 if (!tick_program_event(
397 hrtimer_get_expires(&ts->sched_timer), 0))
398 break; 398 break;
399 } 399 }
400 /* Update jiffies and reread time */ 400 /* Update jiffies and reread time */
@@ -456,14 +456,16 @@ void tick_nohz_restart_sched_tick(void)
456 */ 456 */
457 ts->tick_stopped = 0; 457 ts->tick_stopped = 0;
458 ts->idle_exittime = now; 458 ts->idle_exittime = now;
459
459 tick_nohz_restart(ts, now); 460 tick_nohz_restart(ts, now);
461
460 local_irq_enable(); 462 local_irq_enable();
461} 463}
462 464
463static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now) 465static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
464{ 466{
465 hrtimer_forward(&ts->sched_timer, now, tick_period); 467 hrtimer_forward(&ts->sched_timer, now, tick_period);
466 return tick_program_event(ts->sched_timer.expires, 0); 468 return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0);
467} 469}
468 470
469/* 471/*
@@ -542,7 +544,7 @@ static void tick_nohz_switch_to_nohz(void)
542 next = tick_init_jiffy_update(); 544 next = tick_init_jiffy_update();
543 545
544 for (;;) { 546 for (;;) {
545 ts->sched_timer.expires = next; 547 hrtimer_set_expires(&ts->sched_timer, next);
546 if (!tick_program_event(next, 0)) 548 if (!tick_program_event(next, 0))
547 break; 549 break;
548 next = ktime_add(next, tick_period); 550 next = ktime_add(next, tick_period);
@@ -577,7 +579,7 @@ static void tick_nohz_kick_tick(int cpu)
577 * already reached or less/equal than the tick period. 579 * already reached or less/equal than the tick period.
578 */ 580 */
579 now = ktime_get(); 581 now = ktime_get();
580 delta = ktime_sub(ts->sched_timer.expires, now); 582 delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
581 if (delta.tv64 <= tick_period.tv64) 583 if (delta.tv64 <= tick_period.tv64)
582 return; 584 return;
583 585
@@ -678,16 +680,15 @@ void tick_setup_sched_timer(void)
678 ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU; 680 ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
679 681
680 /* Get the next period (per cpu) */ 682 /* Get the next period (per cpu) */
681 ts->sched_timer.expires = tick_init_jiffy_update(); 683 hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
682 offset = ktime_to_ns(tick_period) >> 1; 684 offset = ktime_to_ns(tick_period) >> 1;
683 do_div(offset, num_possible_cpus()); 685 do_div(offset, num_possible_cpus());
684 offset *= smp_processor_id(); 686 offset *= smp_processor_id();
685 ts->sched_timer.expires = ktime_add_ns(ts->sched_timer.expires, offset); 687 hrtimer_add_expires_ns(&ts->sched_timer, offset);
686 688
687 for (;;) { 689 for (;;) {
688 hrtimer_forward(&ts->sched_timer, now, tick_period); 690 hrtimer_forward(&ts->sched_timer, now, tick_period);
689 hrtimer_start(&ts->sched_timer, ts->sched_timer.expires, 691 hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS);
690 HRTIMER_MODE_ABS);
691 /* Check, if the timer was already in the past */ 692 /* Check, if the timer was already in the past */
692 if (hrtimer_active(&ts->sched_timer)) 693 if (hrtimer_active(&ts->sched_timer))
693 break; 694 break;
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index f6426911e35a..a999b92a1277 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -66,9 +66,11 @@ print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer,
66 SEQ_printf(m, ", %s/%d", tmp, timer->start_pid); 66 SEQ_printf(m, ", %s/%d", tmp, timer->start_pid);
67#endif 67#endif
68 SEQ_printf(m, "\n"); 68 SEQ_printf(m, "\n");
69 SEQ_printf(m, " # expires at %Lu nsecs [in %Ld nsecs]\n", 69 SEQ_printf(m, " # expires at %Lu-%Lu nsecs [in %Ld to %Ld nsecs]\n",
70 (unsigned long long)ktime_to_ns(timer->expires), 70 (unsigned long long)ktime_to_ns(hrtimer_get_softexpires(timer)),
71 (long long)(ktime_to_ns(timer->expires) - now)); 71 (unsigned long long)ktime_to_ns(hrtimer_get_expires(timer)),
72 (long long)(ktime_to_ns(hrtimer_get_softexpires(timer)) - now),
73 (long long)(ktime_to_ns(hrtimer_get_expires(timer)) - now));
72} 74}
73 75
74static void 76static void