aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/hrtimer.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--kernel/hrtimer.c212
1 files changed, 191 insertions, 21 deletions
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index cdec83e722fa..51ee90bca2de 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -517,7 +517,7 @@ static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
517 if (!base->first) 517 if (!base->first)
518 continue; 518 continue;
519 timer = rb_entry(base->first, struct hrtimer, node); 519 timer = rb_entry(base->first, struct hrtimer, node);
520 expires = ktime_sub(timer->expires, base->offset); 520 expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
521 if (expires.tv64 < cpu_base->expires_next.tv64) 521 if (expires.tv64 < cpu_base->expires_next.tv64)
522 cpu_base->expires_next = expires; 522 cpu_base->expires_next = expires;
523 } 523 }
@@ -539,10 +539,10 @@ static int hrtimer_reprogram(struct hrtimer *timer,
539 struct hrtimer_clock_base *base) 539 struct hrtimer_clock_base *base)
540{ 540{
541 ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next; 541 ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
542 ktime_t expires = ktime_sub(timer->expires, base->offset); 542 ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
543 int res; 543 int res;
544 544
545 WARN_ON_ONCE(timer->expires.tv64 < 0); 545 WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
546 546
547 /* 547 /*
548 * When the callback is running, we do not reprogram the clock event 548 * When the callback is running, we do not reprogram the clock event
@@ -795,7 +795,7 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
795 u64 orun = 1; 795 u64 orun = 1;
796 ktime_t delta; 796 ktime_t delta;
797 797
798 delta = ktime_sub(now, timer->expires); 798 delta = ktime_sub(now, hrtimer_get_expires(timer));
799 799
800 if (delta.tv64 < 0) 800 if (delta.tv64 < 0)
801 return 0; 801 return 0;
@@ -807,8 +807,8 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
807 s64 incr = ktime_to_ns(interval); 807 s64 incr = ktime_to_ns(interval);
808 808
809 orun = ktime_divns(delta, incr); 809 orun = ktime_divns(delta, incr);
810 timer->expires = ktime_add_ns(timer->expires, incr * orun); 810 hrtimer_add_expires_ns(timer, incr * orun);
811 if (timer->expires.tv64 > now.tv64) 811 if (hrtimer_get_expires_tv64(timer) > now.tv64)
812 return orun; 812 return orun;
813 /* 813 /*
814 * This (and the ktime_add() below) is the 814 * This (and the ktime_add() below) is the
@@ -816,7 +816,7 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
816 */ 816 */
817 orun++; 817 orun++;
818 } 818 }
819 timer->expires = ktime_add_safe(timer->expires, interval); 819 hrtimer_add_expires(timer, interval);
820 820
821 return orun; 821 return orun;
822} 822}
@@ -848,7 +848,8 @@ static void enqueue_hrtimer(struct hrtimer *timer,
848 * We don't care about collisions. Nodes with 848 * We don't care about collisions. Nodes with
849 * the same expiry time stay together. 849 * the same expiry time stay together.
850 */ 850 */
851 if (timer->expires.tv64 < entry->expires.tv64) { 851 if (hrtimer_get_expires_tv64(timer) <
852 hrtimer_get_expires_tv64(entry)) {
852 link = &(*link)->rb_left; 853 link = &(*link)->rb_left;
853 } else { 854 } else {
854 link = &(*link)->rb_right; 855 link = &(*link)->rb_right;
@@ -945,9 +946,10 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
945} 946}
946 947
947/** 948/**
948 * hrtimer_start - (re)start a relative timer on the current CPU 949 * hrtimer_start_range_ns - (re)start a relative timer on the current CPU
949 * @timer: the timer to be added 950 * @timer: the timer to be added
950 * @tim: expiry time 951 * @tim: expiry time
952 * @delta_ns: "slack" range for the timer
951 * @mode: expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL) 953 * @mode: expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
952 * 954 *
953 * Returns: 955 * Returns:
@@ -955,7 +957,8 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
955 * 1 when the timer was active 957 * 1 when the timer was active
956 */ 958 */
957int 959int
958hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode) 960hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_ns,
961 const enum hrtimer_mode mode)
959{ 962{
960 struct hrtimer_clock_base *base, *new_base; 963 struct hrtimer_clock_base *base, *new_base;
961 unsigned long flags; 964 unsigned long flags;
@@ -983,7 +986,7 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
983#endif 986#endif
984 } 987 }
985 988
986 timer->expires = tim; 989 hrtimer_set_expires_range_ns(timer, tim, delta_ns);
987 990
988 timer_stats_hrtimer_set_start_info(timer); 991 timer_stats_hrtimer_set_start_info(timer);
989 992
@@ -1016,8 +1019,26 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
1016 1019
1017 return ret; 1020 return ret;
1018} 1021}
1022EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
1023
1024/**
1025 * hrtimer_start - (re)start an relative timer on the current CPU
1026 * @timer: the timer to be added
1027 * @tim: expiry time
1028 * @mode: expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
1029 *
1030 * Returns:
1031 * 0 on success
1032 * 1 when the timer was active
1033 */
1034int
1035hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
1036{
1037 return hrtimer_start_range_ns(timer, tim, 0, mode);
1038}
1019EXPORT_SYMBOL_GPL(hrtimer_start); 1039EXPORT_SYMBOL_GPL(hrtimer_start);
1020 1040
1041
1021/** 1042/**
1022 * hrtimer_try_to_cancel - try to deactivate a timer 1043 * hrtimer_try_to_cancel - try to deactivate a timer
1023 * @timer: hrtimer to stop 1044 * @timer: hrtimer to stop
@@ -1077,7 +1098,7 @@ ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
1077 ktime_t rem; 1098 ktime_t rem;
1078 1099
1079 base = lock_hrtimer_base(timer, &flags); 1100 base = lock_hrtimer_base(timer, &flags);
1080 rem = ktime_sub(timer->expires, base->get_time()); 1101 rem = hrtimer_expires_remaining(timer);
1081 unlock_hrtimer_base(timer, &flags); 1102 unlock_hrtimer_base(timer, &flags);
1082 1103
1083 return rem; 1104 return rem;
@@ -1109,7 +1130,7 @@ ktime_t hrtimer_get_next_event(void)
1109 continue; 1130 continue;
1110 1131
1111 timer = rb_entry(base->first, struct hrtimer, node); 1132 timer = rb_entry(base->first, struct hrtimer, node);
1112 delta.tv64 = timer->expires.tv64; 1133 delta.tv64 = hrtimer_get_expires_tv64(timer);
1113 delta = ktime_sub(delta, base->get_time()); 1134 delta = ktime_sub(delta, base->get_time());
1114 if (delta.tv64 < mindelta.tv64) 1135 if (delta.tv64 < mindelta.tv64)
1115 mindelta.tv64 = delta.tv64; 1136 mindelta.tv64 = delta.tv64;
@@ -1310,10 +1331,23 @@ void hrtimer_interrupt(struct clock_event_device *dev)
1310 1331
1311 timer = rb_entry(node, struct hrtimer, node); 1332 timer = rb_entry(node, struct hrtimer, node);
1312 1333
1313 if (basenow.tv64 < timer->expires.tv64) { 1334 /*
1335 * The immediate goal for using the softexpires is
1336 * minimizing wakeups, not running timers at the
1337 * earliest interrupt after their soft expiration.
1338 * This allows us to avoid using a Priority Search
1339 * Tree, which can answer a stabbing query for
1340 * overlapping intervals and instead use the simple
1341 * BST we already have.
1342 * We don't add extra wakeups by delaying timers that
1343 * are right-of a not yet expired timer, because that
1344 * timer will have to trigger a wakeup anyway.
1345 */
1346
1347 if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
1314 ktime_t expires; 1348 ktime_t expires;
1315 1349
1316 expires = ktime_sub(timer->expires, 1350 expires = ktime_sub(hrtimer_get_expires(timer),
1317 base->offset); 1351 base->offset);
1318 if (expires.tv64 < expires_next.tv64) 1352 if (expires.tv64 < expires_next.tv64)
1319 expires_next = expires; 1353 expires_next = expires;
@@ -1349,6 +1383,36 @@ void hrtimer_interrupt(struct clock_event_device *dev)
1349 raise_softirq(HRTIMER_SOFTIRQ); 1383 raise_softirq(HRTIMER_SOFTIRQ);
1350} 1384}
1351 1385
1386/**
1387 * hrtimer_peek_ahead_timers -- run soft-expired timers now
1388 *
1389 * hrtimer_peek_ahead_timers will peek at the timer queue of
1390 * the current cpu and check if there are any timers for which
1391 * the soft expires time has passed. If any such timers exist,
1392 * they are run immediately and then removed from the timer queue.
1393 *
1394 */
1395void hrtimer_peek_ahead_timers(void)
1396{
1397 unsigned long flags;
1398 struct tick_device *td;
1399 struct clock_event_device *dev;
1400
1401 if (!hrtimer_hres_active())
1402 return;
1403
1404 local_irq_save(flags);
1405 td = &__get_cpu_var(tick_cpu_device);
1406 if (!td)
1407 goto out;
1408 dev = td->evtdev;
1409 if (!dev)
1410 goto out;
1411 hrtimer_interrupt(dev);
1412out:
1413 local_irq_restore(flags);
1414}
1415
1352static void run_hrtimer_softirq(struct softirq_action *h) 1416static void run_hrtimer_softirq(struct softirq_action *h)
1353{ 1417{
1354 run_hrtimer_pending(&__get_cpu_var(hrtimer_bases)); 1418 run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));
@@ -1416,7 +1480,8 @@ void hrtimer_run_queues(void)
1416 struct hrtimer *timer; 1480 struct hrtimer *timer;
1417 1481
1418 timer = rb_entry(node, struct hrtimer, node); 1482 timer = rb_entry(node, struct hrtimer, node);
1419 if (base->softirq_time.tv64 <= timer->expires.tv64) 1483 if (base->softirq_time.tv64 <=
1484 hrtimer_get_expires_tv64(timer))
1420 break; 1485 break;
1421 1486
1422 if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) { 1487 if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
@@ -1464,7 +1529,7 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod
1464 1529
1465 do { 1530 do {
1466 set_current_state(TASK_INTERRUPTIBLE); 1531 set_current_state(TASK_INTERRUPTIBLE);
1467 hrtimer_start(&t->timer, t->timer.expires, mode); 1532 hrtimer_start_expires(&t->timer, mode);
1468 if (!hrtimer_active(&t->timer)) 1533 if (!hrtimer_active(&t->timer))
1469 t->task = NULL; 1534 t->task = NULL;
1470 1535
@@ -1486,7 +1551,7 @@ static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
1486 struct timespec rmt; 1551 struct timespec rmt;
1487 ktime_t rem; 1552 ktime_t rem;
1488 1553
1489 rem = ktime_sub(timer->expires, timer->base->get_time()); 1554 rem = hrtimer_expires_remaining(timer);
1490 if (rem.tv64 <= 0) 1555 if (rem.tv64 <= 0)
1491 return 0; 1556 return 0;
1492 rmt = ktime_to_timespec(rem); 1557 rmt = ktime_to_timespec(rem);
@@ -1505,7 +1570,7 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
1505 1570
1506 hrtimer_init_on_stack(&t.timer, restart->nanosleep.index, 1571 hrtimer_init_on_stack(&t.timer, restart->nanosleep.index,
1507 HRTIMER_MODE_ABS); 1572 HRTIMER_MODE_ABS);
1508 t.timer.expires.tv64 = restart->nanosleep.expires; 1573 hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
1509 1574
1510 if (do_nanosleep(&t, HRTIMER_MODE_ABS)) 1575 if (do_nanosleep(&t, HRTIMER_MODE_ABS))
1511 goto out; 1576 goto out;
@@ -1530,9 +1595,14 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
1530 struct restart_block *restart; 1595 struct restart_block *restart;
1531 struct hrtimer_sleeper t; 1596 struct hrtimer_sleeper t;
1532 int ret = 0; 1597 int ret = 0;
1598 unsigned long slack;
1599
1600 slack = current->timer_slack_ns;
1601 if (rt_task(current))
1602 slack = 0;
1533 1603
1534 hrtimer_init_on_stack(&t.timer, clockid, mode); 1604 hrtimer_init_on_stack(&t.timer, clockid, mode);
1535 t.timer.expires = timespec_to_ktime(*rqtp); 1605 hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
1536 if (do_nanosleep(&t, mode)) 1606 if (do_nanosleep(&t, mode))
1537 goto out; 1607 goto out;
1538 1608
@@ -1552,7 +1622,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
1552 restart->fn = hrtimer_nanosleep_restart; 1622 restart->fn = hrtimer_nanosleep_restart;
1553 restart->nanosleep.index = t.timer.base->index; 1623 restart->nanosleep.index = t.timer.base->index;
1554 restart->nanosleep.rmtp = rmtp; 1624 restart->nanosleep.rmtp = rmtp;
1555 restart->nanosleep.expires = t.timer.expires.tv64; 1625 restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
1556 1626
1557 ret = -ERESTART_RESTARTBLOCK; 1627 ret = -ERESTART_RESTARTBLOCK;
1558out: 1628out:
@@ -1753,3 +1823,103 @@ void __init hrtimers_init(void)
1753#endif 1823#endif
1754} 1824}
1755 1825
1826/**
1827 * schedule_hrtimeout_range - sleep until timeout
1828 * @expires: timeout value (ktime_t)
1829 * @delta: slack in expires timeout (ktime_t)
1830 * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
1831 *
1832 * Make the current task sleep until the given expiry time has
1833 * elapsed. The routine will return immediately unless
1834 * the current task state has been set (see set_current_state()).
1835 *
1836 * The @delta argument gives the kernel the freedom to schedule the
1837 * actual wakeup to a time that is both power and performance friendly.
1838 * The kernel give the normal best effort behavior for "@expires+@delta",
1839 * but may decide to fire the timer earlier, but no earlier than @expires.
1840 *
1841 * You can set the task state as follows -
1842 *
1843 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
1844 * pass before the routine returns.
1845 *
1846 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1847 * delivered to the current task.
1848 *
1849 * The current task state is guaranteed to be TASK_RUNNING when this
1850 * routine returns.
1851 *
1852 * Returns 0 when the timer has expired otherwise -EINTR
1853 */
1854int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
1855 const enum hrtimer_mode mode)
1856{
1857 struct hrtimer_sleeper t;
1858
1859 /*
1860 * Optimize when a zero timeout value is given. It does not
1861 * matter whether this is an absolute or a relative time.
1862 */
1863 if (expires && !expires->tv64) {
1864 __set_current_state(TASK_RUNNING);
1865 return 0;
1866 }
1867
1868 /*
1869 * A NULL parameter means "inifinte"
1870 */
1871 if (!expires) {
1872 schedule();
1873 __set_current_state(TASK_RUNNING);
1874 return -EINTR;
1875 }
1876
1877 hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode);
1878 hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
1879
1880 hrtimer_init_sleeper(&t, current);
1881
1882 hrtimer_start_expires(&t.timer, mode);
1883 if (!hrtimer_active(&t.timer))
1884 t.task = NULL;
1885
1886 if (likely(t.task))
1887 schedule();
1888
1889 hrtimer_cancel(&t.timer);
1890 destroy_hrtimer_on_stack(&t.timer);
1891
1892 __set_current_state(TASK_RUNNING);
1893
1894 return !t.task ? 0 : -EINTR;
1895}
1896EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
1897
1898/**
1899 * schedule_hrtimeout - sleep until timeout
1900 * @expires: timeout value (ktime_t)
1901 * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
1902 *
1903 * Make the current task sleep until the given expiry time has
1904 * elapsed. The routine will return immediately unless
1905 * the current task state has been set (see set_current_state()).
1906 *
1907 * You can set the task state as follows -
1908 *
1909 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
1910 * pass before the routine returns.
1911 *
1912 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1913 * delivered to the current task.
1914 *
1915 * The current task state is guaranteed to be TASK_RUNNING when this
1916 * routine returns.
1917 *
1918 * Returns 0 when the timer has expired otherwise -EINTR
1919 */
1920int __sched schedule_hrtimeout(ktime_t *expires,
1921 const enum hrtimer_mode mode)
1922{
1923 return schedule_hrtimeout_range(expires, 0, mode);
1924}
1925EXPORT_SYMBOL_GPL(schedule_hrtimeout);