Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--  kernel/hrtimer.c | 206
1 file changed, 185 insertions(+), 21 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 95978f48e039..2b465dfde426 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -517,7 +517,7 @@ static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
                if (!base->first)
                        continue;
                timer = rb_entry(base->first, struct hrtimer, node);
-               expires = ktime_sub(timer->expires, base->offset);
+               expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
                if (expires.tv64 < cpu_base->expires_next.tv64)
                        cpu_base->expires_next = expires;
        }
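
The hunk above is the template for most of this patch: direct accesses to timer->expires become accessor calls, so the expiry can grow into a range (a soft and a hard bound) without revisiting every call site again. The helpers themselves are added to include/linux/hrtimer.h elsewhere in the series; what follows is a sketch of them from that context, not part of this diff, so treat the field and helper names as assumptions:

/*
 * Sketch of the range accessors assumed by this patch (added to
 * include/linux/hrtimer.h in the same series). The hrtimer now carries
 * a soft expiry (_softexpires) and a hard expiry (_expires); the timer
 * may fire anywhere inside [_softexpires, _expires].
 */
static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer,
                                                ktime_t time,
                                                unsigned long delta)
{
        timer->_softexpires = time;
        timer->_expires = ktime_add_safe(time, ns_to_ktime(delta));
}

static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer)
{
        return timer->_expires;         /* hard (latest) expiry */
}

static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
{
        return timer->_softexpires;     /* soft (earliest) expiry */
}

static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
{
        return timer->_expires.tv64;
}
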
@@ -539,10 +539,10 @@ static int hrtimer_reprogram(struct hrtimer *timer,
                            struct hrtimer_clock_base *base)
 {
        ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
-       ktime_t expires = ktime_sub(timer->expires, base->offset);
+       ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
        int res;

-       WARN_ON_ONCE(timer->expires.tv64 < 0);
+       WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

        /*
         * When the callback is running, we do not reprogram the clock event
@@ -795,7 +795,7 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
        u64 orun = 1;
        ktime_t delta;

-       delta = ktime_sub(now, timer->expires);
+       delta = ktime_sub(now, hrtimer_get_expires(timer));

        if (delta.tv64 < 0)
                return 0;
@@ -807,8 +807,8 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
                s64 incr = ktime_to_ns(interval);

                orun = ktime_divns(delta, incr);
-               timer->expires = ktime_add_ns(timer->expires, incr * orun);
-               if (timer->expires.tv64 > now.tv64)
+               hrtimer_add_expires_ns(timer, incr * orun);
+               if (hrtimer_get_expires_tv64(timer) > now.tv64)
                        return orun;
                /*
                 * This (and the ktime_add() below) is the
@@ -816,7 +816,7 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
                 */
                orun++;
        }
-       timer->expires = ktime_add_safe(timer->expires, interval);
+       hrtimer_add_expires(timer, interval);

        return orun;
 }
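
hrtimer_forward() is what periodic callbacks use to push the expiry forward past "now" in whole intervals; the hunks above only swap the direct field accesses for the range-aware helpers, the overrun arithmetic is unchanged. A minimal sketch of the usual call pattern (the callback and the 10 ms period are invented for the example):

/*
 * Hypothetical periodic callback: re-arm the timer every 10 ms and let
 * hrtimer_forward() report how many periods were skipped.
 */
static enum hrtimer_restart my_periodic_cb(struct hrtimer *timer)
{
        ktime_t period = ktime_set(0, 10 * NSEC_PER_MSEC);
        u64 orun;

        /* Advance the expiry past "now" in whole periods. */
        orun = hrtimer_forward(timer, timer->base->get_time(), period);
        if (orun > 1)
                pr_debug("missed %llu periods\n", orun - 1);

        return HRTIMER_RESTART;         /* requeue with the new expiry */
}
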
@@ -848,7 +848,8 @@ static void enqueue_hrtimer(struct hrtimer *timer,
                 * We dont care about collisions. Nodes with
                 * the same expiry time stay together.
                 */
-               if (timer->expires.tv64 < entry->expires.tv64) {
+               if (hrtimer_get_expires_tv64(timer) <
+                   hrtimer_get_expires_tv64(entry)) {
                        link = &(*link)->rb_left;
                } else {
                        link = &(*link)->rb_right;
@@ -945,9 +946,10 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
 }

 /**
- * hrtimer_start - (re)start an relative timer on the current CPU
+ * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
  * @timer:     the timer to be added
  * @tim:       expiry time
+ * @delta_ns:  "slack" range for the timer
  * @mode:      expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
  *
  * Returns:
@@ -955,7 +957,8 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
  * 1 when the timer was active
  */
 int
-hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
+hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_ns,
+               const enum hrtimer_mode mode)
 {
        struct hrtimer_clock_base *base, *new_base;
        unsigned long flags;
@@ -983,7 +986,7 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
 #endif
        }

-       timer->expires = tim;
+       hrtimer_set_expires_range_ns(timer, tim, delta_ns);

        timer_stats_hrtimer_set_start_info(timer);

@@ -1016,8 +1019,26 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)

        return ret;
 }
+EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
+
+/**
+ * hrtimer_start - (re)start an hrtimer on the current CPU
+ * @timer:     the timer to be added
+ * @tim:       expiry time
+ * @mode:      expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
+ *
+ * Returns:
+ *  0 on success
+ *  1 when the timer was active
+ */
+int
+hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
+{
+       return hrtimer_start_range_ns(timer, tim, 0, mode);
+}
 EXPORT_SYMBOL_GPL(hrtimer_start);

+
 /**
  * hrtimer_try_to_cancel - try to deactivate a timer
  * @timer:     hrtimer to stop
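
With the wrapper in place, hrtimer_start() keeps its old exact-expiry semantics (slack of 0), while callers that can tolerate deferral pass a range. A hedged sketch of how a caller might use the new entry point (the timer, callback, and values are invented for the example):

/*
 * Hypothetical caller: fire between t+100ms and t+105ms, letting the
 * kernel coalesce the wakeup with other timers in that window.
 */
static struct hrtimer my_timer;

static void arm_with_slack(void)
{
        ktime_t soft = ktime_set(0, 100 * NSEC_PER_MSEC);

        hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        my_timer.function = my_periodic_cb;     /* callback from the sketch above */
        /* 5 ms of slack: soft expiry at 100 ms, hard expiry at 105 ms. */
        hrtimer_start_range_ns(&my_timer, soft, 5 * NSEC_PER_MSEC,
                               HRTIMER_MODE_REL);
}
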
@@ -1077,7 +1098,7 @@ ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
        ktime_t rem;

        base = lock_hrtimer_base(timer, &flags);
-       rem = ktime_sub(timer->expires, base->get_time());
+       rem = hrtimer_expires_remaining(timer);
        unlock_hrtimer_base(timer, &flags);

        return rem;
@@ -1109,7 +1130,7 @@ ktime_t hrtimer_get_next_event(void)
                        continue;

                timer = rb_entry(base->first, struct hrtimer, node);
-               delta.tv64 = timer->expires.tv64;
+               delta.tv64 = hrtimer_get_expires_tv64(timer);
                delta = ktime_sub(delta, base->get_time());
                if (delta.tv64 < mindelta.tv64)
                        mindelta.tv64 = delta.tv64;
@@ -1310,10 +1331,23 @@ void hrtimer_interrupt(struct clock_event_device *dev)

                timer = rb_entry(node, struct hrtimer, node);

-               if (basenow.tv64 < timer->expires.tv64) {
+               /*
+                * The immediate goal for using the softexpires is
+                * minimizing wakeups, not running timers at the
+                * earliest interrupt after their soft expiration.
+                * This allows us to avoid using a Priority Search
+                * Tree, which can answer a stabbing query for
+                * overlapping intervals, and instead use the simple
+                * BST we already have.
+                * We don't add extra wakeups by delaying timers that
+                * are right of a not yet expired timer, because that
+                * timer will have to trigger a wakeup anyway.
+                */
+
+               if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
                        ktime_t expires;

-                       expires = ktime_sub(timer->expires,
+                       expires = ktime_sub(hrtimer_get_expires(timer),
                                            base->offset);
                        if (expires.tv64 < expires_next.tv64)
                                expires_next = expires;
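
The effect of comparing against the soft expiry here is batching: once the interrupt fires for any hard-expired timer, every timer whose soft expiry has already passed runs in the same pass, even though its hard deadline may still lie in the future. A worked example with invented times, basenow = 100 us, and the rbtree ordered on hard expiry as in enqueue_hrtimer() above:

        timer A: soft  90, hard  95  - hard deadline passed; its interrupt fired
        timer B: soft  98, hard 110  - already soft-expired, runs in the same pass
        timer C: soft 120, hard 130  - basenow < softexpires, so the walk stops
                                       and the event device is reprogrammed for
                                       C's hard expiry (130), not its soft (120)

One interrupt services A and B together, and C alone determines the next wakeup.
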
@@ -1349,6 +1383,30 @@ void hrtimer_interrupt(struct clock_event_device *dev)
                raise_softirq(HRTIMER_SOFTIRQ);
 }

+/**
+ * hrtimer_peek_ahead_timers -- run soft-expired timers now
+ *
+ * hrtimer_peek_ahead_timers will peek at the timer queue of
+ * the current cpu and check if there are any timers for which
+ * the soft expires time has passed. If any such timers exist,
+ * they are run immediately and then removed from the timer queue.
+ *
+ */
+void hrtimer_peek_ahead_timers(void)
+{
+       struct tick_device *td;
+       unsigned long flags;
+
+       if (!hrtimer_hres_active())
+               return;
+
+       local_irq_save(flags);
+       td = &__get_cpu_var(tick_cpu_device);
+       if (td && td->evtdev)
+               hrtimer_interrupt(td->evtdev);
+       local_irq_restore(flags);
+}
+
 static void run_hrtimer_softirq(struct softirq_action *h)
 {
        run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));
@@ -1414,7 +1472,8 @@ void hrtimer_run_queues(void)
                struct hrtimer *timer;

                timer = rb_entry(node, struct hrtimer, node);
-               if (base->softirq_time.tv64 <= timer->expires.tv64)
+               if (base->softirq_time.tv64 <=
+                               hrtimer_get_expires_tv64(timer))
                        break;

                if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
@@ -1462,7 +1521,7 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod

        do {
                set_current_state(TASK_INTERRUPTIBLE);
-               hrtimer_start(&t->timer, t->timer.expires, mode);
+               hrtimer_start_expires(&t->timer, mode);
                if (!hrtimer_active(&t->timer))
                        t->task = NULL;

@@ -1484,7 +1543,7 @@ static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
        struct timespec rmt;
        ktime_t rem;

-       rem = ktime_sub(timer->expires, timer->base->get_time());
+       rem = hrtimer_expires_remaining(timer);
        if (rem.tv64 <= 0)
                return 0;
        rmt = ktime_to_timespec(rem);
@@ -1503,7 +1562,7 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)

        hrtimer_init_on_stack(&t.timer, restart->nanosleep.index,
                                HRTIMER_MODE_ABS);
-       t.timer.expires.tv64 = restart->nanosleep.expires;
+       hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);

        if (do_nanosleep(&t, HRTIMER_MODE_ABS))
                goto out;
@@ -1528,9 +1587,14 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
        struct restart_block *restart;
        struct hrtimer_sleeper t;
        int ret = 0;
+       unsigned long slack;
+
+       slack = current->timer_slack_ns;
+       if (rt_task(current))
+               slack = 0;

        hrtimer_init_on_stack(&t.timer, clockid, mode);
-       t.timer.expires = timespec_to_ktime(*rqtp);
+       hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
        if (do_nanosleep(&t, mode))
                goto out;

@@ -1550,7 +1614,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
        restart->fn = hrtimer_nanosleep_restart;
        restart->nanosleep.index = t.timer.base->index;
        restart->nanosleep.rmtp = rmtp;
-       restart->nanosleep.expires = t.timer.expires.tv64;
+       restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);

        ret = -ERESTART_RESTARTBLOCK;
 out:
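
Note what the two hunks above mean for userspace: every nanosleep() now inherits the calling task's timer_slack_ns as its range, while realtime tasks keep exact wakeups. Assuming the PR_SET_TIMERSLACK prctl introduced in the same patch series (not shown in this diff), a task could opt into coarser sleeps like this (values invented):

/*
 * Userspace sketch: widen this task's timer slack to 1 ms so its
 * nanosleep() wakeups can be coalesced with other timers.
 */
#include <stdio.h>
#include <time.h>
#include <sys/prctl.h>

#ifndef PR_SET_TIMERSLACK
#define PR_SET_TIMERSLACK 29    /* from linux/prctl.h in this series */
#endif

int main(void)
{
        struct timespec req = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

        if (prctl(PR_SET_TIMERSLACK, 1000000UL, 0, 0, 0))   /* 1 ms slack */
                perror("prctl");

        /* May now wake anywhere in [100 ms, 100 ms + slack). */
        nanosleep(&req, NULL);
        return 0;
}
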
@@ -1752,3 +1816,103 @@ void __init hrtimers_init(void)
 #endif
 }

+/**
+ * schedule_hrtimeout_range - sleep until timeout
+ * @expires:   timeout value (ktime_t)
+ * @delta:     slack in expires timeout (nanoseconds)
+ * @mode:      timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
+ *
+ * Make the current task sleep until the given expiry time has
+ * elapsed. The routine will return immediately unless
+ * the current task state has been set (see set_current_state()).
+ *
+ * The @delta argument gives the kernel the freedom to schedule the
+ * actual wakeup to a time that is both power and performance friendly.
+ * The kernel gives the normal best effort behavior for "@expires+@delta",
+ * but may decide to fire the timer earlier, though no earlier than @expires.
+ *
+ * You can set the task state as follows -
+ *
+ * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
+ * pass before the routine returns.
+ *
+ * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
+ * delivered to the current task.
+ *
+ * The current task state is guaranteed to be TASK_RUNNING when this
+ * routine returns.
+ *
+ * Returns 0 when the timer has expired, otherwise -EINTR
+ */
+int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
+                                    const enum hrtimer_mode mode)
+{
+       struct hrtimer_sleeper t;
+
+       /*
+        * Optimize when a zero timeout value is given. It does not
+        * matter whether this is an absolute or a relative time.
+        */
+       if (expires && !expires->tv64) {
+               __set_current_state(TASK_RUNNING);
+               return 0;
+       }
+
+       /*
+        * A NULL parameter means "infinite"
+        */
+       if (!expires) {
+               schedule();
+               __set_current_state(TASK_RUNNING);
+               return -EINTR;
+       }
+
+       hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode);
+       hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
+
+       hrtimer_init_sleeper(&t, current);
+
+       hrtimer_start_expires(&t.timer, mode);
+       if (!hrtimer_active(&t.timer))
+               t.task = NULL;
+
+       if (likely(t.task))
+               schedule();
+
+       hrtimer_cancel(&t.timer);
+       destroy_hrtimer_on_stack(&t.timer);
+
+       __set_current_state(TASK_RUNNING);
+
+       return !t.task ? 0 : -EINTR;
+}
+EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
+
+/**
+ * schedule_hrtimeout - sleep until timeout
+ * @expires:   timeout value (ktime_t)
+ * @mode:      timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
+ *
+ * Make the current task sleep until the given expiry time has
+ * elapsed. The routine will return immediately unless
+ * the current task state has been set (see set_current_state()).
+ *
+ * You can set the task state as follows -
+ *
+ * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
+ * pass before the routine returns.
+ *
+ * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
+ * delivered to the current task.
+ *
+ * The current task state is guaranteed to be TASK_RUNNING when this
+ * routine returns.
+ *
+ * Returns 0 when the timer has expired, otherwise -EINTR
+ */
+int __sched schedule_hrtimeout(ktime_t *expires,
+                              const enum hrtimer_mode mode)
+{
+       return schedule_hrtimeout_range(expires, 0, mode);
+}
+EXPORT_SYMBOL_GPL(schedule_hrtimeout);
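
For in-kernel callers, schedule_hrtimeout_range() is the slack-aware counterpart to the usual set_current_state()/schedule_timeout() pattern. A minimal sketch of a caller (the function, the 10 ms wait, and the 1 ms slack are invented for the example):

/*
 * Hypothetical wait: sleep about 10 ms, allowing up to 1 ms of deferral
 * so the wakeup can share an interrupt with other timers.
 */
static int wait_for_hardware(void)
{
        ktime_t timeout = ktime_set(0, 10 * NSEC_PER_MSEC);

        set_current_state(TASK_INTERRUPTIBLE);  /* must precede the call */
        if (schedule_hrtimeout_range(&timeout, NSEC_PER_MSEC,
                                     HRTIMER_MODE_REL))
                return -EINTR;  /* a signal arrived before the timeout */

        return 0;               /* full (possibly slack-extended) wait elapsed */
}
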
