diff options
author | Arjan van de Ven <arjan@linux.intel.com> | 2008-09-01 18:47:08 -0400 |
---|---|---|
committer | Arjan van de Ven <arjan@linux.intel.com> | 2008-09-06 00:35:27 -0400 |
commit | 654c8e0b1c623b156c5b92f28d914ab38c9c2c90 (patch) | |
tree | b3b1f8cfa8f3b12170f0d8b8770857182a2f0309 | |
parent | 799b64de256ea68fbb5db63bb55f61c305870643 (diff) |
hrtimer: turn hrtimers into range timers
this patch turns hrtimers into range timers; they have 2 expire points
1) the soft expire point
2) the hard expire point
The kernel will do its regular best-effort attempt to get the timer to run
at the hard expire point. However, if some other timer fires after the soft
expire point, the kernel now has the freedom to fire this timer at that point,
thus grouping the events and preventing a power-expensive wakeup in the
future.
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
-rw-r--r-- | include/linux/hrtimer.h | 31 | ||||
-rw-r--r-- | kernel/hrtimer.c | 56 |
2 files changed, 82 insertions, 5 deletions
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 485a634fd6e2..28259c336679 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h | |||
@@ -112,6 +112,7 @@ enum hrtimer_cb_mode { | |||
112 | struct hrtimer { | 112 | struct hrtimer { |
113 | struct rb_node node; | 113 | struct rb_node node; |
114 | ktime_t _expires; | 114 | ktime_t _expires; |
115 | ktime_t _softexpires; | ||
115 | enum hrtimer_restart (*function)(struct hrtimer *); | 116 | enum hrtimer_restart (*function)(struct hrtimer *); |
116 | struct hrtimer_clock_base *base; | 117 | struct hrtimer_clock_base *base; |
117 | unsigned long state; | 118 | unsigned long state; |
@@ -220,20 +221,37 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer) | |||
220 | static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) | 221 | static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) |
221 | { | 222 | { |
222 | timer->_expires = time; | 223 | timer->_expires = time; |
224 | timer->_softexpires = time; | ||
223 | } | 225 | } |
226 | |||
227 | static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta) | ||
228 | { | ||
229 | timer->_softexpires = time; | ||
230 | timer->_expires = ktime_add_safe(time, delta); | ||
231 | } | ||
232 | |||
233 | static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta) | ||
234 | { | ||
235 | timer->_softexpires = time; | ||
236 | timer->_expires = ktime_add_safe(time, ns_to_ktime(delta)); | ||
237 | } | ||
238 | |||
224 | static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64) | 239 | static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64) |
225 | { | 240 | { |
226 | timer->_expires.tv64 = tv64; | 241 | timer->_expires.tv64 = tv64; |
242 | timer->_softexpires.tv64 = tv64; | ||
227 | } | 243 | } |
228 | 244 | ||
229 | static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time) | 245 | static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time) |
230 | { | 246 | { |
231 | timer->_expires = ktime_add_safe(timer->_expires, time); | 247 | timer->_expires = ktime_add_safe(timer->_expires, time); |
248 | timer->_softexpires = ktime_add_safe(timer->_softexpires, time); | ||
232 | } | 249 | } |
233 | 250 | ||
234 | static inline void hrtimer_add_expires_ns(struct hrtimer *timer, unsigned long ns) | 251 | static inline void hrtimer_add_expires_ns(struct hrtimer *timer, unsigned long ns) |
235 | { | 252 | { |
236 | timer->_expires = ktime_add_ns(timer->_expires, ns); | 253 | timer->_expires = ktime_add_ns(timer->_expires, ns); |
254 | timer->_softexpires = ktime_add_ns(timer->_softexpires, ns); | ||
237 | } | 255 | } |
238 | 256 | ||
239 | static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer) | 257 | static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer) |
@@ -241,10 +259,19 @@ static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer) | |||
241 | return timer->_expires; | 259 | return timer->_expires; |
242 | } | 260 | } |
243 | 261 | ||
262 | static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer) | ||
263 | { | ||
264 | return timer->_softexpires; | ||
265 | } | ||
266 | |||
244 | static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer) | 267 | static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer) |
245 | { | 268 | { |
246 | return timer->_expires.tv64; | 269 | return timer->_expires.tv64; |
247 | } | 270 | } |
271 | static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer) | ||
272 | { | ||
273 | return timer->_softexpires.tv64; | ||
274 | } | ||
248 | 275 | ||
249 | static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer) | 276 | static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer) |
250 | { | 277 | { |
@@ -334,7 +361,7 @@ static inline int hrtimer_start_expires(struct hrtimer *timer, | |||
334 | 361 | ||
335 | static inline int hrtimer_restart(struct hrtimer *timer) | 362 | static inline int hrtimer_restart(struct hrtimer *timer) |
336 | { | 363 | { |
337 | return hrtimer_start(timer, timer->_expires, HRTIMER_MODE_ABS); | 364 | return hrtimer_start_expires(timer, HRTIMER_MODE_ABS); |
338 | } | 365 | } |
339 | 366 | ||
340 | /* Query timers: */ | 367 | /* Query timers: */ |
@@ -391,6 +418,8 @@ extern long hrtimer_nanosleep_restart(struct restart_block *restart_block); | |||
391 | extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, | 418 | extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, |
392 | struct task_struct *tsk); | 419 | struct task_struct *tsk); |
393 | 420 | ||
421 | extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta, | ||
422 | const enum hrtimer_mode mode); | ||
394 | extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); | 423 | extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); |
395 | 424 | ||
396 | /* Soft interrupt function to run the hrtimer queues: */ | 425 | /* Soft interrupt function to run the hrtimer queues: */ |
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index ae307feec74c..01483004183d 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
@@ -1309,7 +1309,20 @@ void hrtimer_interrupt(struct clock_event_device *dev) | |||
1309 | 1309 | ||
1310 | timer = rb_entry(node, struct hrtimer, node); | 1310 | timer = rb_entry(node, struct hrtimer, node); |
1311 | 1311 | ||
1312 | if (basenow.tv64 < hrtimer_get_expires_tv64(timer)) { | 1312 | /* |
1313 | * The immediate goal for using the softexpires is | ||
1314 | * minimizing wakeups, not running timers at the | ||
1315 | * earliest interrupt after their soft expiration. | ||
1316 | * This allows us to avoid using a Priority Search | ||
1317 | * Tree, which can answer a stabbing query for | ||
1318 | * overlapping intervals and instead use the simple | ||
1319 | * BST we already have. | ||
1320 | * We don't add extra wakeups by delaying timers that | ||
1321 | * are right-of a not yet expired timer, because that | ||
1322 | * timer will have to trigger a wakeup anyway. | ||
1323 | */ | ||
1324 | |||
1325 | if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) { | ||
1313 | ktime_t expires; | 1326 | ktime_t expires; |
1314 | 1327 | ||
1315 | expires = ktime_sub(hrtimer_get_expires(timer), | 1328 | expires = ktime_sub(hrtimer_get_expires(timer), |
@@ -1681,14 +1694,20 @@ void __init hrtimers_init(void) | |||
1681 | } | 1694 | } |
1682 | 1695 | ||
1683 | /** | 1696 | /** |
1684 | * schedule_hrtimeout - sleep until timeout | 1697 | * schedule_hrtimeout_range - sleep until timeout |
1685 | * @expires: timeout value (ktime_t) | 1698 | * @expires: timeout value (ktime_t) |
1699 | * @delta: slack in expires timeout (ktime_t) | ||
1686 | * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL | 1700 | * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL |
1687 | * | 1701 | * |
1688 | * Make the current task sleep until the given expiry time has | 1702 | * Make the current task sleep until the given expiry time has |
1689 | * elapsed. The routine will return immediately unless | 1703 | * elapsed. The routine will return immediately unless |
1690 | * the current task state has been set (see set_current_state()). | 1704 | * the current task state has been set (see set_current_state()). |
1691 | * | 1705 | * |
1706 | * The @delta argument gives the kernel the freedom to schedule the | ||
1707 | * actual wakeup to a time that is both power and performance friendly. | ||
1708 | * The kernel gives the normal best effort behavior for "@expires+@delta", | ||
1709 | * but may decide to fire the timer earlier, but no earlier than @expires. | ||
1710 | * | ||
1692 | * You can set the task state as follows - | 1711 | * You can set the task state as follows - |
1693 | * | 1712 | * |
1694 | * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to | 1713 | * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to |
@@ -1702,7 +1721,7 @@ void __init hrtimers_init(void) | |||
1702 | * | 1721 | * |
1703 | * Returns 0 when the timer has expired otherwise -EINTR | 1722 | * Returns 0 when the timer has expired otherwise -EINTR |
1704 | */ | 1723 | */ |
1705 | int __sched schedule_hrtimeout(ktime_t *expires, | 1724 | int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta, |
1706 | const enum hrtimer_mode mode) | 1725 | const enum hrtimer_mode mode) |
1707 | { | 1726 | { |
1708 | struct hrtimer_sleeper t; | 1727 | struct hrtimer_sleeper t; |
@@ -1726,7 +1745,7 @@ int __sched schedule_hrtimeout(ktime_t *expires, | |||
1726 | } | 1745 | } |
1727 | 1746 | ||
1728 | hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode); | 1747 | hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode); |
1729 | hrtimer_set_expires(&t.timer, *expires); | 1748 | hrtimer_set_expires_range_ns(&t.timer, *expires, delta); |
1730 | 1749 | ||
1731 | hrtimer_init_sleeper(&t, current); | 1750 | hrtimer_init_sleeper(&t, current); |
1732 | 1751 | ||
@@ -1744,4 +1763,33 @@ int __sched schedule_hrtimeout(ktime_t *expires, | |||
1744 | 1763 | ||
1745 | return !t.task ? 0 : -EINTR; | 1764 | return !t.task ? 0 : -EINTR; |
1746 | } | 1765 | } |
1766 | EXPORT_SYMBOL_GPL(schedule_hrtimeout_range); | ||
1767 | |||
1768 | /** | ||
1769 | * schedule_hrtimeout - sleep until timeout | ||
1770 | * @expires: timeout value (ktime_t) | ||
1771 | * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL | ||
1772 | * | ||
1773 | * Make the current task sleep until the given expiry time has | ||
1774 | * elapsed. The routine will return immediately unless | ||
1775 | * the current task state has been set (see set_current_state()). | ||
1776 | * | ||
1777 | * You can set the task state as follows - | ||
1778 | * | ||
1779 | * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to | ||
1780 | * pass before the routine returns. | ||
1781 | * | ||
1782 | * %TASK_INTERRUPTIBLE - the routine may return early if a signal is | ||
1783 | * delivered to the current task. | ||
1784 | * | ||
1785 | * The current task state is guaranteed to be TASK_RUNNING when this | ||
1786 | * routine returns. | ||
1787 | * | ||
1788 | * Returns 0 when the timer has expired otherwise -EINTR | ||
1789 | */ | ||
1790 | int __sched schedule_hrtimeout(ktime_t *expires, | ||
1791 | const enum hrtimer_mode mode) | ||
1792 | { | ||
1793 | return schedule_hrtimeout_range(expires, 0, mode); | ||
1794 | } | ||
1747 | EXPORT_SYMBOL_GPL(schedule_hrtimeout); | 1795 | EXPORT_SYMBOL_GPL(schedule_hrtimeout); |