 include/linux/hrtimer.h | 36
 kernel/hrtimer.c        | 40
 2 files changed, 67 insertions(+), 9 deletions(-)
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index e00fc4d3d74f..d8cdac2d28d6 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -40,6 +40,34 @@ enum hrtimer_restart {
         HRTIMER_RESTART,  /* Timer must be restarted */
 };
 
+/*
+ * Bit values to track the state of the timer
+ *
+ * Possible states:
+ *
+ * 0x00    inactive
+ * 0x01    enqueued into rbtree
+ * 0x02    callback function running
+ * 0x03    callback function running and enqueued
+ *         (was requeued on another CPU)
+ *
+ * The "callback function running and enqueued" status is only possible on
+ * SMP. It happens for example when a posix timer expired and the callback
+ * queued a signal. Between dropping the lock which protects the posix timer
+ * and reacquiring the base lock of the hrtimer, another CPU can deliver the
+ * signal and rearm the timer. We have to preserve the callback running state,
+ * as otherwise the timer could be removed before the softirq code finishes
+ * the handling of the timer.
+ *
+ * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state to
+ * preserve the HRTIMER_STATE_CALLBACK bit in the above scenario.
+ *
+ * All state transitions are protected by cpu_base->lock.
+ */
+#define HRTIMER_STATE_INACTIVE 0x00
+#define HRTIMER_STATE_ENQUEUED 0x01
+#define HRTIMER_STATE_CALLBACK 0x02
+
 /**
  * struct hrtimer - the basic hrtimer structure
  * @node: red black tree node for time ordered insertion
@@ -48,6 +76,7 @@ enum hrtimer_restart {
  *        which the timer is based.
  * @function: timer expiry callback function
  * @base: pointer to the timer base (per cpu and per clock)
+ * @state: state information (See bit values above)
  *
  * The hrtimer structure must be initialized by init_hrtimer_#CLOCKTYPE()
  */
@@ -56,6 +85,7 @@ struct hrtimer {
         ktime_t expires;
         enum hrtimer_restart (*function)(struct hrtimer *);
         struct hrtimer_clock_base *base;
+        unsigned long state;
 };
 
 /**
@@ -141,9 +171,13 @@ extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
 extern ktime_t hrtimer_get_next_event(void);
 #endif
 
+/*
+ * A timer is active when it is enqueued into the rbtree or the callback
+ * function is running.
+ */
 static inline int hrtimer_active(const struct hrtimer *timer)
 {
-        return rb_parent(&timer->node) != &timer->node;
+        return timer->state != HRTIMER_STATE_INACTIVE;
 }
 
 /* Forward a hrtimer so it expires after now: */
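Not part of the patch: a minimal userspace sketch, reusing only the bit values defined above, to show how the four states listed in the new comment map onto the two bits and onto the active/queued/callback-running checks this change introduces. The struct and function names below are stand-ins for illustration, not kernel code.

#include <stdio.h>

/* Bit values copied from the patch; struct fake_timer is an illustrative
 * stand-in, not the real struct hrtimer. */
#define HRTIMER_STATE_INACTIVE 0x00
#define HRTIMER_STATE_ENQUEUED 0x01
#define HRTIMER_STATE_CALLBACK 0x02

struct fake_timer {
        unsigned long state;
};

/* The predicates reduce to a plain compare and two bit tests. */
static int is_active(const struct fake_timer *t)
{
        return t->state != HRTIMER_STATE_INACTIVE;
}

static int is_queued(const struct fake_timer *t)
{
        return t->state & HRTIMER_STATE_ENQUEUED;
}

static int callback_running(const struct fake_timer *t)
{
        return t->state & HRTIMER_STATE_CALLBACK;
}

int main(void)
{
        /* Walk through the four states listed in the comment block. */
        unsigned long states[] = { 0x00, 0x01, 0x02, 0x03 };
        unsigned int i;

        for (i = 0; i < 4; i++) {
                struct fake_timer t = { .state = states[i] };

                printf("state=0x%02lx active=%d queued=%d callback=%d\n",
                       t.state, is_active(&t), is_queued(&t),
                       callback_running(&t));
        }
        return 0;
}

Note how "active" is true for all three non-zero states, while "queued" and "callback running" each track a single bit; this is exactly the distinction the kernel/hrtimer.c changes below rely on.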
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 89a9f535b4ce..fee18b27252f 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -150,6 +150,23 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
 }
 
 /*
+ * Helper function to check whether the timer is on one of the queues
+ */
+static inline int hrtimer_is_queued(struct hrtimer *timer)
+{
+        return timer->state & HRTIMER_STATE_ENQUEUED;
+}
+
+/*
+ * Helper function to check whether the timer is running the callback
+ * function
+ */
+static inline int hrtimer_callback_running(struct hrtimer *timer)
+{
+        return timer->state & HRTIMER_STATE_CALLBACK;
+}
+
+/*
  * Functions and macros which are different for UP/SMP systems are kept in a
  * single place
  */
@@ -390,6 +407,11 @@ static void enqueue_hrtimer(struct hrtimer *timer,
          */
         rb_link_node(&timer->node, parent, link);
         rb_insert_color(&timer->node, &base->active);
+        /*
+         * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
+         * state of a possibly running callback.
+         */
+        timer->state |= HRTIMER_STATE_ENQUEUED;
 
         if (!base->first || timer->expires.tv64 <
             rb_entry(base->first, struct hrtimer, node)->expires.tv64)
@@ -402,7 +424,8 @@ static void enqueue_hrtimer(struct hrtimer *timer,
  * Caller must hold the base lock.
  */
 static void __remove_hrtimer(struct hrtimer *timer,
-                             struct hrtimer_clock_base *base)
+                             struct hrtimer_clock_base *base,
+                             unsigned long newstate)
 {
         /*
          * Remove the timer from the rbtree and replace the
@@ -411,7 +434,7 @@ static void __remove_hrtimer(struct hrtimer *timer,
         if (base->first == &timer->node)
                 base->first = rb_next(&timer->node);
         rb_erase(&timer->node, &base->active);
-        rb_set_parent(&timer->node, &timer->node);
+        timer->state = newstate;
 }
 
 /*
@@ -420,8 +443,8 @@ static void __remove_hrtimer(struct hrtimer *timer,
 static inline int
 remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
 {
-        if (hrtimer_active(timer)) {
-                __remove_hrtimer(timer, base);
+        if (hrtimer_is_queued(timer)) {
+                __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE);
                 return 1;
         }
         return 0;
@@ -493,7 +516,7 @@ int hrtimer_try_to_cancel(struct hrtimer *timer)
 
         base = lock_hrtimer_base(timer, &flags);
 
-        if (base->cpu_base->curr_timer != timer)
+        if (!hrtimer_callback_running(timer))
                 ret = remove_hrtimer(timer, base);
 
         unlock_hrtimer_base(timer, &flags);
@@ -598,7 +621,6 @@ void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
                 clock_id = CLOCK_MONOTONIC;
 
         timer->base = &cpu_base->clock_base[clock_id];
-        rb_set_parent(&timer->node, &timer->node);
 }
 EXPORT_SYMBOL_GPL(hrtimer_init);
 
@@ -649,13 +671,14 @@ static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
 
                 fn = timer->function;
                 set_curr_timer(cpu_base, timer);
-                __remove_hrtimer(timer, base);
+                __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK);
                 spin_unlock_irq(&cpu_base->lock);
 
                 restart = fn(timer);
 
                 spin_lock_irq(&cpu_base->lock);
 
+                timer->state &= ~HRTIMER_STATE_CALLBACK;
                 if (restart != HRTIMER_NORESTART) {
                         BUG_ON(hrtimer_active(timer));
                         enqueue_hrtimer(timer, base);
@@ -826,7 +849,8 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
 
         while ((node = rb_first(&old_base->active))) {
                 timer = rb_entry(node, struct hrtimer, node);
-                __remove_hrtimer(timer, old_base);
+                BUG_ON(timer->state & HRTIMER_STATE_CALLBACK);
+                __remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE);
                 timer->base = new_base;
                 enqueue_hrtimer(timer, new_base);
         }
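Also not part of the patch: the reason __remove_hrtimer() now takes a newstate argument, and enqueue_hrtimer() or's HRTIMER_STATE_ENQUEUED into the state instead of assigning it, is the SMP requeue scenario from the header comment. A toy userspace model of those transitions follows; the names and the simplified single-threaded flow are illustrative assumptions, not kernel code.

#include <assert.h>
#include <stdio.h>

#define HRTIMER_STATE_INACTIVE 0x00
#define HRTIMER_STATE_ENQUEUED 0x01
#define HRTIMER_STATE_CALLBACK 0x02

/* Toy stand-in for struct hrtimer, tracking only the state word. */
struct toy_timer {
        unsigned long state;
};

/* Mirrors the patched enqueue_hrtimer(): the bit is or'ed so a
 * CALLBACK bit set by the expiry path is not clobbered. */
static void toy_enqueue(struct toy_timer *t)
{
        t->state |= HRTIMER_STATE_ENQUEUED;
}

/* Mirrors the patched __remove_hrtimer(): the caller chooses which
 * state the timer is left in. */
static void toy_remove(struct toy_timer *t, unsigned long newstate)
{
        t->state = newstate;
}

int main(void)
{
        struct toy_timer t = { HRTIMER_STATE_INACTIVE };

        /* Normal arm/expire cycle. */
        toy_enqueue(&t);                        /* 0x01: enqueued */
        toy_remove(&t, HRTIMER_STATE_CALLBACK); /* 0x02: callback running */

        /* Meanwhile another CPU rearms the timer, as in the posix-timer
         * signal delivery example from the comment. */
        toy_enqueue(&t);                        /* 0x03: running and enqueued */
        assert(t.state == (HRTIMER_STATE_ENQUEUED | HRTIMER_STATE_CALLBACK));

        /* The softirq finishes the handler and drops only the CALLBACK
         * bit, as run_hrtimer_queue() does after calling fn(timer). */
        t.state &= ~HRTIMER_STATE_CALLBACK;     /* back to 0x01 */
        printf("final state=0x%02lx (still enqueued)\n", t.state);
        return 0;
}

If the remove path had reset the state unconditionally, the concurrent requeue would have been lost and hrtimer_try_to_cancel() could have removed the timer while its callback was still running; keeping the CALLBACK bit across the requeue is what the new state word buys over the old rb_set_parent() trick.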