diff options
Diffstat (limited to 'include/linux/hrtimer.h')
| -rw-r--r-- | include/linux/hrtimer.h | 167 |
1 file changed, 90 insertions, 77 deletions
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 05f6df1fdf5b..76dd4f0da5ca 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h | |||
| @@ -53,34 +53,25 @@ enum hrtimer_restart { | |||
| 53 | * | 53 | * |
| 54 | * 0x00 inactive | 54 | * 0x00 inactive |
| 55 | * 0x01 enqueued into rbtree | 55 | * 0x01 enqueued into rbtree |
| 56 | * 0x02 callback function running | ||
| 57 | * 0x04 timer is migrated to another cpu | ||
| 58 | * | 56 | * |
| 59 | * Special cases: | 57 | * The callback state is not part of the timer->state because clearing it would |
| 60 | * 0x03 callback function running and enqueued | 58 | * mean touching the timer after the callback, this makes it impossible to free |
| 61 | * (was requeued on another CPU) | 59 | * the timer from the callback function. |
| 62 | * 0x05 timer was migrated on CPU hotunplug | ||
| 63 | * | 60 | * |
| 64 | * The "callback function running and enqueued" status is only possible on | 61 | * Therefore we track the callback state in: |
| 65 | * SMP. It happens for example when a posix timer expired and the callback | 62 | * |
| 63 | * timer->base->cpu_base->running == timer | ||
| 64 | * | ||
| 65 | * On SMP it is possible to have a "callback function running and enqueued" | ||
| 66 | * status. It happens for example when a posix timer expired and the callback | ||
| 66 | * queued a signal. Between dropping the lock which protects the posix timer | 67 | * queued a signal. Between dropping the lock which protects the posix timer |
| 67 | * and reacquiring the base lock of the hrtimer, another CPU can deliver the | 68 | * and reacquiring the base lock of the hrtimer, another CPU can deliver the |
| 68 | * signal and rearm the timer. We have to preserve the callback running state, | 69 | * signal and rearm the timer. |
| 69 | * as otherwise the timer could be removed before the softirq code finishes the | ||
| 70 | * the handling of the timer. | ||
| 71 | * | ||
| 72 | * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state | ||
| 73 | * to preserve the HRTIMER_STATE_CALLBACK in the above scenario. This | ||
| 74 | * also affects HRTIMER_STATE_MIGRATE where the preservation is not | ||
| 75 | * necessary. HRTIMER_STATE_MIGRATE is cleared after the timer is | ||
| 76 | * enqueued on the new cpu. | ||
| 77 | * | 70 | * |
| 78 | * All state transitions are protected by cpu_base->lock. | 71 | * All state transitions are protected by cpu_base->lock. |
| 79 | */ | 72 | */ |
| 80 | #define HRTIMER_STATE_INACTIVE 0x00 | 73 | #define HRTIMER_STATE_INACTIVE 0x00 |
| 81 | #define HRTIMER_STATE_ENQUEUED 0x01 | 74 | #define HRTIMER_STATE_ENQUEUED 0x01 |
| 82 | #define HRTIMER_STATE_CALLBACK 0x02 | ||
| 83 | #define HRTIMER_STATE_MIGRATE 0x04 | ||
| 84 | 75 | ||
| 85 | /** | 76 | /** |
| 86 | * struct hrtimer - the basic hrtimer structure | 77 | * struct hrtimer - the basic hrtimer structure |
| @@ -130,6 +121,12 @@ struct hrtimer_sleeper { | |||
| 130 | struct task_struct *task; | 121 | struct task_struct *task; |
| 131 | }; | 122 | }; |
| 132 | 123 | ||
| 124 | #ifdef CONFIG_64BIT | ||
| 125 | # define HRTIMER_CLOCK_BASE_ALIGN 64 | ||
| 126 | #else | ||
| 127 | # define HRTIMER_CLOCK_BASE_ALIGN 32 | ||
| 128 | #endif | ||
| 129 | |||
| 133 | /** | 130 | /** |
| 134 | * struct hrtimer_clock_base - the timer base for a specific clock | 131 | * struct hrtimer_clock_base - the timer base for a specific clock |
| 135 | * @cpu_base: per cpu clock base | 132 | * @cpu_base: per cpu clock base |
| @@ -137,9 +134,7 @@ struct hrtimer_sleeper { | |||
| 137 | * timer to a base on another cpu. | 134 | * timer to a base on another cpu. |
| 138 | * @clockid: clock id for per_cpu support | 135 | * @clockid: clock id for per_cpu support |
| 139 | * @active: red black tree root node for the active timers | 136 | * @active: red black tree root node for the active timers |
| 140 | * @resolution: the resolution of the clock, in nanoseconds | ||
| 141 | * @get_time: function to retrieve the current time of the clock | 137 | * @get_time: function to retrieve the current time of the clock |
| 142 | * @softirq_time: the time when running the hrtimer queue in the softirq | ||
| 143 | * @offset: offset of this clock to the monotonic base | 138 | * @offset: offset of this clock to the monotonic base |
| 144 | */ | 139 | */ |
| 145 | struct hrtimer_clock_base { | 140 | struct hrtimer_clock_base { |
| @@ -147,11 +142,9 @@ struct hrtimer_clock_base { | |||
| 147 | int index; | 142 | int index; |
| 148 | clockid_t clockid; | 143 | clockid_t clockid; |
| 149 | struct timerqueue_head active; | 144 | struct timerqueue_head active; |
| 150 | ktime_t resolution; | ||
| 151 | ktime_t (*get_time)(void); | 145 | ktime_t (*get_time)(void); |
| 152 | ktime_t softirq_time; | ||
| 153 | ktime_t offset; | 146 | ktime_t offset; |
| 154 | }; | 147 | } __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN))); |
| 155 | 148 | ||
| 156 | enum hrtimer_base_type { | 149 | enum hrtimer_base_type { |
| 157 | HRTIMER_BASE_MONOTONIC, | 150 | HRTIMER_BASE_MONOTONIC, |
| @@ -165,11 +158,16 @@ enum hrtimer_base_type { | |||
| 165 | * struct hrtimer_cpu_base - the per cpu clock bases | 158 | * struct hrtimer_cpu_base - the per cpu clock bases |
| 166 | * @lock: lock protecting the base and associated clock bases | 159 | * @lock: lock protecting the base and associated clock bases |
| 167 | * and timers | 160 | * and timers |
| 161 | * @seq: seqcount around __run_hrtimer | ||
| 162 | * @running: pointer to the currently running hrtimer | ||
| 168 | * @cpu: cpu number | 163 | * @cpu: cpu number |
| 169 | * @active_bases: Bitfield to mark bases with active timers | 164 | * @active_bases: Bitfield to mark bases with active timers |
| 170 | * @clock_was_set: Indicates that clock was set from irq context. | 165 | * @clock_was_set_seq: Sequence counter of clock was set events |
| 166 | * @migration_enabled: The migration of hrtimers to other cpus is enabled | ||
| 167 | * @nohz_active: The nohz functionality is enabled | ||
| 171 | * @expires_next: absolute time of the next event which was scheduled | 168 | * @expires_next: absolute time of the next event which was scheduled |
| 172 | * via clock_set_next_event() | 169 | * via clock_set_next_event() |
| 170 | * @next_timer: Pointer to the first expiring timer | ||
| 173 | * @in_hrtirq: hrtimer_interrupt() is currently executing | 171 | * @in_hrtirq: hrtimer_interrupt() is currently executing |
| 174 | * @hres_active: State of high resolution mode | 172 | * @hres_active: State of high resolution mode |
| 175 | * @hang_detected: The last hrtimer interrupt detected a hang | 173 | * @hang_detected: The last hrtimer interrupt detected a hang |
| @@ -178,27 +176,38 @@ enum hrtimer_base_type { | |||
| 178 | * @nr_hangs: Total number of hrtimer interrupt hangs | 176 | * @nr_hangs: Total number of hrtimer interrupt hangs |
| 179 | * @max_hang_time: Maximum time spent in hrtimer_interrupt | 177 | * @max_hang_time: Maximum time spent in hrtimer_interrupt |
| 180 | * @clock_base: array of clock bases for this cpu | 178 | * @clock_base: array of clock bases for this cpu |
| 179 | * | ||
| 180 | * Note: next_timer is just an optimization for __remove_hrtimer(). | ||
| 181 | * Do not dereference the pointer because it is not reliable on | ||
| 182 | * cross cpu removals. | ||
| 181 | */ | 183 | */ |
| 182 | struct hrtimer_cpu_base { | 184 | struct hrtimer_cpu_base { |
| 183 | raw_spinlock_t lock; | 185 | raw_spinlock_t lock; |
| 186 | seqcount_t seq; | ||
| 187 | struct hrtimer *running; | ||
| 184 | unsigned int cpu; | 188 | unsigned int cpu; |
| 185 | unsigned int active_bases; | 189 | unsigned int active_bases; |
| 186 | unsigned int clock_was_set; | 190 | unsigned int clock_was_set_seq; |
| 191 | bool migration_enabled; | ||
| 192 | bool nohz_active; | ||
| 187 | #ifdef CONFIG_HIGH_RES_TIMERS | 193 | #ifdef CONFIG_HIGH_RES_TIMERS |
| 194 | unsigned int in_hrtirq : 1, | ||
| 195 | hres_active : 1, | ||
| 196 | hang_detected : 1; | ||
| 188 | ktime_t expires_next; | 197 | ktime_t expires_next; |
| 189 | int in_hrtirq; | 198 | struct hrtimer *next_timer; |
| 190 | int hres_active; | 199 | unsigned int nr_events; |
| 191 | int hang_detected; | 200 | unsigned int nr_retries; |
| 192 | unsigned long nr_events; | 201 | unsigned int nr_hangs; |
| 193 | unsigned long nr_retries; | 202 | unsigned int max_hang_time; |
| 194 | unsigned long nr_hangs; | ||
| 195 | ktime_t max_hang_time; | ||
| 196 | #endif | 203 | #endif |
| 197 | struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; | 204 | struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; |
| 198 | }; | 205 | } ____cacheline_aligned; |
| 199 | 206 | ||
| 200 | static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) | 207 | static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) |
| 201 | { | 208 | { |
| 209 | BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN); | ||
| 210 | |||
| 202 | timer->node.expires = time; | 211 | timer->node.expires = time; |
| 203 | timer->_softexpires = time; | 212 | timer->_softexpires = time; |
| 204 | } | 213 | } |
| @@ -262,19 +271,16 @@ static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer) | |||
| 262 | return ktime_sub(timer->node.expires, timer->base->get_time()); | 271 | return ktime_sub(timer->node.expires, timer->base->get_time()); |
| 263 | } | 272 | } |
| 264 | 273 | ||
| 265 | #ifdef CONFIG_HIGH_RES_TIMERS | ||
| 266 | struct clock_event_device; | ||
| 267 | |||
| 268 | extern void hrtimer_interrupt(struct clock_event_device *dev); | ||
| 269 | |||
| 270 | /* | ||
| 271 | * In high resolution mode the time reference must be read accurate | ||
| 272 | */ | ||
| 273 | static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer) | 274 | static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer) |
| 274 | { | 275 | { |
| 275 | return timer->base->get_time(); | 276 | return timer->base->get_time(); |
| 276 | } | 277 | } |
| 277 | 278 | ||
| 279 | #ifdef CONFIG_HIGH_RES_TIMERS | ||
| 280 | struct clock_event_device; | ||
| 281 | |||
| 282 | extern void hrtimer_interrupt(struct clock_event_device *dev); | ||
| 283 | |||
| 278 | static inline int hrtimer_is_hres_active(struct hrtimer *timer) | 284 | static inline int hrtimer_is_hres_active(struct hrtimer *timer) |
| 279 | { | 285 | { |
| 280 | return timer->base->cpu_base->hres_active; | 286 | return timer->base->cpu_base->hres_active; |
| @@ -295,21 +301,16 @@ extern void hrtimer_peek_ahead_timers(void); | |||
| 295 | 301 | ||
| 296 | extern void clock_was_set_delayed(void); | 302 | extern void clock_was_set_delayed(void); |
| 297 | 303 | ||
| 304 | extern unsigned int hrtimer_resolution; | ||
| 305 | |||
| 298 | #else | 306 | #else |
| 299 | 307 | ||
| 300 | # define MONOTONIC_RES_NSEC LOW_RES_NSEC | 308 | # define MONOTONIC_RES_NSEC LOW_RES_NSEC |
| 301 | # define KTIME_MONOTONIC_RES KTIME_LOW_RES | 309 | # define KTIME_MONOTONIC_RES KTIME_LOW_RES |
| 302 | 310 | ||
| 303 | static inline void hrtimer_peek_ahead_timers(void) { } | 311 | #define hrtimer_resolution (unsigned int)LOW_RES_NSEC |
| 304 | 312 | ||
| 305 | /* | 313 | static inline void hrtimer_peek_ahead_timers(void) { } |
| 306 | * In non high resolution mode the time reference is taken from | ||
| 307 | * the base softirq time variable. | ||
| 308 | */ | ||
| 309 | static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer) | ||
| 310 | { | ||
| 311 | return timer->base->softirq_time; | ||
| 312 | } | ||
| 313 | 314 | ||
| 314 | static inline int hrtimer_is_hres_active(struct hrtimer *timer) | 315 | static inline int hrtimer_is_hres_active(struct hrtimer *timer) |
| 315 | { | 316 | { |
| @@ -353,49 +354,47 @@ static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { } | |||
| 353 | #endif | 354 | #endif |
| 354 | 355 | ||
| 355 | /* Basic timer operations: */ | 356 | /* Basic timer operations: */ |
| 356 | extern int hrtimer_start(struct hrtimer *timer, ktime_t tim, | 357 | extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, |
| 357 | const enum hrtimer_mode mode); | ||
| 358 | extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, | ||
| 359 | unsigned long range_ns, const enum hrtimer_mode mode); | 358 | unsigned long range_ns, const enum hrtimer_mode mode); |
| 360 | extern int | 359 | |
| 361 | __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, | 360 | /** |
| 362 | unsigned long delta_ns, | 361 | * hrtimer_start - (re)start an hrtimer on the current CPU |
| 363 | const enum hrtimer_mode mode, int wakeup); | 362 | * @timer: the timer to be added |
| 363 | * @tim: expiry time | ||
| 364 | * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or | ||
| 365 | * relative (HRTIMER_MODE_REL) | ||
| 366 | */ | ||
| 367 | static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim, | ||
| 368 | const enum hrtimer_mode mode) | ||
| 369 | { | ||
| 370 | hrtimer_start_range_ns(timer, tim, 0, mode); | ||
| 371 | } | ||
| 364 | 372 | ||
| 365 | extern int hrtimer_cancel(struct hrtimer *timer); | 373 | extern int hrtimer_cancel(struct hrtimer *timer); |
| 366 | extern int hrtimer_try_to_cancel(struct hrtimer *timer); | 374 | extern int hrtimer_try_to_cancel(struct hrtimer *timer); |
| 367 | 375 | ||
| 368 | static inline int hrtimer_start_expires(struct hrtimer *timer, | 376 | static inline void hrtimer_start_expires(struct hrtimer *timer, |
| 369 | enum hrtimer_mode mode) | 377 | enum hrtimer_mode mode) |
| 370 | { | 378 | { |
| 371 | unsigned long delta; | 379 | unsigned long delta; |
| 372 | ktime_t soft, hard; | 380 | ktime_t soft, hard; |
| 373 | soft = hrtimer_get_softexpires(timer); | 381 | soft = hrtimer_get_softexpires(timer); |
| 374 | hard = hrtimer_get_expires(timer); | 382 | hard = hrtimer_get_expires(timer); |
| 375 | delta = ktime_to_ns(ktime_sub(hard, soft)); | 383 | delta = ktime_to_ns(ktime_sub(hard, soft)); |
| 376 | return hrtimer_start_range_ns(timer, soft, delta, mode); | 384 | hrtimer_start_range_ns(timer, soft, delta, mode); |
| 377 | } | 385 | } |
| 378 | 386 | ||
| 379 | static inline int hrtimer_restart(struct hrtimer *timer) | 387 | static inline void hrtimer_restart(struct hrtimer *timer) |
| 380 | { | 388 | { |
| 381 | return hrtimer_start_expires(timer, HRTIMER_MODE_ABS); | 389 | hrtimer_start_expires(timer, HRTIMER_MODE_ABS); |
| 382 | } | 390 | } |
| 383 | 391 | ||
| 384 | /* Query timers: */ | 392 | /* Query timers: */ |
| 385 | extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer); | 393 | extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer); |
| 386 | extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp); | ||
| 387 | 394 | ||
| 388 | extern ktime_t hrtimer_get_next_event(void); | 395 | extern u64 hrtimer_get_next_event(void); |
| 389 | 396 | ||
| 390 | /* | 397 | extern bool hrtimer_active(const struct hrtimer *timer); |
| 391 | * A timer is active, when it is enqueued into the rbtree or the | ||
| 392 | * callback function is running or it's in the state of being migrated | ||
| 393 | * to another cpu. | ||
| 394 | */ | ||
| 395 | static inline int hrtimer_active(const struct hrtimer *timer) | ||
| 396 | { | ||
| 397 | return timer->state != HRTIMER_STATE_INACTIVE; | ||
| 398 | } | ||
| 399 | 398 | ||
| 400 | /* | 399 | /* |
| 401 | * Helper function to check, whether the timer is on one of the queues | 400 | * Helper function to check, whether the timer is on one of the queues |
| @@ -411,14 +410,29 @@ static inline int hrtimer_is_queued(struct hrtimer *timer) | |||
| 411 | */ | 410 | */ |
| 412 | static inline int hrtimer_callback_running(struct hrtimer *timer) | 411 | static inline int hrtimer_callback_running(struct hrtimer *timer) |
| 413 | { | 412 | { |
| 414 | return timer->state & HRTIMER_STATE_CALLBACK; | 413 | return timer->base->cpu_base->running == timer; |
| 415 | } | 414 | } |
| 416 | 415 | ||
| 417 | /* Forward a hrtimer so it expires after now: */ | 416 | /* Forward a hrtimer so it expires after now: */ |
| 418 | extern u64 | 417 | extern u64 |
| 419 | hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval); | 418 | hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval); |
| 420 | 419 | ||
| 421 | /* Forward a hrtimer so it expires after the hrtimer's current now */ | 420 | /** |
| 421 | * hrtimer_forward_now - forward the timer expiry so it expires after now | ||
| 422 | * @timer: hrtimer to forward | ||
| 423 | * @interval: the interval to forward | ||
| 424 | * | ||
| 425 | * Forward the timer expiry so it will expire after the current time | ||
| 426 | * of the hrtimer clock base. Returns the number of overruns. | ||
| 427 | * | ||
| 428 | * Can be safely called from the callback function of @timer. If | ||
| 429 | * called from other contexts @timer must neither be enqueued nor | ||
| 430 | * running the callback and the caller needs to take care of | ||
| 431 | * serialization. | ||
| 432 | * | ||
| 433 | * Note: This only updates the timer expiry value and does not requeue | ||
| 434 | * the timer. | ||
| 435 | */ | ||
| 422 | static inline u64 hrtimer_forward_now(struct hrtimer *timer, | 436 | static inline u64 hrtimer_forward_now(struct hrtimer *timer, |
| 423 | ktime_t interval) | 437 | ktime_t interval) |
| 424 | { | 438 | { |
| @@ -443,7 +457,6 @@ extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); | |||
| 443 | 457 | ||
| 444 | /* Soft interrupt function to run the hrtimer queues: */ | 458 | /* Soft interrupt function to run the hrtimer queues: */ |
| 445 | extern void hrtimer_run_queues(void); | 459 | extern void hrtimer_run_queues(void); |
| 446 | extern void hrtimer_run_pending(void); | ||
| 447 | 460 | ||
| 448 | /* Bootup initialization: */ | 461 | /* Bootup initialization: */ |
| 449 | extern void __init hrtimers_init(void); | 462 | extern void __init hrtimers_init(void); |
