author     Andrea Bastoni <bastoni@cs.unc.edu>        2011-08-27 09:43:54 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>        2011-08-27 10:06:11 -0400
commit     7b1bb388bc879ffcc6c69b567816d5c354afe42b (patch)
tree       5a217fdfb0b5e5a327bdcd624506337c1ae1fe32 /include/linux/hrtimer.h
parent     7d754596756240fa918b94cd0c3011c77a638987 (diff)
parent     02f8c6aee8df3cdc935e9bdd4f2d020306035dbe (diff)
Merge 'Linux v3.0' into Litmus
Some notes:
* Litmus^RT scheduling class is the topmost scheduling class
(above stop_sched_class).
* scheduler_ipi() function (e.g., in smp_reschedule_interrupt())
may increase IPI latencies.
* Added path into schedule() to quickly re-evaluate the scheduling
decision without becoming preemptible again. This used to be
a standard path before the removal of the BKL.
Conflicts:
Makefile
arch/arm/kernel/calls.S
arch/arm/kernel/smp.c
arch/x86/include/asm/unistd_32.h
arch/x86/kernel/smp.c
arch/x86/kernel/syscall_table_32.S
include/linux/hrtimer.h
kernel/printk.c
kernel/sched.c
kernel/sched_fair.c
Diffstat (limited to 'include/linux/hrtimer.h')
-rw-r--r--   include/linux/hrtimer.h   90
1 files changed, 49 insertions, 41 deletions
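For readers unfamiliar with the timerqueue conversion referenced in the diff below: struct hrtimer drops its private rb_node/_expires pair in favor of a struct timerqueue_node, and struct hrtimer_clock_base replaces its rb_root/first pair with a struct timerqueue_head. The following is a rough sketch of those helper types as they appear in include/linux/timerqueue.h of this kernel series (reproduced from memory; check the tree for the exact layout):

/* Sketch of the timerqueue helper types used by the diff below
 * (approximate; see include/linux/timerqueue.h in this kernel series). */
struct timerqueue_node {
	struct rb_node node;		/* rbtree linkage, was hrtimer.node */
	ktime_t expires;		/* absolute expiry, was hrtimer._expires */
};

struct timerqueue_head {
	struct rb_root head;		/* time-ordered tree, was clock_base.active */
	struct timerqueue_node *next;	/* earliest-expiring node, was clock_base.first */
};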
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 76da541c1f6..d91bba539ca 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -22,7 +22,7 @@
 #include <linux/wait.h>
 #include <linux/percpu.h>
 #include <linux/timer.h>
-
+#include <linux/timerqueue.h>
 
 struct hrtimer_clock_base;
 struct hrtimer_cpu_base;
@@ -54,11 +54,13 @@ enum hrtimer_restart {
  * 0x00		inactive
  * 0x01		enqueued into rbtree
  * 0x02		callback function running
+ * 0x04		timer is migrated to another cpu
  *
  * Special cases:
  * 0x03		callback function running and enqueued
  *		(was requeued on another CPU)
- * 0x09		timer was migrated on CPU hotunplug
+ * 0x05		timer was migrated on CPU hotunplug
+ *
  * The "callback function running and enqueued" status is only possible on
  * SMP. It happens for example when a posix timer expired and the callback
  * queued a signal. Between dropping the lock which protects the posix timer
@@ -67,8 +69,11 @@ enum hrtimer_restart {
  * as otherwise the timer could be removed before the softirq code finishes the
  * the handling of the timer.
  *
- * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state to
- * preserve the HRTIMER_STATE_CALLBACK bit in the above scenario.
+ * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state
+ * to preserve the HRTIMER_STATE_CALLBACK in the above scenario. This
+ * also affects HRTIMER_STATE_MIGRATE where the preservation is not
+ * necessary. HRTIMER_STATE_MIGRATE is cleared after the timer is
+ * enqueued on the new cpu.
  *
  * All state transitions are protected by cpu_base->lock.
  */
@@ -79,8 +84,8 @@ enum hrtimer_restart {
 
 /**
  * struct hrtimer - the basic hrtimer structure
- * @node:	red black tree node for time ordered insertion
- * @_expires:	the absolute expiry time in the hrtimers internal
+ * @node:	timerqueue node, which also manages node.expires,
+ *		the absolute expiry time in the hrtimers internal
  *		representation. The time is related to the clock on
  *		which the timer is based. Is setup by adding
  *		slack to the _softexpires value. For non range timers
@@ -101,8 +106,7 @@ enum hrtimer_restart {
  * The hrtimer structure must be initialized by hrtimer_init()
  */
 struct hrtimer {
-	struct rb_node			node;
-	ktime_t				_expires;
+	struct timerqueue_node		node;
 	ktime_t				_softexpires;
 	enum hrtimer_restart		(*function)(struct hrtimer *);
 	struct hrtimer_clock_base	*base;
@@ -131,8 +135,8 @@ struct hrtimer_sleeper {
  * @cpu_base:		per cpu clock base
  * @index:		clock type index for per_cpu support when moving a
  *			timer to a base on another cpu.
+ * @clockid:		clock id for per_cpu support
  * @active:		red black tree root node for the active timers
- * @first:		pointer to the timer node which expires first
  * @resolution:		the resolution of the clock, in nanoseconds
  * @get_time:		function to retrieve the current time of the clock
  * @softirq_time:	the time when running the hrtimer queue in the softirq
@@ -140,25 +144,27 @@ struct hrtimer_sleeper {
  */
 struct hrtimer_clock_base {
 	struct hrtimer_cpu_base	*cpu_base;
-	clockid_t		index;
-	struct rb_root		active;
-	struct rb_node		*first;
+	int			index;
+	clockid_t		clockid;
+	struct timerqueue_head	active;
 	ktime_t			resolution;
 	ktime_t			(*get_time)(void);
 	ktime_t			softirq_time;
-#ifdef CONFIG_HIGH_RES_TIMERS
 	ktime_t			offset;
-#endif
 };
 
-#define HRTIMER_MAX_CLOCK_BASES 2
+enum hrtimer_base_type {
+	HRTIMER_BASE_MONOTONIC,
+	HRTIMER_BASE_REALTIME,
+	HRTIMER_BASE_BOOTTIME,
+	HRTIMER_MAX_CLOCK_BASES,
+};
 
 /*
  * struct hrtimer_cpu_base - the per cpu clock bases
  * @lock:		lock protecting the base and associated clock bases
  *			and timers
- * @clock_base:		array of clock bases for this cpu
- * @curr_timer:		the timer which is executing a callback right now
+ * @active_bases:	Bitfield to mark bases with active timers
  * @expires_next:	absolute time of the next event which was scheduled
  *			via clock_set_next_event()
  * @hres_active:	State of high resolution mode
@@ -167,11 +173,12 @@ struct hrtimer_clock_base {
  * @nr_retries:		Total number of hrtimer interrupt retries
  * @nr_hangs:		Total number of hrtimer interrupt hangs
  * @max_hang_time:	Maximum time spent in hrtimer_interrupt
+ * @clock_base:		array of clock bases for this cpu
  * @to_pull:		LITMUS^RT list of timers to be pulled on this cpu
  */
 struct hrtimer_cpu_base {
 	raw_spinlock_t			lock;
-	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
+	unsigned long			active_bases;
 #ifdef CONFIG_HIGH_RES_TIMERS
 	ktime_t				expires_next;
 	int				hres_active;
@@ -181,6 +188,7 @@ struct hrtimer_cpu_base {
 	unsigned long			nr_hangs;
 	ktime_t				max_hang_time;
 #endif
+	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
 	struct list_head		to_pull;
 };
 
@@ -209,43 +217,43 @@ struct hrtimer_start_on_info {
 
 static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
 {
-	timer->_expires = time;
+	timer->node.expires = time;
 	timer->_softexpires = time;
 }
 
 static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta)
 {
 	timer->_softexpires = time;
-	timer->_expires = ktime_add_safe(time, delta);
+	timer->node.expires = ktime_add_safe(time, delta);
 }
 
 static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta)
 {
 	timer->_softexpires = time;
-	timer->_expires = ktime_add_safe(time, ns_to_ktime(delta));
+	timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
 }
 
 static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
 {
-	timer->_expires.tv64 = tv64;
+	timer->node.expires.tv64 = tv64;
 	timer->_softexpires.tv64 = tv64;
 }
 
 static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
 {
-	timer->_expires = ktime_add_safe(timer->_expires, time);
+	timer->node.expires = ktime_add_safe(timer->node.expires, time);
 	timer->_softexpires = ktime_add_safe(timer->_softexpires, time);
 }
 
 static inline void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns)
 {
-	timer->_expires = ktime_add_ns(timer->_expires, ns);
+	timer->node.expires = ktime_add_ns(timer->node.expires, ns);
 	timer->_softexpires = ktime_add_ns(timer->_softexpires, ns);
 }
 
 static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer)
 {
-	return timer->_expires;
+	return timer->node.expires;
 }
 
 static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
@@ -255,7 +263,7 @@ static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
 
 static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
 {
-	return timer->_expires.tv64;
+	return timer->node.expires.tv64;
 }
 static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
 {
@@ -264,19 +272,17 @@ static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
 
 static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
 {
-	return ktime_to_ns(timer->_expires);
+	return ktime_to_ns(timer->node.expires);
 }
 
 static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
 {
-	return ktime_sub(timer->_expires, timer->base->get_time());
+	return ktime_sub(timer->node.expires, timer->base->get_time());
 }
 
 #ifdef CONFIG_HIGH_RES_TIMERS
 struct clock_event_device;
 
-extern void clock_was_set(void);
-extern void hres_timers_resume(void);
 extern void hrtimer_interrupt(struct clock_event_device *dev);
 
 /*
@@ -310,16 +316,8 @@ extern void hrtimer_peek_ahead_timers(void);
 # define MONOTONIC_RES_NSEC	LOW_RES_NSEC
 # define KTIME_MONOTONIC_RES	KTIME_LOW_RES
 
-/*
- * clock_was_set() is a NOP for non- high-resolution systems. The
- * time-sorted order guarantees that a timer does not expire early and
- * is expired in the next softirq when the clock was advanced.
- */
-static inline void clock_was_set(void) { }
 static inline void hrtimer_peek_ahead_timers(void) { }
 
-static inline void hres_timers_resume(void) { }
-
 /*
  * In non high resolution mode the time reference is taken from
  * the base softirq time variable.
@@ -335,9 +333,18 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
 }
 #endif
 
+extern void clock_was_set(void);
+#ifdef CONFIG_TIMERFD
+extern void timerfd_clock_was_set(void);
+#else
+static inline void timerfd_clock_was_set(void) { }
+#endif
+extern void hrtimers_resume(void);
+
 extern ktime_t ktime_get(void);
 extern ktime_t ktime_get_real(void);
-
+extern ktime_t ktime_get_boottime(void);
+extern ktime_t ktime_get_monotonic_offset(void);
 
 DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
 
@@ -406,8 +413,9 @@ extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
 extern ktime_t hrtimer_get_next_event(void);
 
 /*
- * A timer is active, when it is enqueued into the rbtree or the callback
- * function is running.
+ * A timer is active, when it is enqueued into the rbtree or the
+ * callback function is running or it's in the state of being migrated
+ * to another cpu.
  */
 static inline int hrtimer_active(const struct hrtimer *timer)
 {
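The expiry-accessor changes above are purely internal: hrtimer users set and read expiry times through the inline helpers, so moving the storage from timer->_expires to timer->node.expires does not change the API. A minimal, hypothetical usage sketch follows (the timer name, callback, and 100 ms interval are illustrative only, not part of this commit):

#include <linux/kernel.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_timer;		/* hypothetical example timer */

static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
{
	/* The accessor hides where the expiry time is stored
	 * (timer->node.expires after this change). */
	ktime_t expires = hrtimer_get_expires(t);

	pr_info("hrtimer expired at %lld ns\n", (long long)ktime_to_ns(expires));
	return HRTIMER_NORESTART;
}

static void example_timer_arm(void)
{
	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	example_timer.function = example_timer_fn;
	/* Fire roughly 100 ms from now, relative to CLOCK_MONOTONIC. */
	hrtimer_start(&example_timer, ns_to_ktime(100 * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
}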