author    | Thomas Gleixner <tglx@linutronix.de>  | 2006-01-09 23:52:32 -0500
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-01-10 11:01:37 -0500
commit    | c0a3132963db68f1fbbd0e316b73de100fee3f08 (patch)
tree      | cdd76aacf0ca7ae7780696a06bf9643f8b245ba1 /kernel/hrtimer.c
parent    | 97fc79f97b1111c80010d34ee66312b88f531e41 (diff)
[PATCH] hrtimer: hrtimer core code
hrtimer subsystem core. It is initialized at bootup and its timers are
expired from the timer interrupt, but it is not otherwise used by any
other subsystem yet.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r-- | kernel/hrtimer.c | 679
1 file changed, 679 insertions, 0 deletions
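Nothing in this commit arms a timer yet; for orientation, here is a minimal
sketch of how a client of the new API would do so, using only functions and
field names that appear in the patch below (the enclosing driver code and the
name my_timeout are hypothetical):

	#include <linux/hrtimer.h>

	/* Callback; the return value tells the expiry code whether to requeue. */
	static int my_timeout(void *data)
	{
		/* ... handle the expiry ... */
		return HRTIMER_NORESTART;	/* one-shot, do not restart */
	}

	static struct hrtimer my_timer;

	static void arm_example(void)
	{
		hrtimer_init(&my_timer, CLOCK_MONOTONIC);
		my_timer.function = my_timeout;
		my_timer.data = NULL;		/* passed back to my_timeout() */
		/* Expire 100 us from now; HRTIMER_ABS would take an absolute time. */
		hrtimer_start(&my_timer, ktime_set(0, 100000), HRTIMER_REL);
	}

A NULL ->function is also legal: the expiry code then treats ->data as the
task to wake and never restarts the timer (see run_hrtimer_queue() below).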
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
new file mode 100644
index 000000000000..690efd9d9adf
--- /dev/null
+++ b/kernel/hrtimer.c
@@ -0,0 +1,679 @@
/*
 * linux/kernel/hrtimer.c
 *
 * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
 *
 * High-resolution kernel timers
 *
 * In contrast to the low-resolution timeout API implemented in
 * kernel/timer.c, hrtimers provide finer resolution and accuracy
 * depending on system configuration and capabilities.
 *
 * These timers are currently used for:
 *  - itimers
 *  - POSIX timers
 *  - nanosleep
 *  - precise in-kernel timing
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * Credits:
 *	based on kernel/timer.c
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>

#include <asm/uaccess.h>

/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * returns the time in ktime_t format
 */
static ktime_t ktime_get(void)
{
	struct timespec now;

	ktime_get_ts(&now);

	return timespec_to_ktime(now);
}

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
static ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}

EXPORT_SYMBOL_GPL(ktime_get_real);

/*
 * The timer bases:
 */

#define MAX_HRTIMER_BASES 2

static DEFINE_PER_CPU(struct hrtimer_base, hrtimer_bases[MAX_HRTIMER_BASES]) =
{
	{
		.index = CLOCK_REALTIME,
		.get_time = &ktime_get_real,
		.resolution = KTIME_REALTIME_RES,
	},
	{
		.index = CLOCK_MONOTONIC,
		.get_time = &ktime_get,
		.resolution = KTIME_MONOTONIC_RES,
	},
};

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 *
 * @ts: pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		getnstimeofday(ts);
		tomono = wall_to_monotonic;

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec);
}

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

#define set_curr_timer(b, t)	do { (b)->curr_timer = (t); } while (0)

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
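/*
 * For instance, while switch_hrtimer_base() below is moving a timer to
 * another CPU's base it briefly sets timer->base = NULL; the retry loop
 * in lock_hrtimer_base() then spins until the new base pointer becomes
 * visible and can be locked.
 */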
static struct hrtimer_base *lock_hrtimer_base(const struct hrtimer *timer,
					      unsigned long *flags)
{
	struct hrtimer_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_base *base)
{
	struct hrtimer_base *new_base;

	new_base = &__get_cpu_var(hrtimer_bases[base->index]);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(base->curr_timer == timer))
			return base;

		/* See the comment in lock_hrtimer_base() */
		timer->base = NULL;
		spin_unlock(&base->lock);
		spin_lock(&new_base->lock);
		timer->base = new_base;
	}
	return new_base;
}

#else /* CONFIG_SMP */

#define set_curr_timer(b, t)	do { } while (0)

static inline struct hrtimer_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_base *base = timer->base;

	spin_lock_irqsave(&base->lock, *flags);

	return base;
}

#define switch_hrtimer_base(t, b)	(b)

#endif /* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 *
 * @kt: addend
 * @nsec: the scalar nsec value to add
 *
 * Returns the sum of kt and nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_add(kt, tmp);
}

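/*
 * Example: adding 2,500,000,000 ns splits the scalar into 2 s +
 * 500,000,000 ns via do_div() above before the 64-bit ktime_add().
 */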
# endif /* !CONFIG_KTIME_SCALAR */

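/*
 * Note: shifting dividend and divisor right by the same count below keeps
 * the quotient (almost) unchanged while making the divisor fit the 32 bits
 * that do_div() requires, e.g. a 5 s divisor is halved once to ~2.5e9 ns.
 */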
/*
 * Divide a ktime value by a nanosecond value
 */
static unsigned long ktime_divns(const ktime_t kt, nsec_t div)
{
	u64 dclc;
	int sft = 0;

	dclc = ktime_to_ns(kt);
	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	dclc >>= sft;
	do_div(dclc, (unsigned long) div);

	return (unsigned long) dclc;
}

#else /* BITS_PER_LONG < 64 */
# define ktime_divns(kt, div)	(unsigned long)((kt).tv64 / (div))
#endif /* BITS_PER_LONG >= 64 */

/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	spin_unlock_irqrestore(&timer->base->lock, *flags);
}

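/*
 * Worked example: a timer with a 10 ms interval whose expiry lies 35 ms in
 * the past is forwarded by four intervals; hrtimer_forward() returns an
 * overrun count of 4 and the new expiry ends up 5 ms in the future.
 */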
/**
 * hrtimer_forward - forward the timer expiry
 *
 * @timer: hrtimer to forward
 * @interval: the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
unsigned long
hrtimer_forward(struct hrtimer *timer, const ktime_t interval)
{
	unsigned long orun = 1;
	ktime_t delta, now;

	now = timer->base->get_time();

	delta = ktime_sub(now, timer->expires);

	if (delta.tv64 < 0)
		return 0;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		nsec_t incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		timer->expires = ktime_add_ns(timer->expires, incr * orun);
		if (timer->expires.tv64 > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	timer->expires = ktime_add(timer->expires, interval);

	return orun;
}

/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 */
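/*
 * Note: base->pending mirrors the tree in expiry order, so the expiry
 * code can pick the soonest pending timer in O(1) from the list head.
 */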
static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
{
	struct rb_node **link = &base->active.rb_node;
	struct list_head *prev = &base->pending;
	struct rb_node *parent = NULL;
	struct hrtimer *entry;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct hrtimer, node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same expiry time stay together.
		 */
		if (timer->expires.tv64 < entry->expires.tv64)
			link = &(*link)->rb_left;
		else {
			link = &(*link)->rb_right;
			prev = &entry->list;
		}
	}

	/*
	 * Insert the timer to the rbtree and to the sorted list:
	 */
	rb_link_node(&timer->node, parent, link);
	rb_insert_color(&timer->node, &base->active);
	list_add(&timer->list, prev);

	timer->state = HRTIMER_PENDING;
}


/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 */
static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
{
	/*
	 * Remove the timer from the sorted list and from the rbtree:
	 */
	list_del(&timer->list);
	rb_erase(&timer->node, &base->active);
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
{
	if (hrtimer_active(timer)) {
		__remove_hrtimer(timer, base);
		timer->state = HRTIMER_INACTIVE;
		return 1;
	}
	return 0;
}

/**
 * hrtimer_start - (re)start an hrtimer on the current CPU
 *
 * @timer: the timer to be added
 * @tim: expiry time
 * @mode: expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	struct hrtimer_base *base, *new_base;
	unsigned long flags;
	int ret;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base);

	if (mode == HRTIMER_REL)
		tim = ktime_add(tim, new_base->get_time());
	timer->expires = tim;

	enqueue_hrtimer(timer, new_base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}

/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 *
 * @timer: hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_hrtimer_base(timer, &flags);

	if (base->curr_timer != timer)
		ret = remove_hrtimer(timer, base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 *
 * @timer: the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
	}
}

/**
 * hrtimer_get_remaining - get remaining time for the timer
 *
 * @timer: the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	struct hrtimer_base *base;
	unsigned long flags;
	ktime_t rem;

	base = lock_hrtimer_base(timer, &flags);
	rem = ktime_sub(timer->expires, timer->base->get_time());
	unlock_hrtimer_base(timer, &flags);

	return rem;
}

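/*
 * Note: using clock_id as the array index below relies on CLOCK_REALTIME
 * being 0 and CLOCK_MONOTONIC being 1, matching the .index initializers
 * of the hrtimer_bases array above.
 */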
/**
 * hrtimer_rebase - rebase an initialized hrtimer to a different base
 *
 * @timer: the timer to be rebased
 * @clock_id: the clock to be used
 */
void hrtimer_rebase(struct hrtimer *timer, const clockid_t clock_id)
{
	struct hrtimer_base *bases;

	bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
	timer->base = &bases[clock_id];
}

/**
 * hrtimer_init - initialize a timer to the given clock
 *
 * @timer: the timer to be initialized
 * @clock_id: the clock to be used
 */
void hrtimer_init(struct hrtimer *timer, const clockid_t clock_id)
{
	memset(timer, 0, sizeof(struct hrtimer));
	hrtimer_rebase(timer, clock_id);
}

/**
 * hrtimer_get_res - get the timer resolution for a clock
 *
 * @which_clock: which clock to query
 * @tp: pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by which_clock in the
 * variable pointed to by tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
	struct hrtimer_base *bases;

	tp->tv_sec = 0;
	bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
	tp->tv_nsec = bases[which_clock].resolution;

	return 0;
}

/*
 * Expire the per base hrtimer-queue:
 */
static inline void run_hrtimer_queue(struct hrtimer_base *base)
{
	ktime_t now = base->get_time();

	spin_lock_irq(&base->lock);

	while (!list_empty(&base->pending)) {
		struct hrtimer *timer;
		int (*fn)(void *);
		int restart;
		void *data;

		timer = list_entry(base->pending.next, struct hrtimer, list);
		if (now.tv64 <= timer->expires.tv64)
			break;

		fn = timer->function;
		data = timer->data;
		set_curr_timer(base, timer);
		__remove_hrtimer(timer, base);
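		/*
		 * Drop the base lock across the callback invocation below so
		 * that the handler itself can rearm or cancel hrtimers
		 * without deadlocking on base->lock.
		 */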
		spin_unlock_irq(&base->lock);

		/*
		 * fn == NULL is a special case for the simplest timer
		 * variant - wake up process and do not restart:
		 */
		if (!fn) {
			wake_up_process(data);
			restart = HRTIMER_NORESTART;
		} else
			restart = fn(data);

		spin_lock_irq(&base->lock);

		if (restart == HRTIMER_RESTART)
			enqueue_hrtimer(timer, base);
		else
			timer->state = HRTIMER_EXPIRED;
	}
	set_curr_timer(base, NULL);
	spin_unlock_irq(&base->lock);
}

/*
 * Called from the timer softirq once per jiffy to expire hrtimers:
 */
void hrtimer_run_queues(void)
{
	struct hrtimer_base *base = __get_cpu_var(hrtimer_bases);
	int i;

	for (i = 0; i < MAX_HRTIMER_BASES; i++)
		run_hrtimer_queue(&base[i]);
}

/*
 * Functions related to boot-time initialization:
 */
static void __devinit init_hrtimers_cpu(int cpu)
{
	struct hrtimer_base *base = per_cpu(hrtimer_bases, cpu);
	int i;

	for (i = 0; i < MAX_HRTIMER_BASES; i++) {
		spin_lock_init(&base->lock);
		INIT_LIST_HEAD(&base->pending);
		base++;
	}
}

#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_base *old_base,
				 struct hrtimer_base *new_base)
{
	struct hrtimer *timer;
	struct rb_node *node;

	while ((node = rb_first(&old_base->active))) {
		timer = rb_entry(node, struct hrtimer, node);
		__remove_hrtimer(timer, old_base);
		timer->base = new_base;
		enqueue_hrtimer(timer, new_base);
	}
}

static void migrate_hrtimers(int cpu)
{
	struct hrtimer_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(hrtimer_bases, cpu);
	new_base = get_cpu_var(hrtimer_bases);

	local_irq_disable();

	for (i = 0; i < MAX_HRTIMER_BASES; i++) {

		spin_lock(&new_base->lock);
		spin_lock(&old_base->lock);

		BUG_ON(old_base->curr_timer);

		migrate_hrtimer_list(old_base, new_base);

		spin_unlock(&old_base->lock);
		spin_unlock(&new_base->lock);
		old_base++;
		new_base++;
	}

	local_irq_enable();
	put_cpu_var(hrtimer_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __devinit hrtimer_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
		init_hrtimers_cpu(cpu);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		migrate_hrtimers(cpu);
		break;
#endif

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __devinitdata hrtimers_nb = {
	.notifier_call = hrtimer_cpu_notify,
};

void __init hrtimers_init(void)
{
	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
			   (void *)(long)smp_processor_id());
	register_cpu_notifier(&hrtimers_nb);
}
679 | |||