Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--  kernel/hrtimer.c | 826 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 826 insertions(+), 0 deletions(-)

diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
new file mode 100644
index 000000000000..f1c4155b49ac
--- /dev/null
+++ b/kernel/hrtimer.c
@@ -0,0 +1,826 @@
/*
 * linux/kernel/hrtimer.c
 *
 * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
 *
 * High-resolution kernel timers
 *
 * In contrast to the low-resolution timeout API implemented in
 * kernel/timer.c, hrtimers provide finer resolution and accuracy
 * depending on system configuration and capabilities.
 *
 * These timers are currently used for:
 *  - itimers
 *  - POSIX timers
 *  - nanosleep
 *  - precise in-kernel timing
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * Credits:
 *	based on kernel/timer.c
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>

#include <asm/uaccess.h>

/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * Returns the time in ktime_t format.
 */
static ktime_t ktime_get(void)
{
	struct timespec now;

	ktime_get_ts(&now);

	return timespec_to_ktime(now);
}

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * Returns the time in ktime_t format.
 */
static ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}

EXPORT_SYMBOL_GPL(ktime_get_real);

/*
 * The timer bases:
 */

#define MAX_HRTIMER_BASES 2

static DEFINE_PER_CPU(struct hrtimer_base, hrtimer_bases[MAX_HRTIMER_BASES]) =
{
	{
		.index = CLOCK_REALTIME,
		.get_time = &ktime_get_real,
		.resolution = KTIME_REALTIME_RES,
	},
	{
		.index = CLOCK_MONOTONIC,
		.get_time = &ktime_get,
		.resolution = KTIME_MONOTONIC_RES,
	},
};

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 *
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		getnstimeofday(ts);
		tomono = wall_to_monotonic;

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place.
 */
#ifdef CONFIG_SMP

#define set_curr_timer(b, t)	do { (b)->curr_timer = (t); } while (0)

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked and the timer is removed from the list,
 * it is possible to set timer->base = NULL and drop the lock: the timer
 * remains locked.
 */
static struct hrtimer_base *lock_hrtimer_base(const struct hrtimer *timer,
					      unsigned long *flags)
{
	struct hrtimer_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_base *base)
{
	struct hrtimer_base *new_base;

	new_base = &__get_cpu_var(hrtimer_bases[base->index]);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change the timer's base while it is
		 * running, so we keep it on the same CPU. No hassle vs.
		 * reprogramming the event source in the high resolution
		 * case. The softirq code will take care of this when the
		 * timer function has completed. There is no conflict as
		 * we hold the lock until the timer is enqueued.
		 */
		if (unlikely(base->curr_timer == timer))
			return base;

		/* See the comment in lock_hrtimer_base() */
		timer->base = NULL;
		spin_unlock(&base->lock);
		spin_lock(&new_base->lock);
		timer->base = new_base;
	}
	return new_base;
}

#else /* CONFIG_SMP */

#define set_curr_timer(b, t)	do { } while (0)

static inline struct hrtimer_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_base *base = timer->base;

	spin_lock_irqsave(&base->lock, *flags);

	return base;
}

#define switch_hrtimer_base(t, b)	(b)

#endif /* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 *
 * @kt:		addend
 * @nsec:	the scalar nsec value to add
 *
 * Returns the sum of kt and nsec in ktime_t format.
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_add(kt, tmp);
}
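
/*
 * [Editor's annotation, not part of the original commit.]
 * Worked example of the slow path above: adding nsec = 2,700,000,000 ns.
 * Since nsec >= NSEC_PER_SEC, do_div(nsec, NSEC_PER_SEC) leaves nsec = 2
 * and returns rem = 700,000,000, so tmp = ktime_set(2, 700000000) - a
 * sec/nsec pair that ktime_add() can combine with kt without overflowing
 * the 32-bit nsec field of the two-field ktime_t representation.
 */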

#else /* CONFIG_KTIME_SCALAR */

# endif /* !CONFIG_KTIME_SCALAR */

/*
 * Divide a ktime value by a nanosecond value:
 */
static unsigned long ktime_divns(const ktime_t kt, nsec_t div)
{
	u64 dclc, inc, dns;
	int sft = 0;

	dclc = dns = ktime_to_ns(kt);
	inc = div;
	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	dclc >>= sft;
	do_div(dclc, (unsigned long) div);

	return (unsigned long) dclc;
}
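
/*
 * [Editor's annotation, not part of the original commit.]
 * The shift loop above trades a little precision for a divisor that fits
 * in 32 bits, so do_div() can be used. Worked example: kt = 10 s, i.e.
 * dclc = 10^10 ns, and div = 5 * 10^9 ns. div >> 32 is non-zero once, so
 * sft = 1 and div becomes 2.5 * 10^9 (< 2^32). Then dclc >>= 1 yields
 * 5 * 10^9, and do_div(5 * 10^9, 2.5 * 10^9) = 2 - the same quotient as
 * 10^10 / (5 * 10^9), because dividend and divisor were scaled by the
 * same power of two (up to rounding of the low bits shifted out).
 */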

#else /* BITS_PER_LONG < 64 */
# define ktime_divns(kt, div)	(unsigned long)((kt).tv64 / (div))
#endif /* BITS_PER_LONG >= 64 */

/*
 * Counterpart to lock_hrtimer_base() above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	spin_unlock_irqrestore(&timer->base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 *
 * @timer:	hrtimer to forward
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
unsigned long
hrtimer_forward(struct hrtimer *timer, ktime_t interval)
{
	unsigned long orun = 1;
	ktime_t delta, now;

	now = timer->base->get_time();

	delta = ktime_sub(now, timer->expires);

	if (delta.tv64 < 0)
		return 0;

	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		nsec_t incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		timer->expires = ktime_add_ns(timer->expires, incr * orun);
		if (timer->expires.tv64 > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for the exact next expiry:
		 */
		orun++;
	}
	timer->expires = ktime_add(timer->expires, interval);

	return orun;
}
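
/*
 * [Editor's annotation, not part of the original commit.]
 * A minimal sketch of a periodic timer built on hrtimer_forward(). The
 * names my_periodic_fn and the 1 ms period are hypothetical; the callback
 * convention - int (*fn)(void *) with HRTIMER_RESTART re-enqueueing the
 * timer at its updated expiry - is the one used by run_hrtimer_queue()
 * further down in this file.
 */
static int my_periodic_fn(void *data)
{
	struct hrtimer *timer = data;	/* timer passed as its own data */

	/* Push the expiry into the future; missed periods are collapsed: */
	hrtimer_forward(timer, ktime_set(0, 1000000));	/* 1 ms */

	return HRTIMER_RESTART;
}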

/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 */
static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
{
	struct rb_node **link = &base->active.rb_node;
	struct rb_node *parent = NULL;
	struct hrtimer *entry;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct hrtimer, node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same expiry time stay together.
		 */
		if (timer->expires.tv64 < entry->expires.tv64)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	/*
	 * Insert the timer into the rbtree and check whether it
	 * replaces the first pending timer:
	 */
	rb_link_node(&timer->node, parent, link);
	rb_insert_color(&timer->node, &base->active);

	timer->state = HRTIMER_PENDING;

	if (!base->first || timer->expires.tv64 <
	    rb_entry(base->first, struct hrtimer, node)->expires.tv64)
		base->first = &timer->node;
}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 */
static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
{
	/*
	 * Remove the timer from the rbtree and replace the
	 * first entry pointer if necessary.
	 */
	if (base->first == &timer->node)
		base->first = rb_next(&timer->node);
	rb_erase(&timer->node, &base->active);
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
{
	if (hrtimer_active(timer)) {
		__remove_hrtimer(timer, base);
		timer->state = HRTIMER_INACTIVE;
		return 1;
	}
	return 0;
}

/**
 * hrtimer_start - (re)start a timer on the current CPU
 *
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	struct hrtimer_base *base, *new_base;
	unsigned long flags;
	int ret;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base);

	if (mode == HRTIMER_REL)
		tim = ktime_add(tim, new_base->get_time());
	timer->expires = tim;

	enqueue_hrtimer(timer, new_base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
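
/*
 * [Editor's annotation, not part of the original commit.]
 * Sketch of arming a timer with the API above, reusing the hypothetical
 * my_periodic_fn() from the previous annotation. Note that hrtimer_init()
 * (defined further down) zeroes the whole structure, so function and data
 * must be assigned after it.
 */
static struct hrtimer my_timer;		/* hypothetical */

static void my_start(void)
{
	hrtimer_init(&my_timer, CLOCK_MONOTONIC);
	my_timer.function = my_periodic_fn;
	my_timer.data = &my_timer;
	/* HRTIMER_REL: expire 500 us after base->get_time() */
	hrtimer_start(&my_timer, ktime_set(0, 500000), HRTIMER_REL);
}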

/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 *
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_hrtimer_base(timer, &flags);

	if (base->curr_timer != timer)
		ret = remove_hrtimer(timer, base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 *
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
	}
}
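
/*
 * [Editor's annotation, not part of the original commit.]
 * Teardown sketch for the hypothetical my_timer above. Because
 * hrtimer_cancel() loops until a concurrently running callback has
 * finished, the caller must not hold any lock that the callback itself
 * takes, or the loop can spin forever.
 */
static void my_stop(void)
{
	hrtimer_cancel(&my_timer);	/* waits out a running callback */
}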

/**
 * hrtimer_get_remaining - get remaining time for the timer
 *
 * @timer:	the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	struct hrtimer_base *base;
	unsigned long flags;
	ktime_t rem;

	base = lock_hrtimer_base(timer, &flags);
	rem = ktime_sub(timer->expires, timer->base->get_time());
	unlock_hrtimer_base(timer, &flags);

	return rem;
}

/**
 * hrtimer_rebase - rebase an initialized hrtimer to a different base
 *
 * @timer:	the timer to be rebased
 * @clock_id:	the clock to be used
 */
void hrtimer_rebase(struct hrtimer *timer, const clockid_t clock_id)
{
	struct hrtimer_base *bases;

	bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
	timer->base = &bases[clock_id];
}

/**
 * hrtimer_init - initialize a timer to the given clock
 *
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 */
void hrtimer_init(struct hrtimer *timer, const clockid_t clock_id)
{
	memset(timer, 0, sizeof(struct hrtimer));
	hrtimer_rebase(timer, clock_id);
}

/**
 * hrtimer_get_res - get the timer resolution for a clock
 *
 * @which_clock: which clock to query
 * @tp:		 pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by which_clock in the
 * variable pointed to by tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
	struct hrtimer_base *bases;

	bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
	*tp = ktime_to_timespec(bases[which_clock].resolution);

	return 0;
}

/*
 * Expire the per-base hrtimer queue:
 */
static inline void run_hrtimer_queue(struct hrtimer_base *base)
{
	ktime_t now = base->get_time();
	struct rb_node *node;

	spin_lock_irq(&base->lock);

	while ((node = base->first)) {
		struct hrtimer *timer;
		int (*fn)(void *);
		int restart;
		void *data;

		timer = rb_entry(node, struct hrtimer, node);
		if (now.tv64 <= timer->expires.tv64)
			break;

		fn = timer->function;
		data = timer->data;
		set_curr_timer(base, timer);
		__remove_hrtimer(timer, base);
		spin_unlock_irq(&base->lock);

		/*
		 * fn == NULL is a special case for the simplest timer
		 * variant - wake up the process and do not restart:
		 */
		if (!fn) {
			wake_up_process(data);
			restart = HRTIMER_NORESTART;
		} else
			restart = fn(data);

		spin_lock_irq(&base->lock);

		if (restart == HRTIMER_RESTART)
			enqueue_hrtimer(timer, base);
		else
			timer->state = HRTIMER_EXPIRED;
	}
	set_curr_timer(base, NULL);
	spin_unlock_irq(&base->lock);
}

/*
 * Called from the timer softirq every jiffy to expire hrtimers:
 */
void hrtimer_run_queues(void)
{
	struct hrtimer_base *base = __get_cpu_var(hrtimer_bases);
	int i;

	for (i = 0; i < MAX_HRTIMER_BASES; i++)
		run_hrtimer_queue(&base[i]);
}
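
/*
 * [Editor's annotation, not part of the original commit.]
 * For context, the caller lives in kernel/timer.c: the timer softirq
 * handler of this kernel generation invokes hrtimer_run_queues() before
 * running the normal timer wheel, roughly like the sketch below
 * (reconstructed from memory, not verbatim source):
 *
 *	static void run_timer_softirq(struct softirq_action *h)
 *	{
 *		tvec_base_t *base = &__get_cpu_var(tvec_bases);
 *
 *		hrtimer_run_queues();
 *		if (time_after_eq(jiffies, base->timer_jiffies))
 *			__run_timers(base);
 *	}
 */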

/*
 * Sleep related functions:
 */

/**
 * schedule_hrtimer - sleep until timeout
 *
 * @timer:	hrtimer variable initialized with the correct clock base
 * @mode:	timeout value is abs/rel
 *
 * Make the current task sleep until the timer expires.
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least the requested time is guaranteed to
 * pass before the routine returns. The routine will return 0.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * will be returned.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 */
static ktime_t __sched
schedule_hrtimer(struct hrtimer *timer, const enum hrtimer_mode mode)
{
	/* fn stays NULL, meaning single-shot wakeup: */
	timer->data = current;

	hrtimer_start(timer, timer->expires, mode);

	schedule();
	hrtimer_cancel(timer);

	/* Return the remaining time: */
	if (timer->state != HRTIMER_EXPIRED)
		return ktime_sub(timer->expires, timer->base->get_time());
	else
		return (ktime_t) {.tv64 = 0 };
}

static inline ktime_t __sched
schedule_hrtimer_interruptible(struct hrtimer *timer,
			       const enum hrtimer_mode mode)
{
	set_current_state(TASK_INTERRUPTIBLE);

	return schedule_hrtimer(timer, mode);
}
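
/*
 * [Editor's annotation, not part of the original commit.]
 * Sketch of the calling convention used by nanosleep_restart() and
 * hrtimer_nanosleep() below (both helpers above are static, so this is
 * purely illustrative; my_sleep_ns is a hypothetical name). The expiry
 * is stored in timer->expires before the call, because
 * schedule_hrtimer() passes timer->expires on to hrtimer_start().
 */
static void __sched my_sleep_ns(unsigned long nsecs)
{
	struct hrtimer t;

	hrtimer_init(&t, CLOCK_MONOTONIC);
	t.expires = ktime_set(0, nsecs);	/* nsecs < NSEC_PER_SEC here */
	schedule_hrtimer_interruptible(&t, HRTIMER_REL);
}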

static long __sched
nanosleep_restart(struct restart_block *restart, clockid_t clockid)
{
	struct timespec __user *rmtp;
	struct timespec tu;
	void *rfn_save = restart->fn;
	struct hrtimer timer;
	ktime_t rem;

	restart->fn = do_no_restart_syscall;

	hrtimer_init(&timer, clockid);

	timer.expires.tv64 = ((u64)restart->arg1 << 32) | (u64)restart->arg0;

	rem = schedule_hrtimer_interruptible(&timer, HRTIMER_ABS);

	if (rem.tv64 <= 0)
		return 0;

	rmtp = (struct timespec __user *) restart->arg2;
	tu = ktime_to_timespec(rem);
	if (rmtp && copy_to_user(rmtp, &tu, sizeof(tu)))
		return -EFAULT;

	restart->fn = rfn_save;

	/* The other values in restart are already filled in */
	return -ERESTART_RESTARTBLOCK;
}

static long __sched nanosleep_restart_mono(struct restart_block *restart)
{
	return nanosleep_restart(restart, CLOCK_MONOTONIC);
}

static long __sched nanosleep_restart_real(struct restart_block *restart)
{
	return nanosleep_restart(restart, CLOCK_REALTIME);
}

long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
		       const enum hrtimer_mode mode, const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer timer;
	struct timespec tu;
	ktime_t rem;

	hrtimer_init(&timer, clockid);

	timer.expires = timespec_to_ktime(*rqtp);

	rem = schedule_hrtimer_interruptible(&timer, mode);
	if (rem.tv64 <= 0)
		return 0;

	/* Absolute timers do not update the rmtp value: */
	if (mode == HRTIMER_ABS)
		return -ERESTARTNOHAND;

	tu = ktime_to_timespec(rem);

	if (rmtp && copy_to_user(rmtp, &tu, sizeof(tu)))
		return -EFAULT;

	restart = &current_thread_info()->restart_block;
	restart->fn = (clockid == CLOCK_MONOTONIC) ?
		nanosleep_restart_mono : nanosleep_restart_real;
	restart->arg0 = timer.expires.tv64 & 0xFFFFFFFF;
	restart->arg1 = timer.expires.tv64 >> 32;
	restart->arg2 = (unsigned long) rmtp;

	return -ERESTART_RESTARTBLOCK;
}
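
/*
 * [Editor's annotation, not part of the original commit.]
 * The arg0/arg1 split above exists because restart_block arguments are
 * unsigned long, i.e. 32 bits on 32-bit machines, while the absolute
 * expiry is a 64-bit ktime_t scalar. Worked example: expires.tv64 =
 * 0x0000001234567890 is stored as arg0 = 0x34567890 and arg1 =
 * 0x00000012; nanosleep_restart() reassembles it with
 * ((u64)restart->arg1 << 32) | (u64)restart->arg0.
 */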

asmlinkage long
sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
{
	struct timespec tu;

	if (copy_from_user(&tu, rqtp, sizeof(tu)))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_REL, CLOCK_MONOTONIC);
}
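
/*
 * [Editor's annotation, not part of the original commit.]
 * Userspace view of the syscall above - a hypothetical test program
 * exercising the signal/rmtp path through the libc wrapper:
 *
 *	#include <time.h>
 *	#include <errno.h>
 *
 *	int main(void)
 *	{
 *		struct timespec req = { .tv_sec = 0, .tv_nsec = 500000 };
 *		struct timespec rem;
 *
 *		while (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *			req = rem;	// resume with the remaining time
 *		return 0;
 *	}
 */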

/*
 * Functions related to boot-time initialization:
 */
static void __devinit init_hrtimers_cpu(int cpu)
{
	struct hrtimer_base *base = per_cpu(hrtimer_bases, cpu);
	int i;

	for (i = 0; i < MAX_HRTIMER_BASES; i++) {
		spin_lock_init(&base->lock);
		base++;
	}
}

#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_base *old_base,
				 struct hrtimer_base *new_base)
{
	struct hrtimer *timer;
	struct rb_node *node;

	while ((node = rb_first(&old_base->active))) {
		timer = rb_entry(node, struct hrtimer, node);
		__remove_hrtimer(timer, old_base);
		timer->base = new_base;
		enqueue_hrtimer(timer, new_base);
	}
}

static void migrate_hrtimers(int cpu)
{
	struct hrtimer_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(hrtimer_bases, cpu);
	new_base = get_cpu_var(hrtimer_bases);

	local_irq_disable();

	for (i = 0; i < MAX_HRTIMER_BASES; i++) {

		spin_lock(&new_base->lock);
		spin_lock(&old_base->lock);

		BUG_ON(old_base->curr_timer);

		migrate_hrtimer_list(old_base, new_base);

		spin_unlock(&old_base->lock);
		spin_unlock(&new_base->lock);
		old_base++;
		new_base++;
	}

	local_irq_enable();
	put_cpu_var(hrtimer_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __devinit hrtimer_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
		init_hrtimers_cpu(cpu);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		migrate_hrtimers(cpu);
		break;
#endif

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __devinitdata hrtimers_nb = {
	.notifier_call = hrtimer_cpu_notify,
};

void __init hrtimers_init(void)
{
	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
			   (void *)(long)smp_processor_id());
	register_cpu_notifier(&hrtimers_nb);
}