Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--	kernel/hrtimer.c	313
1 file changed, 152 insertions(+), 161 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index cb49883b64e5..11e896903828 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -55,11 +55,10 @@
 /*
  * The timer bases:
  *
- * Note: If we want to add new timer bases, we have to skip the two
- * clock ids captured by the cpu-timers. We do this by holding empty
- * entries rather than doing math adjustment of the clock ids.
- * This ensures that we capture erroneous accesses to these clock ids
- * rather than moving them into the range of valid clock id's.
+ * There are more clockids than hrtimer bases. Thus, we index
+ * into the timer bases by the hrtimer_base_type enum. When trying
+ * to reach a base using a clockid, hrtimer_clockid_to_base()
+ * is used to convert from clockid to the proper hrtimer_base_type.
  */
 DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
 {
@@ -67,39 +66,55 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
 	.clock_base =
 	{
 		{
-			.index = CLOCK_REALTIME,
+			.index = HRTIMER_BASE_MONOTONIC,
+			.clockid = CLOCK_MONOTONIC,
+			.get_time = &ktime_get,
+			.resolution = KTIME_LOW_RES,
+		},
+		{
+			.index = HRTIMER_BASE_REALTIME,
+			.clockid = CLOCK_REALTIME,
 			.get_time = &ktime_get_real,
 			.resolution = KTIME_LOW_RES,
 		},
 		{
-			.index = CLOCK_MONOTONIC,
-			.get_time = &ktime_get,
+			.index = HRTIMER_BASE_BOOTTIME,
+			.clockid = CLOCK_BOOTTIME,
+			.get_time = &ktime_get_boottime,
 			.resolution = KTIME_LOW_RES,
 		},
 	}
 };
 
+static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
+	[CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
+	[CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
+	[CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
+};
+
+static inline int hrtimer_clockid_to_base(clockid_t clock_id)
+{
+	return hrtimer_clock_to_base_table[clock_id];
+}
+
+
 /*
  * Get the coarse grained time at the softirq based on xtime and
  * wall_to_monotonic.
  */
 static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
 {
-	ktime_t xtim, tomono;
-	struct timespec xts, tom;
-	unsigned long seq;
+	ktime_t xtim, mono, boot;
+	struct timespec xts, tom, slp;
 
-	do {
-		seq = read_seqbegin(&xtime_lock);
-		xts = __current_kernel_time();
-		tom = __get_wall_to_monotonic();
-	} while (read_seqretry(&xtime_lock, seq));
+	get_xtime_and_monotonic_and_sleep_offset(&xts, &tom, &slp);
 
 	xtim = timespec_to_ktime(xts);
-	tomono = timespec_to_ktime(tom);
-	base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
-	base->clock_base[CLOCK_MONOTONIC].softirq_time =
-		ktime_add(xtim, tomono);
+	mono = ktime_add(xtim, timespec_to_ktime(tom));
+	boot = ktime_add(mono, timespec_to_ktime(slp));
+	base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim;
+	base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono;
+	base->clock_base[HRTIMER_BASE_BOOTTIME].softirq_time = boot;
 }
 
 /*
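The two ideas in this hunk: clockids are sparse, so a small table collapses them onto dense base indices, and one consistent snapshot yields all three base times (mono = xtim + wtm, boot = mono + sleep). A minimal user-space sketch of that arithmetic follows; the enum, the table, and the sample nanosecond values are illustrative stand-ins, not the kernel's types.

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7	/* Linux value, for older headers */
#endif

/* Illustrative stand-in for the kernel's hrtimer_base_type enum. */
enum base_type { BASE_MONOTONIC, BASE_REALTIME, BASE_BOOTTIME, BASE_MAX };

/* Dense table indexed by sparse clockid, mirroring
 * hrtimer_clock_to_base_table; unlisted clockids fall to 0 here,
 * the kernel only indexes valid ids. */
static const int clock_to_base[] = {
	[CLOCK_REALTIME]  = BASE_REALTIME,
	[CLOCK_MONOTONIC] = BASE_MONOTONIC,
	[CLOCK_BOOTTIME]  = BASE_BOOTTIME,
};

int main(void)
{
	/* Pretend these came from one consistent snapshot, in ns. */
	int64_t xtim = 1300000000LL * 1000000000LL;	/* wall time */
	int64_t wtm  = -900000000000LL;			/* wall_to_monotonic */
	int64_t slp  = 5000000000LL;			/* total suspend time */

	int64_t softirq_time[BASE_MAX];
	softirq_time[BASE_REALTIME]  = xtim;
	softirq_time[BASE_MONOTONIC] = xtim + wtm;		/* mono */
	softirq_time[BASE_BOOTTIME]  = xtim + wtm + slp;	/* boot */

	printf("CLOCK_BOOTTIME -> base %d, softirq_time %lld ns\n",
	       clock_to_base[CLOCK_BOOTTIME],
	       (long long)softirq_time[clock_to_base[CLOCK_BOOTTIME]]);
	return 0;
}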
@@ -186,10 +201,11 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
 	struct hrtimer_cpu_base *new_cpu_base;
 	int this_cpu = smp_processor_id();
 	int cpu = hrtimer_get_target(this_cpu, pinned);
+	int basenum = base->index;
 
 again:
 	new_cpu_base = &per_cpu(hrtimer_bases, cpu);
-	new_base = &new_cpu_base->clock_base[base->index];
+	new_base = &new_cpu_base->clock_base[basenum];
 
 	if (base != new_base) {
 		/*
@@ -336,6 +352,11 @@ EXPORT_SYMBOL_GPL(ktime_add_safe);
 
 static struct debug_obj_descr hrtimer_debug_descr;
 
+static void *hrtimer_debug_hint(void *addr)
+{
+	return ((struct hrtimer *) addr)->function;
+}
+
 /*
  * fixup_init is called when:
  * - an active object is initialized
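The new debug_hint lets debugobjects report the timer's callback rather than an opaque object address when it flags a misuse. A toy user-space model of that hook; struct obj and the function names below are illustrative, not the debugobjects API.

#include <stdio.h>

/* Toy object carrying a callback, standing in for struct hrtimer. */
struct obj { void (*function)(void); };

static void my_callback(void) { }

/* Like hrtimer_debug_hint(): hand the debug layer the callback address,
 * which identifies the owner far better than the object address does. */
static void *obj_debug_hint(void *addr)
{
	return (void *)((struct obj *)addr)->function;
}

int main(void)
{
	struct obj o = { my_callback };

	printf("object %p, hint (callback) %p\n", (void *)&o,
	       obj_debug_hint(&o));
	return 0;
}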
@@ -395,6 +416,7 @@ static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
 
 static struct debug_obj_descr hrtimer_debug_descr = {
 	.name = "hrtimer",
+	.debug_hint = hrtimer_debug_hint,
 	.fixup_init = hrtimer_fixup_init,
 	.fixup_activate = hrtimer_fixup_activate,
 	.fixup_free = hrtimer_fixup_free,
@@ -499,7 +521,7 @@ static inline int hrtimer_is_hres_enabled(void)
  */
 static inline int hrtimer_hres_active(void)
 {
-	return __get_cpu_var(hrtimer_bases).hres_active;
+	return __this_cpu_read(hrtimer_bases.hres_active);
 }
 
 /*
@@ -518,10 +540,13 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
 		struct hrtimer *timer;
+		struct timerqueue_node *next;
 
-		if (!base->first)
+		next = timerqueue_getnext(&base->active);
+		if (!next)
 			continue;
-		timer = rb_entry(base->first, struct hrtimer, node);
+		timer = container_of(next, struct hrtimer, node);
+
 		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 		/*
 		 * clock_was_set() has changed base->offset so the
@@ -601,67 +626,6 @@ static int hrtimer_reprogram(struct hrtimer *timer,
 	return res;
 }
 
-
-/*
- * Retrigger next event is called after clock was set
- *
- * Called with interrupts disabled via on_each_cpu()
- */
-static void retrigger_next_event(void *arg)
-{
-	struct hrtimer_cpu_base *base;
-	struct timespec realtime_offset, wtm;
-	unsigned long seq;
-
-	if (!hrtimer_hres_active())
-		return;
-
-	do {
-		seq = read_seqbegin(&xtime_lock);
-		wtm = __get_wall_to_monotonic();
-	} while (read_seqretry(&xtime_lock, seq));
-	set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
-
-	base = &__get_cpu_var(hrtimer_bases);
-
-	/* Adjust CLOCK_REALTIME offset */
-	raw_spin_lock(&base->lock);
-	base->clock_base[CLOCK_REALTIME].offset =
-		timespec_to_ktime(realtime_offset);
-
-	hrtimer_force_reprogram(base, 0);
-	raw_spin_unlock(&base->lock);
-}
-
-/*
- * Clock realtime was set
- *
- * Change the offset of the realtime clock vs. the monotonic
- * clock.
- *
- * We might have to reprogram the high resolution timer interrupt. On
- * SMP we call the architecture specific code to retrigger _all_ high
- * resolution timer interrupts. On UP we just disable interrupts and
- * call the high resolution interrupt code.
- */
-void clock_was_set(void)
-{
-	/* Retrigger the CPU local events everywhere */
-	on_each_cpu(retrigger_next_event, NULL, 1);
-}
-
-/*
- * During resume we might have to reprogram the high resolution timer
- * interrupt (on the local CPU):
- */
-void hres_timers_resume(void)
-{
-	WARN_ONCE(!irqs_disabled(),
-		  KERN_INFO "hres_timers_resume() called with IRQs enabled!");
-
-	retrigger_next_event(NULL);
-}
-
 /*
  * Initialize the high resolution related parts of cpu_base
  */
@@ -672,14 +636,6 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
 }
 
 /*
- * Initialize the high resolution related parts of a hrtimer
- */
-static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
-{
-}
-
-
-/*
  * When High resolution timers are active, try to reprogram. Note, that in case
  * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
  * check happens. The timer gets enqueued into the rbtree. The reprogramming
@@ -704,11 +660,39 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 }
 
 /*
+ * Retrigger next event is called after clock was set
+ *
+ * Called with interrupts disabled via on_each_cpu()
+ */
+static void retrigger_next_event(void *arg)
+{
+	struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
+	struct timespec realtime_offset, xtim, wtm, sleep;
+
+	if (!hrtimer_hres_active())
+		return;
+
+	/* Optimized out for !HIGH_RES */
+	get_xtime_and_monotonic_and_sleep_offset(&xtim, &wtm, &sleep);
+	set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
+
+	/* Adjust CLOCK_REALTIME offset */
+	raw_spin_lock(&base->lock);
+	base->clock_base[HRTIMER_BASE_REALTIME].offset =
+		timespec_to_ktime(realtime_offset);
+	base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
+		timespec_to_ktime(sleep);
+
+	hrtimer_force_reprogram(base, 0);
+	raw_spin_unlock(&base->lock);
+}
+
+/*
  * Switch to high resolution mode
  */
 static int hrtimer_switch_to_hres(void)
 {
-	int cpu = smp_processor_id();
+	int i, cpu = smp_processor_id();
 	struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
 	unsigned long flags;
 
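retrigger_next_event() refreshes two per-base offsets: CLOCK_REALTIME's offset is the negated wall_to_monotonic, and CLOCK_BOOTTIME's is the accumulated sleep time. A small sketch of that normalization arithmetic, with a hand-rolled stand-in for set_normalized_timespec() and made-up sample values:

#include <stdio.h>

/* Stand-in for the kernel's timespec handling: normalize (sec, nsec)
 * so 0 <= nsec < 1e9, as set_normalized_timespec() does. */
struct ts { long long sec; long nsec; };

static struct ts normalize(long long sec, long long nsec)
{
	struct ts t;

	while (nsec >= 1000000000LL) { nsec -= 1000000000LL; sec++; }
	while (nsec < 0)             { nsec += 1000000000LL; sec--; }
	t.sec = sec;
	t.nsec = (long)nsec;
	return t;
}

int main(void)
{
	/* Sample snapshot: wall_to_monotonic is the negative of the
	 * realtime offset; sleep is total accumulated suspend time. */
	struct ts wtm = { -1300000000LL, 250000000 };
	struct ts sleep = { 42, 0 };

	/* Mirrors: set_normalized_timespec(&realtime_offset,
	 *                                  -wtm.tv_sec, -wtm.tv_nsec); */
	struct ts realtime_offset = normalize(-wtm.sec, -wtm.nsec);

	printf("CLOCK_REALTIME offset: %lld.%09ld\n",
	       realtime_offset.sec, realtime_offset.nsec);
	printf("CLOCK_BOOTTIME offset: %lld.%09ld\n", sleep.sec, sleep.nsec);
	return 0;
}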
@@ -724,8 +708,8 @@ static int hrtimer_switch_to_hres(void)
 		return 0;
 	}
 	base->hres_active = 1;
-	base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
-	base->clock_base[CLOCK_MONOTONIC].resolution = KTIME_HIGH_RES;
+	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
+		base->clock_base[i].resolution = KTIME_HIGH_RES;
 
 	tick_setup_sched_timer();
 
@@ -749,10 +733,43 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 	return 0;
 }
 static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
-static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }
+static inline void retrigger_next_event(void *arg) { }
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
+/*
+ * Clock realtime was set
+ *
+ * Change the offset of the realtime clock vs. the monotonic
+ * clock.
+ *
+ * We might have to reprogram the high resolution timer interrupt. On
+ * SMP we call the architecture specific code to retrigger _all_ high
+ * resolution timer interrupts. On UP we just disable interrupts and
+ * call the high resolution interrupt code.
+ */
+void clock_was_set(void)
+{
+#ifdef CONFIG_HIGH_RES_TIMERS
+	/* Retrigger the CPU local events everywhere */
+	on_each_cpu(retrigger_next_event, NULL, 1);
+#endif
+	timerfd_clock_was_set();
+}
+
+/*
+ * During resume we might have to reprogram the high resolution timer
+ * interrupt (on the local CPU):
+ */
+void hrtimers_resume(void)
+{
+	WARN_ONCE(!irqs_disabled(),
+		  KERN_INFO "hrtimers_resume() called with IRQs enabled!");
+
+	retrigger_next_event(NULL);
+	timerfd_clock_was_set();
+}
+
 static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
 {
 #ifdef CONFIG_TIMER_STATS
@@ -842,48 +859,18 @@ EXPORT_SYMBOL_GPL(hrtimer_forward);
 static int enqueue_hrtimer(struct hrtimer *timer,
 			   struct hrtimer_clock_base *base)
 {
-	struct rb_node **link = &base->active.rb_node;
-	struct rb_node *parent = NULL;
-	struct hrtimer *entry;
-	int leftmost = 1;
-
 	debug_activate(timer);
 
-	/*
-	 * Find the right place in the rbtree:
-	 */
-	while (*link) {
-		parent = *link;
-		entry = rb_entry(parent, struct hrtimer, node);
-		/*
-		 * We dont care about collisions. Nodes with
-		 * the same expiry time stay together.
-		 */
-		if (hrtimer_get_expires_tv64(timer) <
-				hrtimer_get_expires_tv64(entry)) {
-			link = &(*link)->rb_left;
-		} else {
-			link = &(*link)->rb_right;
-			leftmost = 0;
-		}
-	}
-
-	/*
-	 * Insert the timer to the rbtree and check whether it
-	 * replaces the first pending timer
-	 */
-	if (leftmost)
-		base->first = &timer->node;
+	timerqueue_add(&base->active, &timer->node);
+	base->cpu_base->active_bases |= 1 << base->index;
 
-	rb_link_node(&timer->node, parent, link);
-	rb_insert_color(&timer->node, &base->active);
 	/*
 	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
 	 * state of a possibly running callback.
 	 */
 	timer->state |= HRTIMER_STATE_ENQUEUED;
 
-	return leftmost;
+	return (&timer->node == base->active.next);
 }
 
 /*
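enqueue_hrtimer() and __remove_hrtimer() now lean on two timerqueue guarantees: timerqueue_getnext() is a cheap cached-leftmost read, and the caller can tell whether its node became the new head. The sketch below models that contract with a sorted singly linked list instead of the kernel's rbtree, together with the active_bases bit bookkeeping; all names are illustrative stand-ins.

#include <stdio.h>
#include <stdint.h>

/* Toy model of the timerqueue contract: a sorted list instead of the
 * kernel's rbtree. Only the observable behavior matters here. */
struct tq_node { uint64_t expires; struct tq_node *next; };
struct tq_head { struct tq_node *head; };	/* cached leftmost */

static struct tq_node *tq_getnext(struct tq_head *h) { return h->head; }

static void tq_add(struct tq_head *h, struct tq_node *n)
{
	struct tq_node **p = &h->head;

	/* Equal expiry times stay together: '<=' keeps insertion order
	 * for collisions, as the old rbtree walk did. */
	while (*p && (*p)->expires <= n->expires)
		p = &(*p)->next;
	n->next = *p;
	*p = n;
}

static void tq_del(struct tq_head *h, struct tq_node *n)
{
	struct tq_node **p = &h->head;

	while (*p && *p != n)
		p = &(*p)->next;
	if (*p)
		*p = n->next;
}

int main(void)
{
	struct tq_head active = { NULL };
	unsigned int active_bases = 0;		/* one bit per clock base */
	const int base_index = 1;		/* say, HRTIMER_BASE_REALTIME */
	struct tq_node a = { 100, NULL }, b = { 50, NULL };

	tq_add(&active, &a);
	active_bases |= 1 << base_index;	/* base now has timers */
	tq_add(&active, &b);
	/* enqueue_hrtimer()'s return value: did we become the new head? */
	printf("b is leftmost: %d\n", tq_getnext(&active) == &b);

	tq_del(&active, &b);
	tq_del(&active, &a);
	if (!tq_getnext(&active))		/* __remove_hrtimer() path */
		active_bases &= ~(1 << base_index);
	printf("active_bases = %u\n", active_bases);
	return 0;
}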
@@ -903,12 +890,7 @@ static void __remove_hrtimer(struct hrtimer *timer,
 	if (!(timer->state & HRTIMER_STATE_ENQUEUED))
 		goto out;
 
-	/*
-	 * Remove the timer from the rbtree and replace the first
-	 * entry pointer if necessary.
-	 */
-	if (base->first == &timer->node) {
-		base->first = rb_next(&timer->node);
+	if (&timer->node == timerqueue_getnext(&base->active)) {
 #ifdef CONFIG_HIGH_RES_TIMERS
 		/* Reprogram the clock event device. if enabled */
 		if (reprogram && hrtimer_hres_active()) {
@@ -921,7 +903,9 @@ static void __remove_hrtimer(struct hrtimer *timer,
 		}
 #endif
 	}
-	rb_erase(&timer->node, &base->active);
+	timerqueue_del(&base->active, &timer->node);
+	if (!timerqueue_getnext(&base->active))
+		base->cpu_base->active_bases &= ~(1 << base->index);
 out:
 	timer->state = newstate;
 }
@@ -1222,11 +1206,13 @@ ktime_t hrtimer_get_next_event(void)
 	if (!hrtimer_hres_active()) {
 		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
 			struct hrtimer *timer;
+			struct timerqueue_node *next;
 
-			if (!base->first)
+			next = timerqueue_getnext(&base->active);
+			if (!next)
 				continue;
 
-			timer = rb_entry(base->first, struct hrtimer, node);
+			timer = container_of(next, struct hrtimer, node);
 			delta.tv64 = hrtimer_get_expires_tv64(timer);
 			delta = ktime_sub(delta, base->get_time());
 			if (delta.tv64 < mindelta.tv64)
@@ -1246,6 +1232,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 			  enum hrtimer_mode mode)
 {
 	struct hrtimer_cpu_base *cpu_base;
+	int base;
 
 	memset(timer, 0, sizeof(struct hrtimer));
 
@@ -1254,8 +1241,9 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
 		clock_id = CLOCK_MONOTONIC;
 
-	timer->base = &cpu_base->clock_base[clock_id];
-	hrtimer_init_timer_hres(timer);
+	base = hrtimer_clockid_to_base(clock_id);
+	timer->base = &cpu_base->clock_base[base];
+	timerqueue_init(&timer->node);
 
 #ifdef CONFIG_TIMER_STATS
 	timer->start_site = NULL;
@@ -1289,9 +1277,10 @@ EXPORT_SYMBOL_GPL(hrtimer_init);
 int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 {
 	struct hrtimer_cpu_base *cpu_base;
+	int base = hrtimer_clockid_to_base(which_clock);
 
 	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
-	*tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution);
+	*tp = ktime_to_timespec(cpu_base->clock_base[base].resolution);
 
 	return 0;
 }
@@ -1346,7 +1335,6 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
 void hrtimer_interrupt(struct clock_event_device *dev)
 {
 	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-	struct hrtimer_clock_base *base;
 	ktime_t expires_next, now, entry_time, delta;
 	int i, retries = 0;
 
@@ -1368,18 +1356,21 @@ retry:
 	 */
 	cpu_base->expires_next.tv64 = KTIME_MAX;
 
-	base = cpu_base->clock_base;
-
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
+		struct hrtimer_clock_base *base;
+		struct timerqueue_node *node;
 		ktime_t basenow;
-		struct rb_node *node;
 
+		if (!(cpu_base->active_bases & (1 << i)))
+			continue;
+
+		base = cpu_base->clock_base + i;
 		basenow = ktime_add(now, base->offset);
 
-		while ((node = base->first)) {
+		while ((node = timerqueue_getnext(&base->active))) {
 			struct hrtimer *timer;
 
-			timer = rb_entry(node, struct hrtimer, node);
+			timer = container_of(node, struct hrtimer, node);
 
 			/*
 			 * The immediate goal for using the softexpires is
@@ -1406,7 +1397,6 @@ retry:
 
 			__run_hrtimer(timer, &basenow);
 		}
-		base++;
 	}
 
 	/*
@@ -1535,7 +1525,7 @@ void hrtimer_run_pending(void)
  */
 void hrtimer_run_queues(void)
 {
-	struct rb_node *node;
+	struct timerqueue_node *node;
 	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
 	struct hrtimer_clock_base *base;
 	int index, gettime = 1;
@@ -1545,8 +1535,7 @@ void hrtimer_run_queues(void)
 
 	for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
 		base = &cpu_base->clock_base[index];
-
-		if (!base->first)
+		if (!timerqueue_getnext(&base->active))
 			continue;
 
 		if (gettime) {
@@ -1556,10 +1545,10 @@ void hrtimer_run_queues(void)
 
 		raw_spin_lock(&cpu_base->lock);
 
-		while ((node = base->first)) {
+		while ((node = timerqueue_getnext(&base->active))) {
 			struct hrtimer *timer;
 
-			timer = rb_entry(node, struct hrtimer, node);
+			timer = container_of(node, struct hrtimer, node);
 			if (base->softirq_time.tv64 <=
 			    hrtimer_get_expires_tv64(timer))
 				break;
@@ -1638,7 +1627,7 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
 	struct timespec __user *rmtp;
 	int ret = 0;
 
-	hrtimer_init_on_stack(&t.timer, restart->nanosleep.index,
+	hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid,
 				HRTIMER_MODE_ABS);
 	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
 
@@ -1690,7 +1679,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
 
 	restart = &current_thread_info()->restart_block;
 	restart->fn = hrtimer_nanosleep_restart;
-	restart->nanosleep.index = t.timer.base->index;
+	restart->nanosleep.clockid = t.timer.base->clockid;
 	restart->nanosleep.rmtp = rmtp;
 	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
 
@@ -1724,8 +1713,10 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
 
 	raw_spin_lock_init(&cpu_base->lock);
 
-	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
+	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
 		cpu_base->clock_base[i].cpu_base = cpu_base;
+		timerqueue_init_head(&cpu_base->clock_base[i].active);
+	}
 
 	hrtimer_init_hres(cpu_base);
 	INIT_LIST_HEAD(&cpu_base->to_pull);
@@ -1737,10 +1728,10 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
 				struct hrtimer_clock_base *new_base)
 {
 	struct hrtimer *timer;
-	struct rb_node *node;
+	struct timerqueue_node *node;
 
-	while ((node = rb_first(&old_base->active))) {
-		timer = rb_entry(node, struct hrtimer, node);
+	while ((node = timerqueue_getnext(&old_base->active))) {
+		timer = container_of(node, struct hrtimer, node);
 		BUG_ON(hrtimer_callback_running(timer));
 		debug_deactivate(timer);
 
@@ -1869,7 +1860,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
 	}
 
 	/*
-	 * A NULL parameter means "inifinte"
+	 * A NULL parameter means "infinite"
 	 */
 	if (!expires) {
 		schedule();