| field | value | date |
|---|---|---|
| author | Jonathan Herman <hermanjl@cs.unc.edu> | 2012-09-29 13:04:40 -0400 |
| committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2012-09-29 13:04:40 -0400 |
| commit | daf1e620bff2cb6d830ef66725369bba9c858f62 (patch) | |
| tree | 1aed8f7cb55371c70d2139b6754d90ea89a26147 /kernel/hrtimer.c | |
| parent | 451ed3b075c2a8e322e5a44f177e2470426a821d (diff) | |
| parent | 1cb90226816c7af7808be4c0de866c54da17ecc9 (diff) | |
Merge branch 'wip-color' into wip-mc
Conflicts:
include/litmus/budget.h
include/litmus/litmus.h
include/litmus/rt_param.h
include/litmus/sched_trace.h
include/litmus/trace.h
include/trace/events/litmus.h
litmus/Makefile
litmus/budget.c
litmus/ftdev.c
litmus/jobs.c
litmus/litmus.c
litmus/locking.c
litmus/preempt.c
litmus/rt_domain.c
litmus/sched_gsn_edf.c
litmus/trace.c
Diffstat (limited to 'kernel/hrtimer.c')
| -rw-r--r-- | kernel/hrtimer.c | 313 |
1 file changed, 152 insertions, 161 deletions
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index d6a15ddf6caf..6cf73d371203 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
| @@ -55,11 +55,10 @@ | |||
| 55 | /* | 55 | /* |
| 56 | * The timer bases: | 56 | * The timer bases: |
| 57 | * | 57 | * |
| 58 | * Note: If we want to add new timer bases, we have to skip the two | 58 | * There are more clockids then hrtimer bases. Thus, we index |
| 59 | * clock ids captured by the cpu-timers. We do this by holding empty | 59 | * into the timer bases by the hrtimer_base_type enum. When trying |
| 60 | * entries rather than doing math adjustment of the clock ids. | 60 | * to reach a base using a clockid, hrtimer_clockid_to_base() |
| 61 | * This ensures that we capture erroneous accesses to these clock ids | 61 | * is used to convert from clockid to the proper hrtimer_base_type. |
| 62 | * rather than moving them into the range of valid clock id's. | ||
| 63 | */ | 62 | */ |
| 64 | DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = | 63 | DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = |
| 65 | { | 64 | { |
| @@ -67,39 +66,55 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = | |||
| 67 | .clock_base = | 66 | .clock_base = |
| 68 | { | 67 | { |
| 69 | { | 68 | { |
| 70 | .index = CLOCK_REALTIME, | 69 | .index = HRTIMER_BASE_MONOTONIC, |
| 70 | .clockid = CLOCK_MONOTONIC, | ||
| 71 | .get_time = &ktime_get, | ||
| 72 | .resolution = KTIME_LOW_RES, | ||
| 73 | }, | ||
| 74 | { | ||
| 75 | .index = HRTIMER_BASE_REALTIME, | ||
| 76 | .clockid = CLOCK_REALTIME, | ||
| 71 | .get_time = &ktime_get_real, | 77 | .get_time = &ktime_get_real, |
| 72 | .resolution = KTIME_LOW_RES, | 78 | .resolution = KTIME_LOW_RES, |
| 73 | }, | 79 | }, |
| 74 | { | 80 | { |
| 75 | .index = CLOCK_MONOTONIC, | 81 | .index = HRTIMER_BASE_BOOTTIME, |
| 76 | .get_time = &ktime_get, | 82 | .clockid = CLOCK_BOOTTIME, |
| 83 | .get_time = &ktime_get_boottime, | ||
| 77 | .resolution = KTIME_LOW_RES, | 84 | .resolution = KTIME_LOW_RES, |
| 78 | }, | 85 | }, |
| 79 | } | 86 | } |
| 80 | }; | 87 | }; |
| 81 | 88 | ||
| 89 | static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = { | ||
| 90 | [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME, | ||
| 91 | [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC, | ||
| 92 | [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME, | ||
| 93 | }; | ||
| 94 | |||
| 95 | static inline int hrtimer_clockid_to_base(clockid_t clock_id) | ||
| 96 | { | ||
| 97 | return hrtimer_clock_to_base_table[clock_id]; | ||
| 98 | } | ||
| 99 | |||
| 100 | |||
| 82 | /* | 101 | /* |
| 83 | * Get the coarse grained time at the softirq based on xtime and | 102 | * Get the coarse grained time at the softirq based on xtime and |
| 84 | * wall_to_monotonic. | 103 | * wall_to_monotonic. |
| 85 | */ | 104 | */ |
| 86 | static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base) | 105 | static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base) |
| 87 | { | 106 | { |
| 88 | ktime_t xtim, tomono; | 107 | ktime_t xtim, mono, boot; |
| 89 | struct timespec xts, tom; | 108 | struct timespec xts, tom, slp; |
| 90 | unsigned long seq; | ||
| 91 | 109 | ||
| 92 | do { | 110 | get_xtime_and_monotonic_and_sleep_offset(&xts, &tom, &slp); |
| 93 | seq = read_seqbegin(&xtime_lock); | ||
| 94 | xts = __current_kernel_time(); | ||
| 95 | tom = __get_wall_to_monotonic(); | ||
| 96 | } while (read_seqretry(&xtime_lock, seq)); | ||
| 97 | 111 | ||
| 98 | xtim = timespec_to_ktime(xts); | 112 | xtim = timespec_to_ktime(xts); |
| 99 | tomono = timespec_to_ktime(tom); | 113 | mono = ktime_add(xtim, timespec_to_ktime(tom)); |
| 100 | base->clock_base[CLOCK_REALTIME].softirq_time = xtim; | 114 | boot = ktime_add(mono, timespec_to_ktime(slp)); |
| 101 | base->clock_base[CLOCK_MONOTONIC].softirq_time = | 115 | base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim; |
| 102 | ktime_add(xtim, tomono); | 116 | base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono; |
| 117 | base->clock_base[HRTIMER_BASE_BOOTTIME].softirq_time = boot; | ||
| 103 | } | 118 | } |
| 104 | 119 | ||
| 105 | /* | 120 | /* |
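
Note: the hunk above replaces direct clockid indexing with an explicit clockid → hrtimer-base mapping, since there are more POSIX clock ids than hrtimer bases. A minimal user-space sketch of the same table-lookup pattern follows; the enum values and names are illustrative stand-ins, not the kernel's definitions.

```c
#include <stdio.h>

/* Illustrative stand-ins for clockid_t values and the base enum. */
enum { CLK_REALTIME = 0, CLK_MONOTONIC = 1, CLK_BOOTTIME = 7, CLK_MAX = 16 };
enum { BASE_MONOTONIC = 0, BASE_REALTIME = 1, BASE_BOOTTIME = 2 };

/* Sparse table indexed by clock id; unlisted ids fall back to 0 (monotonic). */
static const int clock_to_base[CLK_MAX] = {
	[CLK_REALTIME]  = BASE_REALTIME,
	[CLK_MONOTONIC] = BASE_MONOTONIC,
	[CLK_BOOTTIME]  = BASE_BOOTTIME,
};

static int clockid_to_base(int clock_id)
{
	return clock_to_base[clock_id];
}

int main(void)
{
	printf("CLK_BOOTTIME -> base %d\n", clockid_to_base(CLK_BOOTTIME));
	return 0;
}
```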
| @@ -186,10 +201,11 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, | |||
| 186 | struct hrtimer_cpu_base *new_cpu_base; | 201 | struct hrtimer_cpu_base *new_cpu_base; |
| 187 | int this_cpu = smp_processor_id(); | 202 | int this_cpu = smp_processor_id(); |
| 188 | int cpu = hrtimer_get_target(this_cpu, pinned); | 203 | int cpu = hrtimer_get_target(this_cpu, pinned); |
| 204 | int basenum = base->index; | ||
| 189 | 205 | ||
| 190 | again: | 206 | again: |
| 191 | new_cpu_base = &per_cpu(hrtimer_bases, cpu); | 207 | new_cpu_base = &per_cpu(hrtimer_bases, cpu); |
| 192 | new_base = &new_cpu_base->clock_base[base->index]; | 208 | new_base = &new_cpu_base->clock_base[basenum]; |
| 193 | 209 | ||
| 194 | if (base != new_base) { | 210 | if (base != new_base) { |
| 195 | /* | 211 | /* |
| @@ -336,6 +352,11 @@ EXPORT_SYMBOL_GPL(ktime_add_safe); | |||
| 336 | 352 | ||
| 337 | static struct debug_obj_descr hrtimer_debug_descr; | 353 | static struct debug_obj_descr hrtimer_debug_descr; |
| 338 | 354 | ||
| 355 | static void *hrtimer_debug_hint(void *addr) | ||
| 356 | { | ||
| 357 | return ((struct hrtimer *) addr)->function; | ||
| 358 | } | ||
| 359 | |||
| 339 | /* | 360 | /* |
| 340 | * fixup_init is called when: | 361 | * fixup_init is called when: |
| 341 | * - an active object is initialized | 362 | * - an active object is initialized |
| @@ -395,6 +416,7 @@ static int hrtimer_fixup_free(void *addr, enum debug_obj_state state) | |||
| 395 | 416 | ||
| 396 | static struct debug_obj_descr hrtimer_debug_descr = { | 417 | static struct debug_obj_descr hrtimer_debug_descr = { |
| 397 | .name = "hrtimer", | 418 | .name = "hrtimer", |
| 419 | .debug_hint = hrtimer_debug_hint, | ||
| 398 | .fixup_init = hrtimer_fixup_init, | 420 | .fixup_init = hrtimer_fixup_init, |
| 399 | .fixup_activate = hrtimer_fixup_activate, | 421 | .fixup_activate = hrtimer_fixup_activate, |
| 400 | .fixup_free = hrtimer_fixup_free, | 422 | .fixup_free = hrtimer_fixup_free, |
| @@ -499,7 +521,7 @@ static inline int hrtimer_is_hres_enabled(void) | |||
| 499 | */ | 521 | */ |
| 500 | static inline int hrtimer_hres_active(void) | 522 | static inline int hrtimer_hres_active(void) |
| 501 | { | 523 | { |
| 502 | return __get_cpu_var(hrtimer_bases).hres_active; | 524 | return __this_cpu_read(hrtimer_bases.hres_active); |
| 503 | } | 525 | } |
| 504 | 526 | ||
| 505 | /* | 527 | /* |
| @@ -518,10 +540,13 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal) | |||
| 518 | 540 | ||
| 519 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { | 541 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { |
| 520 | struct hrtimer *timer; | 542 | struct hrtimer *timer; |
| 543 | struct timerqueue_node *next; | ||
| 521 | 544 | ||
| 522 | if (!base->first) | 545 | next = timerqueue_getnext(&base->active); |
| 546 | if (!next) | ||
| 523 | continue; | 547 | continue; |
| 524 | timer = rb_entry(base->first, struct hrtimer, node); | 548 | timer = container_of(next, struct hrtimer, node); |
| 549 | |||
| 525 | expires = ktime_sub(hrtimer_get_expires(timer), base->offset); | 550 | expires = ktime_sub(hrtimer_get_expires(timer), base->offset); |
| 526 | /* | 551 | /* |
| 527 | * clock_was_set() has changed base->offset so the | 552 | * clock_was_set() has changed base->offset so the |
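
Note: hrtimer_force_reprogram() still works by scanning each clock base for its earliest queued timer and keeping the minimum expiry, now obtained via timerqueue_getnext() instead of the cached rb-tree node. A compressed sketch of that min-scan, using hypothetical stand-in types rather than the kernel's structures:

```c
#include <stdint.h>

typedef int64_t ktime64_t;              /* nanoseconds, like ktime_t.tv64 */
#define KTIME64_MAX INT64_MAX

/* Hypothetical per-base view: earliest queued expiry (in base time)
 * plus the base's offset from the monotonic clock. */
struct base_view {
	int       has_timer;
	ktime64_t first_expiry;
	ktime64_t offset;
};

/* Return the earliest event, expressed in monotonic time, across all bases. */
static ktime64_t next_event(const struct base_view *bases, int nbases)
{
	ktime64_t expires_next = KTIME64_MAX;

	for (int i = 0; i < nbases; i++) {
		if (!bases[i].has_timer)
			continue;
		/* Convert the base-local expiry back to monotonic time. */
		ktime64_t expires = bases[i].first_expiry - bases[i].offset;
		if (expires < expires_next)
			expires_next = expires;
	}
	return expires_next;
}
```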
| @@ -601,67 +626,6 @@ static int hrtimer_reprogram(struct hrtimer *timer, | |||
| 601 | return res; | 626 | return res; |
| 602 | } | 627 | } |
| 603 | 628 | ||
| 604 | |||
| 605 | /* | ||
| 606 | * Retrigger next event is called after clock was set | ||
| 607 | * | ||
| 608 | * Called with interrupts disabled via on_each_cpu() | ||
| 609 | */ | ||
| 610 | static void retrigger_next_event(void *arg) | ||
| 611 | { | ||
| 612 | struct hrtimer_cpu_base *base; | ||
| 613 | struct timespec realtime_offset, wtm; | ||
| 614 | unsigned long seq; | ||
| 615 | |||
| 616 | if (!hrtimer_hres_active()) | ||
| 617 | return; | ||
| 618 | |||
| 619 | do { | ||
| 620 | seq = read_seqbegin(&xtime_lock); | ||
| 621 | wtm = __get_wall_to_monotonic(); | ||
| 622 | } while (read_seqretry(&xtime_lock, seq)); | ||
| 623 | set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec); | ||
| 624 | |||
| 625 | base = &__get_cpu_var(hrtimer_bases); | ||
| 626 | |||
| 627 | /* Adjust CLOCK_REALTIME offset */ | ||
| 628 | raw_spin_lock(&base->lock); | ||
| 629 | base->clock_base[CLOCK_REALTIME].offset = | ||
| 630 | timespec_to_ktime(realtime_offset); | ||
| 631 | |||
| 632 | hrtimer_force_reprogram(base, 0); | ||
| 633 | raw_spin_unlock(&base->lock); | ||
| 634 | } | ||
| 635 | |||
| 636 | /* | ||
| 637 | * Clock realtime was set | ||
| 638 | * | ||
| 639 | * Change the offset of the realtime clock vs. the monotonic | ||
| 640 | * clock. | ||
| 641 | * | ||
| 642 | * We might have to reprogram the high resolution timer interrupt. On | ||
| 643 | * SMP we call the architecture specific code to retrigger _all_ high | ||
| 644 | * resolution timer interrupts. On UP we just disable interrupts and | ||
| 645 | * call the high resolution interrupt code. | ||
| 646 | */ | ||
| 647 | void clock_was_set(void) | ||
| 648 | { | ||
| 649 | /* Retrigger the CPU local events everywhere */ | ||
| 650 | on_each_cpu(retrigger_next_event, NULL, 1); | ||
| 651 | } | ||
| 652 | |||
| 653 | /* | ||
| 654 | * During resume we might have to reprogram the high resolution timer | ||
| 655 | * interrupt (on the local CPU): | ||
| 656 | */ | ||
| 657 | void hres_timers_resume(void) | ||
| 658 | { | ||
| 659 | WARN_ONCE(!irqs_disabled(), | ||
| 660 | KERN_INFO "hres_timers_resume() called with IRQs enabled!"); | ||
| 661 | |||
| 662 | retrigger_next_event(NULL); | ||
| 663 | } | ||
| 664 | |||
| 665 | /* | 629 | /* |
| 666 | * Initialize the high resolution related parts of cpu_base | 630 | * Initialize the high resolution related parts of cpu_base |
| 667 | */ | 631 | */ |
| @@ -672,14 +636,6 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) | |||
| 672 | } | 636 | } |
| 673 | 637 | ||
| 674 | /* | 638 | /* |
| 675 | * Initialize the high resolution related parts of a hrtimer | ||
| 676 | */ | ||
| 677 | static inline void hrtimer_init_timer_hres(struct hrtimer *timer) | ||
| 678 | { | ||
| 679 | } | ||
| 680 | |||
| 681 | |||
| 682 | /* | ||
| 683 | * When High resolution timers are active, try to reprogram. Note, that in case | 639 | * When High resolution timers are active, try to reprogram. Note, that in case |
| 684 | * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry | 640 | * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry |
| 685 | * check happens. The timer gets enqueued into the rbtree. The reprogramming | 641 | * check happens. The timer gets enqueued into the rbtree. The reprogramming |
| @@ -704,11 +660,39 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, | |||
| 704 | } | 660 | } |
| 705 | 661 | ||
| 706 | /* | 662 | /* |
| 663 | * Retrigger next event is called after clock was set | ||
| 664 | * | ||
| 665 | * Called with interrupts disabled via on_each_cpu() | ||
| 666 | */ | ||
| 667 | static void retrigger_next_event(void *arg) | ||
| 668 | { | ||
| 669 | struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases); | ||
| 670 | struct timespec realtime_offset, xtim, wtm, sleep; | ||
| 671 | |||
| 672 | if (!hrtimer_hres_active()) | ||
| 673 | return; | ||
| 674 | |||
| 675 | /* Optimized out for !HIGH_RES */ | ||
| 676 | get_xtime_and_monotonic_and_sleep_offset(&xtim, &wtm, &sleep); | ||
| 677 | set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec); | ||
| 678 | |||
| 679 | /* Adjust CLOCK_REALTIME offset */ | ||
| 680 | raw_spin_lock(&base->lock); | ||
| 681 | base->clock_base[HRTIMER_BASE_REALTIME].offset = | ||
| 682 | timespec_to_ktime(realtime_offset); | ||
| 683 | base->clock_base[HRTIMER_BASE_BOOTTIME].offset = | ||
| 684 | timespec_to_ktime(sleep); | ||
| 685 | |||
| 686 | hrtimer_force_reprogram(base, 0); | ||
| 687 | raw_spin_unlock(&base->lock); | ||
| 688 | } | ||
| 689 | |||
| 690 | /* | ||
| 707 | * Switch to high resolution mode | 691 | * Switch to high resolution mode |
| 708 | */ | 692 | */ |
| 709 | static int hrtimer_switch_to_hres(void) | 693 | static int hrtimer_switch_to_hres(void) |
| 710 | { | 694 | { |
| 711 | int cpu = smp_processor_id(); | 695 | int i, cpu = smp_processor_id(); |
| 712 | struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu); | 696 | struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu); |
| 713 | unsigned long flags; | 697 | unsigned long flags; |
| 714 | 698 | ||
| @@ -724,8 +708,8 @@ static int hrtimer_switch_to_hres(void) | |||
| 724 | return 0; | 708 | return 0; |
| 725 | } | 709 | } |
| 726 | base->hres_active = 1; | 710 | base->hres_active = 1; |
| 727 | base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES; | 711 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) |
| 728 | base->clock_base[CLOCK_MONOTONIC].resolution = KTIME_HIGH_RES; | 712 | base->clock_base[i].resolution = KTIME_HIGH_RES; |
| 729 | 713 | ||
| 730 | tick_setup_sched_timer(); | 714 | tick_setup_sched_timer(); |
| 731 | 715 | ||
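
Note: in the re-added retrigger_next_event() above, the CLOCK_REALTIME offset is rebuilt by negating wall_to_monotonic, and the CLOCK_BOOTTIME offset from the accumulated sleep time; set_normalized_timespec() keeps the nanosecond field in range after the sign flip. A stand-alone sketch of that normalization step, with illustrative types and sample values:

```c
#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

struct ts { long sec; long nsec; };

/* Normalize a (sec, nsec) pair so that 0 <= nsec < 1e9, which is what the
 * kernel's set_normalized_timespec() guarantees after the negation. */
static struct ts normalized(long sec, long nsec)
{
	while (nsec >= NSEC_PER_SEC) { nsec -= NSEC_PER_SEC; sec++; }
	while (nsec < 0)             { nsec += NSEC_PER_SEC; sec--; }
	return (struct ts){ sec, nsec };
}

int main(void)
{
	/* wall_to_monotonic = { -1000, -250000000 }  =>  realtime offset = +1000.25 s */
	struct ts wtm = { -1000, -250000000 };
	struct ts off = normalized(-wtm.sec, -wtm.nsec);
	printf("%ld.%09ld\n", off.sec, off.nsec);   /* prints 1000.250000000 */
	return 0;
}
```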
| @@ -749,10 +733,43 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, | |||
| 749 | return 0; | 733 | return 0; |
| 750 | } | 734 | } |
| 751 | static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { } | 735 | static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { } |
| 752 | static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { } | 736 | static inline void retrigger_next_event(void *arg) { } |
| 753 | 737 | ||
| 754 | #endif /* CONFIG_HIGH_RES_TIMERS */ | 738 | #endif /* CONFIG_HIGH_RES_TIMERS */ |
| 755 | 739 | ||
| 740 | /* | ||
| 741 | * Clock realtime was set | ||
| 742 | * | ||
| 743 | * Change the offset of the realtime clock vs. the monotonic | ||
| 744 | * clock. | ||
| 745 | * | ||
| 746 | * We might have to reprogram the high resolution timer interrupt. On | ||
| 747 | * SMP we call the architecture specific code to retrigger _all_ high | ||
| 748 | * resolution timer interrupts. On UP we just disable interrupts and | ||
| 749 | * call the high resolution interrupt code. | ||
| 750 | */ | ||
| 751 | void clock_was_set(void) | ||
| 752 | { | ||
| 753 | #ifdef CONFIG_HIGH_RES_TIMERS | ||
| 754 | /* Retrigger the CPU local events everywhere */ | ||
| 755 | on_each_cpu(retrigger_next_event, NULL, 1); | ||
| 756 | #endif | ||
| 757 | timerfd_clock_was_set(); | ||
| 758 | } | ||
| 759 | |||
| 760 | /* | ||
| 761 | * During resume we might have to reprogram the high resolution timer | ||
| 762 | * interrupt (on the local CPU): | ||
| 763 | */ | ||
| 764 | void hrtimers_resume(void) | ||
| 765 | { | ||
| 766 | WARN_ONCE(!irqs_disabled(), | ||
| 767 | KERN_INFO "hrtimers_resume() called with IRQs enabled!"); | ||
| 768 | |||
| 769 | retrigger_next_event(NULL); | ||
| 770 | timerfd_clock_was_set(); | ||
| 771 | } | ||
| 772 | |||
| 756 | static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer) | 773 | static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer) |
| 757 | { | 774 | { |
| 758 | #ifdef CONFIG_TIMER_STATS | 775 | #ifdef CONFIG_TIMER_STATS |
| @@ -842,48 +859,18 @@ EXPORT_SYMBOL_GPL(hrtimer_forward); | |||
| 842 | static int enqueue_hrtimer(struct hrtimer *timer, | 859 | static int enqueue_hrtimer(struct hrtimer *timer, |
| 843 | struct hrtimer_clock_base *base) | 860 | struct hrtimer_clock_base *base) |
| 844 | { | 861 | { |
| 845 | struct rb_node **link = &base->active.rb_node; | ||
| 846 | struct rb_node *parent = NULL; | ||
| 847 | struct hrtimer *entry; | ||
| 848 | int leftmost = 1; | ||
| 849 | |||
| 850 | debug_activate(timer); | 862 | debug_activate(timer); |
| 851 | 863 | ||
| 852 | /* | 864 | timerqueue_add(&base->active, &timer->node); |
| 853 | * Find the right place in the rbtree: | 865 | base->cpu_base->active_bases |= 1 << base->index; |
| 854 | */ | ||
| 855 | while (*link) { | ||
| 856 | parent = *link; | ||
| 857 | entry = rb_entry(parent, struct hrtimer, node); | ||
| 858 | /* | ||
| 859 | * We dont care about collisions. Nodes with | ||
| 860 | * the same expiry time stay together. | ||
| 861 | */ | ||
| 862 | if (hrtimer_get_expires_tv64(timer) < | ||
| 863 | hrtimer_get_expires_tv64(entry)) { | ||
| 864 | link = &(*link)->rb_left; | ||
| 865 | } else { | ||
| 866 | link = &(*link)->rb_right; | ||
| 867 | leftmost = 0; | ||
| 868 | } | ||
| 869 | } | ||
| 870 | |||
| 871 | /* | ||
| 872 | * Insert the timer to the rbtree and check whether it | ||
| 873 | * replaces the first pending timer | ||
| 874 | */ | ||
| 875 | if (leftmost) | ||
| 876 | base->first = &timer->node; | ||
| 877 | 866 | ||
| 878 | rb_link_node(&timer->node, parent, link); | ||
| 879 | rb_insert_color(&timer->node, &base->active); | ||
| 880 | /* | 867 | /* |
| 881 | * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the | 868 | * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the |
| 882 | * state of a possibly running callback. | 869 | * state of a possibly running callback. |
| 883 | */ | 870 | */ |
| 884 | timer->state |= HRTIMER_STATE_ENQUEUED; | 871 | timer->state |= HRTIMER_STATE_ENQUEUED; |
| 885 | 872 | ||
| 886 | return leftmost; | 873 | return (&timer->node == base->active.next); |
| 887 | } | 874 | } |
| 888 | 875 | ||
| 889 | /* | 876 | /* |
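
Note: enqueue_hrtimer() now delegates ordering to the generic timerqueue, which caches the earliest node, and records that the base is non-empty in cpu_base->active_bases. The sketch below mimics that pattern with a plain sorted list in place of the kernel's rb-tree-backed timerqueue; all names are illustrative, not kernel API.

```c
#include <stdint.h>

struct tq_node {
	uint64_t expires;
	struct tq_node *next;
};

struct tq_head {
	struct tq_node *first;   /* cached earliest node, like timerqueue_getnext() */
};

struct fake_cpu_base {
	unsigned int active_bases;          /* bit i set => base i has timers */
	struct tq_head base[3];
};

/* Insert in expiry order; return nonzero if the new node became the head,
 * i.e. the caller may need to reprogram the timer hardware. */
static int tq_add(struct tq_head *h, struct tq_node *n)
{
	struct tq_node **link = &h->first;

	while (*link && (*link)->expires <= n->expires)
		link = &(*link)->next;
	n->next = *link;
	*link = n;
	return h->first == n;
}

static int enqueue(struct fake_cpu_base *cpu, int basenum, struct tq_node *n)
{
	int leftmost = tq_add(&cpu->base[basenum], n);

	cpu->active_bases |= 1u << basenum;
	return leftmost;
}
```

The matching removal path in the next hunk clears the base's bit again once its queue runs empty, so later scans never have to touch idle bases.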
| @@ -903,12 +890,7 @@ static void __remove_hrtimer(struct hrtimer *timer, | |||
| 903 | if (!(timer->state & HRTIMER_STATE_ENQUEUED)) | 890 | if (!(timer->state & HRTIMER_STATE_ENQUEUED)) |
| 904 | goto out; | 891 | goto out; |
| 905 | 892 | ||
| 906 | /* | 893 | if (&timer->node == timerqueue_getnext(&base->active)) { |
| 907 | * Remove the timer from the rbtree and replace the first | ||
| 908 | * entry pointer if necessary. | ||
| 909 | */ | ||
| 910 | if (base->first == &timer->node) { | ||
| 911 | base->first = rb_next(&timer->node); | ||
| 912 | #ifdef CONFIG_HIGH_RES_TIMERS | 894 | #ifdef CONFIG_HIGH_RES_TIMERS |
| 913 | /* Reprogram the clock event device. if enabled */ | 895 | /* Reprogram the clock event device. if enabled */ |
| 914 | if (reprogram && hrtimer_hres_active()) { | 896 | if (reprogram && hrtimer_hres_active()) { |
| @@ -921,7 +903,9 @@ static void __remove_hrtimer(struct hrtimer *timer, | |||
| 921 | } | 903 | } |
| 922 | #endif | 904 | #endif |
| 923 | } | 905 | } |
| 924 | rb_erase(&timer->node, &base->active); | 906 | timerqueue_del(&base->active, &timer->node); |
| 907 | if (!timerqueue_getnext(&base->active)) | ||
| 908 | base->cpu_base->active_bases &= ~(1 << base->index); | ||
| 925 | out: | 909 | out: |
| 926 | timer->state = newstate; | 910 | timer->state = newstate; |
| 927 | } | 911 | } |
| @@ -1244,11 +1228,13 @@ ktime_t hrtimer_get_next_event(void) | |||
| 1244 | if (!hrtimer_hres_active()) { | 1228 | if (!hrtimer_hres_active()) { |
| 1245 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { | 1229 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { |
| 1246 | struct hrtimer *timer; | 1230 | struct hrtimer *timer; |
| 1231 | struct timerqueue_node *next; | ||
| 1247 | 1232 | ||
| 1248 | if (!base->first) | 1233 | next = timerqueue_getnext(&base->active); |
| 1234 | if (!next) | ||
| 1249 | continue; | 1235 | continue; |
| 1250 | 1236 | ||
| 1251 | timer = rb_entry(base->first, struct hrtimer, node); | 1237 | timer = container_of(next, struct hrtimer, node); |
| 1252 | delta.tv64 = hrtimer_get_expires_tv64(timer); | 1238 | delta.tv64 = hrtimer_get_expires_tv64(timer); |
| 1253 | delta = ktime_sub(delta, base->get_time()); | 1239 | delta = ktime_sub(delta, base->get_time()); |
| 1254 | if (delta.tv64 < mindelta.tv64) | 1240 | if (delta.tv64 < mindelta.tv64) |
| @@ -1268,6 +1254,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, | |||
| 1268 | enum hrtimer_mode mode) | 1254 | enum hrtimer_mode mode) |
| 1269 | { | 1255 | { |
| 1270 | struct hrtimer_cpu_base *cpu_base; | 1256 | struct hrtimer_cpu_base *cpu_base; |
| 1257 | int base; | ||
| 1271 | 1258 | ||
| 1272 | memset(timer, 0, sizeof(struct hrtimer)); | 1259 | memset(timer, 0, sizeof(struct hrtimer)); |
| 1273 | 1260 | ||
| @@ -1276,8 +1263,9 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, | |||
| 1276 | if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS) | 1263 | if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS) |
| 1277 | clock_id = CLOCK_MONOTONIC; | 1264 | clock_id = CLOCK_MONOTONIC; |
| 1278 | 1265 | ||
| 1279 | timer->base = &cpu_base->clock_base[clock_id]; | 1266 | base = hrtimer_clockid_to_base(clock_id); |
| 1280 | hrtimer_init_timer_hres(timer); | 1267 | timer->base = &cpu_base->clock_base[base]; |
| 1268 | timerqueue_init(&timer->node); | ||
| 1281 | 1269 | ||
| 1282 | #ifdef CONFIG_TIMER_STATS | 1270 | #ifdef CONFIG_TIMER_STATS |
| 1283 | timer->start_site = NULL; | 1271 | timer->start_site = NULL; |
| @@ -1311,9 +1299,10 @@ EXPORT_SYMBOL_GPL(hrtimer_init); | |||
| 1311 | int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp) | 1299 | int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp) |
| 1312 | { | 1300 | { |
| 1313 | struct hrtimer_cpu_base *cpu_base; | 1301 | struct hrtimer_cpu_base *cpu_base; |
| 1302 | int base = hrtimer_clockid_to_base(which_clock); | ||
| 1314 | 1303 | ||
| 1315 | cpu_base = &__raw_get_cpu_var(hrtimer_bases); | 1304 | cpu_base = &__raw_get_cpu_var(hrtimer_bases); |
| 1316 | *tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution); | 1305 | *tp = ktime_to_timespec(cpu_base->clock_base[base].resolution); |
| 1317 | 1306 | ||
| 1318 | return 0; | 1307 | return 0; |
| 1319 | } | 1308 | } |
| @@ -1368,7 +1357,6 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now) | |||
| 1368 | void hrtimer_interrupt(struct clock_event_device *dev) | 1357 | void hrtimer_interrupt(struct clock_event_device *dev) |
| 1369 | { | 1358 | { |
| 1370 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); | 1359 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); |
| 1371 | struct hrtimer_clock_base *base; | ||
| 1372 | ktime_t expires_next, now, entry_time, delta; | 1360 | ktime_t expires_next, now, entry_time, delta; |
| 1373 | int i, retries = 0; | 1361 | int i, retries = 0; |
| 1374 | 1362 | ||
| @@ -1390,18 +1378,21 @@ retry: | |||
| 1390 | */ | 1378 | */ |
| 1391 | cpu_base->expires_next.tv64 = KTIME_MAX; | 1379 | cpu_base->expires_next.tv64 = KTIME_MAX; |
| 1392 | 1380 | ||
| 1393 | base = cpu_base->clock_base; | ||
| 1394 | |||
| 1395 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { | 1381 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { |
| 1382 | struct hrtimer_clock_base *base; | ||
| 1383 | struct timerqueue_node *node; | ||
| 1396 | ktime_t basenow; | 1384 | ktime_t basenow; |
| 1397 | struct rb_node *node; | ||
| 1398 | 1385 | ||
| 1386 | if (!(cpu_base->active_bases & (1 << i))) | ||
| 1387 | continue; | ||
| 1388 | |||
| 1389 | base = cpu_base->clock_base + i; | ||
| 1399 | basenow = ktime_add(now, base->offset); | 1390 | basenow = ktime_add(now, base->offset); |
| 1400 | 1391 | ||
| 1401 | while ((node = base->first)) { | 1392 | while ((node = timerqueue_getnext(&base->active))) { |
| 1402 | struct hrtimer *timer; | 1393 | struct hrtimer *timer; |
| 1403 | 1394 | ||
| 1404 | timer = rb_entry(node, struct hrtimer, node); | 1395 | timer = container_of(node, struct hrtimer, node); |
| 1405 | 1396 | ||
| 1406 | /* | 1397 | /* |
| 1407 | * The immediate goal for using the softexpires is | 1398 | * The immediate goal for using the softexpires is |
| @@ -1428,7 +1419,6 @@ retry: | |||
| 1428 | 1419 | ||
| 1429 | __run_hrtimer(timer, &basenow); | 1420 | __run_hrtimer(timer, &basenow); |
| 1430 | } | 1421 | } |
| 1431 | base++; | ||
| 1432 | } | 1422 | } |
| 1433 | 1423 | ||
| 1434 | /* | 1424 | /* |
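
Note: with active_bases maintained by the enqueue/remove paths, hrtimer_interrupt() can skip empty clock bases outright and only translate the monotonic 'now' into each active base's time domain. A minimal sketch of that skip-and-offset loop; the field names echo the diff, everything else is a stand-in.

```c
#include <stdint.h>

#define NUM_BASES 3

struct clock_base_view {
	int64_t offset;              /* base time minus monotonic time */
	/* ... queue of timers for this base ... */
};

struct cpu_base_view {
	unsigned int active_bases;
	struct clock_base_view clock_base[NUM_BASES];
};

static void expire_base(struct clock_base_view *base, int64_t basenow)
{
	/* placeholder: pop and run every timer whose expiry <= basenow */
	(void)base;
	(void)basenow;
}

/* 'now' is monotonic; each base compares timers against its own clock. */
static void run_expired(struct cpu_base_view *cpu, int64_t now)
{
	for (int i = 0; i < NUM_BASES; i++) {
		if (!(cpu->active_bases & (1u << i)))
			continue;                 /* base has no queued timers */

		/* Convert monotonic 'now' into this base's time domain. */
		int64_t basenow = now + cpu->clock_base[i].offset;

		expire_base(&cpu->clock_base[i], basenow);
	}
}
```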
| @@ -1557,7 +1547,7 @@ void hrtimer_run_pending(void) | |||
| 1557 | */ | 1547 | */ |
| 1558 | void hrtimer_run_queues(void) | 1548 | void hrtimer_run_queues(void) |
| 1559 | { | 1549 | { |
| 1560 | struct rb_node *node; | 1550 | struct timerqueue_node *node; |
| 1561 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); | 1551 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); |
| 1562 | struct hrtimer_clock_base *base; | 1552 | struct hrtimer_clock_base *base; |
| 1563 | int index, gettime = 1; | 1553 | int index, gettime = 1; |
| @@ -1567,8 +1557,7 @@ void hrtimer_run_queues(void) | |||
| 1567 | 1557 | ||
| 1568 | for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) { | 1558 | for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) { |
| 1569 | base = &cpu_base->clock_base[index]; | 1559 | base = &cpu_base->clock_base[index]; |
| 1570 | 1560 | if (!timerqueue_getnext(&base->active)) | |
| 1571 | if (!base->first) | ||
| 1572 | continue; | 1561 | continue; |
| 1573 | 1562 | ||
| 1574 | if (gettime) { | 1563 | if (gettime) { |
| @@ -1578,10 +1567,10 @@ void hrtimer_run_queues(void) | |||
| 1578 | 1567 | ||
| 1579 | raw_spin_lock(&cpu_base->lock); | 1568 | raw_spin_lock(&cpu_base->lock); |
| 1580 | 1569 | ||
| 1581 | while ((node = base->first)) { | 1570 | while ((node = timerqueue_getnext(&base->active))) { |
| 1582 | struct hrtimer *timer; | 1571 | struct hrtimer *timer; |
| 1583 | 1572 | ||
| 1584 | timer = rb_entry(node, struct hrtimer, node); | 1573 | timer = container_of(node, struct hrtimer, node); |
| 1585 | if (base->softirq_time.tv64 <= | 1574 | if (base->softirq_time.tv64 <= |
| 1586 | hrtimer_get_expires_tv64(timer)) | 1575 | hrtimer_get_expires_tv64(timer)) |
| 1587 | break; | 1576 | break; |
| @@ -1660,7 +1649,7 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart) | |||
| 1660 | struct timespec __user *rmtp; | 1649 | struct timespec __user *rmtp; |
| 1661 | int ret = 0; | 1650 | int ret = 0; |
| 1662 | 1651 | ||
| 1663 | hrtimer_init_on_stack(&t.timer, restart->nanosleep.index, | 1652 | hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid, |
| 1664 | HRTIMER_MODE_ABS); | 1653 | HRTIMER_MODE_ABS); |
| 1665 | hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); | 1654 | hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); |
| 1666 | 1655 | ||
| @@ -1712,7 +1701,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, | |||
| 1712 | 1701 | ||
| 1713 | restart = ¤t_thread_info()->restart_block; | 1702 | restart = ¤t_thread_info()->restart_block; |
| 1714 | restart->fn = hrtimer_nanosleep_restart; | 1703 | restart->fn = hrtimer_nanosleep_restart; |
| 1715 | restart->nanosleep.index = t.timer.base->index; | 1704 | restart->nanosleep.clockid = t.timer.base->clockid; |
| 1716 | restart->nanosleep.rmtp = rmtp; | 1705 | restart->nanosleep.rmtp = rmtp; |
| 1717 | restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer); | 1706 | restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer); |
| 1718 | 1707 | ||
| @@ -1746,8 +1735,10 @@ static void __cpuinit init_hrtimers_cpu(int cpu) | |||
| 1746 | 1735 | ||
| 1747 | raw_spin_lock_init(&cpu_base->lock); | 1736 | raw_spin_lock_init(&cpu_base->lock); |
| 1748 | 1737 | ||
| 1749 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) | 1738 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { |
| 1750 | cpu_base->clock_base[i].cpu_base = cpu_base; | 1739 | cpu_base->clock_base[i].cpu_base = cpu_base; |
| 1740 | timerqueue_init_head(&cpu_base->clock_base[i].active); | ||
| 1741 | } | ||
| 1751 | 1742 | ||
| 1752 | hrtimer_init_hres(cpu_base); | 1743 | hrtimer_init_hres(cpu_base); |
| 1753 | INIT_LIST_HEAD(&cpu_base->to_pull); | 1744 | INIT_LIST_HEAD(&cpu_base->to_pull); |
| @@ -1759,10 +1750,10 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base, | |||
| 1759 | struct hrtimer_clock_base *new_base) | 1750 | struct hrtimer_clock_base *new_base) |
| 1760 | { | 1751 | { |
| 1761 | struct hrtimer *timer; | 1752 | struct hrtimer *timer; |
| 1762 | struct rb_node *node; | 1753 | struct timerqueue_node *node; |
| 1763 | 1754 | ||
| 1764 | while ((node = rb_first(&old_base->active))) { | 1755 | while ((node = timerqueue_getnext(&old_base->active))) { |
| 1765 | timer = rb_entry(node, struct hrtimer, node); | 1756 | timer = container_of(node, struct hrtimer, node); |
| 1766 | BUG_ON(hrtimer_callback_running(timer)); | 1757 | BUG_ON(hrtimer_callback_running(timer)); |
| 1767 | debug_deactivate(timer); | 1758 | debug_deactivate(timer); |
| 1768 | 1759 | ||
| @@ -1891,7 +1882,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta, | |||
| 1891 | } | 1882 | } |
| 1892 | 1883 | ||
| 1893 | /* | 1884 | /* |
| 1894 | * A NULL parameter means "inifinte" | 1885 | * A NULL parameter means "infinite" |
| 1895 | */ | 1886 | */ |
| 1896 | if (!expires) { | 1887 | if (!expires) { |
| 1897 | schedule(); | 1888 | schedule(); |
