Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--   kernel/hrtimer.c   41
1 files changed, 21 insertions, 20 deletions
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 6db7a5ed52b5..14be27feda49 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -44,6 +44,8 @@
 #include <linux/err.h>
 #include <linux/debugobjects.h>
 #include <linux/sched.h>
+#include <linux/sched/sysctl.h>
+#include <linux/sched/rt.h>
 #include <linux/timer.h>
 
 #include <asm/uaccess.h>
@@ -61,6 +63,7 @@
 DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
 {
 
+	.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
 	.clock_base =
 	{
 		{
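The .lock member is now initialized statically at build time via __RAW_SPIN_LOCK_UNLOCKED(), which is what lets the last hunk of this diff drop the runtime raw_spin_lock_init() call from init_hrtimers_cpu(). A minimal sketch of the two initialization styles for a raw spinlock (the example_lock name is illustrative and not part of this patch):

    #include <linux/spinlock.h>

    /* build-time initialization, the style used for hrtimer_bases.lock above */
    static raw_spinlock_t example_lock = __RAW_SPIN_LOCK_UNLOCKED(example_lock);

    /* runtime initialization, the style removed from init_hrtimers_cpu() below */
    static void example_lock_setup(raw_spinlock_t *lock)
    {
            raw_spin_lock_init(lock);
    }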
@@ -640,21 +643,9 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
  * and expiry check is done in the hrtimer_interrupt or in the softirq.
  */
 static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
-					    struct hrtimer_clock_base *base,
-					    int wakeup)
+					    struct hrtimer_clock_base *base)
 {
-	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
-		if (wakeup) {
-			raw_spin_unlock(&base->cpu_base->lock);
-			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-			raw_spin_lock(&base->cpu_base->lock);
-		} else
-			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-
-		return 1;
-	}
-
-	return 0;
+	return base->cpu_base->hres_active && hrtimer_reprogram(timer, base);
 }
 
 static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
@@ -735,8 +726,7 @@ static inline int hrtimer_switch_to_hres(void) { return 0; }
 static inline void
 hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
 static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
-					    struct hrtimer_clock_base *base,
-					    int wakeup)
+					    struct hrtimer_clock_base *base)
 {
 	return 0;
 }
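After these two hunks both variants of hrtimer_enqueue_reprogram() share the shorter two-argument signature. Reconstructed from the '+' lines above, the high-resolution variant is now a pure predicate; raising HRTIMER_SOFTIRQ is left entirely to the caller:

    static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
                                                struct hrtimer_clock_base *base)
    {
            return base->cpu_base->hres_active && hrtimer_reprogram(timer, base);
    }

The !CONFIG_HIGH_RES_TIMERS stub keeps returning 0, so the softirq path added to __hrtimer_start_range_ns() in the next hunk is never taken on such configurations.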
@@ -995,8 +985,21 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 	 *
 	 * XXX send_remote_softirq() ?
 	 */
-	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
-		hrtimer_enqueue_reprogram(timer, new_base, wakeup);
+	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
+		&& hrtimer_enqueue_reprogram(timer, new_base)) {
+		if (wakeup) {
+			/*
+			 * We need to drop cpu_base->lock to avoid a
+			 * lock ordering issue vs. rq->lock.
+			 */
+			raw_spin_unlock(&new_base->cpu_base->lock);
+			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+			local_irq_restore(flags);
+			return ret;
+		} else {
+			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+		}
+	}
 
 	unlock_hrtimer_base(timer, &flags);
 
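The unlock/raise sequence that used to live inside hrtimer_enqueue_reprogram() now sits at the call site in __hrtimer_start_range_ns(). Both snippets below are reconstructed from the hunks above and are shown only to make the control-flow change visible. Previously the helper dropped cpu_base->lock (to avoid the lock ordering issue vs. rq->lock noted in the new comment), raised the softirq and then re-acquired the lock before returning to its caller:

    /* old: inside hrtimer_enqueue_reprogram(), wakeup case */
    raw_spin_unlock(&base->cpu_base->lock);
    raise_softirq_irqoff(HRTIMER_SOFTIRQ);
    raw_spin_lock(&base->cpu_base->lock);

With this hunk, __hrtimer_start_range_ns() drops the lock once, raises the softirq, restores interrupts and returns immediately, so the base lock is never re-taken after having been dropped:

    /* new: in __hrtimer_start_range_ns(), wakeup case */
    raw_spin_unlock(&new_base->cpu_base->lock);
    raise_softirq_irqoff(HRTIMER_SOFTIRQ);
    local_irq_restore(flags);
    return ret;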
@@ -1640,8 +1643,6 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
 	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
 	int i;
 
-	raw_spin_lock_init(&cpu_base->lock);
-
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
 		cpu_base->clock_base[i].cpu_base = cpu_base;
 		timerqueue_init_head(&cpu_base->clock_base[i].active);