Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--  kernel/hrtimer.c | 38
1 file changed, 20 insertions(+), 18 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 6db7a5ed52b5..cc47812d3feb 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -44,6 +44,8 @@
 #include <linux/err.h>
 #include <linux/debugobjects.h>
 #include <linux/sched.h>
+#include <linux/sched/sysctl.h>
+#include <linux/sched/rt.h>
 #include <linux/timer.h>
 
 #include <asm/uaccess.h>
@@ -640,21 +642,9 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
  * and expiry check is done in the hrtimer_interrupt or in the softirq.
  */
 static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
-					    struct hrtimer_clock_base *base,
-					    int wakeup)
+					    struct hrtimer_clock_base *base)
 {
-	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
-		if (wakeup) {
-			raw_spin_unlock(&base->cpu_base->lock);
-			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-			raw_spin_lock(&base->cpu_base->lock);
-		} else
-			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-
-		return 1;
-	}
-
-	return 0;
+	return base->cpu_base->hres_active && hrtimer_reprogram(timer, base);
 }
 
 static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
@@ -735,8 +725,7 @@ static inline int hrtimer_switch_to_hres(void) { return 0; }
 static inline void
 hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
 static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
-					    struct hrtimer_clock_base *base,
-					    int wakeup)
+					    struct hrtimer_clock_base *base)
 {
 	return 0;
 }
@@ -995,8 +984,21 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 	 *
 	 * XXX send_remote_softirq() ?
 	 */
-	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
-		hrtimer_enqueue_reprogram(timer, new_base, wakeup);
+	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
+		&& hrtimer_enqueue_reprogram(timer, new_base)) {
+		if (wakeup) {
+			/*
+			 * We need to drop cpu_base->lock to avoid a
+			 * lock ordering issue vs. rq->lock.
+			 */
+			raw_spin_unlock(&new_base->cpu_base->lock);
+			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+			local_irq_restore(flags);
+			return ret;
+		} else {
+			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+		}
+	}
 
 	unlock_hrtimer_base(timer, &flags);
 
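Note on the last hunk: the comment added by the patch states that cpu_base->lock must be dropped before raising HRTIMER_SOFTIRQ, because the wakeup hidden behind raise_softirq_irqoff() can take rq->lock and the two locks are taken in the opposite order elsewhere. Instead of re-acquiring the base lock afterwards (as the removed hrtimer_enqueue_reprogram() body did), the new code restores interrupts and returns. The following is a minimal userspace sketch of that pattern only, not kernel code; lock_a, lock_b, wake_worker() and enqueue_and_wake() are illustrative stand-ins for cpu_base->lock, rq->lock, the softirq wakeup and the enqueue path.

/*
 * Userspace sketch of the locking pattern enforced by the patch above.
 * All names here are illustrative, not kernel APIs.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;	/* plays the role of cpu_base->lock */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;	/* plays the role of rq->lock */

/* The wakeup path acquires lock_b, like waking the softirq thread takes rq->lock. */
static void wake_worker(void)
{
	pthread_mutex_lock(&lock_b);
	/* ... mark the worker runnable ... */
	pthread_mutex_unlock(&lock_b);
}

/*
 * Other code may take lock_b first and then lock_a, so issuing the wakeup
 * while lock_a is held would create an A->B vs. B->A ordering conflict.
 * The pattern in the patch: finish the work under lock_a, drop it, issue
 * the wakeup, and return without re-acquiring lock_a.
 */
static void enqueue_and_wake(void)
{
	pthread_mutex_lock(&lock_a);
	/* ... enqueue the timer, decide that a wakeup is needed ... */
	pthread_mutex_unlock(&lock_a);	/* drop "cpu_base->lock" first */
	wake_worker();			/* only lock_b is held during the wakeup */
}

int main(void)
{
	enqueue_and_wake();
	printf("wakeup issued without holding lock_a\n");
	return 0;
}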