 kernel/sched/core.c  | 12
 kernel/sched/fair.c  |  2
 kernel/sched/rt.c    |  9
 kernel/sched/sched.h | 15
 4 files changed, 28 insertions(+), 10 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 46a2345f9f45..b53cc859fc4f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -119,7 +119,9 @@ void update_rq_clock(struct rq *rq)
 {
 	s64 delta;
 
-	if (rq->skip_clock_update > 0)
+	lockdep_assert_held(&rq->lock);
+
+	if (rq->clock_skip_update & RQCF_ACT_SKIP)
 		return;
 
 	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
@@ -1046,7 +1048,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * this case, we can save a useless back to back clock update.
 	 */
 	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
-		rq->skip_clock_update = 1;
+		rq_clock_skip_update(rq, true);
 }
 
 #ifdef CONFIG_SMP
@@ -2779,6 +2781,8 @@ need_resched:
 	smp_mb__before_spinlock();
 	raw_spin_lock_irq(&rq->lock);
 
+	rq->clock_skip_update <<= 1; /* promote REQ to ACT */
+
 	switch_count = &prev->nivcsw;
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
 		if (unlikely(signal_pending_state(prev->state, prev))) {
@@ -2803,13 +2807,13 @@ need_resched:
 		switch_count = &prev->nvcsw;
 	}
 
-	if (task_on_rq_queued(prev) || rq->skip_clock_update < 0)
+	if (task_on_rq_queued(prev))
 		update_rq_clock(rq);
 
 	next = pick_next_task(rq, prev);
 	clear_tsk_need_resched(prev);
 	clear_preempt_need_resched();
-	rq->skip_clock_update = 0;
+	rq->clock_skip_update = 0;
 
 	if (likely(prev != next)) {
 		rq->nr_switches++;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 50ff90289293..2ecf779829f5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5156,7 +5156,7 @@ static void yield_task_fair(struct rq *rq)
 		 * so we don't do microscopic update in schedule()
 		 * and double the fastpath cost.
 		 */
-		rq->skip_clock_update = 1;
+		rq_clock_skip_update(rq, true);
 	}
 
 	set_skip_buddy(se);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ee15f5a0d1c1..6725e3c49660 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -831,11 +831,14 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 				enqueue = 1;
 
 				/*
-				 * Force a clock update if the CPU was idle,
-				 * lest wakeup -> unthrottle time accumulate.
+				 * When we're idle and a woken (rt) task is
+				 * throttled check_preempt_curr() will set
+				 * skip_update and the time between the wakeup
+				 * and this unthrottle will get accounted as
+				 * 'runtime'.
 				 */
 				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
-					rq->skip_clock_update = -1;
+					rq_clock_skip_update(rq, false);
 			}
 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 				idle = 0;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index bd2373273a9e..0870db23d79c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -558,8 +558,6 @@ struct rq {
 #ifdef CONFIG_NO_HZ_FULL
 	unsigned long last_sched_tick;
 #endif
-	int skip_clock_update;
-
 	/* capture load from *all* tasks on this cpu: */
 	struct load_weight load;
 	unsigned long nr_load_updates;
@@ -588,6 +586,7 @@ struct rq {
 	unsigned long next_balance;
 	struct mm_struct *prev_mm;
 
+	unsigned int clock_skip_update;
 	u64 clock;
 	u64 clock_task;
 
@@ -704,6 +703,18 @@ static inline u64 rq_clock_task(struct rq *rq)
 	return rq->clock_task;
 }
 
+#define RQCF_REQ_SKIP	0x01
+#define RQCF_ACT_SKIP	0x02
+
+static inline void rq_clock_skip_update(struct rq *rq, bool skip)
+{
+	lockdep_assert_held(&rq->lock);
+	if (skip)
+		rq->clock_skip_update |= RQCF_REQ_SKIP;
+	else
+		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
+}
+
 #ifdef CONFIG_NUMA
 enum numa_topology_type {
 	NUMA_DIRECT,
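Taken together, the hunks above replace the old signed skip_clock_update tri-state with two bits: callers may only request a skip (RQCF_REQ_SKIP), schedule() promotes that request to RQCF_ACT_SKIP with a left shift, update_rq_clock() honours only the active bit, and both bits are cleared before the context switch. The following stand-alone C sketch is not kernel code; struct rq and the clock source are simplified stand-ins, but the flag protocol mirrors the patch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RQCF_REQ_SKIP	0x01
#define RQCF_ACT_SKIP	0x02

struct rq {
	unsigned int clock_skip_update;
	uint64_t clock;
};

static uint64_t fake_ns = 1000;	/* stand-in for sched_clock_cpu() */

static void rq_clock_skip_update(struct rq *rq, bool skip)
{
	if (skip)
		rq->clock_skip_update |= RQCF_REQ_SKIP;	/* request a skip */
	else
		rq->clock_skip_update &= ~RQCF_REQ_SKIP;	/* cancel a pending request */
}

static void update_rq_clock(struct rq *rq)
{
	if (rq->clock_skip_update & RQCF_ACT_SKIP)	/* only the ACT bit skips */
		return;
	rq->clock = fake_ns;
}

static void schedule_path(struct rq *rq)
{
	rq->clock_skip_update <<= 1;	/* promote REQ to ACT */
	update_rq_clock(rq);		/* skipped iff a skip was requested */
	rq->clock_skip_update = 0;	/* both bits cleared afterwards */
}

int main(void)
{
	struct rq rq = { 0, 0 };

	rq_clock_skip_update(&rq, true);	/* e.g. check_preempt_curr() */
	fake_ns = 2000;
	schedule_path(&rq);
	printf("after requested skip: clock=%llu\n",
	       (unsigned long long)rq.clock);	/* stays 0: update skipped */

	fake_ns = 3000;
	schedule_path(&rq);
	printf("without a request:    clock=%llu\n",
	       (unsigned long long)rq.clock);	/* 3000: update ran */
	return 0;
}

Running the sketch prints a clock of 0 after the requested skip and 3000 once no request is pending: a requested skip only takes effect inside schedule(), which is how check_preempt_curr() and yield_task_fair() avoid a back-to-back update, while do_sched_rt_period_timer() can cancel a stale request by passing skip == false.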