author		Qian Cai <cai@lca.pw>	2019-08-20 14:40:55 -0400
committer	Ingo Molnar <mingo@kernel.org>	2019-09-25 11:42:31 -0400
commit		763a9ec06c409dcde2a761aac4bb83ff3938e0b3 (patch)
tree		a586544068ddf85c3bfa607cc5cd8f3a34ff1644 /kernel/sched
parent		714e501e16cd473538b609b3e351b2cc9f7f09ed (diff)
sched/fair: Fix -Wunused-but-set-variable warnings
Commit de53fd7aedb1 ("sched/fair: Fix low cpu usage with high throttling by
removing expiration of cpu-local slices") introduced a few compilation
warnings:

  kernel/sched/fair.c: In function '__refill_cfs_bandwidth_runtime':
  kernel/sched/fair.c:4365:6: warning: variable 'now' set but not used [-Wunused-but-set-variable]
  kernel/sched/fair.c: In function 'start_cfs_bandwidth':
  kernel/sched/fair.c:4992:6: warning: variable 'overrun' set but not used [-Wunused-but-set-variable]

Also, __refill_cfs_bandwidth_runtime() no longer updates the expiration
time, so fix the comments accordingly.

Signed-off-by: Qian Cai <cai@lca.pw>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Ben Segall <bsegall@google.com>
Reviewed-by: Dave Chiluk <chiluk+linux@indeed.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: pauld@redhat.com
Fixes: de53fd7aedb1 ("sched/fair: Fix low cpu usage with high throttling by removing expiration of cpu-local slices")
Link: https://lkml.kernel.org/r/1566326455-8038-1-git-send-email-cai@lca.pw
Signed-off-by: Ingo Molnar <mingo@kernel.org>
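For readers unfamiliar with this warning class, here is a minimal standalone
sketch (not part of the patch; the helper names and globals are hypothetical)
of the pattern that GCC's -Wunused-but-set-variable flags, and of the kind of
fix applied in this commit, namely dropping the dead store while leaving the
remaining behaviour unchanged:

    /* Hypothetical example, not from fair.c: build with
     *   gcc -Wall -c example.c
     * and GCC reports "variable 'now' set but not used". */
    #include <time.h>

    static long quota = 10;
    static long runtime;

    void refill_before(void)
    {
            long now;                       /* assigned below, never read */

            now = (long)time(NULL);         /* dead store triggers the warning */
            runtime = quota;
    }

    /* Fixed version, mirroring the patch: the unused local and its dead
     * store are removed, only the meaningful assignment remains. */
    void refill_after(void)
    {
            runtime = quota;
    }

The same reasoning applies to 'overrun' in start_cfs_bandwidth(): the return
value of hrtimer_forward_now() is not needed there, so the call is kept for
its side effect and the assignment is dropped.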
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/fair.c | 19 ++++++-------------
1 file changed, 6 insertions(+), 13 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5bc23996ffae..dfdac90fd211 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4353,21 +4353,16 @@ static inline u64 sched_cfs_bandwidth_slice(void)
 }
 
 /*
- * Replenish runtime according to assigned quota and update expiration time.
- * We use sched_clock_cpu directly instead of rq->clock to avoid adding
- * additional synchronization around rq->lock.
+ * Replenish runtime according to assigned quota. We use sched_clock_cpu
+ * directly instead of rq->clock to avoid adding additional synchronization
+ * around rq->lock.
  *
  * requires cfs_b->lock
  */
 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
 {
-	u64 now;
-
-	if (cfs_b->quota == RUNTIME_INF)
-		return;
-
-	now = sched_clock_cpu(smp_processor_id());
-	cfs_b->runtime = cfs_b->quota;
+	if (cfs_b->quota != RUNTIME_INF)
+		cfs_b->runtime = cfs_b->quota;
 }
 
 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
@@ -4983,15 +4978,13 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 
 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 {
-	u64 overrun;
-
 	lockdep_assert_held(&cfs_b->lock);
 
 	if (cfs_b->period_active)
 		return;
 
 	cfs_b->period_active = 1;
-	overrun = hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
+	hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
 	hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
 }
 