 kernel/sched.c      |  1 +
 kernel/sched_fair.c | 15 +++++++--------
 2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index eb3454c410fa..7d282c52bd42 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -375,6 +375,7 @@ struct cfs_rq {
 
 	u64 exec_clock;
 	u64 min_vruntime;
+	u64 pair_start;
 
 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 183388c4dead..509092af0330 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -63,13 +63,13 @@ unsigned int __read_mostly sysctl_sched_compat_yield;
 
 /*
  * SCHED_OTHER wake-up granularity.
- * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ * (default: 5 msec * (1 + ilog(ncpus)), units: nanoseconds)
  *
  * This option delays the preemption effects of decoupled workloads
  * and reduces their over-scheduling. Synchronous workloads will still
  * have immediate wakeup/sleep latencies.
  */
-unsigned int sysctl_sched_wakeup_granularity = 10000000UL;
+unsigned int sysctl_sched_wakeup_granularity = 5000000UL;
 
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
 
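The comment above states the effective default as 5 msec scaled by (1 + ilog(ncpus)). As a rough standalone illustration of that arithmetic (not kernel code): ilog2_u32 below is a hypothetical helper standing in for the kernel's ilog2(), and base_ns mirrors the new sysctl_sched_wakeup_granularity default; everything else is illustrative. On an 8-CPU box, for example, this gives 5 ms * (1 + 3) = 20 ms.

#include <stdio.h>

/* Integer log2, standing in for the kernel's ilog2() (assumption). */
static unsigned int ilog2_u32(unsigned int n)
{
	unsigned int log = 0;

	while (n >>= 1)
		log++;
	return log;
}

int main(void)
{
	const unsigned long base_ns = 5000000UL;	/* new 5 msec default, in nanoseconds */
	unsigned int ncpus;

	for (ncpus = 1; ncpus <= 16; ncpus *= 2)
		printf("%2u cpus -> effective wakeup granularity %lu ns\n",
		       ncpus, base_ns * (1 + ilog2_u32(ncpus)));
	return 0;
}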
@@ -813,17 +813,16 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
 }
 
-static int
-wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
-
 static struct sched_entity *
 pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	if (!cfs_rq->next)
-		return se;
+	struct rq *rq = rq_of(cfs_rq);
+	u64 pair_slice = rq->clock - cfs_rq->pair_start;
 
-	if (wakeup_preempt_entity(cfs_rq->next, se) != 0)
+	if (!cfs_rq->next || pair_slice > sched_slice(cfs_rq, cfs_rq->next)) {
+		cfs_rq->pair_start = rq->clock;
 		return se;
+	}
 
 	return cfs_rq->next;
 }
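The reworked pick_next() keeps returning the cached cfs_rq->next buddy only while it has been favoured for no more than one sched_slice() worth of rq->clock time since pair_start; once that window is exceeded (or there is no buddy), it resets pair_start and falls back to the entity the caller would otherwise pick (se). Below is a minimal userspace sketch of just that decision, under stated assumptions: fake_cfs_rq, pick_buddy, and the fixed clock/slice values are hypothetical stand-ins for rq->clock and sched_slice(), used only to show when the buddy stops being preferred.

#include <stdio.h>
#include <stdbool.h>

struct fake_cfs_rq {
	bool have_next;			/* stands in for cfs_rq->next != NULL */
	unsigned long long pair_start;	/* nanosecond timestamp, like cfs_rq->pair_start */
};

/* Mirror of the new pick_next() test: true means keep favouring the buddy. */
static bool pick_buddy(struct fake_cfs_rq *cfs_rq,
		       unsigned long long clock, unsigned long long slice)
{
	unsigned long long pair_slice = clock - cfs_rq->pair_start;

	if (!cfs_rq->have_next || pair_slice > slice) {
		cfs_rq->pair_start = clock;	/* open a new pairing window */
		return false;			/* caller picks its usual entity (se) */
	}
	return true;
}

int main(void)
{
	struct fake_cfs_rq cfs_rq = { .have_next = true, .pair_start = 0 };
	const unsigned long long slice = 4000000ULL;	/* pretend sched_slice() returns 4 ms */
	unsigned long long clock;

	for (clock = 0; clock <= 10000000ULL; clock += 2000000ULL)
		printf("clock %8llu ns -> pick %s\n", clock,
		       pick_buddy(&cfs_rq, clock, slice) ? "next buddy" : "usual entity");
	return 0;
}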