Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched_fair.c	69
1 file changed, 49 insertions(+), 20 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 86a93376282c..b01f8e77f2ac 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -73,13 +73,13 @@ unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;
 
 /*
  * SCHED_OTHER wake-up granularity.
- * (default: 5 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
  *
  * This option delays the preemption effects of decoupled workloads
  * and reduces their over-scheduling. Synchronous workloads will still
  * have immediate wakeup/sleep latencies.
  */
-unsigned int sysctl_sched_wakeup_granularity = 5000000UL;
+unsigned int sysctl_sched_wakeup_granularity = 10000000UL;
 
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
 
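Annotation (not part of the patch): the hunk above doubles the default wakeup granularity from 5 msec to 10 msec. As the comment notes, the base value is further scaled by (1 + ilog(ncpus)) at boot. A minimal userspace sketch of that formula, just to show the effective values; the helper name below is illustrative, not kernel code:

#include <stdio.h>

/* illustrative integer log2, standing in for the kernel's ilog2() */
static unsigned int ilog2_u32(unsigned int n)
{
	unsigned int log = 0;

	while (n >>= 1)
		log++;
	return log;
}

int main(void)
{
	const unsigned int base_ns = 10000000U;	/* new 10 msec default */
	unsigned int ncpus;

	/* effective granularity after the boot-time CPU scaling */
	for (ncpus = 1; ncpus <= 16; ncpus *= 2)
		printf("%2u cpus: %u ns\n",
		       ncpus, base_ns * (1 + ilog2_u32(ncpus)));
	return 0;
}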
@@ -629,20 +629,16 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
 }
 
+static int
+wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
+
 static struct sched_entity *
 pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	s64 diff, gran;
-
 	if (!cfs_rq->next)
 		return se;
 
-	diff = cfs_rq->next->vruntime - se->vruntime;
-	if (diff < 0)
-		return se;
-
-	gran = calc_delta_fair(sysctl_sched_wakeup_granularity, &cfs_rq->load);
-	if (diff > gran)
+	if (wakeup_preempt_entity(cfs_rq->next, se) != 0)
 		return se;
 
 	return cfs_rq->next;
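Annotation (not part of the patch): pick_next() chooses between the leftmost entity 'se' and the wakeup buddy cfs_rq->next. The old open-coded vruntime/granularity check is replaced by a call to wakeup_preempt_entity() (forward-declared here, defined further down), so the buddy runs only when the helper returns 0, i.e. when its vruntime is within the wakeup granularity of the leftmost entity's. A rough userspace model of the new test, with made-up values and nice-0 weight assumed:

#include <stdio.h>

typedef long long s64;

#define GRAN_NS	10000000LL	/* sysctl_sched_wakeup_granularity */

/* stand-in for wakeup_preempt_entity() at nice-0 weight */
static int w(s64 curr_vruntime, s64 se_vruntime)
{
	s64 vdiff = curr_vruntime - se_vruntime;

	if (vdiff < 0)
		return -1;
	if (vdiff > GRAN_NS)
		return 1;
	return 0;
}

int main(void)
{
	s64 leftmost = 0;
	s64 near_buddy = 3000000;	/* within the granularity */
	s64 far_buddy = 25000000;	/* beyond the granularity */

	/* pick_next(): run the buddy only when w(buddy, leftmost) == 0 */
	printf("near buddy: run %s\n",
	       w(near_buddy, leftmost) != 0 ? "leftmost" : "buddy");
	printf("far buddy:  run %s\n",
	       w(far_buddy, leftmost) != 0 ? "leftmost" : "buddy");
	return 0;
}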
@@ -1101,6 +1097,48 @@ out:
 }
 #endif /* CONFIG_SMP */
 
+static unsigned long wakeup_gran(struct sched_entity *se)
+{
+	unsigned long gran = sysctl_sched_wakeup_granularity;
+
+	/*
+	 * More easily preempt - nice tasks, while not making
+	 * it harder for + nice tasks.
+	 */
+	if (unlikely(se->load.weight > NICE_0_LOAD))
+		gran = calc_delta_fair(gran, &se->load);
+
+	return gran;
+}
+
+/*
+ * Should 'se' preempt 'curr'.
+ *
+ *             |s1
+ *        |s2
+ *   |s3
+ *         g
+ *      |<--->|c
+ *
+ *  w(c, s1) = -1
+ *  w(c, s2) =  0
+ *  w(c, s3) =  1
+ *
+ */
+static int
+wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
+{
+	s64 gran, vdiff = curr->vruntime - se->vruntime;
+
+	if (vdiff < 0)
+		return -1;
+
+	gran = wakeup_gran(curr);
+	if (vdiff > gran)
+		return 1;
+
+	return 0;
+}
 
 /*
  * Preempt the current task with a newly woken task if needed:
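Annotation (not part of the patch): calc_delta_fair() scales by NICE_0_LOAD/weight, so for a current entity heavier than nice-0 the wall-clock granularity is translated into its slower-advancing vruntime scale, which is what the "more easily preempt - nice tasks" comment refers to. A self-contained model reproducing the w(c, sN) table from the diagram above; the NICE_0_LOAD value and the sample vruntimes are illustrative:

#include <stdio.h>

typedef long long s64;

#define NICE_0_LOAD	1024
#define GRAN_NS		10000000LL	/* sysctl_sched_wakeup_granularity */

struct entity {
	s64 vruntime;
	unsigned long weight;
};

static s64 wakeup_gran(const struct entity *se)
{
	s64 gran = GRAN_NS;

	/* convert wall-clock gran into a heavy entity's vruntime scale */
	if (se->weight > NICE_0_LOAD)
		gran = gran * NICE_0_LOAD / se->weight;
	return gran;
}

static int wakeup_preempt_entity(const struct entity *curr,
				 const struct entity *se)
{
	s64 vdiff = curr->vruntime - se->vruntime;

	if (vdiff < 0)
		return -1;
	if (vdiff > wakeup_gran(curr))
		return 1;
	return 0;
}

int main(void)
{
	struct entity c  = { 100000000, NICE_0_LOAD };
	struct entity s1 = { c.vruntime + 1, NICE_0_LOAD };		/* right of c  */
	struct entity s2 = { c.vruntime - GRAN_NS / 2, NICE_0_LOAD };	/* within g    */
	struct entity s3 = { c.vruntime - 2 * GRAN_NS, NICE_0_LOAD };	/* beyond g    */

	printf("w(c, s1) = %d\n", wakeup_preempt_entity(&c, &s1));	/* -1 */
	printf("w(c, s2) = %d\n", wakeup_preempt_entity(&c, &s2));	/*  0 */
	printf("w(c, s3) = %d\n", wakeup_preempt_entity(&c, &s3));	/*  1 */
	return 0;
}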
@@ -1110,7 +1148,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 	struct task_struct *curr = rq->curr;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	struct sched_entity *se = &curr->se, *pse = &p->se;
-	unsigned long gran;
 
 	if (unlikely(rt_prio(p->prio))) {
 		update_rq_clock(rq);
@@ -1140,15 +1177,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 		pse = parent_entity(pse);
 	}
 
-	gran = sysctl_sched_wakeup_granularity;
-	/*
-	 * More easily preempt - nice tasks, while not making
-	 * it harder for + nice tasks.
-	 */
-	if (unlikely(se->load.weight > NICE_0_LOAD))
-		gran = calc_delta_fair(gran, &se->load);
-
-	if (pse->vruntime + gran < se->vruntime)
+	if (wakeup_preempt_entity(se, pse) == 1)
 		resched_task(curr);
 }
 
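Annotation (not part of the patch): the last hunk is pure consolidation. The old open-coded test "pse->vruntime + gran < se->vruntime" and the new "wakeup_preempt_entity(se, pse) == 1" are the same condition, se->vruntime - pse->vruntime > gran, and both apply the same weight scaling to gran. A quick algebraic check with illustrative values:

#include <assert.h>

typedef long long s64;

int main(void)
{
	const s64 se = 50000000, gran = 10000000;
	s64 pse;

	for (pse = 0; pse < 100000000; pse += 1000000) {
		int old_test = pse + gran < se;	/* pse->vruntime + gran < se->vruntime */
		int new_test = se - pse > gran;	/* wakeup_preempt_entity(se, pse) == 1 */

		assert(old_test == new_test);
	}
	return 0;
}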