author     Wanpeng Li <wanpeng.li@linux.intel.com>  2015-05-13 02:01:01 -0400
committer  Ingo Molnar <mingo@kernel.org>           2015-06-19 04:06:45 -0400
commit     8b5e770ed7c05a65ffd2d33a83c14572696236dc (patch)
tree       861a74f878283f7c2f2b23bcdf7f365667275b7b /kernel/sched/deadline.c
parent     1cde2930e15473cb4dd7e5a07d83e605a969bd6e (diff)
sched/deadline: Optimize pull_dl_task()
pull_dl_task() uses pick_next_earliest_dl_task() to select a migration
candidate; this is sub-optimal since the next earliest task -- as per
the regular runqueue -- might not be migratable at all. This could
result in iterating the entire runqueue looking for a task.
Instead, iterate the pushable queue -- this queue only contains tasks
that have at least two CPUs set in their cpus_allowed mask.
Signed-off-by: Wanpeng Li <wanpeng.li@linux.intel.com>
[ Improved the changelog. ]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Juri Lelli <juri.lelli@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1431496867-4194-1-git-send-email-wanpeng.li@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
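
A minimal user-space sketch of the idea described in the changelog, kept as a
toy model rather than kernel code: the pushable set stands in for the
pushable_dl_tasks rb-tree, is kept sorted by deadline, and only ever holds
tasks with at least two allowed CPUs, so the scan for a pull candidate is
bounded by the migratable tasks. Names such as toy_task and pick_pullable are
made up for illustration and do not exist in the kernel.

/*
 * Toy, user-space model of the change (NOT kernel code); toy_task and
 * pick_pullable are illustrative names only. The "pushable" array stands
 * in for the pushable_dl_tasks rb-tree: deadline-ordered and containing
 * only tasks whose cpus_allowed mask has at least two bits set.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_task {
	const char *name;
	unsigned long deadline;     /* smaller value == earlier deadline */
	unsigned int cpus_allowed;  /* bitmask of CPUs the task may run on */
};

static bool allowed_on(const struct toy_task *t, int cpu)
{
	return t->cpus_allowed & (1u << cpu);
}

/*
 * New scheme: scan only the pushable subset (already deadline-ordered),
 * so every examined task is migratable by construction.
 */
static const struct toy_task *pick_pullable(const struct toy_task *pushable,
					    int nr, int this_cpu)
{
	for (int i = 0; i < nr; i++)
		if (allowed_on(&pushable[i], this_cpu))
			return &pushable[i];
	return NULL;
}

int main(void)
{
	/* Tasks bound to a single CPU never enter this set. */
	const struct toy_task pushable[] = {
		{ "A", 100, 0x6 },	/* CPUs 1,2 */
		{ "B", 200, 0x3 },	/* CPUs 0,1 */
	};
	const struct toy_task *p = pick_pullable(pushable, 2, 0);

	printf("pull candidate for CPU0: %s\n", p ? p->name : "none");
	return 0;
}

In the actual patch below, the per-task eligibility check is done by the
existing pick_dl_task() helper while the iteration walks the pushable rb-tree
instead of the whole deadline runqueue.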
Diffstat (limited to 'kernel/sched/deadline.c')
-rw-r--r--	kernel/sched/deadline.c	28
1 file changed, 27 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 890ce951c717..9cbe1c7fd36e 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1230,6 +1230,32 @@ next_node:
 	return NULL;
 }
 
+/*
+ * Return the earliest pushable rq's task, which is suitable to be executed
+ * on the CPU, NULL otherwise:
+ */
+static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
+{
+	struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
+	struct task_struct *p = NULL;
+
+	if (!has_pushable_dl_tasks(rq))
+		return NULL;
+
+next_node:
+	if (next_node) {
+		p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
+
+		if (pick_dl_task(rq, p, cpu))
+			return p;
+
+		next_node = rb_next(next_node);
+		goto next_node;
+	}
+
+	return NULL;
+}
+
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
 
 static int find_later_rq(struct task_struct *task)
@@ -1514,7 +1540,7 @@ static int pull_dl_task(struct rq *this_rq)
 		if (src_rq->dl.dl_nr_running <= 1)
 			goto skip;
 
-		p = pick_next_earliest_dl_task(src_rq, this_cpu);
+		p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
 
 		/*
 		 * We found a task to be pulled if: