author		Kirill Tkhai <tkhai@yandex.ru>	2013-06-07 15:37:43 -0400
committer	Ingo Molnar <mingo@kernel.org>	2013-06-19 06:58:40 -0400
commit		e23ee74777f389369431d77390c4b09332ce026a (patch)
tree		7db62a86e3efc6bb7ecf1b8eca35d76a5ceb9492
parent		d81344c50824a4d28a9397e97135d60075ac37ff (diff)
sched/rt: Simplify pull_rt_task() logic and remove .leaf_rt_rq_list
[ Peter, this is based off of some of my work. I ran it through a few
  tests and it passed. I also reviewed it, and added my SOB as I am
  somewhat a co-author of it. ]
Based on a patch by Steven Rostedt from last year:

https://lkml.org/lkml/2012/4/18/517
1) Simplify the pull_rt_task() logic: search only the pushable tasks of the
runqueue being pulled from. The only tasks that can be pulled are the ones
that are pushable on their local rq, and no others (see the sketch after
this list).

2) Remove the .leaf_rt_rq_list member of struct rt_rq and the functions
connected with it: nothing uses them anymore.
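
To illustrate point 1), here is a minimal userspace model of the new lookup.
This is illustrative only: struct task, struct rq, the cpus_allowed bitmask
and the sorted array are made-up stand-ins for the kernel's plist-based
rt.pushable_tasks, and only the affinity part of pick_rt_task() is mirrored.

/*
 * Userspace sketch of the pull path after this patch (not kernel code).
 * The "pushable" array plays the role of the kernel's priority-ordered
 * plist: best candidate (lowest prio value) first.
 */
#include <stdio.h>

#define MAX_PUSHABLE 8

struct task {
	int prio;		/* lower value = higher RT priority */
	unsigned cpus_allowed;	/* bitmask of CPUs the task may run on */
};

struct rq {
	struct task *pushable[MAX_PUSHABLE];	/* priority-ordered, best first */
	int nr_pushable;
};

/* Mirrors pick_highest_pushable_task(): first pushable task runnable on 'cpu'. */
static struct task *pick_highest_pushable_task(struct rq *src, int cpu)
{
	for (int i = 0; i < src->nr_pushable; i++) {
		struct task *p = src->pushable[i];

		if (p->cpus_allowed & (1u << cpu))
			return p;	/* list is sorted, so first hit is best */
	}
	return NULL;	/* nothing pullable on this rq */
}

int main(void)
{
	struct task a = { .prio = 10, .cpus_allowed = 0x2 };	/* pinned to CPU1 */
	struct task b = { .prio = 20, .cpus_allowed = 0x3 };	/* CPU0 or CPU1 */
	struct rq src = { .pushable = { &a, &b }, .nr_pushable = 2 };

	struct task *p = pick_highest_pushable_task(&src, 0);
	printf("pulled prio %d\n", p ? p->prio : -1);	/* prints "pulled prio 20" */
	return 0;
}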
Signed-off-by: Kirill Tkhai <tkhai@yandex.ru>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/287571370557898@web7d.yandex.ru
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 kernel/sched/rt.c    | 82 ++++----------------
 kernel/sched/sched.h |  1 -
 2 files changed, 16 insertions(+), 67 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 8d85f9ac4262..01970c8e64df 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -399,20 +399,6 @@ static inline struct task_group *next_task_group(struct task_group *tg)
 		(iter = next_task_group(iter)) && \
 		(rt_rq = iter->rt_rq[cpu_of(rq)]);)
 
-static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
-{
-	list_add_rcu(&rt_rq->leaf_rt_rq_list,
-			&rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
-}
-
-static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
-{
-	list_del_rcu(&rt_rq->leaf_rt_rq_list);
-}
-
-#define for_each_leaf_rt_rq(rt_rq, rq) \
-	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
-
 #define for_each_sched_rt_entity(rt_se) \
 	for (; rt_se; rt_se = rt_se->parent)
 
@@ -509,17 +495,6 @@ typedef struct rt_rq *rt_rq_iter_t;
 #define for_each_rt_rq(rt_rq, iter, rq) \
 	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
 
-static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
-{
-}
-
-static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
-{
-}
-
-#define for_each_leaf_rt_rq(rt_rq, rq) \
-	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
-
 #define for_each_sched_rt_entity(rt_se) \
 	for (; rt_se; rt_se = NULL)
 
@@ -1066,9 +1041,6 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;
 
-	if (!rt_rq->rt_nr_running)
-		list_add_leaf_rt_rq(rt_rq);
-
 	if (head)
 		list_add(&rt_se->run_list, queue);
 	else
@@ -1088,8 +1060,6 @@ static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
 	__clear_bit(rt_se_prio(rt_se), array->bitmap);
 
 	dec_rt_tasks(rt_se, rt_rq);
-	if (!rt_rq->rt_nr_running)
-		list_del_leaf_rt_rq(rt_rq);
 }
 
 /*
@@ -1394,42 +1364,24 @@ static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 	return 0;
 }
 
-/* Return the second highest RT task, NULL otherwise */
-static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
+/*
+ * Return the highest pushable rq's task, which is suitable to be executed
+ * on the cpu, NULL otherwise
+ */
+static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
 {
-	struct task_struct *next = NULL;
-	struct sched_rt_entity *rt_se;
-	struct rt_prio_array *array;
-	struct rt_rq *rt_rq;
-	int idx;
-
-	for_each_leaf_rt_rq(rt_rq, rq) {
-		array = &rt_rq->active;
-		idx = sched_find_first_bit(array->bitmap);
-next_idx:
-		if (idx >= MAX_RT_PRIO)
-			continue;
-		if (next && next->prio <= idx)
-			continue;
-		list_for_each_entry(rt_se, array->queue + idx, run_list) {
-			struct task_struct *p;
+	struct plist_head *head = &rq->rt.pushable_tasks;
+	struct task_struct *p;
 
-			if (!rt_entity_is_task(rt_se))
-				continue;
+	if (!has_pushable_tasks(rq))
+		return NULL;
 
-			p = rt_task_of(rt_se);
-			if (pick_rt_task(rq, p, cpu)) {
-				next = p;
-				break;
-			}
-		}
-		if (!next) {
-			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
-			goto next_idx;
-		}
+	plist_for_each_entry(p, head, pushable_tasks) {
+		if (pick_rt_task(rq, p, cpu))
+			return p;
 	}
 
-	return next;
+	return NULL;
 }
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
@@ -1703,12 +1655,10 @@ static int pull_rt_task(struct rq *this_rq)
 		double_lock_balance(this_rq, src_rq);
 
 		/*
-		 * Are there still pullable RT tasks?
+		 * We can pull only a task, which is pushable
+		 * on its rq, and no others.
 		 */
-		if (src_rq->rt.rt_nr_running <= 1)
-			goto skip;
-
-		p = pick_next_highest_task_rt(src_rq, this_cpu);
+		p = pick_highest_pushable_task(src_rq, this_cpu);
 
 		/*
 		 * Do we have an RT task that preempts
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 74ff659e964f..029601a61587 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -361,7 +361,6 @@ struct rt_rq {
 	unsigned long rt_nr_boosted;
 
 	struct rq *rq;
-	struct list_head leaf_rt_rq_list;
 	struct task_group *tg;
 #endif
 };
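
Why is it sufficient for pull_rt_task() to scan only pushable_tasks? A task
enters that list at enqueue time only when it is not the currently running
task and is allowed to run on more than one CPU, so every candidate the old
leaf-rt_rq walk could have found is already on the list. Below is a tiny
userspace model of that invariant; it is illustrative only: struct task and
task_is_pushable() are made up here, though the condition mirrors the
enqueue-time check in kernel/sched/rt.c.

/*
 * Userspace model of the "pushable" bookkeeping this patch relies on
 * (not kernel code).
 */
#include <stdbool.h>
#include <stdio.h>

struct task {
	int prio;
	int nr_cpus_allowed;
	bool is_current;	/* currently running on its CPU */
};

/* A task is tracked as pushable only if it could actually migrate. */
static bool task_is_pushable(const struct task *p)
{
	return !p->is_current && p->nr_cpus_allowed > 1;
}

int main(void)
{
	struct task running = { .prio = 5,  .nr_cpus_allowed = 4, .is_current = true };
	struct task pinned  = { .prio = 10, .nr_cpus_allowed = 1, .is_current = false };
	struct task waiter  = { .prio = 20, .nr_cpus_allowed = 4, .is_current = false };

	/* prints "0 0 1": only the migratable waiter is a pull candidate */
	printf("%d %d %d\n", task_is_pushable(&running),
	       task_is_pushable(&pinned), task_is_pushable(&waiter));
	return 0;
}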