Diffstat (limited to 'kernel/sched/rt.c')

 kernel/sched/rt.c | 132 ++++++----------------------
 1 file changed, 21 insertions(+), 111 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 127a2c4cf4ab..01970c8e64df 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -399,20 +399,6 @@ static inline struct task_group *next_task_group(struct task_group *tg)
 		(iter = next_task_group(iter)) && \
 		(rt_rq = iter->rt_rq[cpu_of(rq)]);)
 
-static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
-{
-	list_add_rcu(&rt_rq->leaf_rt_rq_list,
-			&rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
-}
-
-static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
-{
-	list_del_rcu(&rt_rq->leaf_rt_rq_list);
-}
-
-#define for_each_leaf_rt_rq(rt_rq, rq) \
-	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
-
 #define for_each_sched_rt_entity(rt_se) \
 	for (; rt_se; rt_se = rt_se->parent)
 
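This hunk, together with the matching !CONFIG_RT_GROUP_SCHED stubs removed below, retires the per-rq leaf_rt_rq_list. Its only remaining user was pick_next_highest_task_rt(), which walked every leaf rt_rq looking for a pullable task; later in this patch that walk is replaced by a scan of the priority-sorted rq->rt.pushable_tasks plist. For context, a sketch of the plist maintenance this relies on — these helpers already exist in kernel/sched/rt.c, though the exact bodies vary between kernel versions:

	static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
	{
		/* keep the list sorted: re-insert the node at the task's current prio */
		plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
		plist_node_init(&p->pushable_tasks, p->prio);
		plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
	}

	static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
	{
		plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	}

A plist keeps nodes sorted by ascending numeric priority, and a lower number means a higher RT priority, so the head of pushable_tasks is always the best push/pull candidate.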
@@ -472,7 +458,7 @@ static int rt_se_boosted(struct sched_rt_entity *rt_se)
 #ifdef CONFIG_SMP
 static inline const struct cpumask *sched_rt_period_mask(void)
 {
-	return cpu_rq(smp_processor_id())->rd->span;
+	return this_rq()->rd->span;
 }
 #else
 static inline const struct cpumask *sched_rt_period_mask(void)
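A cosmetic cleanup: this_rq() is shorthand for the current CPU's runqueue, so the two expressions are equivalent here. A sketch of the definitions from kernel/sched/sched.h of this era (the per-CPU accessor has changed in later kernels):

	DECLARE_PER_CPU(struct rq, runqueues);

	#define cpu_rq(cpu)	(&per_cpu(runqueues, (cpu)))
	#define this_rq()	(&__get_cpu_var(runqueues))
	#define task_rq(p)	cpu_rq(task_cpu(p))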
@@ -509,17 +495,6 @@ typedef struct rt_rq *rt_rq_iter_t;
 #define for_each_rt_rq(rt_rq, iter, rq) \
 	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
 
-static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
-{
-}
-
-static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
-{
-}
-
-#define for_each_leaf_rt_rq(rt_rq, rq) \
-	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
-
 #define for_each_sched_rt_entity(rt_se) \
 	for (; rt_se; rt_se = NULL)
 
@@ -699,15 +674,6 @@ balanced:
 	}
 }
 
-static void disable_runtime(struct rq *rq)
-{
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&rq->lock, flags);
-	__disable_runtime(rq);
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
-}
-
 static void __enable_runtime(struct rq *rq)
 {
 	rt_rq_iter_t iter;
@@ -732,37 +698,6 @@ static void __enable_runtime(struct rq *rq)
 	}
 }
 
-static void enable_runtime(struct rq *rq)
-{
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&rq->lock, flags);
-	__enable_runtime(rq);
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
-}
-
-int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu)
-{
-	int cpu = (int)(long)hcpu;
-
-	switch (action) {
-	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
-		disable_runtime(cpu_rq(cpu));
-		return NOTIFY_OK;
-
-	case CPU_DOWN_FAILED:
-	case CPU_DOWN_FAILED_FROZEN:
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		enable_runtime(cpu_rq(cpu));
-		return NOTIFY_OK;
-
-	default:
-		return NOTIFY_DONE;
-	}
-}
-
 static int balance_runtime(struct rt_rq *rt_rq)
 {
 	int more = 0;
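With the locking wrappers and the update_runtime() hotplug notifier gone, enabling and disabling of RT runtime is left entirely to the rq online/offline path, which was already doing the same work whenever the root domain attaches or detaches a CPU. For reference, the surviving hooks in this file look roughly like this (a sketch; exact bodies vary by kernel version):

	/* Assumes rq->lock is held */
	static void rq_online_rt(struct rq *rq)
	{
		if (rq->rt.overloaded)
			rt_set_overload(rq);

		__enable_runtime(rq);

		cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
	}

	/* Assumes rq->lock is held */
	static void rq_offline_rt(struct rq *rq)
	{
		if (rq->rt.overloaded)
			rt_clear_overload(rq);

		__disable_runtime(rq);

		cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
	}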
@@ -926,7 +861,7 @@ static void update_curr_rt(struct rq *rq)
 	if (curr->sched_class != &rt_sched_class)
 		return;
 
-	delta_exec = rq->clock_task - curr->se.exec_start;
+	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
 	if (unlikely((s64)delta_exec <= 0))
 		return;
 
@@ -936,7 +871,7 @@ static void update_curr_rt(struct rq *rq)
 	curr->se.sum_exec_runtime += delta_exec;
 	account_group_exec_runtime(curr, delta_exec);
 
-	curr->se.exec_start = rq->clock_task;
+	curr->se.exec_start = rq_clock_task(rq);
 	cpuacct_charge(curr, delta_exec);
 
 	sched_rt_avg_update(rq, delta_exec);
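This pair of hunks, plus the two matching ones in _pick_next_task_rt() and set_curr_task_rt() below, switch direct reads of rq->clock_task to the rq_clock_task() accessor. When introduced, the accessor was a trivial inline in kernel/sched/sched.h, giving the scheduler a single central point to later hang debug checks for stale clock reads:

	static inline u64 rq_clock(struct rq *rq)
	{
		return rq->clock;
	}

	static inline u64 rq_clock_task(struct rq *rq)
	{
		return rq->clock_task;
	}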
@@ -1106,9 +1041,6 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;
 
-	if (!rt_rq->rt_nr_running)
-		list_add_leaf_rt_rq(rt_rq);
-
 	if (head)
 		list_add(&rt_se->run_list, queue);
 	else
@@ -1128,8 +1060,6 @@ static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
 	__clear_bit(rt_se_prio(rt_se), array->bitmap);
 
 	dec_rt_tasks(rt_se, rt_rq);
-	if (!rt_rq->rt_nr_running)
-		list_del_leaf_rt_rq(rt_rq);
 }
 
 /*
@@ -1385,7 +1315,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 	} while (rt_rq);
 
 	p = rt_task_of(rt_se);
-	p->se.exec_start = rq->clock_task;
+	p->se.exec_start = rq_clock_task(rq);
 
 	return p;
 }
@@ -1434,42 +1364,24 @@ static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 	return 0;
 }
 
-/* Return the second highest RT task, NULL otherwise */
-static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
+/*
+ * Return the highest pushable rq's task, which is suitable to be executed
+ * on the cpu, NULL otherwise
+ */
+static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
 {
-	struct task_struct *next = NULL;
-	struct sched_rt_entity *rt_se;
-	struct rt_prio_array *array;
-	struct rt_rq *rt_rq;
-	int idx;
-
-	for_each_leaf_rt_rq(rt_rq, rq) {
-		array = &rt_rq->active;
-		idx = sched_find_first_bit(array->bitmap);
-next_idx:
-		if (idx >= MAX_RT_PRIO)
-			continue;
-		if (next && next->prio <= idx)
-			continue;
-		list_for_each_entry(rt_se, array->queue + idx, run_list) {
-			struct task_struct *p;
+	struct plist_head *head = &rq->rt.pushable_tasks;
+	struct task_struct *p;
 
-			if (!rt_entity_is_task(rt_se))
-				continue;
+	if (!has_pushable_tasks(rq))
+		return NULL;
 
-			p = rt_task_of(rt_se);
-			if (pick_rt_task(rq, p, cpu)) {
-				next = p;
-				break;
-			}
-		}
-		if (!next) {
-			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
-			goto next_idx;
-		}
+	plist_for_each_entry(p, head, pushable_tasks) {
+		if (pick_rt_task(rq, p, cpu))
+			return p;
 	}
 
-	return next;
+	return NULL;
 }
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
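The rewritten helper leans on two properties of rq->rt.pushable_tasks: the plist is kept sorted (lowest prio number, i.e. highest RT priority, first), so the first entry accepted by pick_rt_task() is the best candidate; and only tasks that can actually be migrated ever appear on it. The emptiness check it uses is the existing one-liner in this file:

	static inline int has_pushable_tasks(struct rq *rq)
	{
		return !plist_head_empty(&rq->rt.pushable_tasks);
	}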
@@ -1743,12 +1655,10 @@ static int pull_rt_task(struct rq *this_rq)
 		double_lock_balance(this_rq, src_rq);
 
 		/*
-		 * Are there still pullable RT tasks?
+		 * We can pull only a task, which is pushable
+		 * on its rq, and no others.
 		 */
-		if (src_rq->rt.rt_nr_running <= 1)
-			goto skip;
-
-		p = pick_next_highest_task_rt(src_rq, this_cpu);
+		p = pick_highest_pushable_task(src_rq, this_cpu);
 
 		/*
 		 * Do we have an RT task that preempts
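The old src_rq->rt.rt_nr_running <= 1 fast path is dropped because the pushable list now encodes pullability more precisely: the running task is never on it (see dequeue_pushable_task() in set_curr_task_rt() below), and neither is a task pinned to a single CPU. A sketch of the enqueue-side invariant, paraphrasing enqueue_task_rt() from this file (the body may differ slightly by kernel version):

	static void
	enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
	{
		struct sched_rt_entity *rt_se = &p->rt;

		if (flags & ENQUEUE_WAKEUP)
			rt_se->timeout = 0;

		enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);

		/* only a queued, non-running task allowed on >1 CPU is pushable */
		if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
			enqueue_pushable_task(rq, p);

		inc_nr_running(rq);
	}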
@@ -2037,7 +1947,7 @@ static void set_curr_task_rt(struct rq *rq)
 {
 	struct task_struct *p = rq->curr;
 
-	p->se.exec_start = rq->clock_task;
+	p->se.exec_start = rq_clock_task(rq);
 
 	/* The running task is never eligible for pushing */
 	dequeue_pushable_task(rq, p);