Diffstat (limited to 'kernel/sched_rt.c')
 kernel/sched_rt.c | 91 ++++++++++++++++++++++++++++++++-------------------
 1 file changed, 57 insertions(+), 34 deletions(-)

diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 47ceac9e8552..6163e4cf885b 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -240,7 +240,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 
 	spin_lock(&rt_b->rt_runtime_lock);
 	rt_period = ktime_to_ns(rt_b->rt_period);
-	for_each_cpu_mask(i, rd->span) {
+	for_each_cpu_mask_nr(i, rd->span) {
 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 		s64 diff;
 
@@ -253,7 +253,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 
 		diff = iter->rt_runtime - iter->rt_time;
 		if (diff > 0) {
-			do_div(diff, weight);
+			diff = div_u64((u64)diff, weight);
 			if (rt_rq->rt_runtime + diff > rt_period)
 				diff = rt_period - rt_rq->rt_runtime;
 			iter->rt_runtime -= diff;
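For context on the do_div() → div_u64() change above: do_div(n, base) from <asm/div64.h> divides its u64 lvalue in place and evaluates to the remainder, while div_u64() from <linux/math64.h> simply returns the quotient, which is what this code wants for a diff already checked to be positive. A minimal sketch of the difference (split_surplus() is an invented helper name, not part of the patch):

/* Illustrative sketch only, not part of the patch. */
#include <linux/math64.h>	/* div_u64() */
#include <asm/div64.h>		/* do_div() */

static u64 split_surplus(s64 surplus, u32 weight)
{
	u64 tmp = (u64)surplus;		/* caller ensures surplus > 0 */
	u32 rem = do_div(tmp, weight);	/* tmp becomes the quotient, rem the remainder */

	(void)rem;
	/* Same quotient, without modifying an operand in place: */
	return div_u64((u64)surplus, weight);
}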
@@ -505,7 +505,9 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	rt_rq->rt_nr_running++;
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 	if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
+#ifdef CONFIG_SMP
 		struct rq *rq = rq_of_rt_rq(rt_rq);
+#endif
 
 		rt_rq->highest_prio = rt_se_prio(rt_se);
 #ifdef CONFIG_SMP
@@ -599,11 +601,7 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;
 
-	if (rt_se->nr_cpus_allowed == 1)
-		list_add(&rt_se->run_list, queue);
-	else
-		list_add_tail(&rt_se->run_list, queue);
-
+	list_add_tail(&rt_se->run_list, queue);
 	__set_bit(rt_se_prio(rt_se), array->bitmap);
 
 	inc_rt_tasks(rt_se, rt_rq);
@@ -688,32 +686,34 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
  * Put task to the end of the run list without the overhead of dequeue
  * followed by enqueue.
  */
-static
-void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
+static void
+requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
 {
-	struct rt_prio_array *array = &rt_rq->active;
-
 	if (on_rt_rq(rt_se)) {
-		list_del_init(&rt_se->run_list);
-		list_add_tail(&rt_se->run_list,
-			      array->queue + rt_se_prio(rt_se));
+		struct rt_prio_array *array = &rt_rq->active;
+		struct list_head *queue = array->queue + rt_se_prio(rt_se);
+
+		if (head)
+			list_move(&rt_se->run_list, queue);
+		else
+			list_move_tail(&rt_se->run_list, queue);
 	}
 }
 
-static void requeue_task_rt(struct rq *rq, struct task_struct *p)
+static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
 	struct rt_rq *rt_rq;
 
 	for_each_sched_rt_entity(rt_se) {
 		rt_rq = rt_rq_of_se(rt_se);
-		requeue_rt_entity(rt_rq, rt_se);
+		requeue_rt_entity(rt_rq, rt_se, head);
 	}
 }
 
 static void yield_task_rt(struct rq *rq)
 {
-	requeue_task_rt(rq, rq->curr);
+	requeue_task_rt(rq, rq->curr, 0);
 }
 
 #ifdef CONFIG_SMP
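The rewritten requeue_rt_entity() leans on list_move() and list_move_tail() from <linux/list.h>, each of which is a list_del() of the entry followed by a list_add()/list_add_tail() onto the given head, so the open-coded delete-then-add pair collapses into one call and the head/tail choice becomes a parameter. A self-contained sketch of that pattern (struct demo_rt_entity and requeue_demo() are invented for illustration):

/* Illustrative sketch only: move an entry to the head or tail of its
 * per-priority queue, mirroring the new requeue_rt_entity() logic. */
#include <linux/list.h>

struct demo_rt_entity {
	struct list_head run_list;
	int prio;
};

static void requeue_demo(struct list_head *queues, struct demo_rt_entity *se,
			 int head)
{
	struct list_head *queue = queues + se->prio;

	if (head)
		list_move(&se->run_list, queue);	/* unlink + add at head */
	else
		list_move_tail(&se->run_list, queue);	/* unlink + add at tail */
}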
@@ -753,6 +753,30 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
 	 */
 	return task_cpu(p);
 }
+
+static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
+{
+	cpumask_t mask;
+
+	if (rq->curr->rt.nr_cpus_allowed == 1)
+		return;
+
+	if (p->rt.nr_cpus_allowed != 1
+	    && cpupri_find(&rq->rd->cpupri, p, &mask))
+		return;
+
+	if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
+		return;
+
+	/*
+	 * There appears to be other cpus that can accept
+	 * current and none to run 'p', so lets reschedule
+	 * to try and push current away:
+	 */
+	requeue_task_rt(rq, p, 1);
+	resched_task(rq->curr);
+}
+
 #endif /* CONFIG_SMP */
 
 /*
@@ -778,18 +802,8 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
 	 * to move current somewhere else, making room for our non-migratable
 	 * task.
 	 */
-	if((p->prio == rq->curr->prio)
-	   && p->rt.nr_cpus_allowed == 1
-	   && rq->curr->rt.nr_cpus_allowed != 1) {
-		cpumask_t mask;
-
-		if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
-			/*
-			 * There appears to be other cpus that can accept
-			 * current, so lets reschedule to try and push it away
-			 */
-			resched_task(rq->curr);
-	}
+	if (p->prio == rq->curr->prio && !need_resched())
+		check_preempt_equal_prio(rq, p);
 #endif
 }
 
@@ -847,6 +861,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 #define RT_MAX_TRIES 3
 
 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
+
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
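double_unlock_balance() is the counterpart of double_lock_balance() (both defined in kernel/sched.c); the later hunks switch the push/pull paths from a bare spin_unlock() on the second runqueue to the paired helper, so the lock is released the same way it was taken, including any lockdep bookkeeping done on the lock side. A rough sketch of the intended pairing, assuming the scheduler-internal struct rq and helpers that are visible inside kernel/sched*.c (balance_one_demo() is invented for illustration):

/* Illustrative usage pattern only. */
static int balance_one_demo(struct rq *this_rq, struct rq *target_rq)
{
	int moved = 0;

	/*
	 * double_lock_balance() may briefly drop this_rq->lock to take
	 * both runqueue locks in a consistent order; it returns nonzero
	 * if it had to, so callers can revalidate their state.
	 */
	double_lock_balance(this_rq, target_rq);

	/* ... move a task between the two runqueues here ... */

	/* Release through the matching helper, not spin_unlock(). */
	double_unlock_balance(this_rq, target_rq);

	return moved;
}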
@@ -922,6 +938,13 @@ static int find_lowest_rq(struct task_struct *task)
 		return -1; /* No targets found */
 
 	/*
+	 * Only consider CPUs that are usable for migration.
+	 * I guess we might want to change cpupri_find() to ignore those
+	 * in the first place.
+	 */
+	cpus_and(*lowest_mask, *lowest_mask, cpu_active_map);
+
+	/*
 	 * At this point we have built a mask of cpus representing the
 	 * lowest priority tasks in the system. Now we want to elect
 	 * the best one based on our affinity and topology.
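The added cpus_and() just intersects the cpupri result with cpu_active_map so a CPU on its way down is never picked as a push target. A minimal sketch of that filtering step with the same old-style cpumask API (pick_candidates_demo() is invented; the real code narrows lowest_mask in place inside find_lowest_rq()):

/* Illustrative sketch only. */
#include <linux/cpumask.h>
#include <linux/sched.h>

static int pick_candidates_demo(struct task_struct *p, cpumask_t *lowest_mask)
{
	/* Respect the task's affinity ... */
	cpus_and(*lowest_mask, *lowest_mask, p->cpus_allowed);
	/* ... and only consider CPUs that can still receive tasks. */
	cpus_and(*lowest_mask, *lowest_mask, cpu_active_map);

	return !cpus_empty(*lowest_mask);
}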
@@ -1001,7 +1024,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 			break;
 
 		/* try again */
-		spin_unlock(&lowest_rq->lock);
+		double_unlock_balance(rq, lowest_rq);
 		lowest_rq = NULL;
 	}
 
@@ -1070,7 +1093,7 @@ static int push_rt_task(struct rq *rq)
 
 	resched_task(lowest_rq->curr);
 
-	spin_unlock(&lowest_rq->lock);
+	double_unlock_balance(rq, lowest_rq);
 
 	ret = 1;
 out:
@@ -1107,7 +1130,7 @@ static int pull_rt_task(struct rq *this_rq)
 
 	next = pick_next_task_rt(this_rq);
 
-	for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
+	for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
 		if (this_cpu == cpu)
 			continue;
 
@@ -1176,7 +1199,7 @@ static int pull_rt_task(struct rq *this_rq)
 
 		}
  skip:
-		spin_unlock(&src_rq->lock);
+		double_unlock_balance(this_rq, src_rq);
 	}
 
 	return ret;
@@ -1415,7 +1438,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 	 * on the queue:
 	 */
 	if (p->rt.run_list.prev != p->rt.run_list.next) {
-		requeue_task_rt(rq, p);
+		requeue_task_rt(rq, p, 0);
 		set_tsk_need_resched(p);
 	}
 }
