Diffstat (limited to 'kernel/sched_rt.c')
 kernel/sched_rt.c | 54 ++++++++++++++++++++++--------------------------------
 1 file changed, 22 insertions(+), 32 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index f48328ac216f..bf3e38fdbe6d 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -194,17 +194,20 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 	return rt_se->my_q;
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 
 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
+	int this_cpu = smp_processor_id();
 	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
-	struct sched_rt_entity *rt_se = rt_rq->rt_se;
+	struct sched_rt_entity *rt_se;
+
+	rt_se = rt_rq->tg->rt_se[this_cpu];
 
 	if (rt_rq->rt_nr_running) {
 		if (rt_se && !on_rt_rq(rt_se))
-			enqueue_rt_entity(rt_se);
+			enqueue_rt_entity(rt_se, false);
 		if (rt_rq->highest_prio.curr < curr->prio)
 			resched_task(curr);
 	}
@@ -212,7 +215,10 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 
 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
-	struct sched_rt_entity *rt_se = rt_rq->rt_se;
+	int this_cpu = smp_processor_id();
+	struct sched_rt_entity *rt_se;
+
+	rt_se = rt_rq->tg->rt_se[this_cpu];
 
 	if (rt_se && on_rt_rq(rt_se))
 		dequeue_rt_entity(rt_se);
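
Note: the two helpers above stop using the rt_se pointer cached in the rt_rq and instead look up the group's scheduling entity for the CPU the code is currently running on, via rt_rq->tg->rt_se[this_cpu]. A task group keeps one sched_rt_entity per CPU. The sketch below is a minimal userspace model of that per-CPU lookup, not kernel code; NR_CPUS, group_entity(), and the struct layouts are illustrative stand-ins:

    /* Minimal model of a per-CPU group entity array; all names are
     * illustrative stand-ins, compiled as ordinary C. */
    #include <stdio.h>

    #define NR_CPUS 4

    struct rt_entity { int cpu; };

    struct task_group {
            struct rt_entity *rt_se[NR_CPUS];   /* one entity per CPU */
    };

    /* analogue of rt_se = rt_rq->tg->rt_se[this_cpu] */
    static struct rt_entity *group_entity(struct task_group *tg, int this_cpu)
    {
            return tg->rt_se[this_cpu];
    }

    int main(void)
    {
            struct rt_entity e[NR_CPUS] = { {0}, {1}, {2}, {3} };
            struct task_group tg;

            for (int cpu = 0; cpu < NR_CPUS; cpu++)
                    tg.rt_se[cpu] = &e[cpu];

            int this_cpu = 2;   /* stand-in for smp_processor_id() */
            printf("entity belongs to cpu %d\n", group_entity(&tg, this_cpu)->cpu);
            return 0;
    }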
@@ -803,7 +809,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	dec_rt_group(rt_se, rt_rq);
 }
 
-static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
@@ -819,7 +825,10 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;
 
-	list_add_tail(&rt_se->run_list, queue);
+	if (head)
+		list_add(&rt_se->run_list, queue);
+	else
+		list_add_tail(&rt_se->run_list, queue);
 	__set_bit(rt_se_prio(rt_se), array->bitmap);
 
 	inc_rt_tasks(rt_se, rt_rq);
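
The head flag above selects between list_add() and list_add_tail() on the per-priority queue: a head-queued entity is walked, and therefore picked, before entities of the same priority that were already queued, while tail queueing preserves the usual FIFO order. Below is a runnable userspace sketch of those semantics, reimplementing just enough of the <linux/list.h> circular list; struct node and the demo in main() are illustrative, not from the patch:

    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *prev, *next; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

    static void __list_add(struct list_head *n,
                           struct list_head *prev, struct list_head *next)
    {
            next->prev = n;
            n->next = next;
            n->prev = prev;
            prev->next = n;
    }

    /* list_add(): insert at the head -- walked (and picked) first */
    static void list_add(struct list_head *n, struct list_head *head)
    {
            __list_add(n, head, head->next);
    }

    /* list_add_tail(): insert at the tail -- normal FIFO behaviour */
    static void list_add_tail(struct list_head *n, struct list_head *head)
    {
            __list_add(n, head->prev, head);
    }

    struct node { int id; struct list_head run_list; };

    #define node_of(p) \
            ((struct node *)((char *)(p) - offsetof(struct node, run_list)))

    int main(void)
    {
            struct list_head queue;
            struct node a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

            INIT_LIST_HEAD(&queue);
            list_add_tail(&a.run_list, &queue);   /* head == false */
            list_add_tail(&b.run_list, &queue);   /* head == false */
            list_add(&c.run_list, &queue);        /* head == true  */

            /* prints "3 1 2": the head-queued node jumps the queue */
            for (struct list_head *p = queue.next; p != &queue; p = p->next)
                    printf("%d ", node_of(p)->id);
            printf("\n");
            return 0;
    }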
@@ -856,11 +865,11 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 	}
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 {
 	dequeue_rt_stack(rt_se);
 	for_each_sched_rt_entity(rt_se)
-		__enqueue_rt_entity(rt_se);
+		__enqueue_rt_entity(rt_se, head);
 }
 
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
@@ -871,21 +880,22 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 		struct rt_rq *rt_rq = group_rt_rq(rt_se);
 
 		if (rt_rq && rt_rq->rt_nr_running)
-			__enqueue_rt_entity(rt_se);
+			__enqueue_rt_entity(rt_se, false);
 	}
 }
 
 /*
  * Adding/removing a task to/from a priority array:
  */
-static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
+static void
+enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
 
 	if (wakeup)
 		rt_se->timeout = 0;
 
-	enqueue_rt_entity(rt_se);
+	enqueue_rt_entity(rt_se, head);
 
 	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
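
With this hunk the bool head argument is threaded all the way down: enqueue_task_rt() -> enqueue_rt_entity() -> __enqueue_rt_entity(). Every enqueue site visible in this diff passes false, so behaviour is unchanged here; the flag only matters for callers that explicitly request head queueing, presumably priority-change or requeue paths outside this file, which this diff does not show.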
@@ -1481,24 +1491,6 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 		push_rt_tasks(rq);
 }
 
-static unsigned long
-load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		unsigned long max_load_move,
-		struct sched_domain *sd, enum cpu_idle_type idle,
-		int *all_pinned, int *this_best_prio)
-{
-	/* don't touch RT tasks */
-	return 0;
-}
-
-static int
-move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		 struct sched_domain *sd, enum cpu_idle_type idle)
-{
-	/* don't touch RT tasks */
-	return 0;
-}
-
 static void set_cpus_allowed_rt(struct task_struct *p,
 				const struct cpumask *new_mask)
 {
@@ -1721,7 +1713,7 @@ static void set_curr_task_rt(struct rq *rq)
 		dequeue_pushable_task(rq, p);
 }
 
-unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
+static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 {
 	/*
 	 * Time slice is 0 for SCHED_FIFO tasks
@@ -1746,8 +1738,6 @@ static const struct sched_class rt_sched_class = {
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_rt,
 
-	.load_balance		= load_balance_rt,
-	.move_one_task		= move_one_task_rt,
 	.set_cpus_allowed       = set_cpus_allowed_rt,
 	.rq_online              = rq_online_rt,
 	.rq_offline             = rq_offline_rt,
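
This final hunk pairs with the earlier removal of load_balance_rt() and move_one_task_rt(): once the .load_balance and .move_one_task members are dropped from rt_sched_class, the stubs that merely returned 0 ("don't touch RT tasks") have nothing left to plug into. RT task placement continues to be handled by the class's own push/pull machinery, as seen in the push_rt_tasks() call inside task_woken_rt() above.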
