author		Jiri Kosina <jkosina@suse.cz>	2011-11-13 14:55:35 -0500
committer	Jiri Kosina <jkosina@suse.cz>	2011-11-13 14:55:53 -0500
commit		2290c0d06d82faee87b1ab2d9d4f7bf81ef64379 (patch)
tree		e075e4d5534193f28e6059904f61e5ca03958d3c /kernel/sched_rt.c
parent		4da669a2e3e5bc70b30a0465f3641528681b5f77 (diff)
parent		52e4c2a05256cb83cda12f3c2137ab1533344edb (diff)
Merge branch 'master' into for-next
Sync with Linus' tree to have 157550ff ("mtd: add GPMI-NAND driver
in the config and Makefile") as I have a patch depending on that one.
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c	99
1 file changed, 39 insertions(+), 60 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index af1177858be3..056cbd2e2a27 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -124,21 +124,33 @@ static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	update_rt_migration(rt_rq);
 }
 
+static inline int has_pushable_tasks(struct rq *rq)
+{
+	return !plist_head_empty(&rq->rt.pushable_tasks);
+}
+
 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 {
 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
 	plist_node_init(&p->pushable_tasks, p->prio);
 	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
+
+	/* Update the highest prio pushable task */
+	if (p->prio < rq->rt.highest_prio.next)
+		rq->rt.highest_prio.next = p->prio;
 }
 
 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 {
 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
-}
 
-static inline int has_pushable_tasks(struct rq *rq)
-{
-	return !plist_head_empty(&rq->rt.pushable_tasks);
+	/* Update the new highest prio pushable task */
+	if (has_pushable_tasks(rq)) {
+		p = plist_first_entry(&rq->rt.pushable_tasks,
+				      struct task_struct, pushable_tasks);
+		rq->rt.highest_prio.next = p->prio;
+	} else
+		rq->rt.highest_prio.next = MAX_RT_PRIO;
 }
 
 #else
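The hunk above keeps rq->rt.highest_prio.next current straight from the priority-ordered pushable_tasks plist: enqueue lowers it if the new task beats it, dequeue re-reads the (still sorted) head or falls back to MAX_RT_PRIO. A minimal user-space sketch of the same bookkeeping, with a hypothetical sorted list standing in for the kernel's plist (lower number = higher priority):

/* Sketch only; not the kernel's plist implementation. */
#include <stdio.h>
#include <stdlib.h>

#define MAX_PRIO 100                    /* sentinel: "no pushable task" */

struct node { int prio; struct node *next; };

static struct node *pushable;           /* kept sorted, lowest prio value first */
static int next_prio = MAX_PRIO;        /* analogue of highest_prio.next */

static void enqueue_pushable(int prio)
{
	struct node **pp = &pushable;
	struct node *n = malloc(sizeof(*n));

	n->prio = prio;
	while (*pp && (*pp)->prio <= prio)  /* keep the list ordered by priority */
		pp = &(*pp)->next;
	n->next = *pp;
	*pp = n;

	if (prio < next_prio)               /* new task may become next-highest */
		next_prio = prio;
}

static void dequeue_pushable(struct node *victim)
{
	struct node **pp = &pushable;

	while (*pp && *pp != victim)
		pp = &(*pp)->next;
	if (*pp)
		*pp = victim->next;
	free(victim);

	/* re-read the sorted head instead of rescanning, as the hunk above does */
	next_prio = pushable ? pushable->prio : MAX_PRIO;
}

int main(void)
{
	enqueue_pushable(50);
	enqueue_pushable(10);
	printf("next %d\n", next_prio);     /* 10 */
	dequeue_pushable(pushable);         /* drop the head (prio 10) */
	printf("next %d\n", next_prio);     /* 50 */
	return 0;
}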
@@ -643,6 +655,7 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 
 	if (rt_rq->rt_time > runtime) {
 		rt_rq->rt_throttled = 1;
+		printk_once(KERN_WARNING "sched: RT throttling activated\n");
 		if (rt_rq_throttled(rt_rq)) {
 			sched_rt_rq_dequeue(rt_rq);
 			return 1;
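printk_once() fires only the first time its call site is reached, so the throttling warning cannot flood the log every period. A rough user-space equivalent of that warn-once pattern (a sketch, not the kernel macro, and without the atomicity the kernel variant cares about):

#include <stdio.h>

/* Print a warning only on the first invocation at this call site. */
#define warn_once(...)					\
	do {						\
		static int warned;			\
		if (!warned) {				\
			warned = 1;			\
			fprintf(stderr, __VA_ARGS__);	\
		}					\
	} while (0)

int main(void)
{
	for (int i = 0; i < 3; i++)
		warn_once("sched: RT throttling activated\n");  /* prints once */
	return 0;
}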
@@ -698,47 +711,13 @@ static void update_curr_rt(struct rq *rq)
 
 #if defined CONFIG_SMP
 
-static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
-
-static inline int next_prio(struct rq *rq)
-{
-	struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
-
-	if (next && rt_prio(next->prio))
-		return next->prio;
-	else
-		return MAX_RT_PRIO;
-}
-
 static void
 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 {
 	struct rq *rq = rq_of_rt_rq(rt_rq);
 
-	if (prio < prev_prio) {
-
-		/*
-		 * If the new task is higher in priority than anything on the
-		 * run-queue, we know that the previous high becomes our
-		 * next-highest.
-		 */
-		rt_rq->highest_prio.next = prev_prio;
-
-		if (rq->online)
-			cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
-
-	} else if (prio == rt_rq->highest_prio.curr)
-		/*
-		 * If the next task is equal in priority to the highest on
-		 * the run-queue, then we implicitly know that the next highest
-		 * task cannot be any lower than current
-		 */
-		rt_rq->highest_prio.next = prio;
-	else if (prio < rt_rq->highest_prio.next)
-		/*
-		 * Otherwise, we need to recompute next-highest
-		 */
-		rt_rq->highest_prio.next = next_prio(rq);
+	if (rq->online && prio < prev_prio)
+		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
 }
 
 static void
@@ -746,9 +725,6 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 {
 	struct rq *rq = rq_of_rt_rq(rt_rq);
 
-	if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
-		rt_rq->highest_prio.next = next_prio(rq);
-
 	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
 		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
 }
@@ -961,6 +937,8 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 
 	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
+
+	inc_nr_running(rq);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -971,6 +949,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 	dequeue_rt_entity(rt_se);
 
 	dequeue_pushable_task(rq, p);
+
+	dec_nr_running(rq);
 }
 
 /*
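These two hunks make the RT class's own enqueue/dequeue paths bump and drop rq->nr_running; the matching removal from the common code is outside this file. The invariant is just that the counter is adjusted in the same paired functions that add and remove the task. A toy sketch of that pairing (hypothetical names, not the scheduler API):

#include <assert.h>
#include <stdio.h>

struct runqueue { unsigned int nr_running; };

/* The class-specific enqueue/dequeue own the accounting, as in the hunks above. */
static void enqueue_task(struct runqueue *rq)
{
	/* ...link the task into the class's data structures... */
	rq->nr_running++;
}

static void dequeue_task(struct runqueue *rq)
{
	/* ...unlink the task... */
	assert(rq->nr_running > 0);
	rq->nr_running--;
}

int main(void)
{
	struct runqueue rq = { 0 };

	enqueue_task(&rq);
	enqueue_task(&rq);
	dequeue_task(&rq);
	printf("nr_running = %u\n", rq.nr_running);	/* 1 */
	return 0;
}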
@@ -1017,10 +997,12 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 	struct rq *rq;
 	int cpu;
 
-	if (sd_flag != SD_BALANCE_WAKE)
-		return smp_processor_id();
-
 	cpu = task_cpu(p);
+
+	/* For anything but wake ups, just return the task_cpu */
+	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
+		goto out;
+
 	rq = cpu_rq(cpu);
 
 	rcu_read_lock();
@@ -1059,6 +1041,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 	}
 	rcu_read_unlock();
 
+out:
 	return cpu;
 }
 
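The two hunks above rework select_task_rq_rt() so non-wakeup (and non-fork) requests fall through to a single out: label that returns the task's current CPU, instead of returning smp_processor_id() from the middle of the function. A generic sketch of that single-exit style (an illustrative function with made-up names, not the scheduler's):

#include <stdio.h>

enum balance_reason { BALANCE_WAKE, BALANCE_FORK, BALANCE_EXEC };

/* Pick a CPU; anything but a wakeup/fork keeps the task's current CPU. */
static int select_cpu(int task_cpu, enum balance_reason reason)
{
	int cpu = task_cpu;

	if (reason != BALANCE_WAKE && reason != BALANCE_FORK)
		goto out;			/* common exit, no duplicated return */

	/* ...placement policy would run here and may update cpu... */
	cpu = (task_cpu + 1) % 4;		/* stand-in decision for the sketch */
out:
	return cpu;
}

int main(void)
{
	printf("%d\n", select_cpu(2, BALANCE_EXEC));	/* 2: left where it is */
	printf("%d\n", select_cpu(2, BALANCE_WAKE));	/* 3: "placed" */
	return 0;
}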
@@ -1178,7 +1161,6 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 {
 	update_curr_rt(rq);
-	p->se.exec_start = 0;
 
 	/*
 	 * The previous task needs to be made eligible for pushing
@@ -1198,7 +1180,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
+	    (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
 	    (p->rt.nr_cpus_allowed > 1))
 		return 1;
 	return 0;
@@ -1343,7 +1325,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 		 */
 		if (unlikely(task_rq(task) != rq ||
 			     !cpumask_test_cpu(lowest_rq->cpu,
-					       &task->cpus_allowed) ||
+					       tsk_cpus_allowed(task)) ||
 			     task_running(rq, task) ||
 			     !task->on_rq)) {
 
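In both hunks the cpumask test now goes through the tsk_cpus_allowed() accessor rather than taking &p->cpus_allowed directly, so callers stop depending on where the mask is stored. The general shape of such an accessor, sketched with a made-up struct rather than the kernel's task_struct:

#include <stdio.h>

struct task { unsigned long cpus_allowed; /* bitmask of permitted CPUs */ };

/* Accessor: callers no longer care where (or how) the mask lives. */
static inline unsigned long *task_cpus_allowed(struct task *t)
{
	return &t->cpus_allowed;
}

static int cpu_allowed(struct task *t, int cpu)
{
	return (*task_cpus_allowed(t) >> cpu) & 1;
}

int main(void)
{
	struct task t = { .cpus_allowed = 0x5 };	/* CPUs 0 and 2 */

	printf("cpu1 %d cpu2 %d\n", cpu_allowed(&t, 1), cpu_allowed(&t, 2));
	return 0;
}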
@@ -1394,6 +1376,7 @@ static int push_rt_task(struct rq *rq)
 {
 	struct task_struct *next_task;
 	struct rq *lowest_rq;
+	int ret = 0;
 
 	if (!rq->rt.overloaded)
 		return 0;
@@ -1426,7 +1409,7 @@ retry:
 	if (!lowest_rq) {
 		struct task_struct *task;
 		/*
-		 * find lock_lowest_rq releases rq->lock
+		 * find_lock_lowest_rq releases rq->lock
 		 * so it is possible that next_task has migrated.
 		 *
 		 * We need to make sure that the task is still on the same
@@ -1436,12 +1419,11 @@ retry:
 		task = pick_next_pushable_task(rq);
 		if (task_cpu(next_task) == rq->cpu && task == next_task) {
 			/*
-			 * If we get here, the task hasn't moved at all, but
-			 * it has failed to push. We will not try again,
-			 * since the other cpus will pull from us when they
-			 * are ready.
+			 * The task hasn't migrated, and is still the next
+			 * eligible task, but we failed to find a run-queue
+			 * to push it to. Do not retry in this case, since
+			 * other cpus will pull from us when ready.
 			 */
-			dequeue_pushable_task(rq, next_task);
 			goto out;
 		}
 
@@ -1460,6 +1442,7 @@ retry:
 	deactivate_task(rq, next_task, 0);
 	set_task_cpu(next_task, lowest_rq->cpu);
 	activate_task(lowest_rq, next_task, 0);
+	ret = 1;
 
 	resched_task(lowest_rq->curr);
 
@@ -1468,7 +1451,7 @@ retry:
 out:
 	put_task_struct(next_task);
 
-	return 1;
+	return ret;
 }
 
 static void push_rt_tasks(struct rq *rq)
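With ret initialized to 0 and only set to 1 after a task has actually been moved, push_rt_task() now reports whether it did any work, so its caller can keep invoking it until it reports no progress. A small stand-alone sketch of that drive-until-done pattern (a hypothetical queue, not the scheduler code):

#include <stdio.h>

static int pending = 3;		/* pretend three tasks are waiting to be pushed */

/* Push one item; return 1 only if something was actually moved. */
static int push_one(void)
{
	int ret = 0;

	if (pending <= 0)
		goto out;	/* nothing pushable: report no progress */

	pending--;		/* "migrate" one task */
	ret = 1;
out:
	return ret;
}

int main(void)
{
	int moved = 0;

	while (push_one())	/* the loop stops as soon as no work was done */
		moved++;
	printf("pushed %d, pending %d\n", moved, pending);	/* 3, 0 */
	return 0;
}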
@@ -1626,9 +1609,6 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 
 		update_rt_migration(&rq->rt);
 	}
-
-	cpumask_copy(&p->cpus_allowed, new_mask);
-	p->rt.nr_cpus_allowed = weight;
 }
 
 /* Assumes rq->lock is held */
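The cpumask copy and the nr_cpus_allowed update drop out of set_cpus_allowed_rt() because, in this series, that common bookkeeping appears to be done once in core code, which then calls the class hook only for class-specific work. A stripped-down, user-space illustration of that split; the names loosely echo do_set_cpus_allowed() and the sched_class hook but this is not the kernel interface:

#include <stdio.h>

struct task {
	unsigned long cpus_allowed;
	int nr_cpus_allowed;
	void (*class_set_cpus_allowed)(struct task *t, unsigned long mask);
};

/* Class hook: only class-specific state, no generic bookkeeping. */
static void rt_set_cpus_allowed(struct task *t, unsigned long mask)
{
	/* e.g. update migration/overload statistics for this class */
	(void)t;
	(void)mask;
}

/* Core helper: every class gets the copy and the weight update for free. */
static void set_cpus_allowed_common(struct task *t, unsigned long mask)
{
	if (t->class_set_cpus_allowed)
		t->class_set_cpus_allowed(t, mask);

	t->cpus_allowed = mask;
	t->nr_cpus_allowed = __builtin_popcountl(mask);
}

int main(void)
{
	struct task t = { .class_set_cpus_allowed = rt_set_cpus_allowed };

	set_cpus_allowed_common(&t, 0xf);		/* CPUs 0-3 */
	printf("weight %d\n", t.nr_cpus_allowed);	/* 4 */
	return 0;
}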
@@ -1863,4 +1843,3 @@ static void print_rt_stats(struct seq_file *m, int cpu)
 	rcu_read_unlock();
 }
 #endif /* CONFIG_SCHED_DEBUG */
-