path: root/kernel/sched/deadline.c
author:    Thomas Gleixner <tglx@linutronix.de>  2016-05-11 08:23:31 -0400
committer: Ingo Molnar <mingo@kernel.org>        2016-05-12 03:55:36 -0400
commit:    50605ffbdaf6d7ccab70d4631fd8347fc78af14f
tree:      4da2d3c09f4af5712d72471b5bb93174dcf857cc /kernel/sched/deadline.c
parent:    ade42e092b5d1fb9a77b026f019b9953d66f1573
sched/core: Provide a tsk_nr_cpus_allowed() helper
tsk_nr_cpus_allowed() is an accessor for task->nr_cpus_allowed which allows
us to change the representation of ->nr_cpus_allowed if required.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/1462969411-17735-2-git-send-email-bigeasy@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
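For context: the diffstat below is limited to kernel/sched/deadline.c, so the
helper's definition is not part of the hunks shown here; it is added to
include/linux/sched.h elsewhere in this series. Per the description above it
is a trivial inline accessor. A minimal sketch of its expected shape:

	/*
	 * Sketch of the accessor the hunks below switch callers to; the
	 * actual definition lands in include/linux/sched.h in a companion
	 * change, not in this file.
	 */
	static inline int tsk_nr_cpus_allowed(struct task_struct *p)
	{
		return p->nr_cpus_allowed;
	}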
Diffstat (limited to 'kernel/sched/deadline.c')
-rw-r--r--	kernel/sched/deadline.c	28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 4c9b4eee3f1c..fcb7f0217ff4 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -134,7 +134,7 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	struct task_struct *p = dl_task_of(dl_se);
 
-	if (p->nr_cpus_allowed > 1)
+	if (tsk_nr_cpus_allowed(p) > 1)
 		dl_rq->dl_nr_migratory++;
 
 	update_dl_migration(dl_rq);
@@ -144,7 +144,7 @@ static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	struct task_struct *p = dl_task_of(dl_se);
 
-	if (p->nr_cpus_allowed > 1)
+	if (tsk_nr_cpus_allowed(p) > 1)
 		dl_rq->dl_nr_migratory--;
 
 	update_dl_migration(dl_rq);
@@ -966,7 +966,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 
 	enqueue_dl_entity(&p->dl, pi_se, flags);
 
-	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
+	if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
 		enqueue_pushable_dl_task(rq, p);
 }
 
@@ -1040,9 +1040,9 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
 	 * try to make it stay here, it might be important.
 	 */
 	if (unlikely(dl_task(curr)) &&
-	    (curr->nr_cpus_allowed < 2 ||
+	    (tsk_nr_cpus_allowed(curr) < 2 ||
 	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
-	    (p->nr_cpus_allowed > 1)) {
+	    (tsk_nr_cpus_allowed(p) > 1)) {
 		int target = find_later_rq(p);
 
 		if (target != -1 &&
@@ -1063,7 +1063,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
 	 * Current can't be migrated, useless to reschedule,
 	 * let's hope p can move out.
 	 */
-	if (rq->curr->nr_cpus_allowed == 1 ||
+	if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
 	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
 		return;
 
@@ -1071,7 +1071,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
 	 * p is migratable, so let's not schedule it and
 	 * see if it is pushed or pulled somewhere else.
 	 */
-	if (p->nr_cpus_allowed != 1 &&
+	if (tsk_nr_cpus_allowed(p) != 1 &&
 	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
 		return;
 
@@ -1186,7 +1186,7 @@ static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
 {
 	update_curr_dl(rq);
 
-	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
+	if (on_dl_rq(&p->dl) && tsk_nr_cpus_allowed(p) > 1)
 		enqueue_pushable_dl_task(rq, p);
 }
 
@@ -1287,7 +1287,7 @@ static int find_later_rq(struct task_struct *task)
 	if (unlikely(!later_mask))
 		return -1;
 
-	if (task->nr_cpus_allowed == 1)
+	if (tsk_nr_cpus_allowed(task) == 1)
 		return -1;
 
 	/*
@@ -1433,7 +1433,7 @@ static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
 
 	BUG_ON(rq->cpu != task_cpu(p));
 	BUG_ON(task_current(rq, p));
-	BUG_ON(p->nr_cpus_allowed <= 1);
+	BUG_ON(tsk_nr_cpus_allowed(p) <= 1);
 
 	BUG_ON(!task_on_rq_queued(p));
 	BUG_ON(!dl_task(p));
@@ -1472,7 +1472,7 @@ retry:
 	 */
 	if (dl_task(rq->curr) &&
 	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
-	    rq->curr->nr_cpus_allowed > 1) {
+	    tsk_nr_cpus_allowed(rq->curr) > 1) {
 		resched_curr(rq);
 		return 0;
 	}
@@ -1619,9 +1619,9 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p)
 {
 	if (!task_running(rq, p) &&
 	    !test_tsk_need_resched(rq->curr) &&
-	    p->nr_cpus_allowed > 1 &&
+	    tsk_nr_cpus_allowed(p) > 1 &&
 	    dl_task(rq->curr) &&
-	    (rq->curr->nr_cpus_allowed < 2 ||
+	    (tsk_nr_cpus_allowed(rq->curr) < 2 ||
 	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
 		push_dl_tasks(rq);
 	}
@@ -1725,7 +1725,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
 
 	if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
-		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
+		if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded)
 			queue_push_tasks(rq);
 #else
 		if (dl_task(rq->curr))
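The payoff of routing every read through the accessor is that the
representation of ->nr_cpus_allowed can later change in exactly one place.
A hypothetical variant, NOT part of this commit, modeled on what the -rt
patch set does for migrate_disable() (the __migrate_disabled() helper is
assumed to exist in that configuration):

	/*
	 * Hypothetical -rt-style variant: a task that has called
	 * migrate_disable() is reported as pinned to a single CPU, so the
	 * push/pull paths patched above treat it as non-migratable without
	 * any caller needing to change.
	 */
	static inline int tsk_nr_cpus_allowed(struct task_struct *p)
	{
		return __migrate_disabled(p) ? 1 : p->nr_cpus_allowed;
	}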