Diffstat (limited to 'kernel/sched/deadline.c')
-rw-r--r--  kernel/sched/deadline.c | 18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index fc4f98b1258f..255ce138b652 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -306,7 +306,7 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
  * the overrunning entity can't interfere with other entity in the system and
  * can't make them miss their deadlines. Reasons why this kind of overruns
  * could happen are, typically, a entity voluntarily trying to overcome its
- * runtime, or it just underestimated it during sched_setscheduler_ex().
+ * runtime, or it just underestimated it during sched_setattr().
  */
 static void replenish_dl_entity(struct sched_dl_entity *dl_se,
                                 struct sched_dl_entity *pi_se)
@@ -535,7 +535,7 @@ again:
                 if (task_has_dl_policy(rq->curr))
                         check_preempt_curr_dl(rq, p, 0);
                 else
-                        resched_task(rq->curr);
+                        resched_curr(rq);
 #ifdef CONFIG_SMP
                 /*
                  * Queueing this task back might have overloaded rq,
@@ -634,7 +634,7 @@ static void update_curr_dl(struct rq *rq)
                         enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
 
                 if (!is_leftmost(curr, &rq->dl))
-                        resched_task(curr);
+                        resched_curr(rq);
         }
 
         /*
@@ -964,7 +964,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
             cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
                 return;
 
-        resched_task(rq->curr);
+        resched_curr(rq);
 }
 
 static int pull_dl_task(struct rq *this_rq);
@@ -979,7 +979,7 @@ static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
                                   int flags)
 {
         if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
-                resched_task(rq->curr);
+                resched_curr(rq);
                 return;
         }
 
@@ -1333,7 +1333,7 @@ retry:
         if (dl_task(rq->curr) &&
             dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
             rq->curr->nr_cpus_allowed > 1) {
-                resched_task(rq->curr);
+                resched_curr(rq);
                 return 0;
         }
 
@@ -1373,7 +1373,7 @@ retry:
         set_task_cpu(next_task, later_rq->cpu);
         activate_task(later_rq, next_task, 0);
 
-        resched_task(later_rq->curr);
+        resched_curr(later_rq);
 
         double_unlock_balance(rq, later_rq);
 
@@ -1632,14 +1632,14 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p,
                  */
                 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) &&
                     rq->curr == p)
-                        resched_task(p);
+                        resched_curr(rq);
 #else
                 /*
                  * Again, we don't know if p has a earlier
                  * or later deadline, so let's blindly set a
                  * (maybe not needed) rescheduling point.
                  */
-                resched_task(p);
+                resched_curr(rq);
 #endif /* CONFIG_SMP */
         } else
                 switched_to_dl(rq, p);
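
The change is mechanical: every site that rescheduled a runqueue's running task via resched_task(rq->curr) (or an alias such as curr, p, or later_rq->curr that points at the running task) now calls resched_curr() with the runqueue itself. The stand-alone C sketch below is only an illustration of that calling-convention change, not the kernel implementation; the struct layouts, the need_resched field, and the printf() output are invented for the example, whereas the real resched_curr() marks rq->curr for rescheduling under the runqueue lock.

/*
 * Sketch of the resched_task() -> resched_curr() conversion.
 * NOT kernel code; compiles stand-alone for illustration only.
 */
#include <stdio.h>

struct task_struct {
	const char *comm;
	int need_resched;
};

struct rq {
	struct task_struct *curr;	/* task currently running on this runqueue */
};

/* old interface: caller passes a task pointer, in practice always rq->curr */
static void resched_task(struct task_struct *p)
{
	p->need_resched = 1;
	printf("reschedule requested for %s\n", p->comm);
}

/* new interface: caller passes the runqueue; the helper dereferences
 * rq->curr itself, making explicit that only the current task of a
 * runqueue can be asked to reschedule */
static void resched_curr(struct rq *rq)
{
	resched_task(rq->curr);
}

int main(void)
{
	struct task_struct task = { .comm = "curr", .need_resched = 0 };
	struct rq rq = { .curr = &task };

	resched_task(rq.curr);	/* call sites before this patch */
	resched_curr(&rq);	/* call sites after this patch */
	return 0;
}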