author		Kirill Tkhai <tkhai@yandex.ru>		2014-06-28 16:03:57 -0400
committer	Ingo Molnar <mingo@kernel.org>		2014-07-16 07:38:19 -0400
commit		8875125efe8402c4d84b08291e68f1281baba8e2 (patch)
tree		2957c181dd06189a1499e09836c5fe5c3932a0b3
parent		466af29bf4270e84261712428a1304c28e3743fa (diff)
sched: Transform resched_task() into resched_curr()
We always use resched_task() with rq->curr as its argument.
It is not possible to reschedule any task other than the
runqueue's current one. The patch introduces resched_curr(struct rq *)
to replace all instances of this repeating pattern. The main aim
is cleanup, but there is a small size benefit too:
(before)
$ size kernel/sched/built-in.o
text data bss dec hex filename
155274 16445 7042 178761 2ba49 kernel/sched/built-in.o
$ size vmlinux
text data bss dec hex filename
7411490 1178376 991232 9581098 92322a vmlinux
(after)
$ size kernel/sched/built-in.o
text data bss dec hex filename
155130 16445 7042 178617 2b9b9 kernel/sched/built-in.o
$ size vmlinux
text data bss dec hex filename
7411362 1178376 991232 9580970 9231aa vmlinux
I was choosing between resched_curr() and resched_rq(),
and the first name looks better to me.
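For illustration only, here is a tiny standalone C sketch of the pattern being
folded away. The types and the two functions below are simplified stand-ins,
not the kernel implementation; they only mirror the shape of the old and new
calls:

/* sketch.c -- simplified stand-in types, NOT the kernel code */
#include <stdio.h>

struct task_struct {
	int need_resched;		/* stand-in for the TIF_NEED_RESCHED flag */
	const char *comm;
};

struct rq {
	struct task_struct *curr;	/* task currently running on this runqueue */
};

/* old shape: every caller had to pass rq->curr explicitly */
static void resched_task(struct task_struct *p)
{
	p->need_resched = 1;
}

/* new shape: take the runqueue and derive its current task inside */
static void resched_curr(struct rq *rq)
{
	rq->curr->need_resched = 1;
}

int main(void)
{
	struct task_struct t = { 0, "demo" };
	struct rq rq = { &t };

	resched_task(rq.curr);	/* the repeating pattern before the patch */
	resched_curr(&rq);	/* the single equivalent call after it */

	printf("%s: need_resched=%d\n", t.comm, t.need_resched);
	return 0;
}

Every caller that used to write resched_task(rq->curr) now writes
resched_curr(rq) and lets the helper pick up rq->curr itself.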
There is a little lie in Documentation/trace/ftrace.txt: I have not
actually re-collected the trace output, in the hope that the patch
won't make the execution times much worse :)
Signed-off-by: Kirill Tkhai <tkhai@yandex.ru>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20140628200219.1778.18735.stgit@localhost
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	Documentation/trace/ftrace.txt	 2
-rw-r--r--	include/linux/sched.h		 6
-rw-r--r--	kernel/sched/core.c		25
-rw-r--r--	kernel/sched/deadline.c		16
-rw-r--r--	kernel/sched/fair.c		20
-rw-r--r--	kernel/sched/idle_task.c	 2
-rw-r--r--	kernel/sched/rt.c		27
-rw-r--r--	kernel/sched/sched.h		 2

8 files changed, 51 insertions, 49 deletions
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index 2479b2a0c77c..4da42616939f 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -1515,7 +1515,7 @@ Doing the same with chrt -r 5 and function-trace set.
   <idle>-0 3d.h4 1us+: 0:120:R + [003] 2448: 94:R sleep
   <idle>-0 3d.h4 2us : ttwu_do_activate.constprop.87 <-try_to_wake_up
   <idle>-0 3d.h3 3us : check_preempt_curr <-ttwu_do_wakeup
-  <idle>-0 3d.h3 3us : resched_task <-check_preempt_curr
+  <idle>-0 3d.h3 3us : resched_curr <-check_preempt_curr
   <idle>-0 3dNh3 4us : task_woken_rt <-ttwu_do_wakeup
   <idle>-0 3dNh3 4us : _raw_spin_unlock <-try_to_wake_up
   <idle>-0 3dNh3 4us : sub_preempt_count <-_raw_spin_unlock
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c9c9ff723525..41a195385081 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2786,7 +2786,7 @@ static inline bool __must_check current_set_polling_and_test(void)
 
 	/*
 	 * Polling state must be visible before we test NEED_RESCHED,
-	 * paired by resched_task()
+	 * paired by resched_curr()
 	 */
 	smp_mb__after_atomic();
 
@@ -2804,7 +2804,7 @@ static inline bool __must_check current_clr_polling_and_test(void)
 
 	/*
 	 * Polling state must be visible before we test NEED_RESCHED,
-	 * paired by resched_task()
+	 * paired by resched_curr()
 	 */
 	smp_mb__after_atomic();
 
@@ -2836,7 +2836,7 @@ static inline void current_clr_polling(void)
 	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
 	 * fold.
 	 */
-	smp_mb(); /* paired with resched_task() */
+	smp_mb(); /* paired with resched_curr() */
 
 	preempt_fold_need_resched();
 }
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index cf7695a6c1d2..2f960813c582 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -589,30 +589,31 @@ static bool set_nr_if_polling(struct task_struct *p)
 #endif
 
 /*
- * resched_task - mark a task 'to be rescheduled now'.
+ * resched_curr - mark rq's current task 'to be rescheduled now'.
  *
  * On UP this means the setting of the need_resched flag, on SMP it
  * might also involve a cross-CPU call to trigger the scheduler on
  * the target CPU.
  */
-void resched_task(struct task_struct *p)
+void resched_curr(struct rq *rq)
 {
+	struct task_struct *curr = rq->curr;
 	int cpu;
 
-	lockdep_assert_held(&task_rq(p)->lock);
+	lockdep_assert_held(&rq->lock);
 
-	if (test_tsk_need_resched(p))
+	if (test_tsk_need_resched(curr))
 		return;
 
-	cpu = task_cpu(p);
+	cpu = cpu_of(rq);
 
 	if (cpu == smp_processor_id()) {
-		set_tsk_need_resched(p);
+		set_tsk_need_resched(curr);
 		set_preempt_need_resched();
 		return;
 	}
 
-	if (set_nr_and_not_polling(p))
+	if (set_nr_and_not_polling(curr))
 		smp_send_reschedule(cpu);
 	else
 		trace_sched_wake_idle_without_ipi(cpu);
@@ -625,7 +626,7 @@ void resched_cpu(int cpu)
 
 	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
 		return;
-	resched_task(cpu_curr(cpu));
+	resched_curr(rq);
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
@@ -1027,7 +1028,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 		if (class == rq->curr->sched_class)
 			break;
 		if (class == p->sched_class) {
-			resched_task(rq->curr);
+			resched_curr(rq);
 			break;
 		}
 	}
@@ -3073,7 +3074,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	 * lowered its priority, then reschedule its CPU:
 	 */
 	if (delta < 0 || (delta > 0 && task_running(rq, p)))
-		resched_task(rq->curr);
+		resched_curr(rq);
 }
 out_unlock:
 	task_rq_unlock(rq, p, &flags);
@@ -4299,7 +4300,7 @@ again:
 	 * fairness.
	 */
 	if (preempt && rq != p_rq)
-		resched_task(p_rq->curr);
+		resched_curr(p_rq);
 }
 
 out_unlock:
@@ -7106,7 +7107,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 	__setscheduler(rq, p, &attr);
 	if (on_rq) {
 		enqueue_task(rq, p, 0);
-		resched_task(rq->curr);
+		resched_curr(rq);
 	}
 
 	check_class_changed(rq, p, prev_class, old_prio);
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index fc4f98b1258f..df0b77a8caca 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -535,7 +535,7 @@ again:
 	if (task_has_dl_policy(rq->curr))
 		check_preempt_curr_dl(rq, p, 0);
 	else
-		resched_task(rq->curr);
+		resched_curr(rq);
 #ifdef CONFIG_SMP
 	/*
 	 * Queueing this task back might have overloaded rq,
@@ -634,7 +634,7 @@ static void update_curr_dl(struct rq *rq)
 		enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
 
 	if (!is_leftmost(curr, &rq->dl))
-		resched_task(curr);
+		resched_curr(rq);
 }
 
 /*
@@ -964,7 +964,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
 	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
 		return;
 
-	resched_task(rq->curr);
+	resched_curr(rq);
 }
 
 static int pull_dl_task(struct rq *this_rq);
@@ -979,7 +979,7 @@ static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
 				  int flags)
 {
 	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
-		resched_task(rq->curr);
+		resched_curr(rq);
 		return;
 	}
 
@@ -1333,7 +1333,7 @@ retry:
 	if (dl_task(rq->curr) &&
 	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
 	    rq->curr->nr_cpus_allowed > 1) {
-		resched_task(rq->curr);
+		resched_curr(rq);
 		return 0;
 	}
 
@@ -1373,7 +1373,7 @@ retry:
 	set_task_cpu(next_task, later_rq->cpu);
 	activate_task(later_rq, next_task, 0);
 
-	resched_task(later_rq->curr);
+	resched_curr(later_rq);
 
 	double_unlock_balance(rq, later_rq);
 
@@ -1632,14 +1632,14 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p,
		 */
		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) &&
		    rq->curr == p)
-			resched_task(p);
+			resched_curr(rq);
 #else
		/*
		 * Again, we don't know if p has a earlier
		 * or later deadline, so let's blindly set a
		 * (maybe not needed) rescheduling point.
		 */
-		resched_task(p);
+		resched_curr(rq);
 #endif /* CONFIG_SMP */
	} else
		switched_to_dl(rq, p);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 923fe32db6b3..f5f0cc91518c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2923,7 +2923,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 	ideal_runtime = sched_slice(cfs_rq, curr);
 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
 	if (delta_exec > ideal_runtime) {
-		resched_task(rq_of(cfs_rq)->curr);
+		resched_curr(rq_of(cfs_rq));
 		/*
 		 * The current task ran long enough, ensure it doesn't get
 		 * re-elected due to buddy favours.
@@ -2947,7 +2947,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 		return;
 
 	if (delta > ideal_runtime)
-		resched_task(rq_of(cfs_rq)->curr);
+		resched_curr(rq_of(cfs_rq));
 }
 
 static void
@@ -3087,7 +3087,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
	 * validating it and just reschedule.
	 */
	if (queued) {
-		resched_task(rq_of(cfs_rq)->curr);
+		resched_curr(rq_of(cfs_rq));
		return;
	}
	/*
@@ -3278,7 +3278,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
	 * hierarchy can be throttled
	 */
	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
-		resched_task(rq_of(cfs_rq)->curr);
+		resched_curr(rq_of(cfs_rq));
 }
 
 static __always_inline
@@ -3438,7 +3438,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	/* determine whether we need to wake up potentially idle cpu */
 	if (rq->curr == rq->idle && rq->cfs.nr_running)
-		resched_task(rq->curr);
+		resched_curr(rq);
 }
 
 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
@@ -3897,7 +3897,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 
 	if (delta < 0) {
 		if (rq->curr == p)
-			resched_task(p);
+			resched_curr(rq);
 		return;
 	}
 
@@ -4766,7 +4766,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	return;
 
 preempt:
-	resched_task(curr);
+	resched_curr(rq);
 	/*
	 * Only set the backward buddy when the current task is still
	 * on the rq. This can happen when a wakeup gets interleaved
@@ -7457,7 +7457,7 @@ static void task_fork_fair(struct task_struct *p)
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
-		resched_task(rq->curr);
+		resched_curr(rq);
	}
 
	se->vruntime -= cfs_rq->min_vruntime;
@@ -7482,7 +7482,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
	 */
	if (rq->curr == p) {
		if (p->prio > oldprio)
-			resched_task(rq->curr);
+			resched_curr(rq);
	} else
		check_preempt_curr(rq, p, 0);
 }
@@ -7545,7 +7545,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
	 * if we can still preempt the current task.
	 */
	if (rq->curr == p)
-		resched_task(rq->curr);
+		resched_curr(rq);
	else
		check_preempt_curr(rq, p, 0);
 }
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 879f2b75266a..67ad4e7f506a 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -20,7 +20,7 @@ select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
  */
 static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
 {
-	resched_task(rq->idle);
+	resched_curr(rq);
 }
 
 static struct task_struct *
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 671a8b5fdb6f..5f6edca4fafd 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -463,9 +463,10 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
 	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
+	struct rq *rq = rq_of_rt_rq(rt_rq);
 	struct sched_rt_entity *rt_se;
 
-	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
+	int cpu = cpu_of(rq);
 
 	rt_se = rt_rq->tg->rt_se[cpu];
 
@@ -476,7 +477,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 			enqueue_rt_entity(rt_se, false);
 
 		if (rt_rq->highest_prio.curr < curr->prio)
-			resched_task(curr);
+			resched_curr(rq);
 	}
 }
 
@@ -566,7 +567,7 @@ static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 		return;
 
 	enqueue_top_rt_rq(rt_rq);
-	resched_task(rq->curr);
+	resched_curr(rq);
 }
 
 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
@@ -951,7 +952,7 @@ static void update_curr_rt(struct rq *rq)
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
-				resched_task(curr);
+				resched_curr(rq);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		}
	}
@@ -1366,7 +1367,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
	 * to try and push current away:
	 */
	requeue_task_rt(rq, p, 1);
-	resched_task(rq->curr);
+	resched_curr(rq);
 }
 
 #endif /* CONFIG_SMP */
@@ -1377,7 +1378,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (p->prio < rq->curr->prio) {
-		resched_task(rq->curr);
+		resched_curr(rq);
 		return;
 	}
 
@@ -1693,7 +1694,7 @@ retry:
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
-		resched_task(rq->curr);
+		resched_curr(rq);
		return 0;
	}
 
@@ -1740,7 +1741,7 @@ retry:
 	activate_task(lowest_rq, next_task, 0);
 	ret = 1;
 
-	resched_task(lowest_rq->curr);
+	resched_curr(lowest_rq);
 
 	double_unlock_balance(rq, lowest_rq);
 
@@ -1939,7 +1940,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
 		return;
 
 	if (pull_rt_task(rq))
-		resched_task(rq->curr);
+		resched_curr(rq);
 }
 
 void __init init_sched_rt_class(void)
@@ -1977,7 +1978,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
			check_resched = 0;
 #endif /* CONFIG_SMP */
		if (check_resched && p->prio < rq->curr->prio)
-			resched_task(rq->curr);
+			resched_curr(rq);
	}
 }
 
@@ -2006,11 +2007,11 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
		 * Only reschedule if p is still on the same runqueue.
		 */
		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
-			resched_task(p);
+			resched_curr(rq);
 #else
		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)
-			resched_task(p);
+			resched_curr(rq);
 #endif /* CONFIG_SMP */
	} else {
		/*
@@ -2019,7 +2020,7 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
		 * then reschedule.
		 */
		if (p->prio < rq->curr->prio)
-			resched_task(rq->curr);
	}
 }
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0191ed563bdd..1283945d1ace 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1199,7 +1199,7 @@ extern void init_sched_rt_class(void);
 extern void init_sched_fair_class(void);
 extern void init_sched_dl_class(void);
 
-extern void resched_task(struct task_struct *p);
+extern void resched_curr(struct rq *rq);
 extern void resched_cpu(int cpu);
 
 extern struct rt_bandwidth def_rt_bandwidth;