author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-08-04 19:23:30 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-08-04 19:23:30 -0400 |
commit | 98959948a7ba33cf8c708626e0d2a1456397e1c6 (patch) | |
tree | 8ba9b6c2679a06e89f23bdd7018e9bb0249e3bda /kernel/sched/rt.c | |
parent | ef35ad26f8ff44d2c93e29952cdb336bda729d9d (diff) | |
parent | cd3bd4e628a6d57d66afe77835fe8d93ae3e41f8 (diff) | |
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
- Move the nohz kick code out of the scheduler tick to a dedicated IPI,
from Frederic Weisbecker.
This necessitated quite a bit of background infrastructure rework,
including (see the irq-work sketch after this list):
* Clean up some irq-work internals
* Implement remote irq-work
* Implement nohz kick on top of remote irq-work
* Move full dynticks timer enqueue notification to new kick
* Move multi-task notification to new kick
* Remove unnecessary barriers on multi-task notification
- Remove proliferation of wait_on_bit() action functions and allow
wait_on_bit_action() functions to support a timeout; a caller-side
sketch follows the commit list below. (Neil Brown)
- Another round of sched/numa improvements, cleanups and fixes. (Rik
van Riel)
- Implement fast idling of CPUs when the system is partially loaded,
for better scalability. (Tim Chen)
- Restructure and fix the CPU hotplug handling code that may leave
cfs_rq's and rt_rq's throttled when tasks are migrated away from a dead
CPU. (Kirill Tkhai)
- Robustify the sched topology setup code. (Peter Zijlstra)
- Improve sched_feat() handling wrt. static_keys (Jason Baron)
- Misc fixes.
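The remote irq-work machinery mentioned in the first item is the building
block for the new nohz kick: work is queued on another CPU's list and that
CPU is sent an IPI to run it. A minimal sketch of the pattern, for
orientation only; the names nohz_kick_fn, nohz_kick_work and kick_cpu are
illustrative, not the scheduler's actual symbols:

```c
#include <linux/irq_work.h>
#include <linux/percpu.h>

/* Runs in hard-irq context on the CPU the work was queued on. */
static void nohz_kick_fn(struct irq_work *work)
{
        /* ... perform the nohz/tick bookkeeping for this CPU ... */
}

static DEFINE_PER_CPU(struct irq_work, nohz_kick_work) = {
        .func = nohz_kick_fn,
};

/* Queue the work on @cpu; an IPI is sent if that CPU needs waking. */
static void kick_cpu(int cpu)
{
        irq_work_queue_on(&per_cpu(nohz_kick_work, cpu), cpu);
}
```

This is what lets the full-dynticks notifications move out of the scheduler
tick: the kicking CPU only queues work, and the target CPU does its own
bookkeeping when the IPI arrives.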
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (37 commits)
sched/fair: Fix 'make xmldocs' warning caused by missing description
sched: Use macro for magic number of -1 for setparam
sched: Robustify topology setup
sched: Fix sched_setparam() policy == -1 logic
sched: Allow wait_on_bit_action() functions to support a timeout
sched: Remove proliferation of wait_on_bit() action functions
sched/numa: Revert "Use effective_load() to balance NUMA loads"
sched: Fix static_key race with sched_feat()
sched: Remove extra static_key*() function indirection
sched/rt: Fix replenish_dl_entity() comments to match the current upstream code
sched: Transform resched_task() into resched_curr()
sched/deadline: Kill task_struct->pi_top_task
sched: Rework check_for_tasks()
sched/rt: Enqueue just unthrottled rt_rq back on the stack in __disable_runtime()
sched/fair: Disable runtime_enabled on dying rq
sched/numa: Change scan period code to match intent
sched/numa: Rework best node setting in task_numa_migrate()
sched/numa: Examine a task move when examining a task swap
sched/numa: Simplify task_numa_compare()
sched/numa: Use effective_load() to balance NUMA loads
...
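For the wait_on_bit() rework listed above, the caller-visible change is
roughly the following. This is a hedged sketch: my_flags, MY_FLAG_BUSY and
wait_until_not_busy() are made-up names used only to show the shape of the
new call, not code from this series:

```c
#include <linux/wait.h>
#include <linux/sched.h>

/* Hypothetical flag word and bit, for illustration only. */
static unsigned long my_flags;
#define MY_FLAG_BUSY 0

static int wait_until_not_busy(void)
{
        /*
         * Old interface: callers had to pass an action callback even for
         * the default "sleep until the bit clears" behaviour, e.g.
         *
         *     wait_on_bit(&my_flags, MY_FLAG_BUSY, my_wait_action,
         *                 TASK_UNINTERRUPTIBLE);
         *
         * New interface: the common case only names the sleep mode;
         * wait_on_bit_action() remains for callers that really need a
         * custom action function (which may now implement a timeout).
         */
        return wait_on_bit(&my_flags, MY_FLAG_BUSY, TASK_UNINTERRUPTIBLE);
}
```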
Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r-- | kernel/sched/rt.c | 30 |
1 file changed, 17 insertions(+), 13 deletions(-)
```diff
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index a49083192c64..5f6edca4fafd 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -463,9 +463,10 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
         struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
+        struct rq *rq = rq_of_rt_rq(rt_rq);
         struct sched_rt_entity *rt_se;

-        int cpu = cpu_of(rq_of_rt_rq(rt_rq));
+        int cpu = cpu_of(rq);

         rt_se = rt_rq->tg->rt_se[cpu];

@@ -476,7 +477,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
                         enqueue_rt_entity(rt_se, false);

                 if (rt_rq->highest_prio.curr < curr->prio)
-                        resched_task(curr);
+                        resched_curr(rq);
         }
 }

@@ -566,7 +567,7 @@ static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
                 return;

         enqueue_top_rt_rq(rt_rq);
-        resched_task(rq->curr);
+        resched_curr(rq);
 }

 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
@@ -740,6 +741,9 @@ balanced:
                 rt_rq->rt_throttled = 0;
                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
                 raw_spin_unlock(&rt_b->rt_runtime_lock);
+
+                /* Make rt_rq available for pick_next_task() */
+                sched_rt_rq_enqueue(rt_rq);
         }
 }

@@ -948,7 +952,7 @@ static void update_curr_rt(struct rq *rq)
                         raw_spin_lock(&rt_rq->rt_runtime_lock);
                         rt_rq->rt_time += delta_exec;
                         if (sched_rt_runtime_exceeded(rt_rq))
-                                resched_task(curr);
+                                resched_curr(rq);
                         raw_spin_unlock(&rt_rq->rt_runtime_lock);
                 }
         }
@@ -1363,7 +1367,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
          * to try and push current away:
          */
         requeue_task_rt(rq, p, 1);
-        resched_task(rq->curr);
+        resched_curr(rq);
 }

 #endif /* CONFIG_SMP */
@@ -1374,7 +1378,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
 {
         if (p->prio < rq->curr->prio) {
-                resched_task(rq->curr);
+                resched_curr(rq);
                 return;
         }

@@ -1690,7 +1694,7 @@ retry:
          * just reschedule current.
          */
         if (unlikely(next_task->prio < rq->curr->prio)) {
-                resched_task(rq->curr);
+                resched_curr(rq);
                 return 0;
         }

@@ -1737,7 +1741,7 @@ retry:
         activate_task(lowest_rq, next_task, 0);
         ret = 1;

-        resched_task(lowest_rq->curr);
+        resched_curr(lowest_rq);

         double_unlock_balance(rq, lowest_rq);

@@ -1936,7 +1940,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
                 return;

         if (pull_rt_task(rq))
-                resched_task(rq->curr);
+                resched_curr(rq);
 }

 void __init init_sched_rt_class(void)
@@ -1974,7 +1978,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
                         check_resched = 0;
 #endif /* CONFIG_SMP */
                 if (check_resched && p->prio < rq->curr->prio)
-                        resched_task(rq->curr);
+                        resched_curr(rq);
         }
 }

@@ -2003,11 +2007,11 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
                  * Only reschedule if p is still on the same runqueue.
                  */
                 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
-                        resched_task(p);
+                        resched_curr(rq);
 #else
                 /* For UP simply resched on drop of prio */
                 if (oldprio < p->prio)
-                        resched_task(p);
+                        resched_curr(rq);
 #endif /* CONFIG_SMP */
         } else {
                 /*
@@ -2016,7 +2020,7 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
                  * then reschedule.
                  */
                 if (p->prio < rq->curr->prio)
-                        resched_task(rq->curr);
+                        resched_curr(rq);
         }
 }

```
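Most of the hunks above are the mechanical part of the resched_task() ->
resched_curr() conversion from the shortlog: call sites stop passing a task
and pass the runqueue instead, and the helper reschedules that runqueue's
current task itself, so callers like sched_rt_rq_enqueue() no longer have
to fish ->curr out of a (possibly remote) runqueue just to name the task to
preempt. A rough, simplified sketch of what the new helper amounts to; the
real resched_curr() lives in kernel/sched/core.c and also handles the
polling-idle case:

```c
/* Simplified sketch only -- not the actual kernel/sched/core.c code. */
void resched_curr(struct rq *rq)
{
        struct task_struct *curr = rq->curr;    /* task running on this rq */

        if (test_tsk_need_resched(curr))        /* already marked, nothing to do */
                return;

        set_tsk_need_resched(curr);             /* request a reschedule of rq->curr */

        if (cpu_of(rq) != smp_processor_id())   /* remote rq: poke that CPU */
                smp_send_reschedule(cpu_of(rq));
}
```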