author    Linus Torvalds <torvalds@linux-foundation.org>    2017-02-20 15:52:55 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2017-02-20 15:52:55 -0500
commit    828cad8ea05d194d8a9452e0793261c2024c23a2 (patch)
tree      0ad7c7e044cdcfe75d78da0b52eb2358d4686e02 /kernel/sched/rt.c
parent    60c906bab124a0627fba04c9ca5e61bba4747c0c (diff)
parent    bb3bac2ca9a3a5b7fa601781adf70167a0449d75 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
 "The main changes in this (fairly busy) cycle were:

   - There was a class of scheduler bugs related to forgetting to update
     the rq-clock timestamp, which can cause weird and hard-to-debug
     problems, so there's a new debug facility for this: it uncovered a
     whole lot of bugs and convinced us that we want to keep the debug
     facility. (Peter Zijlstra, Matt Fleming)

   - Various cputime related updates: eliminate cputime and use u64
     nanoseconds directly, simplify and improve the arch interfaces,
     implement delayed accounting more widely, etc. (Frederic Weisbecker)

   - Move code around for better structure, plus cleanups (Ingo Molnar)

   - Move IO schedule accounting deeper into the scheduler, plus related
     changes to improve the situation (Tejun Heo)

   - ... plus a round of sched/rt and sched/deadline fixes, plus other
     fixes, updates and cleanups"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (85 commits)
  sched/core: Remove unlikely() annotation from sched_move_task()
  sched/autogroup: Rename auto_group.[ch] to autogroup.[ch]
  sched/topology: Split out scheduler topology code from core.c into topology.c
  sched/core: Remove unnecessary #include headers
  sched/rq_clock: Consolidate the ordering of the rq_clock methods
  delayacct: Include <uapi/linux/taskstats.h>
  sched/core: Clean up comments
  sched/rt: Show the 'sched_rr_timeslice' SCHED_RR timeslice tuning knob in milliseconds
  sched/clock: Add dummy clear_sched_clock_stable() stub function
  sched/cputime: Remove generic asm headers
  sched/cputime: Remove unused nsec_to_cputime()
  s390, sched/cputime: Remove unused cputime definitions
  powerpc, sched/cputime: Remove unused cputime definitions
  s390, sched/cputime: Make arch_cpu_idle_time() to return nsecs
  ia64, sched/cputime: Remove unused cputime definitions
  ia64: Convert vtime to use nsec units directly
  ia64, sched/cputime: Move the nsecs based cputime headers to the last arch using it
  sched/cputime: Remove jiffies based cputime
  sched/cputime, vtime: Return nsecs instead of cputime_t to account
  sched/cputime: Complete nsec conversion of tick based accounting
  ...
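The rq-clock debug facility mentioned above turns a silently stale runqueue timestamp into a loud warning. A standalone toy model of the idea (illustrative names and types, not the kernel's actual code): every read of the cached clock asserts that update_rq_clock() has run since the runqueue lock was taken.

#include <assert.h>
#include <stdio.h>

/* Toy runqueue: just the cached clock and a freshness flag. */
struct rq {
	unsigned long clock;	/* cached timestamp */
	int clock_updated;	/* set by update_rq_clock(), cleared when the lock is retaken */
};

static void update_rq_clock(struct rq *rq)
{
	rq->clock++;		/* stand-in for reading the real clock */
	rq->clock_updated = 1;
}

static unsigned long rq_clock(struct rq *rq)
{
	/* The debug idea: a stale read becomes a loud failure. */
	assert(rq->clock_updated && "rq->clock read without update_rq_clock()");
	return rq->clock;
}

int main(void)
{
	struct rq rq = { .clock = 0, .clock_updated = 0 };

	update_rq_clock(&rq);	/* forgetting this line trips the assert */
	printf("clock = %lu\n", rq_clock(&rq));
	return 0;
}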
Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--  kernel/sched/rt.c  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index a688a8206727..e8836cfc4cdb 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -9,6 +9,7 @@
 #include <linux/irq_work.h>
 
 int sched_rr_timeslice = RR_TIMESLICE;
+int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
 
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
 
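This hunk pairs the internal jiffies-based slice with a new variable holding the same value in milliseconds, which the 'Show the sched_rr_timeslice SCHED_RR timeslice tuning knob in milliseconds' commit above exposes via sysctl. A minimal userspace sketch of the conversion, assuming HZ=250 purely for illustration (the kernel's HZ and RR_TIMESLICE come from its config):

#include <stdio.h>

#define HZ           250		/* assumed tick rate; config-dependent in the kernel */
#define MSEC_PER_SEC 1000
#define RR_TIMESLICE (100 * HZ / 1000)	/* default RR slice: 100 ms expressed in jiffies */

int main(void)
{
	int sched_rr_timeslice = RR_TIMESLICE;				/* jiffies */
	int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;	/* ms */

	printf("internal slice : %d jiffies\n", sched_rr_timeslice);	/* 25 */
	printf("sysctl value   : %d ms\n", sysctl_sched_rr_timeslice);	/* 100 */
	return 0;
}

With HZ=250 the internal slice is 25 jiffies, and the sysctl view reports the same duration as 100 ms regardless of HZ.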
@@ -1523,7 +1524,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 }
 
 static struct task_struct *
-pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
+pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	struct task_struct *p;
 	struct rt_rq *rt_rq = &rq->rt;
@@ -1535,9 +1536,9 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct pin_cookie coo
 	 * disabled avoiding further scheduler activity on it and we're
 	 * being very careful to re-start the picking loop.
 	 */
-	lockdep_unpin_lock(&rq->lock, cookie);
+	rq_unpin_lock(rq, rf);
 	pull_rt_task(rq);
-	lockdep_repin_lock(&rq->lock, cookie);
+	rq_repin_lock(rq, rf);
 	/*
 	 * pull_rt_task() can drop (and re-acquire) rq->lock; this
 	 * means a dl or stop task can slip in, in which case we need
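Together with the signature change in the previous hunk, this replaces the hand-threaded lockdep pin cookie with the new struct rq_flags, which carries the cookie on behalf of the caller. A stubbed, compile-alone sketch of the pattern (simplified types, not the kernel's definitions; the real calls are the lockdep_unpin_lock()/lockdep_repin_lock() visible on the '-' lines):

#include <stdio.h>

/* Stand-ins for kernel types; illustrative only. */
struct pin_cookie { int val; };
struct rq { int lock; };

struct rq_flags {
	unsigned long flags;		/* saved IRQ flags */
	struct pin_cookie cookie;	/* lockdep pin state, hidden from callers */
};

/* In the kernel this wraps lockdep_unpin_lock(&rq->lock, rf->cookie). */
static void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
{
	(void)rq;
	printf("unpin, cookie %d kept in rf\n", rf->cookie.val);
}

/* In the kernel this wraps lockdep_repin_lock(&rq->lock, rf->cookie). */
static void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
{
	(void)rq;
	printf("repin with cookie %d\n", rf->cookie.val);
}

int main(void)
{
	struct rq rq = { 0 };
	struct rq_flags rf = { .flags = 0, .cookie = { 42 } };

	rq_unpin_lock(&rq, &rf);	/* e.g. around pull_rt_task() */
	rq_repin_lock(&rq, &rf);
	return 0;
}

The upshot is that call sites like this one shrink to passing (rq, rf), and the cookie can no longer be mismatched between an unpin and its repin.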
@@ -2198,10 +2199,9 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 #ifdef CONFIG_SMP
 	if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
 		queue_push_tasks(rq);
-#else
+#endif /* CONFIG_SMP */
 	if (p->prio < rq->curr->prio)
 		resched_curr(rq);
-#endif /* CONFIG_SMP */
 	}
 }
 
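The last hunk moves the preemption check out of the #else branch: previously an SMP kernel only queued a push when a task switched to the RT class, and the priority comparison ran on UP builds alone; now the comparison and resched_curr() run in both configurations. A standalone reduction of the new control flow, with stub values in place of real task state:

#include <stdbool.h>
#include <stdio.h>

#define CONFIG_SMP 1	/* flip to 0 to model a UP build */

int main(void)
{
	int p_prio = 10, curr_prio = 50;	/* lower value = higher RT priority */
	bool many_cpus_allowed = true, overloaded = false;

#if CONFIG_SMP
	if (many_cpus_allowed && overloaded)
		printf("queue_push_tasks()\n");	/* push excess RT work to other CPUs */
#endif
	/* Before this change, SMP builds never reached this check. */
	if (p_prio < curr_prio)
		printf("resched_curr()\n");	/* preempt the lower-priority current task */
	return 0;
}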