Diffstat (limited to 'kernel/sched/rt.c')
 kernel/sched/rt.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index a688a8206727..e8836cfc4cdb 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -9,6 +9,7 @@
 #include <linux/irq_work.h>
 
 int sched_rr_timeslice = RR_TIMESLICE;
+int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
 
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
 
@@ -1523,7 +1524,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 }
 
 static struct task_struct *
-pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
+pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	struct task_struct *p;
 	struct rt_rq *rt_rq = &rq->rt;
@@ -1535,9 +1536,9 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct pin_cookie coo
		 * disabled avoiding further scheduler activity on it and we're
		 * being very careful to re-start the picking loop.
		 */
-		lockdep_unpin_lock(&rq->lock, cookie);
+		rq_unpin_lock(rq, rf);
 		pull_rt_task(rq);
-		lockdep_repin_lock(&rq->lock, cookie);
+		rq_repin_lock(rq, rf);
 		/*
		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
		 * means a dl or stop task can slip in, in which case we need
@@ -2198,10 +2199,9 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 #ifdef CONFIG_SMP
 		if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
 			queue_push_tasks(rq);
-#else
+#endif /* CONFIG_SMP */
 		if (p->prio < rq->curr->prio)
 			resched_curr(rq);
-#endif /* CONFIG_SMP */
 	}
 }
 