Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--  kernel/sched/rt.c | 28 ++++++++++++++++++----------
1 file changed, 18 insertions(+), 10 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 418feb01344e..127a2c4cf4ab 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -7,6 +7,8 @@
 
 #include <linux/slab.h>
 
+int sched_rr_timeslice = RR_TIMESLICE;
+
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
 
 struct rt_bandwidth def_rt_bandwidth;
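Note: the hunk above turns the SCHED_RR quantum into a runtime variable, sched_rr_timeslice, seeded from the existing RR_TIMESLICE constant; the knob that actually updates it at runtime is presumably added elsewhere in the same series and is not part of this diff. A minimal userspace sketch of the tick arithmetic involved, not kernel code (the HZ value and the 100 ms default are assumptions mirroring the kernel's RR_TIMESLICE definition):

/*
 * Userspace sketch only: models how a 100 ms round-robin quantum maps
 * to scheduler ticks (jiffies).  HZ=250 is an assumed tick rate.
 */
#include <stdio.h>

#define HZ 250
#define RR_TIMESLICE (100 * HZ / 1000)  /* default quantum: 100 ms in ticks */

static int sched_rr_timeslice = RR_TIMESLICE;   /* runtime-tunable copy */

int main(void)
{
	printf("default RR quantum: %d ticks (%d ms at HZ=%d)\n",
	       sched_rr_timeslice, sched_rr_timeslice * 1000 / HZ, HZ);
	return 0;
}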
@@ -566,7 +568,7 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 static int do_balance_runtime(struct rt_rq *rt_rq)
 {
 	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
-	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
+	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
 	int i, weight, more = 0;
 	u64 rt_period;
 
@@ -925,8 +927,8 @@ static void update_curr_rt(struct rq *rq)
 		return;
 
 	delta_exec = rq->clock_task - curr->se.exec_start;
-	if (unlikely((s64)delta_exec < 0))
-		delta_exec = 0;
+	if (unlikely((s64)delta_exec <= 0))
+		return;
 
 	schedstat_set(curr->se.statistics.exec_max,
 		      max(curr->se.statistics.exec_max, delta_exec));
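Note: update_curr_rt() now returns early when the computed runtime delta is zero or negative, instead of clamping it to zero and still running the accounting below. A standalone sketch of that guard (account_delta() is a hypothetical helper, not a kernel function); the delta is computed in unsigned arithmetic, so a clock that appears to run backwards shows up as a value whose signed interpretation is negative:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper mirroring the new guard in update_curr_rt(). */
static bool account_delta(uint64_t now, uint64_t exec_start, uint64_t *delta_out)
{
	uint64_t delta_exec = now - exec_start;

	/* Clock went backwards or no time elapsed: nothing to account. */
	if ((int64_t)delta_exec <= 0)
		return false;

	*delta_out = delta_exec;
	return true;
}

int main(void)
{
	uint64_t d;

	printf("%d\n", account_delta(100, 90, &d));	/* 1: 10 units accounted */
	printf("%d\n", account_delta(90, 100, &d));	/* 0: clock went backwards */
	return 0;
}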
@@ -1427,8 +1429,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
-	    (p->nr_cpus_allowed > 1))
+	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
 		return 1;
 	return 0;
 }
@@ -1889,8 +1890,11 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
 	 * we may need to handle the pulling of RT tasks
 	 * now.
 	 */
-	if (p->on_rq && !rq->rt.rt_nr_running)
-		pull_rt_task(rq);
+	if (!p->on_rq || rq->rt.rt_nr_running)
+		return;
+
+	if (pull_rt_task(rq))
+		resched_task(rq->curr);
 }
 
 void init_sched_rt_class(void)
@@ -1985,7 +1989,11 @@ static void watchdog(struct rq *rq, struct task_struct *p)
 	if (soft != RLIM_INFINITY) {
 		unsigned long next;
 
-		p->rt.timeout++;
+		if (p->rt.watchdog_stamp != jiffies) {
+			p->rt.timeout++;
+			p->rt.watchdog_stamp = jiffies;
+		}
+
 		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
 		if (p->rt.timeout > next)
 			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
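Note: the watchdog hunk adds a per-task stamp so p->rt.timeout advances at most once per jiffy, even if the watchdog runs more than once within the same tick. A userspace sketch of that once-per-tick accounting (names and types here are illustrative, not the kernel's):

#include <stdio.h>

struct rt_watchdog {
	unsigned long timeout;		/* ticks charged against RLIMIT_RTTIME */
	unsigned long watchdog_stamp;	/* jiffy value of the last increment */
};

static void watchdog_tick(struct rt_watchdog *wd, unsigned long jiffies_now)
{
	if (wd->watchdog_stamp != jiffies_now) {	/* first call in this tick */
		wd->timeout++;
		wd->watchdog_stamp = jiffies_now;
	}
}

int main(void)
{
	struct rt_watchdog wd = { 0, 0 };

	watchdog_tick(&wd, 1);
	watchdog_tick(&wd, 1);		/* same jiffy: no double count */
	watchdog_tick(&wd, 2);
	printf("timeout = %lu (expected 2)\n", wd.timeout);
	return 0;
}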
@@ -2010,7 +2018,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 	if (--p->rt.time_slice)
 		return;
 
-	p->rt.time_slice = RR_TIMESLICE;
+	p->rt.time_slice = sched_rr_timeslice;
 
 	/*
 	 * Requeue to the end of queue if we (and all of our ancestors) are the
@@ -2041,7 +2049,7 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 	 * Time slice is 0 for SCHED_FIFO tasks
 	 */
 	if (task->policy == SCHED_RR)
-		return RR_TIMESLICE;
+		return sched_rr_timeslice;
 	else
 		return 0;
 }
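Note: the last two hunks replace the compile-time RR_TIMESLICE with the new sched_rr_timeslice variable at both use sites: the quantum refill in task_tick_rt() and the value reported by get_rr_interval_rt() (which stays 0 for SCHED_FIFO). A userspace sketch of the resulting round-robin tick logic, with the requeue step stubbed out and all names illustrative rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

static int sched_rr_timeslice = 25;	/* assumed default: 100 ms at HZ=250 */

struct rr_task {
	int time_slice;			/* remaining ticks in the current quantum */
};

/* Returns true when the quantum is exhausted and the task should be requeued. */
static bool rr_task_tick(struct rr_task *t)
{
	if (--t->time_slice)
		return false;

	t->time_slice = sched_rr_timeslice;	/* refill from the runtime tunable */
	return true;				/* caller requeues to the tail */
}

int main(void)
{
	struct rr_task t = { .time_slice = 2 };

	printf("%d\n", rr_task_tick(&t));	/* 0: one tick left */
	printf("%d\n", rr_task_tick(&t));	/* 1: quantum exhausted, refilled */
	return 0;
}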