Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  36  +++++++++++++++++++++++++++---------
1 file changed, 27 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index dcd553cc4ee8..52b98675acb2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4422,7 +4422,7 @@ int task_nice(const struct task_struct *p)
 {
         return TASK_NICE(p);
 }
-EXPORT_SYMBOL_GPL(task_nice);
+EXPORT_SYMBOL(task_nice);
 
 /**
  * idle_cpu - is a given cpu idle currently?
@@ -5100,7 +5100,7 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
         time_slice = 0;
         if (p->policy == SCHED_RR) {
                 time_slice = DEF_TIMESLICE;
-        } else {
+        } else if (p->policy != SCHED_FIFO) {
                 struct sched_entity *se = &p->se;
                 unsigned long flags;
                 struct rq *rq;
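
Note (illustrative, not part of the patch): with the hunk above, sys_sched_rr_get_interval() reports DEF_TIMESLICE only for SCHED_RR tasks and no longer falls through to the sched_entity (CFS) path for SCHED_FIFO tasks, which are left with a zero timeslice. A minimal userspace sketch, assuming nothing beyond the standard sched_rr_get_interval(2) interface:

#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec ts;

        /* pid 0 queries the calling task; SCHED_RR tasks report their
         * timeslice, SCHED_FIFO tasks now report 0 */
        if (sched_rr_get_interval(0, &ts) == 0)
                printf("timeslice: %ld.%09ld s\n",
                       (long)ts.tv_sec, ts.tv_nsec);
        return 0;
}
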
@@ -7625,6 +7625,11 @@ void sched_move_task(struct task_struct *tsk)
 
         set_task_rq(tsk, task_cpu(tsk));
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+        if (tsk->sched_class->moved_group)
+                tsk->sched_class->moved_group(tsk);
+#endif
+
         if (on_rq) {
                 if (unlikely(running))
                         tsk->sched_class->set_curr_task(rq);
@@ -7721,9 +7726,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
         if (runtime == RUNTIME_INF)
                 return 1ULL << 16;
 
-        runtime *= (1ULL << 16);
-        div64_64(runtime, period);
-        return runtime;
+        return div64_64(runtime << 16, period);
 }
 
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
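
Note (illustrative, not part of the patch): the old to_ratio() scaled runtime by 2^16 and called div64_64(), but discarded the returned quotient and handed back the scaled runtime instead of the ratio; the replacement returns div64_64(runtime << 16, period) directly. A standalone sketch of the intended 16.16 fixed-point computation, with plain 64-bit division standing in for the kernel's div64_64():

#include <stdio.h>
#include <stdint.h>

/* runtime/period expressed as a 16.16 fixed-point fraction */
static uint64_t to_ratio_sketch(uint64_t period, uint64_t runtime)
{
        return (runtime << 16) / period;
}

int main(void)
{
        /* e.g. 950000us runtime in a 1000000us period: ~0.95 * 65536 = 62259 */
        printf("%llu\n", (unsigned long long)to_ratio_sketch(1000000, 950000));
        return 0;
}
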
@@ -7747,25 +7750,40 @@ static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
         return total + to_ratio(period, runtime) < global_ratio;
 }
 
+/* Must be called with tasklist_lock held */
+static inline int tg_has_rt_tasks(struct task_group *tg)
+{
+        struct task_struct *g, *p;
+        do_each_thread(g, p) {
+                if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
+                        return 1;
+        } while_each_thread(g, p);
+        return 0;
+}
+
 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
 {
         u64 rt_runtime, rt_period;
         int err = 0;
 
-        rt_period = sysctl_sched_rt_period * NSEC_PER_USEC;
+        rt_period = (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
         rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
         if (rt_runtime_us == -1)
-                rt_runtime = rt_period;
+                rt_runtime = RUNTIME_INF;
 
         mutex_lock(&rt_constraints_mutex);
+        read_lock(&tasklist_lock);
+        if (rt_runtime_us == 0 && tg_has_rt_tasks(tg)) {
+                err = -EBUSY;
+                goto unlock;
+        }
         if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
                 err = -EINVAL;
                 goto unlock;
         }
-        if (rt_runtime_us == -1)
-                rt_runtime = RUNTIME_INF;
         tg->rt_runtime = rt_runtime;
  unlock:
+        read_unlock(&tasklist_lock);
         mutex_unlock(&rt_constraints_mutex);
 
         return err;
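
Note (illustrative, not part of the patch): after the hunk above, sched_group_set_rt_runtime() maps rt_runtime_us == -1 to RUNTIME_INF before the schedulability check and, under read_lock(&tasklist_lock), refuses with -EBUSY to set a zero runtime on a group that still contains real-time tasks. A standalone sketch of that decision flow; tg_has_rt_tasks_stub() and rt_schedulable_stub() are hypothetical stand-ins for the kernel-side checks, and RUNTIME_INF is assumed to be an all-ones sentinel:

#include <errno.h>
#include <stdio.h>
#include <stdint.h>

#define RUNTIME_INF   UINT64_MAX      /* assumed "no limit" sentinel */
#define NSEC_PER_USEC 1000ULL

static int tg_has_rt_tasks_stub(void) { return 1; }  /* group still holds RT tasks */
static int rt_schedulable_stub(void)  { return 1; }  /* requested bandwidth fits */

static int set_rt_runtime_sketch(long rt_runtime_us)
{
        uint64_t rt_runtime = (uint64_t)rt_runtime_us * NSEC_PER_USEC;

        if (rt_runtime_us == -1)
                rt_runtime = RUNTIME_INF;      /* -1 means "no limit" */

        if (rt_runtime_us == 0 && tg_has_rt_tasks_stub())
                return -EBUSY;                 /* would starve existing RT tasks */

        if (!rt_schedulable_stub())
                return -EINVAL;                /* exceeds the global RT ratio */

        printf("rt_runtime = %llu ns\n", (unsigned long long)rt_runtime);
        return 0;
}

int main(void)
{
        set_rt_runtime_sketch(-1);   /* accepted: unlimited runtime */
        set_rt_runtime_sketch(0);    /* rejected with -EBUSY while RT tasks remain */
        return 0;
}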