Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 39 +++++++++++++++++++++++++++++----------
 1 file changed, 29 insertions(+), 10 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index dcd553cc4ee8..1cb53fb1fe3d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4422,7 +4422,7 @@ int task_nice(const struct task_struct *p)
 {
 	return TASK_NICE(p);
 }
-EXPORT_SYMBOL_GPL(task_nice);
+EXPORT_SYMBOL(task_nice);
 
 /**
  * idle_cpu - is a given cpu idle currently?
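
The hunk above relaxes the export of task_nice() from EXPORT_SYMBOL_GPL to plain EXPORT_SYMBOL, making it linkable from modules of any license. A minimal out-of-tree module sketch (hypothetical, not part of this patch) showing the call the weaker export permits:

	#include <linux/module.h>
	#include <linux/sched.h>

	static int __init nice_demo_init(void)
	{
		/* task_nice() maps the kernel-internal static priority of
		 * current back to the userspace nice range [-20, 19] */
		printk(KERN_INFO "nice of pid %d is %d\n",
		       current->pid, task_nice(current));
		return 0;
	}

	static void __exit nice_demo_exit(void)
	{
	}

	module_init(nice_demo_init);
	module_exit(nice_demo_exit);

	/* a non-GPL-compatible license like this one can resolve
	 * task_nice() only through the plain EXPORT_SYMBOL */
	MODULE_LICENSE("Proprietary");
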
@@ -5100,7 +5100,7 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
 	time_slice = 0;
 	if (p->policy == SCHED_RR) {
 		time_slice = DEF_TIMESLICE;
-	} else {
+	} else if (p->policy != SCHED_FIFO) {
 		struct sched_entity *se = &p->se;
 		unsigned long flags;
 		struct rq *rq;
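
The plain else branch used to compute a CFS slice even for SCHED_FIFO tasks; with the else if, FIFO tasks (which run until they block or yield and have no timeslice) now fall through and report the time_slice of 0 initialized above. A hypothetical userspace check of the syscall's corrected behaviour:

	#include <sched.h>
	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct sched_param sp = { .sched_priority = 1 };
		struct timespec ts;

		/* switching to a realtime policy needs root (CAP_SYS_NICE) */
		if (sched_setscheduler(0, SCHED_FIFO, &sp)) {
			perror("sched_setscheduler");
			return 1;
		}

		/* after the fix: 0.000000000 for SCHED_FIFO;
		 * SCHED_RR would report DEF_TIMESLICE instead */
		if (sched_rr_get_interval(0, &ts) == 0)
			printf("interval: %ld.%09ld s\n",
			       (long)ts.tv_sec, ts.tv_nsec);
		return 0;
	}
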
@@ -5881,7 +5881,8 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		spin_unlock_irq(&rq->lock);
 		break;
 
-	case CPU_DOWN_PREPARE:
+	case CPU_DYING:
+	case CPU_DYING_FROZEN:
 		/* Update our root-domain */
 		rq = cpu_rq(cpu);
 		spin_lock_irqsave(&rq->lock, flags);
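
Detaching the runqueue from its root domain moves from CPU_DOWN_PREPARE, a phase that can still fail and be rolled back, to CPU_DYING, which runs on the CPU being offlined with interrupts disabled once the offline is committed; the added CPU_DYING_FROZEN case covers the same event when hotplug is driven by suspend/resume. A minimal notifier sketch (hypothetical names) following the same pattern:

	#include <linux/cpu.h>
	#include <linux/kernel.h>
	#include <linux/notifier.h>

	static int demo_cpu_callback(struct notifier_block *nfb,
				     unsigned long action, void *hcpu)
	{
		int cpu = (long)hcpu;

		switch (action) {
		/* handle both the normal and the suspend/resume (_FROZEN)
		 * flavour of the event, as migration_call() now does */
		case CPU_DYING:
		case CPU_DYING_FROZEN:
			/* runs on the dying CPU with interrupts off, so
			 * per-cpu state can be torn down without racing */
			printk(KERN_INFO "cpu %d dying\n", cpu);
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block demo_cpu_notifier = {
		.notifier_call = demo_cpu_callback,
	};

Such a block would be hooked up with register_cpu_notifier(&demo_cpu_notifier) during init.
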
@@ -7625,6 +7626,11 @@ void sched_move_task(struct task_struct *tsk)
 
 	set_task_rq(tsk, task_cpu(tsk));
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	if (tsk->sched_class->moved_group)
+		tsk->sched_class->moved_group(tsk);
+#endif
+
 	if (on_rq) {
 		if (unlikely(running))
 			tsk->sched_class->set_curr_task(rq);
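
The new moved_group hook gives a scheduling class a chance to fix up per-class state after set_task_rq() has pointed the task at its new group's runqueue. Roughly how the fair class can wire it up; this is a sketch modeled on CFS internals, and names like moved_group_fair are illustrative:

	#ifdef CONFIG_FAIR_GROUP_SCHED
	/* re-place the entity on its new group's cfs_rq so its vruntime
	 * is measured against the new queue's min_vruntime, not the old
	 * group's timeline */
	static void moved_group_fair(struct task_struct *p)
	{
		struct cfs_rq *cfs_rq = task_cfs_rq(p);

		update_curr(cfs_rq);
		place_entity(cfs_rq, &p->se, 1);
	}
	#endif

	static const struct sched_class fair_sched_class = {
		/* ... existing callbacks ... */
	#ifdef CONFIG_FAIR_GROUP_SCHED
		.moved_group	= moved_group_fair,
	#endif
	};
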
@@ -7721,9 +7727,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 	if (runtime == RUNTIME_INF)
 		return 1ULL << 16;
 
-	runtime *= (1ULL << 16);
-	div64_64(runtime, period);
-	return runtime;
+	return div64_64(runtime << 16, period);
 }
 
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
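
This fixes a real bug: the old body scaled runtime by 2^16, then called div64_64() and threw its result away. div64_64() returns the quotient rather than modifying its argument, so the function returned the scaled runtime and never actually divided by period. The one-line replacement returns the genuine runtime/period ratio in 16.16 fixed point. A standalone sketch of the intended math (plain C, outside the kernel):

	#include <stdio.h>
	#include <stdint.h>

	/* 16.16 fixed-point ratio, as to_ratio() computes after the fix */
	static unsigned long to_ratio(uint64_t period, uint64_t runtime)
	{
		return (unsigned long)((runtime << 16) / period);
	}

	int main(void)
	{
		/* 950ms of runtime in a 1s period: 0.95 * 65536 = 62259 */
		printf("%lu\n", to_ratio(1000000, 950000));

		/* the buggy version would have returned 62259200000,
		 * i.e. runtime * 65536, the scaled dividend */
		return 0;
	}
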
@@ -7747,25 +7751,40 @@ static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 	return total + to_ratio(period, runtime) < global_ratio;
 }
 
+/* Must be called with tasklist_lock held */
+static inline int tg_has_rt_tasks(struct task_group *tg)
+{
+	struct task_struct *g, *p;
+	do_each_thread(g, p) {
+		if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
+			return 1;
+	} while_each_thread(g, p);
+	return 0;
+}
+
 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
 {
 	u64 rt_runtime, rt_period;
 	int err = 0;
 
-	rt_period = sysctl_sched_rt_period * NSEC_PER_USEC;
+	rt_period = (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
 	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
 	if (rt_runtime_us == -1)
-		rt_runtime = rt_period;
+		rt_runtime = RUNTIME_INF;
 
 	mutex_lock(&rt_constraints_mutex);
+	read_lock(&tasklist_lock);
+	if (rt_runtime_us == 0 && tg_has_rt_tasks(tg)) {
+		err = -EBUSY;
+		goto unlock;
+	}
 	if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
 		err = -EINVAL;
 		goto unlock;
 	}
-	if (rt_runtime_us == -1)
-		rt_runtime = RUNTIME_INF;
 	tg->rt_runtime = rt_runtime;
 unlock:
+	read_unlock(&tasklist_lock);
 	mutex_unlock(&rt_constraints_mutex);
 
 	return err;
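
Three fixes land in this last hunk. First, the (u64) cast: sysctl_sched_rt_period is a 32-bit int, so on a 32-bit kernel the old multiply by NSEC_PER_USEC happened in 32-bit arithmetic and could wrap for large periods before being stored into the u64. A standalone demonstration of the wrap (plain C, hypothetical values):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* pretend sysctl_sched_rt_period was raised to 5 seconds */
		uint32_t period_us = 5000000;
		uint32_t nsec_per_usec = 1000;

		/* 32-bit product wraps (5e9 mod 2^32) before widening */
		uint64_t wrong = (uint64_t)(period_us * nsec_per_usec);
		/* widening one operand first keeps the full product */
		uint64_t right = (uint64_t)period_us * nsec_per_usec;

		printf("wrong: %llu\nright: %llu\n",
		       (unsigned long long)wrong, (unsigned long long)right);
		return 0;
	}

Second, rt_runtime_us == -1 now maps to RUNTIME_INF before the __rt_schedulable() check rather than after it, so "unlimited" is validated as unlimited. Third, the new tg_has_rt_tasks() walk (it iterates every thread, hence the read_lock/read_unlock of tasklist_lock around the critical section) forbids setting a group's runtime to zero while realtime tasks still live in it; they would otherwise never be scheduled again. From userspace the refusal surfaces as EBUSY on the cgroup write, e.g. with a hypothetical mount point and group:

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/cgroup/rtgroup/cpu.rt_runtime_us",
			      O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* with an RT task still in the group this fails: EBUSY */
		if (write(fd, "0", 1) < 0)
			printf("write: %s\n", strerror(errno));
		close(fd);
		return 0;
	}
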