Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 51 +++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 37 insertions(+), 14 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 52bbf1c842a8..8e2558c2ba67 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -223,7 +223,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 {
 	ktime_t now;
 
-	if (rt_bandwidth_enabled() && rt_b->rt_runtime == RUNTIME_INF)
+	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
 		return;
 
 	if (hrtimer_active(&rt_b->rt_period_timer))
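This hunk inverts the early-return guard: the period timer is now skipped whenever RT bandwidth is globally disabled, not only when the runtime is unlimited. A minimal sketch of the resulting rule, written as a hypothetical helper that is not part of the patch:

/*
 * Hypothetical helper, for illustration only: the timer is needed exactly
 * when throttling can take effect, i.e. RT bandwidth is enabled and the
 * runtime is finite. This is the negation of the early-return above.
 */
static inline int rt_bandwidth_needs_timer(struct rt_bandwidth *rt_b)
{
	return rt_bandwidth_enabled() && rt_b->rt_runtime != RUNTIME_INF;
}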
@@ -3880,19 +3880,24 @@ int select_nohz_load_balancer(int stop_tick)
 	int cpu = smp_processor_id();
 
 	if (stop_tick) {
-		cpumask_set_cpu(cpu, nohz.cpu_mask);
 		cpu_rq(cpu)->in_nohz_recently = 1;
 
-		/*
-		 * If we are going offline and still the leader, give up!
-		 */
-		if (!cpu_active(cpu) &&
-		    atomic_read(&nohz.load_balancer) == cpu) {
+		if (!cpu_active(cpu)) {
+			if (atomic_read(&nohz.load_balancer) != cpu)
+				return 0;
+
+			/*
+			 * If we are going offline and still the leader,
+			 * give up!
+			 */
 			if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
 				BUG();
+
 			return 0;
 		}
 
+		cpumask_set_cpu(cpu, nohz.cpu_mask);
+
 		/* time for ilb owner also to sleep */
 		if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
 			if (atomic_read(&nohz.load_balancer) == cpu)
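Two things change in this hunk: a CPU headed offline now bails out before it ever publishes itself in nohz.cpu_mask, and an offline CPU that is not the idle-load-balancer returns immediately instead of falling through. The ownership hand-off it relies on can be sketched as a hypothetical helper (not part of the patch):

/*
 * Hypothetical sketch of the resign step used above: only the current
 * idle-load-balancer may clear the slot, and the cmpxchg must still see
 * our own CPU id there, otherwise the bookkeeping is inconsistent.
 */
static void resign_ilb(atomic_t *load_balancer, int cpu)
{
	if (atomic_read(load_balancer) != cpu)
		return;		/* we never were the owner */

	if (atomic_cmpxchg(load_balancer, cpu, -1) != cpu)
		BUG();		/* the owner changed under us */
}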
@@ -4687,8 +4692,8 @@ EXPORT_SYMBOL(default_wake_function);
  * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
-static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
-			     int nr_exclusive, int sync, void *key)
+void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+			int nr_exclusive, int sync, void *key)
 {
 	wait_queue_t *curr, *next;
 
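Dropping static here exposes __wake_up_common() to other compilation units, which only helps if callers can see a prototype. A sketch of the companion declaration one would expect; the header location is an assumption and is not shown in this diff:

/* Assumed companion declaration, e.g. in include/linux/wait.h: */
extern void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
			     int nr_exclusive, int sync, void *key);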
@@ -6939,20 +6944,26 @@ static void free_rootdomain(struct root_domain *rd)
 
 static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 {
+	struct root_domain *old_rd = NULL;
 	unsigned long flags;
 
 	spin_lock_irqsave(&rq->lock, flags);
 
 	if (rq->rd) {
-		struct root_domain *old_rd = rq->rd;
+		old_rd = rq->rd;
 
 		if (cpumask_test_cpu(rq->cpu, old_rd->online))
 			set_rq_offline(rq);
 
 		cpumask_clear_cpu(rq->cpu, old_rd->span);
 
-		if (atomic_dec_and_test(&old_rd->refcount))
-			free_rootdomain(old_rd);
+		/*
+		 * If we don't want to free the old_rd yet then
+		 * set old_rd to NULL to skip the freeing later
+		 * in this function:
+		 */
+		if (!atomic_dec_and_test(&old_rd->refcount))
+			old_rd = NULL;
 	}
 
 	atomic_inc(&rd->refcount);
@@ -6963,6 +6974,9 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 		set_rq_online(rq);
 
 	spin_unlock_irqrestore(&rq->lock, flags);
+
+	if (old_rd)
+		free_rootdomain(old_rd);
 }
 
 static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem)
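Together these two hunks defer free_rootdomain(): the last-reference decision is still taken under rq->lock, but the actual teardown now runs after the lock (and its irq-disabled section) has been released. A condensed sketch of the pattern, with an illustrative function name and the refcounting of the new root domain and the online/span bookkeeping omitted:

/* Illustrative pattern only: decide under the lock, free outside it. */
static void attach_root_sketch(struct rq *rq, struct root_domain *new_rd)
{
	struct root_domain *doomed = NULL;
	unsigned long flags;

	spin_lock_irqsave(&rq->lock, flags);
	if (rq->rd && atomic_dec_and_test(&rq->rd->refcount))
		doomed = rq->rd;	/* last reference: free it later */
	rq->rd = new_rd;		/* switch to the new root domain */
	spin_unlock_irqrestore(&rq->lock, flags);

	if (doomed)
		free_rootdomain(doomed);	/* lock is no longer held */
}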
@@ -9210,6 +9224,16 @@ static int sched_rt_global_constraints(void)
 
 	return ret;
 }
+
+int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
+{
+	/* Don't accept realtime tasks when there is no way for them to run */
+	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
+		return 0;
+
+	return 1;
+}
+
 #else /* !CONFIG_RT_GROUP_SCHED */
 static int sched_rt_global_constraints(void)
 {
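The new sched_rt_can_attach() helper factors the "realtime task into a group with zero runtime" check out of the cgroup callback; the final hunk below switches cpu_cgroup_can_attach() over to it. Another caller would presumably use it the same way; this fragment is a hypothetical sketch, with tg and tsk standing in for whatever the caller has at hand:

	/* Refuse to move an RT task into a group that can never run it. */
	if (!sched_rt_can_attach(tg, tsk))
		return -EINVAL;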
@@ -9303,8 +9327,7 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 		      struct task_struct *tsk)
 {
 #ifdef CONFIG_RT_GROUP_SCHED
-	/* Don't accept realtime tasks when there is no way for them to run */
-	if (rt_task(tsk) && cgroup_tg(cgrp)->rt_bandwidth.rt_runtime == 0)
+	if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
 		return -EINVAL;
 #else
 	/* We don't support RT-tasks being in separate groups */