path: root/kernel/sched/core.c
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c  76
1 file changed, 63 insertions(+), 13 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 257002c13bb0..3a673a3b0c6b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -83,7 +83,7 @@
 #endif
 
 #include "sched.h"
-#include "../workqueue_sched.h"
+#include "../workqueue_internal.h"
 #include "../smpboot.h"
 
 #define CREATE_TRACE_POINTS
@@ -1523,7 +1523,8 @@ out:
  */
 int wake_up_process(struct task_struct *p)
 {
-	return try_to_wake_up(p, TASK_ALL, 0);
+	WARN_ON(task_is_stopped_or_traced(p));
+	return try_to_wake_up(p, TASK_NORMAL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
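For reference, the hunk above narrows the set of task states wake_up_process() will act on and warns if a caller hands it a stopped or traced task. A paraphrased excerpt of the task-state masks from include/linux/sched.h of that era, shown only to illustrate the difference between the two masks (not part of this patch):

	#define TASK_INTERRUPTIBLE	1
	#define TASK_UNINTERRUPTIBLE	2
	#define __TASK_STOPPED		4
	#define __TASK_TRACED		8

	/* TASK_NORMAL covers only sleeping tasks. */
	#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
	/* TASK_ALL additionally matched stopped/traced tasks, which should be
	 * resumed via SIGCONT/ptrace rather than via wake_up_process(). */
	#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
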
@@ -4370,7 +4371,7 @@ bool __sched yield_to(struct task_struct *p, bool preempt)
 	struct task_struct *curr = current;
 	struct rq *rq, *p_rq;
 	unsigned long flags;
-	bool yielded = 0;
+	int yielded = 0;
 
 	local_irq_save(flags);
 	rq = this_rq();
@@ -4666,6 +4667,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	 */
 	idle->sched_class = &idle_sched_class;
 	ftrace_graph_init_idle_task(idle, cpu);
+	vtime_init_idle(idle);
 #if defined(CONFIG_SMP)
 	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
 #endif
@@ -7159,7 +7161,6 @@ static void free_sched_group(struct task_group *tg)
 struct task_group *sched_create_group(struct task_group *parent)
 {
 	struct task_group *tg;
-	unsigned long flags;
 
 	tg = kzalloc(sizeof(*tg), GFP_KERNEL);
 	if (!tg)
@@ -7171,6 +7172,17 @@ struct task_group *sched_create_group(struct task_group *parent)
 	if (!alloc_rt_sched_group(tg, parent))
 		goto err;
 
+	return tg;
+
+err:
+	free_sched_group(tg);
+	return ERR_PTR(-ENOMEM);
+}
+
+void sched_online_group(struct task_group *tg, struct task_group *parent)
+{
+	unsigned long flags;
+
 	spin_lock_irqsave(&task_group_lock, flags);
 	list_add_rcu(&tg->list, &task_groups);
 
@@ -7180,12 +7192,6 @@ struct task_group *sched_create_group(struct task_group *parent)
 	INIT_LIST_HEAD(&tg->children);
 	list_add_rcu(&tg->siblings, &parent->children);
 	spin_unlock_irqrestore(&task_group_lock, flags);
-
-	return tg;
-
-err:
-	free_sched_group(tg);
-	return ERR_PTR(-ENOMEM);
 }
 
 /* rcu callback to free various structures associated with a task group */
@@ -7198,6 +7204,12 @@ static void free_sched_group_rcu(struct rcu_head *rhp)
 /* Destroy runqueue etc associated with a task group */
 void sched_destroy_group(struct task_group *tg)
 {
+	/* wait for possible concurrent references to cfs_rqs complete */
+	call_rcu(&tg->rcu, free_sched_group_rcu);
+}
+
+void sched_offline_group(struct task_group *tg)
+{
 	unsigned long flags;
 	int i;
 
@@ -7209,9 +7221,6 @@ void sched_destroy_group(struct task_group *tg)
 	list_del_rcu(&tg->list);
 	list_del_rcu(&tg->siblings);
 	spin_unlock_irqrestore(&task_group_lock, flags);
-
-	/* wait for possible concurrent references to cfs_rqs complete */
-	call_rcu(&tg->rcu, free_sched_group_rcu);
 }
 
 /* change task's runqueue when it moves between groups.
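Taken together, the two splits above turn the single-step group create/destroy API into a two-phase one: sched_create_group()/sched_destroy_group() now only allocate and (RCU-)free, while sched_online_group()/sched_offline_group() link the group into and unlink it from the task_groups list. A hypothetical caller sketch, assuming a valid parent group (not part of this patch):

	struct task_group *tg;

	tg = sched_create_group(parent);	/* allocate cfs/rt run-queue structures only */
	if (IS_ERR(tg))
		return PTR_ERR(tg);
	sched_online_group(tg, parent);		/* link into task_groups under task_group_lock */

	/* ... group is live; later, tear down in the reverse order ... */
	sched_offline_group(tg);		/* unlink from the lists */
	sched_destroy_group(tg);		/* actual free is deferred via call_rcu() */
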
@@ -7507,6 +7516,25 @@ static int sched_rt_global_constraints(void)
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+int sched_rr_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos)
+{
+	int ret;
+	static DEFINE_MUTEX(mutex);
+
+	mutex_lock(&mutex);
+	ret = proc_dointvec(table, write, buffer, lenp, ppos);
+	/* make sure that internally we keep jiffies */
+	/* also, writing zero resets timeslice to default */
+	if (!ret && write) {
+		sched_rr_timeslice = sched_rr_timeslice <= 0 ?
+			RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
+	}
+	mutex_unlock(&mutex);
+	return ret;
+}
+
 int sched_rt_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos)
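The new sched_rr_handler() is the write-side handler for a sysctl; the corresponding table entry (sched_rr_timeslice_ms in kernel/sysctl.c) lives outside this file and is assumed here. A minimal user-space sketch, assuming that /proc path: the value is written in milliseconds, kept internally in jiffies, and writing zero (or a negative value) falls back to RR_TIMESLICE:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical example: request a 50 ms SCHED_RR timeslice */
		FILE *f = fopen("/proc/sys/kernel/sched_rr_timeslice_ms", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		fprintf(f, "%d\n", 50);
		fclose(f);
		return 0;
	}
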
@@ -7563,6 +7591,19 @@ static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
 	return &tg->css;
 }
 
+static int cpu_cgroup_css_online(struct cgroup *cgrp)
+{
+	struct task_group *tg = cgroup_tg(cgrp);
+	struct task_group *parent;
+
+	if (!cgrp->parent)
+		return 0;
+
+	parent = cgroup_tg(cgrp->parent);
+	sched_online_group(tg, parent);
+	return 0;
+}
+
 static void cpu_cgroup_css_free(struct cgroup *cgrp)
 {
 	struct task_group *tg = cgroup_tg(cgrp);
@@ -7570,6 +7611,13 @@ static void cpu_cgroup_css_free(struct cgroup *cgrp)
 	sched_destroy_group(tg);
 }
 
+static void cpu_cgroup_css_offline(struct cgroup *cgrp)
+{
+	struct task_group *tg = cgroup_tg(cgrp);
+
+	sched_offline_group(tg);
+}
+
 static int cpu_cgroup_can_attach(struct cgroup *cgrp,
 				 struct cgroup_taskset *tset)
 {
@@ -7925,6 +7973,8 @@ struct cgroup_subsys cpu_cgroup_subsys = {
 	.name		= "cpu",
 	.css_alloc	= cpu_cgroup_css_alloc,
 	.css_free	= cpu_cgroup_css_free,
+	.css_online	= cpu_cgroup_css_online,
+	.css_offline	= cpu_cgroup_css_offline,
 	.can_attach	= cpu_cgroup_can_attach,
 	.attach		= cpu_cgroup_attach,
 	.exit		= cpu_cgroup_exit,
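With the two callbacks wired up, the cpu controller's group lifecycle roughly follows the generic cgroup ordering. A sketch of the resulting call flow, using the names from the hunks above; the mkdir/rmdir trigger paths and the css_alloc-to-sched_create_group mapping are assumptions, not shown in this diff:

	/*
	 * mkdir <cgroup mount>/cpu/<grp>
	 *   -> cpu_cgroup_css_alloc()   -> sched_create_group()   (allocate)
	 *   -> cpu_cgroup_css_online()  -> sched_online_group()   (link into hierarchy)
	 *
	 * rmdir <cgroup mount>/cpu/<grp>
	 *   -> cpu_cgroup_css_offline() -> sched_offline_group()  (unlink)
	 *   -> cpu_cgroup_css_free()    -> sched_destroy_group()  (RCU-deferred free)
	 */
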