Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 27
1 file changed, 22 insertions(+), 5 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index ea3e5eff3878..42eab5a8437d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -553,9 +553,6 @@ struct rq {
 	/* try_to_wake_up() stats */
 	unsigned int ttwu_count;
 	unsigned int ttwu_local;
-
-	/* BKL stats */
-	unsigned int bkl_count;
 #endif
 };
 
@@ -609,6 +606,9 @@ static inline struct task_group *task_group(struct task_struct *p)
 	struct task_group *tg;
 	struct cgroup_subsys_state *css;
 
+	if (p->flags & PF_EXITING)
+		return &root_task_group;
+
 	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
 			lockdep_is_held(&task_rq(p)->lock));
 	tg = container_of(css, struct task_group, css);
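Routing exiting tasks to root_task_group matters because the scheduler dereferences whatever group task_group() returns when it re-homes the task's scheduling entities; if the task's cgroup is already being torn down, chasing its css could touch freed memory. A trimmed sketch of that consumer (modelled on the CONFIG_FAIR_GROUP_SCHED part of set_task_rq() in this file, RT group handling omitted; the exact body may differ in this tree):

	/* Sketch only: how task_group() is consumed when a task is (re)attached
	 * to a runqueue. With the PF_EXITING check above, an exiting task always
	 * resolves to &root_task_group, whose cfs_rq/se arrays are never freed. */
	static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
	{
	#ifdef CONFIG_FAIR_GROUP_SCHED
		p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
		p->se.parent = task_group(p)->se[cpu];
	#endif
	}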
@@ -3887,7 +3887,7 @@ static inline void schedule_debug(struct task_struct *prev)
 	schedstat_inc(this_rq(), sched_count);
 #ifdef CONFIG_SCHEDSTATS
 	if (unlikely(prev->lock_depth >= 0)) {
-		schedstat_inc(this_rq(), bkl_count);
+		schedstat_inc(this_rq(), rq_sched_info.bkl_count);
 		schedstat_inc(prev, sched_info.bkl_count);
 	}
 #endif
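With the standalone rq->bkl_count field removed above, the BKL statistic now lives in struct sched_info, which exists both per task and embedded in the runqueue as rq_sched_info. A minimal sketch, assuming the usual CONFIG_SCHEDSTATS definition of schedstat_inc(ptr, field) as a plain increment, of what the two calls in this hunk boil down to:

	/* Sketch: under CONFIG_SCHEDSTATS, schedstat_inc(ptr, field) is roughly
	 * "do { (ptr)->field++; } while (0)", so the hunk above effectively does: */
	this_rq()->rq_sched_info.bkl_count++;	/* per-runqueue count of schedules with the BKL held */
	prev->sched_info.bkl_count++;		/* per-task count */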
@@ -4213,6 +4213,7 @@ void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
 {
 	__wake_up_common(q, mode, 1, 0, key);
 }
+EXPORT_SYMBOL_GPL(__wake_up_locked_key);
 
 /**
  * __wake_up_sync_key - wake up threads blocked on a waitqueue.
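Exporting __wake_up_locked_key lets GPL modules perform a keyed wakeup while already holding the waitqueue lock. A usage sketch (example_wake and its key argument are hypothetical, not part of this patch):

	#include <linux/sched.h>
	#include <linux/spinlock.h>
	#include <linux/wait.h>

	/* Hypothetical caller: update state and wake a waiter under q->lock. */
	static void example_wake(wait_queue_head_t *q, void *key)
	{
		unsigned long flags;

		spin_lock_irqsave(&q->lock, flags);
		/* ... modify whatever condition the waiters re-check ... */
		__wake_up_locked_key(q, TASK_NORMAL, key);
		spin_unlock_irqrestore(&q->lock, flags);
	}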
@@ -4871,7 +4872,8 @@ recheck:
 	 * assigned.
 	 */
 	if (rt_bandwidth_enabled() && rt_policy(policy) &&
-			task_group(p)->rt_bandwidth.rt_runtime == 0) {
+			task_group(p)->rt_bandwidth.rt_runtime == 0 &&
+			!task_group_is_autogroup(task_group(p))) {
 		__task_rq_unlock(rq);
 		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 		return -EPERM;
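The extra task_group_is_autogroup() test exempts autogroup task groups from the group RT bandwidth check: autogroups never receive rt_runtime, so without the exemption this path would always return -EPERM for their tasks when RT group scheduling is enabled. A hedged userspace illustration of the call this unbreaks (the usual privilege and RLIMIT_RTPRIO checks still apply):

	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		struct sched_param sp = { .sched_priority = 1 };

		/* For a task sitting in an autogroup, this used to fail with
		 * errno == EPERM because the group's rt_runtime is 0; with the
		 * exemption above it can succeed, given sufficient privileges. */
		if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1)
			perror("sched_setscheduler");
		else
			printf("running SCHED_FIFO\n");
		return 0;
	}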
@@ -8882,6 +8884,20 @@ cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 	}
 }
 
+static void
+cpu_cgroup_exit(struct cgroup_subsys *ss, struct task_struct *task)
+{
+	/*
+	 * cgroup_exit() is called in the copy_process() failure path.
+	 * Ignore this case since the task hasn't ran yet, this avoids
+	 * trying to poke a half freed task state from generic code.
+	 */
+	if (!(task->flags & PF_EXITING))
+		return;
+
+	sched_move_task(task);
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
 				u64 shareval)
@@ -8954,6 +8970,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
 	.destroy	= cpu_cgroup_destroy,
 	.can_attach	= cpu_cgroup_can_attach,
 	.attach		= cpu_cgroup_attach,
+	.exit		= cpu_cgroup_exit,
 	.populate	= cpu_cgroup_populate,
 	.subsys_id	= cpu_cgroup_subsys_id,
 	.early_init	= 1,
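Wiring cpu_cgroup_exit into the subsystem's .exit slot means the cgroup core invokes it for every exiting task, so the scheduler moves the task back to the root group before its old group can disappear, matching the PF_EXITING check added to task_group() earlier in this diff. A rough sketch of the dispatch on the cgroup side (not taken verbatim from kernel/cgroup.c) of how cgroup_exit() reaches this hook:

	/* Sketch only: on task exit the cgroup core walks the registered
	 * subsystems and calls each non-NULL ->exit() hook for the task. */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];

		if (ss && ss->exit)
			ss->exit(ss, tsk);
	}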