Diffstat (limited to 'kernel/sched.c')

 kernel/sched.c | 26 +++++++++++++++++++++-----
 1 file changed, 21 insertions(+), 5 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index ea3e5eff3878..18d38e4ec7ba 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -553,9 +553,6 @@ struct rq {
 	/* try_to_wake_up() stats */
 	unsigned int ttwu_count;
 	unsigned int ttwu_local;
-
-	/* BKL stats */
-	unsigned int bkl_count;
 #endif
 };
 
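This is part of removing the Big Kernel Lock: the dedicated bkl_count field
leaves struct rq's schedstats block, and the counter survives inside the rq's
embedded struct sched_info (see the schedule_debug() hunk below). An abridged
sketch of the resulting layout; field order and comments are illustrative,
though rq_sched_info itself is grounded by that later hunk:

struct rq {
	/* ... runqueue fields ... */
#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;	/* bkl_count now lives in here */

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif
};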
@@ -609,6 +606,9 @@ static inline struct task_group *task_group(struct task_struct *p)
 	struct task_group *tg;
 	struct cgroup_subsys_state *css;
 
+	if (p->flags & PF_EXITING)
+		return &root_task_group;
+
 	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
 			lockdep_is_held(&task_rq(p)->lock));
 	tg = container_of(css, struct task_group, css);
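Generic cgroup code can detach an exiting task from its css while the
scheduler still holds references to the task, so chasing p's css here can
touch freed memory. The hunk pins exiting tasks to root_task_group instead;
together with the cpu_cgroup_exit() hook added later in this patch, that
closes the use-after-free window. Reconstructed from the hunk plus its
context, the guarded lookup reads roughly as below; the final mapping of tg
before return lies outside the hunk, so treat the tail as an assumption:

static inline struct task_group *task_group(struct task_struct *p)
{
	struct task_group *tg;
	struct cgroup_subsys_state *css;

	/*
	 * Exiting tasks race with cgroup teardown; account them to the
	 * root group rather than dereferencing a css that cgroup_exit()
	 * may already have dropped.
	 */
	if (p->flags & PF_EXITING)
		return &root_task_group;

	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
			lockdep_is_held(&task_rq(p)->lock));
	tg = container_of(css, struct task_group, css);

	return tg;	/* the in-tree tail may map tg further (autogroup) */
}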
@@ -3887,7 +3887,7 @@ static inline void schedule_debug(struct task_struct *prev)
 	schedstat_inc(this_rq(), sched_count);
 #ifdef CONFIG_SCHEDSTATS
 	if (unlikely(prev->lock_depth >= 0)) {
-		schedstat_inc(this_rq(), bkl_count);
+		schedstat_inc(this_rq(), rq_sched_info.bkl_count);
 		schedstat_inc(prev, sched_info.bkl_count);
 	}
 #endif
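With rq->bkl_count gone, the per-runqueue BKL statistic moves into the
sched_info instance embedded in struct rq, mirroring the per-task counter
bumped on the next line. schedstat_inc() is only a guarded increment, so
nothing changes but the counter's home. As defined in this era's
kernel/sched_stats.h for CONFIG_SCHEDSTATS builds (it compiles away
otherwise):

/*
 * With this definition, the two calls in the hunk boil down to:
 *
 *	this_rq()->rq_sched_info.bkl_count++;
 *	prev->sched_info.bkl_count++;
 */
#define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)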
@@ -4871,7 +4871,8 @@ recheck:
 	 * assigned.
 	 */
 	if (rt_bandwidth_enabled() && rt_policy(policy) &&
-			task_group(p)->rt_bandwidth.rt_runtime == 0) {
+			task_group(p)->rt_bandwidth.rt_runtime == 0 &&
+			!task_group_is_autogroup(task_group(p))) {
 		__task_rq_unlock(rq);
 		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 		return -EPERM;
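Autogroups, the per-session task groups created automatically under
CONFIG_SCHED_AUTOGROUP, never have rt_runtime configured, so this check used
to return -EPERM for any RT policy request from a task in an autogroup. The
extra condition exempts autogroups while keeping the bandwidth check for
explicitly configured cgroups. A hedged sketch of the helper follows; the
tg->autogroup back-pointer and the exact body are assumptions modeled on the
autogroup code of this era, not a verbatim copy:

#ifdef CONFIG_SCHED_AUTOGROUP
static inline bool task_group_is_autogroup(struct task_group *tg)
{
	/* the root group is never treated as an autogroup */
	return tg != &root_task_group && !!tg->autogroup;
}
#else
static inline bool task_group_is_autogroup(struct task_group *tg)
{
	return false;	/* without the option, nothing is an autogroup */
}
#endif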
@@ -8882,6 +8883,20 @@ cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 	}
 }
 
+static void
+cpu_cgroup_exit(struct cgroup_subsys *ss, struct task_struct *task)
+{
+	/*
+	 * cgroup_exit() is called in the copy_process() failure path.
+	 * Ignore this case since the task hasn't ran yet, this avoids
+	 * trying to poke a half freed task state from generic code.
+	 */
+	if (!(task->flags & PF_EXITING))
+		return;
+
+	sched_move_task(task);
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
 		u64 shareval)
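cpu_cgroup_exit() moves the dying task back to the root group while its
cgroup state is still intact, which is what lets task_group() above fall
back to root_task_group safely. The PF_EXITING test filters out the other
cgroup_exit() caller, the copy_process() failure path, where the
half-constructed child never ran and must not be poked. A hedged sketch of
how generic code reaches the hook; the loop shape, subsys[] array and
need_forkexit_callback flag are modeled on this era's kernel/cgroup.c rather
than copied verbatim:

void cgroup_exit(struct task_struct *tsk, int run_callbacks)
{
	int i;

	if (run_callbacks && need_forkexit_callback) {
		/* notify every subsystem that registered an ->exit hook */
		for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];

			if (ss->exit)
				ss->exit(ss, tsk);	/* reaches cpu_cgroup_exit() */
		}
	}

	/* ... detach tsk from its css_set ... */
}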
@@ -8954,6 +8969,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
 	.destroy	= cpu_cgroup_destroy,
 	.can_attach	= cpu_cgroup_can_attach,
 	.attach		= cpu_cgroup_attach,
+	.exit		= cpu_cgroup_exit,
 	.populate	= cpu_cgroup_populate,
 	.subsys_id	= cpu_cgroup_subsys_id,
 	.early_init	= 1,
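Registering the callback in cpu_cgroup_subsys is all the wiring required;
generic cgroup code discovers it through the ops table. For reference, the
member being filled in has the signature below in struct cgroup_subsys
(field list abridged; the surrounding members are assumptions), which
matches cpu_cgroup_exit() above:

struct cgroup_subsys {
	/* ... create/destroy/attach/populate callbacks ... */
	void (*exit)(struct cgroup_subsys *ss, struct task_struct *task);
	/* ... */
};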