 init/Kconfig        |  1 -
 kernel/sched/core.c |  7 -------
 kernel/sched/fair.c | 10 +++++++---
 3 files changed, 7 insertions(+), 11 deletions(-)
diff --git a/init/Kconfig b/init/Kconfig
index a075765d5fbe..018d206c21f7 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -713,7 +713,6 @@ config CGROUP_PERF
 
 menuconfig CGROUP_SCHED
 	bool "Group CPU scheduler"
-	depends on EXPERIMENTAL
 	default n
 	help
 	  This feature lets CPU scheduler recognize task groups and control CPU
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index cecbb64be05f..fd7b25e90079 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7134,10 +7134,6 @@ void set_curr_task(int cpu, struct task_struct *p)
 
 #endif
 
-#ifdef CONFIG_RT_GROUP_SCHED
-#else /* !CONFIG_RT_GROUP_SCHED */
-#endif /* CONFIG_RT_GROUP_SCHED */
-
 #ifdef CONFIG_CGROUP_SCHED
 /* task_group_lock serializes the addition/removal of task groups */
 static DEFINE_SPINLOCK(task_group_lock);
@@ -7246,9 +7242,6 @@ void sched_move_task(struct task_struct *tsk)
 }
 #endif /* CONFIG_CGROUP_SCHED */
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-#endif
-
 #if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
 static unsigned long to_ratio(u64 period, u64 runtime)
 {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8e42de9105f8..84adb2d66cbd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3130,8 +3130,10 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 }
 
 #define LBF_ALL_PINNED	0x01
-#define LBF_NEED_BREAK	0x02
-#define LBF_ABORT	0x04
+#define LBF_NEED_BREAK	0x02	/* clears into HAD_BREAK */
+#define LBF_HAD_BREAK	0x04
+#define LBF_HAD_BREAKS	0x0C	/* count HAD_BREAKs overflows into ABORT */
+#define LBF_ABORT	0x10
 
 /*
  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
@@ -4508,7 +4510,9 @@ redo:
 		goto out_balanced;
 
 	if (lb_flags & LBF_NEED_BREAK) {
-		lb_flags &= ~LBF_NEED_BREAK;
+		lb_flags += LBF_HAD_BREAK - LBF_NEED_BREAK;
+		if (lb_flags & LBF_ABORT)
+			goto out_balanced;
 		goto redo;
 	}
 
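The fair.c change is the subtle part of this diff. Instead of simply clearing LBF_NEED_BREAK, load_balance() now adds LBF_HAD_BREAK - LBF_NEED_BREAK: since NEED_BREAK is known to be set inside that branch, subtracting it clears the bit, while adding LBF_HAD_BREAK increments the two-bit counter occupying the LBF_HAD_BREAKS mask (bits 2-3). On the fourth break the counter carries into LBF_ABORT (0x10), and the new check bails out to out_balanced instead of retrying forever. The sketch below is a standalone illustration of that bit arithmetic, not kernel code; it reuses the flag values from the hunk above and fakes the "move_tasks() asked for a lock break" condition with a plain loop.

/*
 * Standalone sketch of the LBF_* flag arithmetic: the two HAD_BREAK
 * bits form a small counter whose fourth increment carries into
 * LBF_ABORT. Flag values mirror the diff; everything else is mock-up.
 */
#include <stdio.h>

#define LBF_ALL_PINNED  0x01
#define LBF_NEED_BREAK  0x02    /* clears into HAD_BREAK */
#define LBF_HAD_BREAK   0x04
#define LBF_HAD_BREAKS  0x0C    /* count HAD_BREAKs overflows into ABORT */
#define LBF_ABORT       0x10

int main(void)
{
        unsigned int lb_flags = 0;
        int pass;

        for (pass = 1; pass <= 5; pass++) {
                /* pretend the balance pass requested a lock break */
                lb_flags |= LBF_NEED_BREAK;

                if (lb_flags & LBF_NEED_BREAK) {
                        /*
                         * NEED_BREAK is set here, so subtracting it clears
                         * that bit; adding HAD_BREAK bumps the two-bit
                         * counter in bits 2-3.
                         */
                        lb_flags += LBF_HAD_BREAK - LBF_NEED_BREAK;
                        if (lb_flags & LBF_ABORT) {
                                printf("pass %d: carried into LBF_ABORT (flags=%#x)\n",
                                       pass, lb_flags);
                                break;
                        }
                        printf("pass %d: retry, HAD_BREAK count=%u (flags=%#x)\n",
                               pass, (lb_flags & LBF_HAD_BREAKS) >> 2, lb_flags);
                }
        }
        return 0;
}

Compiled and run, the sketch retries three times and aborts on the fourth break, which is exactly the behaviour the "count HAD_BREAKs overflows into ABORT" comment in the new #define block describes.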
