diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2012-02-20 15:49:09 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2012-03-01 07:08:37 -0500 |
commit | 367456c756a6b84f493ca9cc5b17b1f5d38ef466 (patch) | |
tree | 0e95a2fa5cb25ea14e2841d84d4d2410ff383e33 /kernel/sched/sched.h | |
parent | ddcdf6e7d9919d139031fa2a6addd9544a9a833e (diff) |
sched: Ditch per cgroup task lists for load-balancing
Per cgroup load-balance has numerous problems, chief amongst them that
there is no real sane order in them. So stop pretending it makes sense
and enqueue all tasks on a single list.
This also allows us to more easily fix the fwd progress issue
uncovered by the lock-break stuff. Rotate the list on failure to
migrate and limit the total iterations to nr_running (which with
releasing the lock isn't strictly accurate but close enough).
Also add a filter that skips very light tasks on the first pass
around the list; this attempts to avoid shooting whole cgroups around
without affecting overall balance.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: pjt@google.com
Link: http://lkml.kernel.org/n/tip-tx8yqydc7eimgq7i4rkc3a4g@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r-- | kernel/sched/sched.h | 10 |
1 files changed, 2 insertions, 8 deletions
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index c0660a1a0cd1..753bdd567416 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h | |||
@@ -212,9 +212,6 @@ struct cfs_rq { | |||
212 | struct rb_root tasks_timeline; | 212 | struct rb_root tasks_timeline; |
213 | struct rb_node *rb_leftmost; | 213 | struct rb_node *rb_leftmost; |
214 | 214 | ||
215 | struct list_head tasks; | ||
216 | struct list_head *balance_iterator; | ||
217 | |||
218 | /* | 215 | /* |
219 | * 'curr' points to currently running entity on this cfs_rq. | 216 | * 'curr' points to currently running entity on this cfs_rq. |
220 | * It is set to NULL otherwise (i.e when none are currently running). | 217 | * It is set to NULL otherwise (i.e when none are currently running). |
@@ -242,11 +239,6 @@ struct cfs_rq { | |||
242 | 239 | ||
243 | #ifdef CONFIG_SMP | 240 | #ifdef CONFIG_SMP |
244 | /* | 241 | /* |
245 | * the part of load.weight contributed by tasks | ||
246 | */ | ||
247 | unsigned long task_weight; | ||
248 | |||
249 | /* | ||
250 | * h_load = weight * f(tg) | 242 | * h_load = weight * f(tg) |
251 | * | 243 | * |
252 | * Where f(tg) is the recursive weight fraction assigned to | 244 | * Where f(tg) is the recursive weight fraction assigned to |
@@ -420,6 +412,8 @@ struct rq { | |||
420 | int cpu; | 412 | int cpu; |
421 | int online; | 413 | int online; |
422 | 414 | ||
415 | struct list_head cfs_tasks; | ||
416 | |||
423 | u64 rt_avg; | 417 | u64 rt_avg; |
424 | u64 age_stamp; | 418 | u64 age_stamp; |
425 | u64 idle_stamp; | 419 | u64 idle_stamp; |