From 6492c7f83e88a3a9521793b6934d882b97afe287 Mon Sep 17 00:00:00 2001
From: Mike Galbraith
Date: Sun, 8 Jun 2008 09:27:13 +0200
Subject: sched: trivial sched_features cleanup

Remove unused debug/tuning features.

Signed-off-by: Mike Galbraith
Signed-off-by: Ingo Molnar
---
 kernel/sched_features.h | 2 --
 1 file changed, 2 deletions(-)

(limited to 'kernel/sched_features.h')

diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index 1c7283cb9581..62b39ca92ebd 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -6,5 +6,3 @@ SCHED_FEAT(CACHE_HOT_BUDDY, 1)
 SCHED_FEAT(SYNC_WAKEUPS, 1)
 SCHED_FEAT(HRTICK, 1)
 SCHED_FEAT(DOUBLE_TICK, 0)
-SCHED_FEAT(NORMALIZED_SLEEPER, 1)
-SCHED_FEAT(DEADLINE, 1)
--
cgit v1.2.2


From a7be37ac8e1565e00880531f4e2aff421a21c803 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 27 Jun 2008 13:41:11 +0200
Subject: sched: revert the revert of: weight calculations

Try again..

initial commit: 8f1bc385cfbab474db6c27b5af1e439614f3025c
revert: f9305d4a0968201b2818dbed0dc8cb0d4ee7aeb3

Signed-off-by: Peter Zijlstra
Cc: Srivatsa Vaddagiri
Cc: Mike Galbraith
Signed-off-by: Ingo Molnar
---
 kernel/sched_features.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'kernel/sched_features.h')

diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index 62b39ca92ebd..afa549166d8d 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -1,4 +1,5 @@
 SCHED_FEAT(NEW_FAIR_SLEEPERS, 1)
+SCHED_FEAT(NORMALIZED_SLEEPER, 1)
 SCHED_FEAT(WAKEUP_PREEMPT, 1)
 SCHED_FEAT(START_DEBIT, 1)
 SCHED_FEAT(AFFINE_WAKEUPS, 1)
--
cgit v1.2.2


From c9c294a630e28eec5f2865f028ecfc58d45c0a5a Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 27 Jun 2008 13:41:12 +0200
Subject: sched: fix calc_delta_asym()

calc_delta_asym() is supposed to do the same as calc_delta_fair() except
linearly shrink the result for negative nice processes - this causes them
to have a smaller preemption threshold so that they are more easily
preempted.

The problem is that for task groups se->load.weight is the per cpu share
of the actual task group weight; take that into account.

Also provide a debug switch to disable the asymmetry (which I still don't
like - but it does greatly benefit some workloads)

This would explain the interactivity issues reported against group
scheduling.

Signed-off-by: Peter Zijlstra
Cc: Srivatsa Vaddagiri
Cc: Mike Galbraith
Signed-off-by: Ingo Molnar
---
 kernel/sched_features.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'kernel/sched_features.h')

diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index afa549166d8d..04123af2e678 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -7,3 +7,4 @@ SCHED_FEAT(CACHE_HOT_BUDDY, 1)
 SCHED_FEAT(SYNC_WAKEUPS, 1)
 SCHED_FEAT(HRTICK, 1)
 SCHED_FEAT(DOUBLE_TICK, 0)
+SCHED_FEAT(ASYM_GRAN, 1)
--
cgit v1.2.2
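
For reference, the SCHED_FEAT() entries touched by these patches are not used
directly; in kernels of this vintage, kernel/sched.c includes sched_features.h
twice with different SCHED_FEAT() definitions - once to generate an enum of
feature bits and once to build the default feature mask. The following is a
simplified sketch of that mechanism (not part of the patches above; exact
details may differ between versions):

/*
 * Simplified sketch of how kernel/sched.c consumes sched_features.h
 * in this era; not taken from the patches above, details may vary.
 */

/* First pass: turn each SCHED_FEAT(name, enabled) into an enum bit. */
#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "sched_features.h"
};

#undef SCHED_FEAT

/* Second pass: OR the default-enabled features into one mask. */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |

unsigned int sysctl_sched_features =
#include "sched_features.h"
	0;

#undef SCHED_FEAT

/* Runtime test used throughout the scheduler code. */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

With CONFIG_SCHED_DEBUG the same mask is also exposed through a debugfs file
(sched_features), so individual bits such as ASYM_GRAN can be toggled at
runtime, which is what makes these entries useful as debug/tuning switches.
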
From 93b75217df39e6d75889cc6f8050343286aff4a5 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 27 Jun 2008 13:41:33 +0200
Subject: sched: disable source/target_load bias

The bias given by source/target_load functions can be very large, disable
it by default to get faster convergence.

Signed-off-by: Peter Zijlstra
Cc: Srivatsa Vaddagiri
Cc: Mike Galbraith
Signed-off-by: Ingo Molnar
---
 kernel/sched_features.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'kernel/sched_features.h')

diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index 04123af2e678..d56e3053e746 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -8,3 +8,4 @@ SCHED_FEAT(SYNC_WAKEUPS, 1)
 SCHED_FEAT(HRTICK, 1)
 SCHED_FEAT(DOUBLE_TICK, 0)
 SCHED_FEAT(ASYM_GRAN, 1)
+SCHED_FEAT(LB_BIAS, 0)
\ No newline at end of file
--
cgit v1.2.2


From 2398f2c6d34b43025f274fc42eaca34d23ec2320 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 27 Jun 2008 13:41:35 +0200
Subject: sched: update shares on wakeup

We found that the affine wakeup code needs rather accurate load figures
to be effective. The trouble is that updating the load figures is fairly
expensive with group scheduling. Therefore ratelimit the updating.

Signed-off-by: Peter Zijlstra
Cc: Srivatsa Vaddagiri
Cc: Mike Galbraith
Signed-off-by: Ingo Molnar
---
 kernel/sched_features.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'kernel/sched_features.h')

diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index d56e3053e746..7d616d2a2a3f 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -8,4 +8,5 @@ SCHED_FEAT(SYNC_WAKEUPS, 1)
 SCHED_FEAT(HRTICK, 1)
 SCHED_FEAT(DOUBLE_TICK, 0)
 SCHED_FEAT(ASYM_GRAN, 1)
-SCHED_FEAT(LB_BIAS, 0)
\ No newline at end of file
+SCHED_FEAT(LB_BIAS, 0)
+SCHED_FEAT(LB_WAKEUP_UPDATE, 1)
--
cgit v1.2.2


From f5bfb7d9ff73d72ee4f2f4830a6f0c9088d00f92 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 27 Jun 2008 13:41:39 +0200
Subject: sched: bias effective_load() error towards failing wake_affine().

Measurement shows that the difference between cgroup:/ and cgroup:/foo
wake_affine() results is that the latter succeeds significantly more.

Therefore bias the calculations towards failing the test.

Signed-off-by: Peter Zijlstra
Cc: Srivatsa Vaddagiri
Cc: Mike Galbraith
Signed-off-by: Ingo Molnar
---
 kernel/sched_features.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'kernel/sched_features.h')

diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index 7d616d2a2a3f..862b06bd560a 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -10,3 +10,4 @@ SCHED_FEAT(DOUBLE_TICK, 0)
 SCHED_FEAT(ASYM_GRAN, 1)
 SCHED_FEAT(LB_BIAS, 0)
 SCHED_FEAT(LB_WAKEUP_UPDATE, 1)
+SCHED_FEAT(ASYM_EFF_LOAD, 1)
--
cgit v1.2.2
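
To illustrate how such a flag is consumed at the point where a bias would be
applied, here is a rough sketch of source_load()/target_load() with the
LB_BIAS switch from the series above wired in. This is a simplified
reconstruction of kernel/sched.c of this period, not part of the diffs above,
and details may differ:

/*
 * Rough sketch of the load-balancing helpers gated by LB_BIAS;
 * simplified, not part of the diffs above.
 */
static unsigned long source_load(int cpu, int type)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long total = weighted_cpuload(cpu);

	/* LB_BIAS off (the new default): use the instantaneous load. */
	if (type == 0 || !sched_feat(LB_BIAS))
		return total;

	/* Conservative bias: report the lower of decayed and instantaneous load. */
	return min(rq->cpu_load[type-1], total);
}

static unsigned long target_load(int cpu, int type)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long total = weighted_cpuload(cpu);

	/* LB_BIAS off (the new default): use the instantaneous load. */
	if (type == 0 || !sched_feat(LB_BIAS))
		return total;

	/* Conservative bias: report the higher of decayed and instantaneous load. */
	return max(rq->cpu_load[type-1], total);
}

With LB_BIAS defaulted to 0 by the patch above, both helpers return the raw
weighted load, which is what gives the faster convergence mentioned in the
changelog.
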