author     Ingo Molnar <mingo@elte.hu>  2007-08-09 05:16:52 -0400
committer  Ingo Molnar <mingo@elte.hu>  2007-08-09 05:16:52 -0400
commit     7cff8cf61cac15fa29a1ca802826d2bcbca66152 (patch)
tree       86fa25bbf7d8cd3b23f7230fb821cdb04990ebfc
parent     a69edb55605117cc0f20aa36c49c20b96590774d (diff)
sched: refine negative nice level granularity
refine the granularity of negative nice level tasks: let them
reschedule more often to offset the effect of them consuming their
wait_runtime proportionately slower. (This makes nice-0 task
scheduling smoother in the presence of negatively reniced tasks.)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
 kernel/sched_fair.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7a632c534ce5..e91db32cadfd 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -222,21 +222,25 @@ niced_granularity(struct sched_entity *curr, unsigned long granularity)
 {
 	u64 tmp;
 
+	if (likely(curr->load.weight == NICE_0_LOAD))
+		return granularity;
 	/*
-	 * Negative nice levels get the same granularity as nice-0:
+	 * Positive nice levels get the same granularity as nice-0:
 	 */
-	if (likely(curr->load.weight >= NICE_0_LOAD))
-		return granularity;
+	if (likely(curr->load.weight < NICE_0_LOAD)) {
+		tmp = curr->load.weight * (u64)granularity;
+		return (long) (tmp >> NICE_0_SHIFT);
+	}
 	/*
-	 * Positive nice level tasks get linearly finer
+	 * Negative nice level tasks get linearly finer
 	 * granularity:
 	 */
-	tmp = curr->load.weight * (u64)granularity;
+	tmp = curr->load.inv_weight * (u64)granularity;
 
 	/*
 	 * It will always fit into 'long':
 	 */
-	return (long) (tmp >> NICE_0_SHIFT);
+	return (long) (tmp >> WMULT_SHIFT);
 }
 
 static inline void
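
To make the new scaling concrete, here is a minimal user-space sketch of
the reworked branch structure in the patch above. It is an illustration,
not the kernel code itself: the constants (NICE_0_LOAD = 1024,
NICE_0_SHIFT = 10, WMULT_SHIFT = 32) and the inv_weight ~= 2^32 / weight
relation are assumed to match the scheduler definitions of this era, and
the sample weights are hypothetical rather than taken from the real
prio_to_weight[] table.

#include <stdint.h>
#include <stdio.h>

/* Assumed values; the real definitions live in kernel/sched.c. */
#define NICE_0_LOAD	1024UL
#define NICE_0_SHIFT	10
#define WMULT_SHIFT	32

struct load_weight {
	unsigned long weight;
	unsigned long inv_weight;	/* assumed ~= 2^32 / weight */
};

/* Mirrors the branch structure of the patched niced_granularity(). */
static long niced_granularity(const struct load_weight *load,
			      unsigned long granularity)
{
	uint64_t tmp;

	/* nice-0 tasks keep the granularity unchanged. */
	if (load->weight == NICE_0_LOAD)
		return granularity;

	/* Positive nice (weight < NICE_0_LOAD): scale by weight / 2^10. */
	if (load->weight < NICE_0_LOAD) {
		tmp = load->weight * (uint64_t)granularity;
		return (long)(tmp >> NICE_0_SHIFT);
	}

	/* Negative nice: scale by inv_weight / 2^32, i.e. ~1 / weight. */
	tmp = load->inv_weight * (uint64_t)granularity;
	return (long)(tmp >> WMULT_SHIFT);
}

int main(void)
{
	/* Hypothetical weights; actual prio_to_weight[] values differ. */
	unsigned long weights[] = { 512, 1024, 2048 };
	unsigned long gran = 2000000UL;	/* e.g. 2 ms, in nanoseconds */

	for (int i = 0; i < 3; i++) {
		struct load_weight lw = {
			.weight = weights[i],
			.inv_weight = (unsigned long)((1ULL << 32) / weights[i]),
		};
		printf("weight %4lu -> granularity %ld ns\n",
		       lw.weight, niced_granularity(&lw, gran));
	}
	return 0;
}

Running this shows both directions of the scaling: a below-NICE_0_LOAD
(positive nice) task gets a proportionally smaller granularity through
the weight multiply, while an above-NICE_0_LOAD (negative nice) task is
rescheduled on a granularity that shrinks linearly with 1/weight, which
is what lets it offset its proportionately slower wait_runtime
consumption.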