diff options
| -rw-r--r-- | kernel/sched_fair.c | 16 |
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7a632c534ce5..e91db32cadfd 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
| @@ -222,21 +222,25 @@ niced_granularity(struct sched_entity *curr, unsigned long granularity) | |||
| 222 | { | 222 | { |
| 223 | u64 tmp; | 223 | u64 tmp; |
| 224 | 224 | ||
| 225 | if (likely(curr->load.weight == NICE_0_LOAD)) | ||
| 226 | return granularity; | ||
| 225 | /* | 227 | /* |
| 226 | * Negative nice levels get the same granularity as nice-0: | 228 | * Positive nice levels get the same granularity as nice-0: |
| 227 | */ | 229 | */ |
| 228 | if (likely(curr->load.weight >= NICE_0_LOAD)) | 230 | if (likely(curr->load.weight < NICE_0_LOAD)) { |
| 229 | return granularity; | 231 | tmp = curr->load.weight * (u64)granularity; |
| 232 | return (long) (tmp >> NICE_0_SHIFT); | ||
| 233 | } | ||
| 230 | /* | 234 | /* |
| 231 | * Positive nice level tasks get linearly finer | 235 | * Negative nice level tasks get linearly finer |
| 232 | * granularity: | 236 | * granularity: |
| 233 | */ | 237 | */ |
| 234 | tmp = curr->load.weight * (u64)granularity; | 238 | tmp = curr->load.inv_weight * (u64)granularity; |
| 235 | 239 | ||
| 236 | /* | 240 | /* |
| 237 | * It will always fit into 'long': | 241 | * It will always fit into 'long': |
| 238 | */ | 242 | */ |
| 239 | return (long) (tmp >> NICE_0_SHIFT); | 243 | return (long) (tmp >> WMULT_SHIFT); |
| 240 | } | 244 | } |
| 241 | 245 | ||
| 242 | static inline void | 246 | static inline void |
