| author | Peter Williams <pwil3058@bigpond.net.au> | 2007-08-02 11:41:40 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2007-08-02 11:41:40 -0400 |
| commit | 5a4f3ea77e1b0c72a3ec136c881eb0d64aa1d25e (patch) | |
| tree | 383ca664b739e9a7459bbf00fff05aa43169fb14 /kernel | |
| parent | 362a7016637648c6aefc98b706298baedfaa1543 (diff) | |
[PATCH] sched: tidy up left over smpnice code
1. The only place RTPRIO_TO_LOAD_WEIGHT() is used is in the call to
move_tasks() in active_load_balance(), and its only purpose there is to
make the load-to-move argument large enough that exactly one task is
moved (if one is available). Passing ULONG_MAX accomplishes the same
thing (see the sketch below the sign-offs), so RTPRIO_TO_LOAD_WEIGHT()
can be deleted.
2. This, in turn, allows PRIO_TO_LOAD_WEIGHT() to be deleted.
3. This allows load_weight() to be deleted, which in turn allows
TIME_SLICE_NICE_ZERO and the comment above it to be deleted.
Signed-off-by: Peter Williams <pwil3058@bigpond.net.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
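
To make point 1 concrete, here is a minimal userspace sketch (not the kernel's
actual move_tasks()) of how the two limits passed to move_tasks() interact: the
fourth argument caps the number of tasks moved, the fifth caps the total load
moved. The struct, function name, and load values are illustrative assumptions.

#include <limits.h>
#include <stdio.h>

struct task { unsigned long load_weight; };

static int move_tasks_sketch(const struct task *queue, int nr_queued,
			     unsigned long max_nr_move,
			     unsigned long max_load_move)
{
	unsigned long moved_load = 0;
	int moved_nr = 0, i;

	for (i = 0; i < nr_queued; i++) {
		if ((unsigned long)moved_nr >= max_nr_move)
			break;
		/* A task heavier than the remaining load budget is skipped;
		 * with ULONG_MAX as the budget this can never happen. */
		if (queue[i].load_weight > max_load_move - moved_load)
			continue;
		moved_load += queue[i].load_weight;
		moved_nr++;
	}
	return moved_nr;
}

int main(void)
{
	struct task rq[] = { { 3072 }, { 1024 } };

	/* max_nr_move == 1 caps the count; max_load_move == ULONG_MAX means
	 * the load budget never gets in the way, so exactly one task is
	 * moved if one is available -- the behaviour the patch relies on. */
	printf("moved %d task(s)\n",
	       move_tasks_sketch(rq, 2, 1, ULONG_MAX));
	return 0;
}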
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched.c | 16 |
1 file changed, 1 insertion(+), 15 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 1641235f8e9..ed8cebf5328 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -727,19 +727,6 @@ static void update_curr_load(struct rq *rq, u64 now)
  * slice expiry etc.
  */
 
-/*
- * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE
- * If static_prio_timeslice() is ever changed to break this assumption then
- * this code will need modification
- */
-#define TIME_SLICE_NICE_ZERO DEF_TIMESLICE
-#define load_weight(lp) \
-	(((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
-#define PRIO_TO_LOAD_WEIGHT(prio) \
-	load_weight(static_prio_timeslice(prio))
-#define RTPRIO_TO_LOAD_WEIGHT(rp) \
-	(PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + load_weight(rp))
-
 #define WEIGHT_IDLEPRIO 2
 #define WMULT_IDLEPRIO (1 << 31)
 
@@ -2908,8 +2895,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 	schedstat_inc(sd, alb_cnt);
 
 	if (move_tasks(target_rq, target_cpu, busiest_rq, 1,
-		       RTPRIO_TO_LOAD_WEIGHT(100), sd, CPU_IDLE,
-		       NULL))
+		       ULONG_MAX, sd, CPU_IDLE, NULL))
 		schedstat_inc(sd, alb_pushed);
 	else
 		schedstat_inc(sd, alb_failed);
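
For reference, a rough userspace expansion of the macros this patch removes,
using assumed stand-in values for DEF_TIMESLICE, SCHED_LOAD_SCALE, MAX_RT_PRIO
and a simplified static_prio_timeslice() (the real definitions live elsewhere
in the kernel and may differ). It only illustrates that RTPRIO_TO_LOAD_WEIGHT(100)
was a hand-computed "big enough" load cap, which ULONG_MAX replaces more simply.

#include <stdio.h>

/* Assumed stand-in values, for illustration only. */
#define DEF_TIMESLICE		100
#define SCHED_LOAD_SCALE	1024UL
#define MAX_RT_PRIO		100

static unsigned long static_prio_timeslice(int prio)
{
	(void)prio;
	return DEF_TIMESLICE;	/* assumed: every priority gets the default */
}

/* The macros removed by this patch, reproduced from the hunk above. */
#define TIME_SLICE_NICE_ZERO DEF_TIMESLICE
#define load_weight(lp) \
	(((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
#define PRIO_TO_LOAD_WEIGHT(prio) \
	load_weight(static_prio_timeslice(prio))
#define RTPRIO_TO_LOAD_WEIGHT(rp) \
	(PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + load_weight(rp))

int main(void)
{
	/* The old active_load_balance() call passed this as its load cap;
	 * any value at least as large as one task's weight behaves the
	 * same, which is why ULONG_MAX is a sufficient replacement. */
	printf("RTPRIO_TO_LOAD_WEIGHT(100) = %lu\n",
	       RTPRIO_TO_LOAD_WEIGHT(100));
	return 0;
}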