Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	16
1 file changed, 1 insertion(+), 15 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 1641235f8e9a..ed8cebf53286 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -727,19 +727,6 @@ static void update_curr_load(struct rq *rq, u64 now)
  * slice expiry etc.
  */
 
-/*
- * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE
- * If static_prio_timeslice() is ever changed to break this assumption then
- * this code will need modification
- */
-#define TIME_SLICE_NICE_ZERO	DEF_TIMESLICE
-#define load_weight(lp) \
-	(((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
-#define PRIO_TO_LOAD_WEIGHT(prio) \
-	load_weight(static_prio_timeslice(prio))
-#define RTPRIO_TO_LOAD_WEIGHT(rp) \
-	(PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + load_weight(rp))
-
 #define WEIGHT_IDLEPRIO		2
 #define WMULT_IDLEPRIO		(1 << 31)
 
@@ -2908,8 +2895,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 	schedstat_inc(sd, alb_cnt);
 
 	if (move_tasks(target_rq, target_cpu, busiest_rq, 1,
-		       RTPRIO_TO_LOAD_WEIGHT(100), sd, CPU_IDLE,
-		       NULL))
+		       ULONG_MAX, sd, CPU_IDLE, NULL))
 		schedstat_inc(sd, alb_pushed);
 	else
 		schedstat_inc(sd, alb_failed);
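
Below is a minimal user-space sketch (not kernel code) of the arithmetic behind the removed macros. SCHED_LOAD_SCALE, DEF_TIMESLICE, MAX_RT_PRIO, NICE_0_PRIO and the static_prio_timeslice() stub are assumed illustration values, not the kernel's definitions, so the printed numbers are only meaningful under those assumptions. It shows the invariant the deleted comment relies on: because load_weight() divides by TIME_SLICE_NICE_ZERO == DEF_TIMESLICE, a nice-0 task's weight comes out to exactly SCHED_LOAD_SCALE, and RTPRIO_TO_LOAD_WEIGHT(100) at the active_load_balance() call site was only a finite "bigger than any non-realtime weight" bound, which the patch now spells as an explicit ULONG_MAX.

#include <limits.h>
#include <stdio.h>

#define DEF_TIMESLICE		100UL		/* assumed: 100 jiffies at HZ=1000 */
#define SCHED_LOAD_SCALE	(1UL << 10)	/* assumed: 1024 */
#define MAX_RT_PRIO		100
#define NICE_0_PRIO		120		/* MAX_RT_PRIO + 20 */

/* placeholder only: every priority gets the nice-0 timeslice here */
static unsigned long static_prio_timeslice(int prio)
{
	(void)prio;
	return DEF_TIMESLICE;
}

/* the macros as removed by this patch */
#define TIME_SLICE_NICE_ZERO	DEF_TIMESLICE
#define load_weight(lp) \
	(((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
#define PRIO_TO_LOAD_WEIGHT(prio) \
	load_weight(static_prio_timeslice(prio))
#define RTPRIO_TO_LOAD_WEIGHT(rp) \
	(PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + load_weight(rp))

int main(void)
{
	/* nice-0: timeslice / TIME_SLICE_NICE_ZERO == 1, so weight == SCHED_LOAD_SCALE */
	printf("PRIO_TO_LOAD_WEIGHT(nice 0) = %lu\n",
	       PRIO_TO_LOAD_WEIGHT(NICE_0_PRIO));

	/* the finite bound the old call site passed to move_tasks() ... */
	printf("RTPRIO_TO_LOAD_WEIGHT(100)  = %lu\n",
	       RTPRIO_TO_LOAD_WEIGHT(100));

	/* ... versus the explicit "no limit" used after this patch */
	printf("ULONG_MAX                   = %lu\n", (unsigned long)ULONG_MAX);
	return 0;
}

With the call site in active_load_balance() converted to ULONG_MAX, the bound no longer depends on the timeslice-derived weights at all; the old value appears to have served only as an effectively unlimited cap on how much load move_tasks() may migrate, so passing ULONG_MAX states that intent directly and lets this diff drop the macros.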