Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 41e69b5ee652..8b035a8b3c30 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1942,15 +1942,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 		goto out_balanced;
 	}
 
-	/*
-	 * This should be "impossible", but since load
-	 * balancing is inherently racy and statistical,
-	 * it could happen in theory.
-	 */
-	if (unlikely(busiest == this_rq)) {
-		WARN_ON(1);
-		goto out_balanced;
-	}
+	BUG_ON(busiest == this_rq);
 
 	schedstat_add(sd, lb_imbalance[idle], imbalance);
 
@@ -2052,11 +2044,13 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
 	}
 
 	busiest = find_busiest_queue(group);
-	if (!busiest || busiest == this_rq) {
+	if (!busiest) {
 		schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
 		goto out_balanced;
 	}
 
+	BUG_ON(busiest == this_rq);
+
 	/* Attempt to move tasks */
 	double_lock_balance(this_rq, busiest);
 
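
For context, the change replaces a tolerated-but-warned condition with a hard assertion: the old load_balance() path logged a warning and bailed out of the balancing attempt when busiest == this_rq, while the new code (and the new check added to load_balance_newidle()) asserts the condition can never hold. Below is a minimal userspace sketch of that behavioural difference; warn_on()/bug_on(), struct runqueue, and the balance_old()/balance_new() helpers are hypothetical stand-ins for illustration only, not the kernel's actual WARN_ON()/BUG_ON() macros or scheduler code.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the kernel's WARN_ON()/BUG_ON() macros. */
#define warn_on(cond) \
	do { if (cond) fprintf(stderr, "warning: %s\n", #cond); } while (0)
#define bug_on(cond) \
	do { if (cond) { fprintf(stderr, "bug: %s\n", #cond); abort(); } } while (0)

struct runqueue { int cpu; };

/* Old behaviour: warn, then give up on this balancing attempt. */
static int balance_old(struct runqueue *this_rq, struct runqueue *busiest)
{
	if (busiest == this_rq) {
		warn_on(1);
		return 0;	/* "out_balanced": nothing moved */
	}
	return 1;		/* proceed to move tasks */
}

/* New behaviour: the condition is treated as a hard invariant. */
static int balance_new(struct runqueue *this_rq, struct runqueue *busiest)
{
	bug_on(busiest == this_rq);
	return 1;
}

int main(void)
{
	struct runqueue a = { 0 }, b = { 1 };

	printf("old, distinct rqs: %d\n", balance_old(&a, &b));
	printf("new, distinct rqs: %d\n", balance_new(&a, &b));
	printf("old, same rq: %d\n", balance_old(&a, &a));	/* warns, returns 0 */
	/* balance_new(&a, &a) would abort() here. */
	return 0;
}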