Diffstat (limited to 'kernel')
 kernel/sched.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 579da278e72f..6e452eb95ac3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2031,6 +2031,12 @@ static runqueue_t *find_busiest_queue(struct sched_group *group)
 }
 
 /*
+ * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
+ * so long as it is large enough.
+ */
+#define MAX_PINNED_INTERVAL	512
+
+/*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
  *
@@ -2042,7 +2048,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 	struct sched_group *group;
 	runqueue_t *busiest;
 	unsigned long imbalance;
-	int nr_moved, all_pinned;
+	int nr_moved, all_pinned = 0;
 	int active_balance = 0;
 
 	spin_lock(&this_rq->lock);
@@ -2133,7 +2139,8 @@ out_balanced:
 
 	sd->nr_balance_failed = 0;
 	/* tune up the balancing interval */
-	if (sd->balance_interval < sd->max_interval)
+	if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
+			(sd->balance_interval < sd->max_interval))
 		sd->balance_interval *= 2;
 
 	return 0;
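
The behavioral change in the out_balanced path can be illustrated standalone. The sketch below is not kernel code: struct dom, back_off(), and the numeric intervals are invented for illustration, mirroring only the sched_domain fields the patch touches. It shows how all_pinned lets balance_interval keep doubling past sd->max_interval, up to the new MAX_PINNED_INTERVAL cap, so a domain full of pinned tasks backs off harder from futile balance attempts.

/*
 * Standalone sketch of the backoff change above; not kernel code.
 * Field names mirror sched_domain, but the values are made up.
 */
#include <stdio.h>

#define MAX_PINNED_INTERVAL	512	/* same cap the patch introduces */

struct dom {
	unsigned int balance_interval;	/* ms between balance attempts */
	unsigned int max_interval;	/* normal upper bound */
};

/* One failed balance pass: double the interval, as the patched code does. */
static void back_off(struct dom *sd, int all_pinned)
{
	if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
			(sd->balance_interval < sd->max_interval))
		sd->balance_interval *= 2;
}

int main(void)
{
	struct dom sd = { .balance_interval = 8, .max_interval = 32 };
	int i;

	/* With every task pinned, the interval grows past max_interval... */
	for (i = 0; i < 10; i++)
		back_off(&sd, 1);
	printf("all_pinned: interval = %u\n", sd.balance_interval);	/* 512 */

	/* ...whereas the pre-patch rule alone stops at max_interval. */
	sd.balance_interval = 8;
	for (i = 0; i < 10; i++)
		back_off(&sd, 0);
	printf("normal:     interval = %u\n", sd.balance_interval);	/* 32 */

	return 0;
}

Initializing all_pinned to 0 in load_balance() matters for the same reason: the variable is only assigned on paths that actually attempt to move tasks, so without the initializer the new condition could read garbage on early-exit paths.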