Diffstat (limited to 'kernel/sched.c')

 -rw-r--r--  kernel/sched.c | 18
 1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 7729f9a45a8b..94d9a6c5ff94 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2984,6 +2984,16 @@ next:
 	pulled++;
 	rem_load_move -= p->se.load.weight;
 
+#ifdef CONFIG_PREEMPT
+	/*
+	 * NEWIDLE balancing is a source of latency, so preemptible kernels
+	 * will stop after the first task is pulled to minimize the critical
+	 * section.
+	 */
+	if (idle == CPU_NEWLY_IDLE)
+		goto out;
+#endif
+
 	/*
 	 * We only want to steal up to the prescribed amount of weighted load.
 	 */
@@ -3030,9 +3040,15 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 					sd, idle, all_pinned, &this_best_prio);
 		class = class->next;
 
+#ifdef CONFIG_PREEMPT
+		/*
+		 * NEWIDLE balancing is a source of latency, so preemptible
+		 * kernels will stop after the first task is pulled to minimize
+		 * the critical section.
+		 */
 		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
 			break;
-
+#endif
 	} while (class && max_load_move > total_load_moved);
 
 	return total_load_moved > 0;
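
For context, the following is a minimal user-space sketch of the control flow the CONFIG_PREEMPT hunks introduce; it is not kernel code. pull_tasks(), struct task, the newly_idle flag and the fixed weights are illustrative stand-ins for balance_tasks(), p->se.load.weight and the idle == CPU_NEWLY_IDLE check.

/*
 * Minimal user-space sketch (not kernel code) of the behaviour added by
 * this patch: when balancing because the CPU has just gone idle, a
 * preemptible configuration stops after pulling a single task.
 */
#include <stdbool.h>
#include <stdio.h>

#define CONFIG_PREEMPT 1	/* toggle to model a non-preemptible build */

struct task { long weight; };	/* stand-in for p->se.load.weight */

/* Pull load from a busier queue until rem_load_move is exhausted. */
static int pull_tasks(const struct task *busiest, int nr,
		      long rem_load_move, bool newly_idle)
{
	int pulled = 0;

	for (int i = 0; i < nr && rem_load_move > 0; i++) {
		pulled++;
		rem_load_move -= busiest[i].weight;

#if CONFIG_PREEMPT
		/*
		 * Mirror of the patch: NEWIDLE balancing runs with the
		 * runqueue lock held, so stop after the first pull to keep
		 * that critical section short.
		 */
		if (newly_idle)
			break;
#endif
	}
	return pulled;
}

int main(void)
{
	const struct task busiest[] = { {1024}, {1024}, {512}, {512} };

	printf("newly idle:       pulled %d task(s)\n",
	       pull_tasks(busiest, 4, 2048, true));
	printf("periodic balance: pulled %d task(s)\n",
	       pull_tasks(busiest, 4, 2048, false));
	return 0;
}

Run as-is, the newly-idle call pulls one task even though more load could be moved, while the periodic-balance call keeps pulling until rem_load_move is exhausted; that is the latency/throughput trade-off described in the new comments.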