Diffstat (limited to 'kernel/sched.c')
 -rw-r--r--  kernel/sched.c | 33 +++++++++++++++++++++++++++++------
 1 file changed, 27 insertions(+), 6 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index cb31fb4a1379..93cf241cfbe9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -301,7 +301,7 @@ struct rq {
 	struct lock_class_key rq_lock_key;
 };
 
-static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 static DEFINE_MUTEX(sched_hotcpu_mutex);
 
 static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
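
The first hunk replaces the open-coded ____cacheline_aligned_in_smp attribute on the per-cpu runqueues with the DEFINE_PER_CPU_SHARED_ALIGNED() helper; either way the point is that each cpu's hot runqueue data should not share a cache line with unrelated data. A minimal userspace sketch of that false-sharing concern (not part of the patch; the 64-byte line size, the names, and the thread count are assumptions for illustration only):

#include <stdalign.h>
#include <pthread.h>

#define CACHELINE	64	/* assumed L1 line size; the kernel uses L1_CACHE_BYTES */
#define NR_WORKERS	4

/*
 * Without the alignment, counters for different threads could end up on
 * one cache line, and every increment would bounce that line between cores.
 */
struct worker_stat {
	alignas(CACHELINE) unsigned long events;
};

static struct worker_stat stats[NR_WORKERS];	/* one padded slot per worker */

static void *worker(void *arg)
{
	struct worker_stat *st = arg;

	for (unsigned long i = 0; i < 1000000; i++)
		st->events++;		/* stays within this worker's own line */
	return 0;
}

int main(void)
{
	pthread_t tid[NR_WORKERS];

	for (int i = 0; i < NR_WORKERS; i++)
		pthread_create(&tid[i], 0, worker, &stats[i]);
	for (int i = 0; i < NR_WORKERS; i++)
		pthread_join(tid[i], 0);
	return 0;
}
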
@@ -379,6 +379,23 @@ static inline unsigned long long rq_clock(struct rq *rq)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+unsigned long long cpu_clock(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long long now;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rq->lock, flags);
+	now = rq_clock(rq);
+	spin_unlock_irqrestore(&rq->lock, flags);
+
+	return now;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /* Change a task's ->cfs_rq if it moves across CPUs */
 static inline void set_task_cfs_rq(struct task_struct *p)
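
The comment in the new cpu_clock() describes it as a fast but only loosely synchronized per-cpu timestamp source. A small, hypothetical in-kernel caller might look like the sketch below; the stamp_event() helper and its struct are inventions for illustration, and it assumes the declaration extern unsigned long long cpu_clock(int cpu); is exposed via <linux/sched.h>:

#include <linux/sched.h>	/* assumed home of the cpu_clock() declaration */
#include <linux/smp.h>

/* Hypothetical debug/tracing record stamped with the fast per-cpu clock. */
struct event_stamp {
	int cpu;
	unsigned long long ns;	/* sched_clock()-based, may drift slightly between cpus */
};

static void stamp_event(struct event_stamp *es)
{
	es->cpu = raw_smp_processor_id();
	/*
	 * cpu_clock() briefly takes that cpu's runqueue lock, so it is
	 * cheap enough for debug paths but must not be called with
	 * rq->lock already held.
	 */
	es->ns = cpu_clock(es->cpu);
}
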
@@ -2235,7 +2252,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 
 		rq = cpu_rq(i);
 
-		if (*sd_idle && !idle_cpu(i))
+		if (*sd_idle && rq->nr_running)
 			*sd_idle = 0;
 
 		/* Bias balancing toward cpus of our domain */
@@ -2257,9 +2274,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		/*
 		 * First idle cpu or the first cpu(busiest) in this sched group
 		 * is eligible for doing load balancing at this and above
-		 * domains.
+		 * domains. In the newly idle case, we will allow all the cpu's
+		 * to do the newly idle load balance.
 		 */
-		if (local_group && balance_cpu != this_cpu && balance) {
+		if (idle != CPU_NEWLY_IDLE && local_group &&
+		    balance_cpu != this_cpu && balance) {
 			*balance = 0;
 			goto ret;
 		}
@@ -2677,6 +2696,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 	unsigned long imbalance;
 	int nr_moved = 0;
 	int sd_idle = 0;
+	int all_pinned = 0;
 	cpumask_t cpus = CPU_MASK_ALL;
 
 	/*
@@ -2715,10 +2735,11 @@ redo:
 		double_lock_balance(this_rq, busiest);
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
 					minus_1_or_zero(busiest->nr_running),
-					imbalance, sd, CPU_NEWLY_IDLE, NULL);
+					imbalance, sd, CPU_NEWLY_IDLE,
+					&all_pinned);
 		spin_unlock(&busiest->lock);
 
-		if (!nr_moved) {
+		if (unlikely(all_pinned)) {
 			cpu_clear(cpu_of(busiest), cpus);
 			if (!cpus_empty(cpus))
 				goto redo;
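
The last hunk changes the newly-idle retry condition: instead of masking out the busiest cpu after any failed pull (!nr_moved), the balancer now does so only when all_pinned reports that nothing could be migrated because of cpu affinity. A standalone, simplified sketch of that retry pattern (plain userspace C with toy data; the helper names are made up and do not correspond to kernel functions):

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct toy_rq {
	int nr_running;		/* runnable tasks on this cpu */
	int nr_pinned;		/* of those, tasks that may not migrate */
};

/* Try to pull one task; report whether everything there was pinned. */
static int pull_one_task(struct toy_rq *src, bool *all_pinned)
{
	int movable = src->nr_running - src->nr_pinned;

	*all_pinned = (src->nr_running > 0 && movable == 0);
	if (movable <= 0)
		return 0;
	src->nr_running--;	/* pretend the task moved to the idle cpu */
	return 1;
}

int main(void)
{
	/* cpu0 is busy but fully pinned, cpu1 has movable work */
	struct toy_rq rq[NR_CPUS] = { { 3, 3 }, { 2, 0 }, { 0, 0 }, { 0, 0 } };
	bool masked[NR_CPUS] = { false };
	int moved = 0;

	for (;;) {
		bool all_pinned = false;
		int busiest = -1;

		for (int i = 0; i < NR_CPUS; i++)	/* pick busiest unmasked cpu */
			if (!masked[i] &&
			    (busiest < 0 || rq[i].nr_running > rq[busiest].nr_running))
				busiest = i;
		if (busiest < 0 || rq[busiest].nr_running == 0)
			break;				/* nothing left to balance */

		moved = pull_one_task(&rq[busiest], &all_pinned);
		if (moved || !all_pinned)
			break;				/* success, or simply nothing movable */

		masked[busiest] = true;			/* the "goto redo" case: drop this cpu, retry */
	}

	printf("tasks moved: %d\n", moved);
	return 0;
}
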