path: root/kernel/sched.c
author	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-19 17:11:14 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-19 17:11:14 -0400
commit	ff86303e3021587c49a14df1bc54fe2d393e2223 (patch)
tree	7f1b26407aef36ba486428285604b8b7a7cbf99e /kernel/sched.c
parent	626ac545c12e5f9bffe93086d1d03d26c99987ea (diff)
parent	e436d80085133858bf2613a630365e8a0459fd58 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched
* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  [PATCH] sched: implement cpu_clock(cpu) high-speed time source
  [PATCH] sched: fix the all pinned logic in load_balance_newidle()
  [PATCH] sched: fix newly idle load balance in case of SMT
  [PATCH] sched: sched_cacheflush is now unused
Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 31 ++++++++++++++++++++++++++-----
 1 file changed, 26 insertions(+), 5 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 645256b228c3..93cf241cfbe9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -379,6 +379,23 @@ static inline unsigned long long rq_clock(struct rq *rq)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+unsigned long long cpu_clock(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long long now;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rq->lock, flags);
+	now = rq_clock(rq);
+	spin_unlock_irqrestore(&rq->lock, flags);
+
+	return now;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /* Change a task's ->cfs_rq if it moves across CPUs */
 static inline void set_task_cfs_rq(struct task_struct *p)
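
The hunk above introduces cpu_clock() as a kernel-internal, per-cpu time source built on sched_clock(). As a rough usage sketch only (the helper measure_section() and its printk are illustrative, not part of this merge), a caller could pin itself to a CPU and take two readings from the same runqueue clock:

#include <linux/smp.h>
#include <linux/kernel.h>

/* Illustrative caller: time a code section with the new per-cpu clock. */
static void measure_section(void)
{
	int cpu = get_cpu();			/* disable preemption, get current CPU */
	unsigned long long t0 = cpu_clock(cpu);

	/* ... work being measured ... */

	unsigned long long t1 = cpu_clock(cpu);

	put_cpu();
	printk(KERN_DEBUG "section took %llu ns\n", t1 - t0);
}

Each reading briefly takes the runqueue spinlock, as the implementation in the hunk shows, so this suits occasional instrumentation rather than very hot paths.
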
@@ -2235,7 +2252,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 
 			rq = cpu_rq(i);
 
-			if (*sd_idle && !idle_cpu(i))
+			if (*sd_idle && rq->nr_running)
 				*sd_idle = 0;
 
 			/* Bias balancing toward cpus of our domain */
@@ -2257,9 +2274,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		/*
 		 * First idle cpu or the first cpu(busiest) in this sched group
 		 * is eligible for doing load balancing at this and above
-		 * domains.
+		 * domains. In the newly idle case, we will allow all the cpu's
+		 * to do the newly idle load balance.
 		 */
-		if (local_group && balance_cpu != this_cpu && balance) {
+		if (idle != CPU_NEWLY_IDLE && local_group &&
+		    balance_cpu != this_cpu && balance) {
 			*balance = 0;
 			goto ret;
 		}
@@ -2677,6 +2696,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 	unsigned long imbalance;
 	int nr_moved = 0;
 	int sd_idle = 0;
+	int all_pinned = 0;
 	cpumask_t cpus = CPU_MASK_ALL;
 
 	/*
@@ -2715,10 +2735,11 @@ redo:
 		double_lock_balance(this_rq, busiest);
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
 					minus_1_or_zero(busiest->nr_running),
-					imbalance, sd, CPU_NEWLY_IDLE, NULL);
+					imbalance, sd, CPU_NEWLY_IDLE,
+					&all_pinned);
 		spin_unlock(&busiest->lock);
 
-		if (!nr_moved) {
+		if (unlikely(all_pinned)) {
 			cpu_clear(cpu_of(busiest), cpus);
 			if (!cpus_empty(cpus))
 				goto redo;
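
The remaining hunks tighten the newly-idle balancing path: find_busiest_group() now clears sd_idle when a runqueue has queued tasks rather than when the CPU merely looks non-idle, the newly idle case lets every CPU attempt the balance, and load_balance_newidle() retries with another source CPU only when every task on the chosen one was pinned there (all_pinned), instead of on any failed move. A minimal userspace analogue of that retry rule, with all names and the choice of pinned CPU made up for illustration, might look like:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for move_tasks(): pretend CPU 2 carries only pinned tasks. */
static int pull_tasks(int src, bool *all_pinned)
{
	*all_pinned = (src == 2);
	return *all_pinned ? 0 : 1;	/* tasks moved */
}

int main(void)
{
	bool candidates[NR_CPUS] = { false, true, true, true };	/* CPU 0 is the puller */
	int busiest = 2;

	for (;;) {
		bool all_pinned = false;

		if (pull_tasks(busiest, &all_pinned) > 0) {
			printf("pulled work from cpu %d\n", busiest);
			break;
		}
		if (!all_pinned)
			break;	/* move failed for some other reason: give up */

		/* cpu_clear() analogue: drop the fully pinned source and redo */
		candidates[busiest] = false;
		busiest = -1;
		for (int i = 0; i < NR_CPUS; i++) {
			if (candidates[i]) {
				busiest = i;
				break;
			}
		}
		if (busiest < 0)
			break;	/* cpus_empty() analogue: no sources left */
	}
	return 0;
}
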