path: root/kernel
author	Linus Torvalds <torvalds@linux-foundation.org>	2009-07-22 13:10:36 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-07-22 13:10:36 -0400
commit	356d1b52eb2445d94c6781f15346f00f4a675fda (patch)
tree	16e908c7548b3697183432a3771f511e8c1c9c05 /kernel
parent	bb184d11ffd015e67e5334e5a88bec2e00be5c20 (diff)
parent	6301cb95c119ebf324bb96ee226fa9ddffad80a7 (diff)
Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched: fix nr_uninterruptible accounting of frozen tasks really
  sched: fix load average accounting vs. cpu hotplug
  sched: Account for vruntime wrapping
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/freezer.c	7
-rw-r--r--	kernel/sched.c	4
-rw-r--r--	kernel/sched_fair.c	10
3 files changed, 17 insertions(+), 4 deletions(-)
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 2f4936cf7083..bd1d42b17cb2 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -44,12 +44,19 @@ void refrigerator(void)
 	recalc_sigpending(); /* We sent fake signal, clean it up */
 	spin_unlock_irq(&current->sighand->siglock);
 
+	/* prevent accounting of that task to load */
+	current->flags |= PF_FREEZING;
+
 	for (;;) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		if (!frozen(current))
 			break;
 		schedule();
 	}
+
+	/* Remove the accounting blocker */
+	current->flags &= ~PF_FREEZING;
+
 	pr_debug("%s left refrigerator\n", current->comm);
 	__set_current_state(save);
 }
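The refrigerator() hunk only sets and clears the flag; the point of PF_FREEZING is to keep a frozen task, which sleeps in TASK_UNINTERRUPTIBLE, out of the nr_uninterruptible/load-average bookkeeping. Below is a minimal userspace sketch of that decision, assuming the consumer is the task_contributes_to_load() check (as in the companion "nr_uninterruptible accounting of frozen tasks" fix); the struct and constants are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's state/flag bits (assumed values). */
#define TASK_UNINTERRUPTIBLE	0x02
#define PF_FREEZING		0x00004000

struct task {
	long state;
	unsigned long flags;
};

/* A task in TASK_UNINTERRUPTIBLE normally counts toward the load average;
 * the PF_FREEZING test excludes tasks parked in the refrigerator. */
static int task_contributes_to_load(const struct task *t)
{
	return (t->state & TASK_UNINTERRUPTIBLE) != 0 &&
	       (t->flags & PF_FREEZING) == 0;
}

int main(void)
{
	struct task frozen  = { TASK_UNINTERRUPTIBLE, PF_FREEZING };
	struct task blocked = { TASK_UNINTERRUPTIBLE, 0 };

	printf("frozen task counted:  %d\n", task_contributes_to_load(&frozen));
	printf("blocked task counted: %d\n", task_contributes_to_load(&blocked));
	return 0;
}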
diff --git a/kernel/sched.c b/kernel/sched.c
index 98972d366fdc..1b59e265273b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7289,6 +7289,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
 static void calc_global_load_remove(struct rq *rq)
 {
 	atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
+	rq->calc_load_active = 0;
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
@@ -7515,6 +7516,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		task_rq_unlock(rq, &flags);
 		get_task_struct(p);
 		cpu_rq(cpu)->migration_thread = p;
+		rq->calc_load_update = calc_load_update;
 		break;
 
 	case CPU_ONLINE:
@@ -7525,8 +7527,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		/* Update our root-domain */
 		rq = cpu_rq(cpu);
 		spin_lock_irqsave(&rq->lock, flags);
-		rq->calc_load_update = calc_load_update;
-		rq->calc_load_active = 0;
 		if (rq->rd) {
 			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 
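These two hunks reset rq->calc_load_active when a dead CPU's contribution is folded back into calc_load_tasks and arm rq->calc_load_update at CPU_UP_PREPARE rather than CPU_ONLINE, so a hot-unplugged and replugged CPU does not feed stale counts into the global load average. For context, here is a small userspace model of the fixed-point averaging that consumes these counts (patterned on the calc_load() helper in kernel/sched.c); the fixed-point constants mirror the kernel's 11-bit format, while the scenario numbers are made up.

#include <stdio.h>

#define FSHIFT	11			/* bits of fractional precision */
#define FIXED_1	(1 << FSHIFT)		/* 1.0 in fixed point */
#define EXP_1	1884			/* ~exp(-5s/1min) in fixed point */

/* One load-average step: load = load*exp + active*(1 - exp). */
static unsigned long calc_load(unsigned long load, unsigned long exp,
			       unsigned long active)
{
	load *= exp;
	load += active * (FIXED_1 - exp);
	return load >> FSHIFT;
}

int main(void)
{
	unsigned long avenrun = 0;
	unsigned long active = 3 * FIXED_1;	/* 3 runnable/uninterruptible tasks */
	int i;

	/* A stale calc_load_active left over from a hot-unplugged CPU would be
	 * folded into 'active' again here, inflating the average; the fix
	 * zeroes it in calc_global_load_remove(). */
	for (i = 0; i < 12; i++)		/* 12 x 5s ticks = one minute */
		avenrun = calc_load(avenrun, EXP_1, active);

	printf("1-min load after 60s at 3 active: %lu.%02lu\n",
	       avenrun >> FSHIFT,
	       ((avenrun & (FIXED_1 - 1)) * 100) >> FSHIFT);
	return 0;
}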
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7c248dc30f41..9ffb2b2ceba4 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -266,6 +266,12 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
 	return min_vruntime;
 }
 
+static inline int entity_before(struct sched_entity *a,
+				struct sched_entity *b)
+{
+	return (s64)(a->vruntime - b->vruntime) < 0;
+}
+
 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	return se->vruntime - cfs_rq->min_vruntime;
@@ -1017,7 +1023,7 @@ static void yield_task_fair(struct rq *rq)
 	/*
 	 * Already in the rightmost position?
 	 */
-	if (unlikely(!rightmost || rightmost->vruntime < se->vruntime))
+	if (unlikely(!rightmost || entity_before(rightmost, se)))
 		return;
 
 	/*
@@ -1713,7 +1719,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 
 	/* 'curr' will be NULL if the child belongs to a different group */
 	if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
-			curr && curr->vruntime < se->vruntime) {
+			curr && entity_before(curr, se)) {
 		/*
 		 * Upon rescheduling, sched_class::put_prev_task() will place
 		 * 'current' within the tree based on its new key value.
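entity_before() orders two scheduling entities by the sign of the signed difference of their vruntimes, which keeps the comparison correct even after the unsigned 64-bit vruntime wraps around; the raw '<' comparisons it replaces give the wrong answer near the wrap point. A self-contained sketch of the difference (all names are local to the example):

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe ordering, in the style of entity_before(): cast the unsigned
 * difference to signed and test its sign. */
static int before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	/* 'a' was stamped just before the 64-bit counter wrapped, 'b' just after. */
	uint64_t a = UINT64_MAX - 10;
	uint64_t b = a + 20;			/* wraps around to 9 */

	printf("raw u64 compare (a < b): %d\n", a < b);			/* 0 - wrong near wrap */
	printf("signed-difference before(a, b): %d\n", before(a, b));	/* 1 - still ordered */
	return 0;
}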