author	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-23 12:37:16 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-23 12:37:16 -0400
commit	133e887f90208d339088dd60cb1d08a72ba27288 (patch)
tree	4f6bab9d13df7fac50bc7d699d8486f441deb428 /kernel/sched.c
parent	e82cff752f57810a2259415ad2e9087c2d69484c (diff)
parent	0c4b83da58ec2e96ce9c44c211d6eac5f9dae478 (diff)
Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched: disable the hrtick for now
  sched: revert back to per-rq vruntime
  sched: fair scheduler should not resched rt tasks
  sched: optimize group load balancer
  sched: minor fast-path overhead reduction
  sched: fix the wrong mask_len, cleanup
  sched: kill unused scheduler decl.
  sched: fix the wrong mask_len
  sched: only update rq->clock while holding rq->lock
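Of the fixes merged above, "sched: optimize group load balancer" reshapes most of the diff below: a new sysctl, sysctl_sched_shares_thresh, lets the per-cpu group-share update skip the remote runqueue lock whenever the recomputed share is within a small threshold of the current value. What follows is a minimal userspace sketch of that pattern, not the kernel code: a pthread mutex stands in for rq->lock, and fake_rq, update_shares and SHARES_THRESH are illustrative names, not kernel APIs.

/*
 * Hedged sketch of the thresholded share update: recompute the share,
 * clamp it, and take the (possibly remote) lock only when the result
 * has drifted by more than a small threshold.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define SHARES_THRESH 4          /* mirrors sysctl_sched_shares_thresh */
#define MIN_SHARES    2
#define MAX_SHARES    (1UL << 18)

struct fake_rq {
	pthread_mutex_t lock;        /* stands in for rq->lock */
	unsigned long shares;        /* current per-cpu group share */
};

static unsigned long clamp_ul(unsigned long v, unsigned long lo, unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static void update_shares(struct fake_rq *rq, unsigned long sd_shares,
			  unsigned long rq_weight, unsigned long sd_rq_weight)
{
	unsigned long shares = (sd_shares * rq_weight) / (sd_rq_weight + 1);
	long diff;

	shares = clamp_ul(shares, MIN_SHARES, MAX_SHARES);

	/*
	 * Cheap, unlocked read filters out no-op updates; the kernel
	 * tolerates the equivalent racy read, and this demo is
	 * single-threaded anyway.
	 */
	diff = (long)shares - (long)rq->shares;
	if (labs(diff) <= SHARES_THRESH)
		return;

	pthread_mutex_lock(&rq->lock);   /* remote "rq-lock" only when needed */
	rq->shares = shares;
	pthread_mutex_unlock(&rq->lock);
}

int main(void)
{
	struct fake_rq rq = { PTHREAD_MUTEX_INITIALIZER, 512 };

	update_shares(&rq, 2048, 512, 1023);  /* drift of 512: takes the lock */
	update_shares(&rq, 2048, 513, 1023);  /* drift of 2: skipped          */
	printf("shares = %lu\n", rq.shares);
	return 0;
}

The trade-off, as the new comment in the diff puts it, is a little fuzziness in fairness in exchange for far fewer cross-CPU lock acquisitions.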
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	51
1 file changed, 26 insertions(+), 25 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index d906f72b42d2..945a97b9600d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -819,6 +819,13 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
 unsigned int sysctl_sched_shares_ratelimit = 250000;
 
 /*
+ * Inject some fuzzyness into changing the per-cpu group shares
+ * this avoids remote rq-locks at the expense of fairness.
+ * default: 4
+ */
+unsigned int sysctl_sched_shares_thresh = 4;
+
+/*
  * period over which we measure -rt task cpu usage in us.
  * default: 1s
  */
@@ -1454,8 +1461,8 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
  * Calculate and set the cpu's group shares.
  */
 static void
-__update_group_shares_cpu(struct task_group *tg, int cpu,
-			  unsigned long sd_shares, unsigned long sd_rq_weight)
+update_group_shares_cpu(struct task_group *tg, int cpu,
+			unsigned long sd_shares, unsigned long sd_rq_weight)
 {
 	int boost = 0;
 	unsigned long shares;
@@ -1486,19 +1493,23 @@ __update_group_shares_cpu(struct task_group *tg, int cpu,
 	 *
 	 */
 	shares = (sd_shares * rq_weight) / (sd_rq_weight + 1);
+	shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
 
-	/*
-	 * record the actual number of shares, not the boosted amount.
-	 */
-	tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
-	tg->cfs_rq[cpu]->rq_weight = rq_weight;
+	if (abs(shares - tg->se[cpu]->load.weight) >
+			sysctl_sched_shares_thresh) {
+		struct rq *rq = cpu_rq(cpu);
+		unsigned long flags;
 
-	if (shares < MIN_SHARES)
-		shares = MIN_SHARES;
-	else if (shares > MAX_SHARES)
-		shares = MAX_SHARES;
+		spin_lock_irqsave(&rq->lock, flags);
+		/*
+		 * record the actual number of shares, not the boosted amount.
+		 */
+		tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
+		tg->cfs_rq[cpu]->rq_weight = rq_weight;
 
-	__set_se_shares(tg->se[cpu], shares);
+		__set_se_shares(tg->se[cpu], shares);
+		spin_unlock_irqrestore(&rq->lock, flags);
+	}
 }
 
 /*
@@ -1527,14 +1538,8 @@ static int tg_shares_up(struct task_group *tg, void *data)
 	if (!rq_weight)
 		rq_weight = cpus_weight(sd->span) * NICE_0_LOAD;
 
-	for_each_cpu_mask(i, sd->span) {
-		struct rq *rq = cpu_rq(i);
-		unsigned long flags;
-
-		spin_lock_irqsave(&rq->lock, flags);
-		__update_group_shares_cpu(tg, i, shares, rq_weight);
-		spin_unlock_irqrestore(&rq->lock, flags);
-	}
+	for_each_cpu_mask(i, sd->span)
+		update_group_shares_cpu(tg, i, shares, rq_weight);
 
 	return 0;
 }
@@ -4443,12 +4448,8 @@ need_resched_nonpreemptible:
 	if (sched_feat(HRTICK))
 		hrtick_clear(rq);
 
-	/*
-	 * Do the rq-clock update outside the rq lock:
-	 */
-	local_irq_disable();
-	update_rq_clock(rq);
-	spin_lock(&rq->lock);
+	spin_lock_irq(&rq->lock);
+	update_rq_clock(rq);
 	clear_tsk_need_resched(prev);
 
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
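The last hunk implements "sched: only update rq->clock while holding rq->lock": instead of disabling interrupts, updating the clock, and only then taking the lock, schedule() now takes the lock with interrupts disabled in one step and updates the clock under it, so no other CPU can advance or observe the clock mid-update. Below is a rough userspace analogue, again with a pthread mutex standing in for rq->lock and illustrative names throughout; it is a sketch of the ordering, not kernel code.

/*
 * Hedged sketch: any state that other threads read under the lock
 * (here, the clock) is also written only under it.
 */
#include <pthread.h>
#include <time.h>

struct fake_rq {
	pthread_mutex_t lock;        /* stands in for rq->lock  */
	unsigned long long clock_ns; /* stands in for rq->clock */
};

static unsigned long long now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* After the fix: take the lock first, then update the clock. */
static void schedule_prologue(struct fake_rq *rq)
{
	pthread_mutex_lock(&rq->lock);   /* spin_lock_irq(&rq->lock); */
	rq->clock_ns = now_ns();         /* update_rq_clock(rq);      */
	/* ... pick the next task while the clock cannot move under us ... */
	pthread_mutex_unlock(&rq->lock);
}

int main(void)
{
	struct fake_rq rq = { PTHREAD_MUTEX_INITIALIZER, 0 };
	schedule_prologue(&rq);
	return 0;
}

The design point is the ordering, not the lock primitive: the pre-fix code updated the clock with interrupts off but before the lock was held, leaving a window where a remote lock holder could see the clock change underneath it.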