| author | Steve French <sfrench@us.ibm.com> | 2008-04-25 16:20:10 -0400 |
|---|---|---|
| committer | Steve French <sfrench@us.ibm.com> | 2008-04-25 16:20:10 -0400 |
| commit | 404e86e1550cc2c84bb57a372af784585c732f9a | |
| tree | c0e8e2d61c1b1c79705c0dc9f0f16e35267286e4 /kernel/sched.c | |
| parent | 0206e61b467fde4d7b50f1a64355182a4fd9576b | |
| parent | b9fa38f75ea7e1f64bc29653ca9758303ce698e4 | |
Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'kernel/sched.c')
| -rw-r--r-- | kernel/sched.c | 54 |
1 file changed, 3 insertions(+), 51 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 0014b03adaca..740fb409e5bb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1657,42 +1657,6 @@ void aggregate_group_weight(struct task_group *tg, struct sched_domain *sd)
 }
 
 /*
- * Redistribute tg->shares amongst all tg->cfs_rq[]s.
- */
-static void __aggregate_redistribute_shares(struct task_group *tg)
-{
-	int i, max_cpu = smp_processor_id();
-	unsigned long rq_weight = 0;
-	unsigned long shares, max_shares = 0, shares_rem = tg->shares;
-
-	for_each_possible_cpu(i)
-		rq_weight += tg->cfs_rq[i]->load.weight;
-
-	for_each_possible_cpu(i) {
-		/*
-		 * divide shares proportional to the rq_weights.
-		 */
-		shares = tg->shares * tg->cfs_rq[i]->load.weight;
-		shares /= rq_weight + 1;
-
-		tg->cfs_rq[i]->shares = shares;
-
-		if (shares > max_shares) {
-			max_shares = shares;
-			max_cpu = i;
-		}
-		shares_rem -= shares;
-	}
-
-	/*
-	 * Ensure it all adds up to tg->shares; we can loose a few
-	 * due to rounding down when computing the per-cpu shares.
-	 */
-	if (shares_rem)
-		tg->cfs_rq[max_cpu]->shares += shares_rem;
-}
-
-/*
  * Compute the weight of this group on the given cpus.
  */
 static
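The hunk above deletes the machine-wide fallback that split tg->shares across CPUs in proportion to each runqueue's load weight and handed the rounding remainder to the busiest CPU. For reference, here is a self-contained user-space sketch of that algorithm (the CPU count and sample weights are invented for illustration, not taken from the kernel):

```c
/*
 * Sketch of the removed __aggregate_redistribute_shares() logic:
 * split a fixed share budget proportionally to per-cpu load weight,
 * then give any rounding remainder to the most heavily loaded CPU.
 * NCPUS and the load[] values are illustrative only.
 */
#include <stdio.h>

#define NCPUS 4

int main(void)
{
	unsigned long load[NCPUS] = { 300, 100, 0, 600 };	/* per-cpu rq weight */
	unsigned long out[NCPUS];
	unsigned long total_shares = 1024;			/* tg->shares */
	unsigned long rq_weight = 0, shares_rem = total_shares;
	unsigned long max_shares = 0;
	int i, max_cpu = 0;

	for (i = 0; i < NCPUS; i++)
		rq_weight += load[i];

	for (i = 0; i < NCPUS; i++) {
		/* proportional split; the "+ 1" guards against division by zero */
		unsigned long shares = total_shares * load[i] / (rq_weight + 1);

		out[i] = shares;
		if (shares > max_shares) {
			max_shares = shares;
			max_cpu = i;
		}
		shares_rem -= shares;
	}

	/* integer division rounds down, so hand the leftovers to max_cpu */
	if (shares_rem)
		out[max_cpu] += shares_rem;

	for (i = 0; i < NCPUS; i++)
		printf("cpu%d: %lu\n", i, out[i]);
	return 0;
}
```

With the sample weights above, the proportional split yields 306 + 102 + 0 + 613 = 1021 shares, so the remaining 3 go to cpu3, the busiest CPU.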
@@ -1701,18 +1665,11 @@ void aggregate_group_shares(struct task_group *tg, struct sched_domain *sd)
 	unsigned long shares = 0;
 	int i;
 
-again:
 	for_each_cpu_mask(i, sd->span)
 		shares += tg->cfs_rq[i]->shares;
 
-	/*
-	 * When the span doesn't have any shares assigned, but does have
-	 * tasks to run do a machine wide rebalance (should be rare).
-	 */
-	if (unlikely(!shares && aggregate(tg, sd)->rq_weight)) {
-		__aggregate_redistribute_shares(tg);
-		goto again;
-	}
+	if ((!shares && aggregate(tg, sd)->rq_weight) || shares > tg->shares)
+		shares = tg->shares;
 
 	aggregate(tg, sd)->shares = shares;
 }
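The replacement drops the redistribute-and-retry loop in favour of a simple clamp: if the span has no shares despite queued weight, or the sum overshoots the budget, just use tg->shares. A minimal sketch of that logic (function name and parameters are invented for illustration):

```c
/*
 * Sketch of the new aggregate_group_shares() behaviour: sum the
 * per-cpu shares over the domain span, then clamp to the group's
 * total budget instead of retrying a machine-wide redistribution.
 */
unsigned long aggregate_shares(const unsigned long *cfs_shares, int ncpus,
			       unsigned long tg_shares, unsigned long rq_weight)
{
	unsigned long shares = 0;
	int i;

	for (i = 0; i < ncpus; i++)
		shares += cfs_shares[i];

	/* the clamp replaces the old "goto again" rebalance */
	if ((!shares && rq_weight) || shares > tg_shares)
		shares = tg_shares;

	return shares;
}
```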
@@ -7991,11 +7948,6 @@ void __init sched_init_smp(void)
 #else
 void __init sched_init_smp(void)
 {
-#if defined(CONFIG_NUMA)
-	sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
-					  GFP_KERNEL);
-	BUG_ON(sched_group_nodes_bycpu == NULL);
-#endif
 	sched_init_granularity();
 }
 #endif /* CONFIG_SMP */
@@ -8128,7 +8080,7 @@ void __init sched_init(void)
 	 * we use alloc_bootmem().
 	 */
 	if (alloc_size) {
-		ptr = (unsigned long)alloc_bootmem_low(alloc_size);
+		ptr = (unsigned long)alloc_bootmem(alloc_size);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 		init_task_group.se = (struct sched_entity **)ptr;
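Two smaller cleanups ride along in the last two hunks: the !CONFIG_SMP stub of sched_init_smp() no longer allocates sched_group_nodes_bycpu (NUMA scheduling domains only exist on SMP builds), and sched_init() switches from alloc_bootmem_low() to plain alloc_bootmem(), bringing the call in line with the comment directly above it, since these per-group arrays have no need to sit in low memory.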
