Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c            | 47
-rw-r--r--  kernel/time/tick-sched.c  |  1
2 files changed, 3 insertions, 45 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 09ca69b2c17d..9e19287f3359 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1657,42 +1657,6 @@ void aggregate_group_weight(struct task_group *tg, struct sched_domain *sd)
 }
 
 /*
- * Redistribute tg->shares amongst all tg->cfs_rq[]s.
- */
-static void __aggregate_redistribute_shares(struct task_group *tg)
-{
-        int i, max_cpu = smp_processor_id();
-        unsigned long rq_weight = 0;
-        unsigned long shares, max_shares = 0, shares_rem = tg->shares;
-
-        for_each_possible_cpu(i)
-                rq_weight += tg->cfs_rq[i]->load.weight;
-
-        for_each_possible_cpu(i) {
-                /*
-                 * divide shares proportional to the rq_weights.
-                 */
-                shares = tg->shares * tg->cfs_rq[i]->load.weight;
-                shares /= rq_weight + 1;
-
-                tg->cfs_rq[i]->shares = shares;
-
-                if (shares > max_shares) {
-                        max_shares = shares;
-                        max_cpu = i;
-                }
-                shares_rem -= shares;
-        }
-
-        /*
-         * Ensure it all adds up to tg->shares; we can loose a few
-         * due to rounding down when computing the per-cpu shares.
-         */
-        if (shares_rem)
-                tg->cfs_rq[max_cpu]->shares += shares_rem;
-}
-
-/*
  * Compute the weight of this group on the given cpus.
  */
 static
@@ -1701,18 +1665,11 @@ void aggregate_group_shares(struct task_group *tg, struct sched_domain *sd)
         unsigned long shares = 0;
         int i;
 
-again:
         for_each_cpu_mask(i, sd->span)
                 shares += tg->cfs_rq[i]->shares;
 
-        /*
-         * When the span doesn't have any shares assigned, but does have
-         * tasks to run do a machine wide rebalance (should be rare).
-         */
-        if (unlikely(!shares && aggregate(tg, sd)->rq_weight)) {
-                __aggregate_redistribute_shares(tg);
-                goto again;
-        }
+        if ((!shares && aggregate(tg, sd)->rq_weight) || shares > tg->shares)
+                shares = tg->shares;
 
         aggregate(tg, sd)->shares = shares;
 }
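The sched.c change replaces an iterative redistribution pass with a simple clamp. The removed __aggregate_redistribute_shares() split tg->shares across the per-cpu cfs_rq's in proportion to their load weights; since integer division rounds down, it handed the leftover to the CPU with the largest slice so the total stayed exact. The new code in aggregate_group_shares() no longer rebalances at all: it sums whatever per-cpu shares exist over the domain span and falls back to tg->shares in the degenerate cases (no shares despite runnable weight, or a sum above the group total). The user-space sketch below walks through both schemes; the CPU count and weights are made-up illustration values, not kernel data.

    /*
     * User-space sketch of the two schemes in the hunks above;
     * all values here are illustrative, none come from the kernel.
     */
    #include <stdio.h>

    #define NR_CPUS 4

    int main(void)
    {
        unsigned long tg_shares = 1024;        /* stands in for tg->shares */
        unsigned long weight[NR_CPUS] = { 300, 150, 50, 0 };
        unsigned long cfs_shares[NR_CPUS];
        unsigned long rq_weight = 0, shares_rem = tg_shares;
        unsigned long shares, sum = 0, max_shares = 0;
        int i, max_cpu = 0;

        for (i = 0; i < NR_CPUS; i++)
            rq_weight += weight[i];

        /*
         * Old scheme: split tg_shares proportionally to the weights.
         * Integer division rounds down, so a remainder can be left.
         */
        for (i = 0; i < NR_CPUS; i++) {
            shares = tg_shares * weight[i] / (rq_weight + 1);
            cfs_shares[i] = shares;
            if (shares > max_shares) {
                max_shares = shares;
                max_cpu = i;
            }
            shares_rem -= shares;
        }
        cfs_shares[max_cpu] += shares_rem;     /* keep the total exact */

        for (i = 0; i < NR_CPUS; i++)
            printf("cpu%d: %lu shares\n", i, cfs_shares[i]);

        /*
         * New scheme: no redistribution pass.  Sum whatever the
         * per-cpu values are and clamp the degenerate cases: zero
         * shares despite runnable weight, or a sum above the total.
         */
        for (i = 0; i < NR_CPUS; i++)
            sum += cfs_shares[i];
        if ((sum == 0 && rq_weight) || sum > tg_shares)
            sum = tg_shares;
        printf("aggregate shares: %lu (cap %lu)\n", sum, tg_shares);
        return 0;
    }

With these numbers the proportional pass yields 613/306/102/0 and a rounding remainder of 3, which the old code handed to cpu0; the new code simply trusts the running sum and caps it at the group total of 1024.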
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index d358d4e3a958..b854a895591e 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -393,6 +393,7 @@ void tick_nohz_restart_sched_tick(void)
                 sub_preempt_count(HARDIRQ_OFFSET);
         }
 
+        touch_softlockup_watchdog();
         /*
          * Cancel the scheduled timer and restore the tick
          */
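The one-line tick-sched.c hunk guards against a false positive. The softlockup detector is driven by the periodic tick and compares the current time against a per-cpu touch timestamp; while the tick is stopped in nohz idle no checks run, the timestamp goes stale, and the first check after the tick restarts could report a bogus lockup. Touching the watchdog on the restart path resets that timestamp first. Below is a rough user-space analogy, not the kernel watchdog: the names and threshold are illustrative, only touch_softlockup_watchdog() itself is real kernel API.

    /*
     * User-space analogy of the softlockup check: a "tick" compares
     * "now" against a touch timestamp and complains when it is stale.
     */
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    #define WATCHDOG_THRESHOLD 2          /* seconds, illustration value */

    static time_t watchdog_touch_ts;

    static void touch_watchdog(void)
    {
        watchdog_touch_ts = time(NULL);   /* like touch_softlockup_watchdog() */
    }

    /* Stands in for the check the periodic tick performs. */
    static void softlockup_tick(void)
    {
        if (time(NULL) - watchdog_touch_ts > WATCHDOG_THRESHOLD)
            printf("BUG: soft lockup (stale touch timestamp)\n");
        else
            printf("watchdog ok\n");
    }

    int main(void)
    {
        touch_watchdog();
        sleep(4);            /* tick stopped: long, legitimate nohz sleep */

        softlockup_tick();   /* without a fresh touch, this misfires */

        touch_watchdog();    /* what the hunk above adds on restart */
        softlockup_tick();   /* "watchdog ok" */
        return 0;
    }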