-rw-r--r--  kernel/sched.c      | 67
-rw-r--r--  kernel/sched_fair.c | 58
2 files changed, 58 insertions, 67 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 22436dd2e19f..6268d2dc9a91 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -279,13 +279,6 @@ static DEFINE_SPINLOCK(task_group_lock);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-#ifdef CONFIG_SMP
-static int root_task_group_empty(void)
-{
-        return list_empty(&root_task_group.children);
-}
-#endif
-
 # define INIT_TASK_GROUP_LOAD NICE_0_LOAD
 
 /*
@@ -1546,48 +1539,6 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-static void update_cfs_load(struct cfs_rq *cfs_rq, int lb);
-static void update_cfs_shares(struct cfs_rq *cfs_rq);
-
-/*
- * update tg->load_weight by folding this cpu's load_avg
- */
-static int tg_shares_up(struct task_group *tg, void *data)
-{
-        long load_avg;
-        struct cfs_rq *cfs_rq;
-        unsigned long flags;
-        int cpu = (long)data;
-        struct rq *rq;
-
-        if (!tg->se[cpu])
-                return 0;
-
-        rq = cpu_rq(cpu);
-        cfs_rq = tg->cfs_rq[cpu];
-
-        raw_spin_lock_irqsave(&rq->lock, flags);
-
-        update_rq_clock(rq);
-        update_cfs_load(cfs_rq, 1);
-
-        load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
-        load_avg -= cfs_rq->load_contribution;
-
-        atomic_add(load_avg, &tg->load_weight);
-        cfs_rq->load_contribution += load_avg;
-
-        /*
-         * We need to update shares after updating tg->load_weight in
-         * order to adjust the weight of groups with long running tasks.
-         */
-        update_cfs_shares(cfs_rq);
-
-        raw_spin_unlock_irqrestore(&rq->lock, flags);
-
-        return 0;
-}
-
 /*
  * Compute the cpu's hierarchical load factor for each task group.
  * This needs to be done in a top-down fashion because the load of a child
@@ -1611,29 +1562,11 @@ static int tg_load_down(struct task_group *tg, void *data)
         return 0;
 }
 
-static void update_shares(long cpu)
-{
-        if (root_task_group_empty())
-                return;
-
-        /*
-         * XXX: replace with an on-demand list
-         */
-
-        walk_tg_tree(tg_nop, tg_shares_up, (void *)cpu);
-}
-
 static void update_h_load(long cpu)
 {
         walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
 }
 
-#else
-
-static inline void update_shares(int cpu)
-{
-}
-
 #endif
 
 #ifdef CONFIG_PREEMPT
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0560e72bd732..46ff6587dc16 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2004,6 +2004,60 @@ out:
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+/*
+ * update tg->load_weight by folding this cpu's load_avg
+ */
+static int tg_shares_up(struct task_group *tg, int cpu)
+{
+        struct cfs_rq *cfs_rq;
+        unsigned long flags;
+        struct rq *rq;
+        long load_avg;
+
+        if (!tg->se[cpu])
+                return 0;
+
+        rq = cpu_rq(cpu);
+        cfs_rq = tg->cfs_rq[cpu];
+
+        raw_spin_lock_irqsave(&rq->lock, flags);
+
+        update_rq_clock(rq);
+        update_cfs_load(cfs_rq, 1);
+
+        load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
+        load_avg -= cfs_rq->load_contribution;
+        atomic_add(load_avg, &tg->load_weight);
+        cfs_rq->load_contribution += load_avg;
+
+        /*
+         * We need to update shares after updating tg->load_weight in
+         * order to adjust the weight of groups with long running tasks.
+         */
+        update_cfs_shares(cfs_rq);
+
+        raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+        return 0;
+}
+
+static void update_shares(int cpu)
+{
+        struct cfs_rq *cfs_rq;
+        struct rq *rq = cpu_rq(cpu);
+
+        rcu_read_lock();
+        for_each_leaf_cfs_rq(rq, cfs_rq) {
+                struct task_group *tg = cfs_rq->tg;
+
+                do {
+                        tg_shares_up(tg, cpu);
+                        tg = tg->parent;
+                } while (tg);
+        }
+        rcu_read_unlock();
+}
+
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                   unsigned long max_load_move,
@@ -2051,6 +2105,10 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
         return max_load_move - rem_load_move;
 }
 #else
+static inline void update_shares(int cpu)
+{
+}
+
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                   unsigned long max_load_move,
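
For context when reading the sched_fair.c hunks above: the old update_shares() in sched.c walked the entire task-group tree via walk_tg_tree() (note the removed "XXX: replace with an on-demand list" comment), while the moved version only visits groups that currently have a leaf cfs_rq on this runqueue and folds each group's per-CPU load delta upward through tg->parent. Below is a minimal, self-contained sketch of that bottom-up folding under simplified assumptions; struct group, fold_cpu_load() and update_shares_cpu() are stand-ins invented for illustration, not the kernel's data structures, locking, or exact arithmetic.

/*
 * Toy model (not kernel code): for every "leaf" group with load on this
 * CPU, walk up through the parents and fold the not-yet-accounted per-CPU
 * load delta into the group's global load_weight.
 */
#include <stdio.h>

struct group {
        const char *name;
        struct group *parent;
        long load_weight;        /* global weight, summed over all CPUs */
        long load_avg;           /* this CPU's average load             */
        long load_contribution;  /* portion already folded into weight  */
};

static void fold_cpu_load(struct group *g)
{
        long delta = g->load_avg - g->load_contribution;

        g->load_weight += delta;        /* atomic_add() in the real code */
        g->load_contribution += delta;
}

/* walk each leaf group up to the root, mirroring the leaf-cfs_rq loop */
static void update_shares_cpu(struct group **leaves, int nr_leaves)
{
        for (int i = 0; i < nr_leaves; i++)
                for (struct group *g = leaves[i]; g; g = g->parent)
                        fold_cpu_load(g);
}

int main(void)
{
        struct group root  = { "root",  NULL,  0, 0, 0 };
        struct group child = { "child", &root, 0, 0, 0 };

        /* pretend this CPU observed load in both groups since the last fold */
        child.load_avg = 512;
        root.load_avg  = 512;

        struct group *leaves[] = { &child };
        update_shares_cpu(leaves, 1);

        printf("%-5s load_weight=%ld\n", root.name,  root.load_weight);
        printf("%-5s load_weight=%ld\n", child.name, child.load_weight);
        return 0;
}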