Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 447
1 file changed, 41 insertions(+), 406 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index cfa222a91539..bfb8ad8ed171 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
| @@ -136,7 +136,7 @@ static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val) | |||
| 136 | 136 | ||
| 137 | static inline int rt_policy(int policy) | 137 | static inline int rt_policy(int policy) |
| 138 | { | 138 | { |
| 139 | if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR)) | 139 | if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR)) |
| 140 | return 1; | 140 | return 1; |
| 141 | return 0; | 141 | return 0; |
| 142 | } | 142 | } |
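Both policy checks now sit under a single unlikely(), so the compiler gets one cold-branch hint for the whole disjunction; unlikely(a) || unlikely(b) and unlikely(a || b) select the same branch, only the hint placement differs. A minimal userspace sketch of the combined form, using __builtin_expect() directly and assuming the usual uapi values SCHED_FIFO == 1 and SCHED_RR == 2:

        /* Sketch only: mirrors the combined-hint form of rt_policy(). */
        static inline int rt_policy_sketch(int policy)
        {
                if (__builtin_expect(policy == 1 || policy == 2, 0))
                        return 1;
                return 0;
        }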
| @@ -398,43 +398,6 @@ struct cfs_rq { | |||
| 398 | */ | 398 | */ |
| 399 | struct list_head leaf_cfs_rq_list; | 399 | struct list_head leaf_cfs_rq_list; |
| 400 | struct task_group *tg; /* group that "owns" this runqueue */ | 400 | struct task_group *tg; /* group that "owns" this runqueue */ |
| 401 | |||
| 402 | #ifdef CONFIG_SMP | ||
| 403 | unsigned long task_weight; | ||
| 404 | unsigned long shares; | ||
| 405 | /* | ||
| 406 | * We need space to build a sched_domain wide view of the full task | ||
| 407 | * group tree, in order to avoid depending on dynamic memory allocation | ||
| 408 | * during the load balancing we place this in the per cpu task group | ||
| 409 | * hierarchy. This limits the load balancing to one instance per cpu, | ||
| 410 | * but more should not be needed anyway. | ||
| 411 | */ | ||
| 412 | struct aggregate_struct { | ||
| 413 | /* | ||
| 414 | * load = weight(cpus) * f(tg) | ||
| 415 | * | ||
| 416 | * Where f(tg) is the recursive weight fraction assigned to | ||
| 417 | * this group. | ||
| 418 | */ | ||
| 419 | unsigned long load; | ||
| 420 | |||
| 421 | /* | ||
| 422 | * part of the group weight distributed to this span. | ||
| 423 | */ | ||
| 424 | unsigned long shares; | ||
| 425 | |||
| 426 | /* | ||
| 427 | * The sum of all runqueue weights within this span. | ||
| 428 | */ | ||
| 429 | unsigned long rq_weight; | ||
| 430 | |||
| 431 | /* | ||
| 432 | * Weight contributed by tasks; this is the part we can | ||
| 433 | * influence by moving tasks around. | ||
| 434 | */ | ||
| 435 | unsigned long task_weight; | ||
| 436 | } aggregate; | ||
| 437 | #endif | ||
| 438 | #endif | 401 | #endif |
| 439 | }; | 402 | }; |
| 440 | 403 | ||
| @@ -1368,9 +1331,6 @@ static void __resched_task(struct task_struct *p, int tif_bit) | |||
| 1368 | */ | 1331 | */ |
| 1369 | #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) | 1332 | #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) |
| 1370 | 1333 | ||
| 1371 | /* | ||
| 1372 | * delta *= weight / lw | ||
| 1373 | */ | ||
| 1374 | static unsigned long | 1334 | static unsigned long |
| 1375 | calc_delta_mine(unsigned long delta_exec, unsigned long weight, | 1335 | calc_delta_mine(unsigned long delta_exec, unsigned long weight, |
| 1376 | struct load_weight *lw) | 1336 | struct load_weight *lw) |
| @@ -1393,6 +1353,12 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight, | |||
| 1393 | return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); | 1353 | return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); |
| 1394 | } | 1354 | } |
| 1395 | 1355 | ||
| 1356 | static inline unsigned long | ||
| 1357 | calc_delta_fair(unsigned long delta_exec, struct load_weight *lw) | ||
| 1358 | { | ||
| 1359 | return calc_delta_mine(delta_exec, NICE_0_LOAD, lw); | ||
| 1360 | } | ||
| 1361 | |||
| 1396 | static inline void update_load_add(struct load_weight *lw, unsigned long inc) | 1362 | static inline void update_load_add(struct load_weight *lw, unsigned long inc) |
| 1397 | { | 1363 | { |
| 1398 | lw->weight += inc; | 1364 | lw->weight += inc; |
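The new calc_delta_fair() helper is calc_delta_mine() with the weight pinned to NICE_0_LOAD, i.e. it scales a raw execution delta by NICE_0_LOAD / lw->weight. A worked sketch, assuming the customary NICE_0_LOAD of 1024 and ignoring the rounding and LONG_MAX clamping that calc_delta_mine() performs:

        /*
         * Illustration: a 6 ms delta on a runqueue loaded with two nice-0
         * tasks (total weight 2 * 1024) shrinks to a 3 ms fair delta.
         */
        unsigned long long delta_exec  = 6000000ULL;      /* 6 ms in ns  */
        unsigned long long nice_0_load = 1024ULL;
        unsigned long long rq_weight   = 2 * 1024ULL;
        unsigned long long delta_fair  = delta_exec * nice_0_load / rq_weight;
                                                           /* 3000000 ns  */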
| @@ -1505,326 +1471,6 @@ static unsigned long source_load(int cpu, int type); | |||
| 1505 | static unsigned long target_load(int cpu, int type); | 1471 | static unsigned long target_load(int cpu, int type); |
| 1506 | static unsigned long cpu_avg_load_per_task(int cpu); | 1472 | static unsigned long cpu_avg_load_per_task(int cpu); |
| 1507 | static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); | 1473 | static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); |
| 1508 | |||
| 1509 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
| 1510 | |||
| 1511 | /* | ||
| 1512 | * Group load balancing. | ||
| 1513 | * | ||
| 1514 | * We calculate a few balance domain wide aggregate numbers; load and weight. | ||
| 1515 | * Given the pictures below, and assuming each item has equal weight: | ||
| 1516 | * | ||
| 1517 | * root 1 - thread | ||
| 1518 | * / | \ A - group | ||
| 1519 | * A 1 B | ||
| 1520 | * /|\ / \ | ||
| 1521 | * C 2 D 3 4 | ||
| 1522 | * | | | ||
| 1523 | * 5 6 | ||
| 1524 | * | ||
| 1525 | * load: | ||
| 1526 | * A and B get 1/3-rd of the total load. C and D get 1/3-rd of A's 1/3-rd, | ||
| 1527 | * which equals 1/9-th of the total load. | ||
| 1528 | * | ||
| 1529 | * shares: | ||
| 1530 | * The weight of this group on the selected cpus. | ||
| 1531 | * | ||
| 1532 | * rq_weight: | ||
| 1533 | * Direct sum of all the cpu's their rq weight, e.g. A would get 3 while | ||
| 1534 | * B would get 2. | ||
| 1535 | * | ||
| 1536 | * task_weight: | ||
| 1537 | * Part of the rq_weight contributed by tasks; all groups except B would | ||
| 1538 | * get 1, B gets 2. | ||
| 1539 | */ | ||
| 1540 | |||
| 1541 | static inline struct aggregate_struct * | ||
| 1542 | aggregate(struct task_group *tg, struct sched_domain *sd) | ||
| 1543 | { | ||
| 1544 | return &tg->cfs_rq[sd->first_cpu]->aggregate; | ||
| 1545 | } | ||
| 1546 | |||
| 1547 | typedef void (*aggregate_func)(struct task_group *, struct sched_domain *); | ||
| 1548 | |||
| 1549 | /* | ||
| 1550 | * Iterate the full tree, calling @down when first entering a node and @up when | ||
| 1551 | * leaving it for the final time. | ||
| 1552 | */ | ||
| 1553 | static | ||
| 1554 | void aggregate_walk_tree(aggregate_func down, aggregate_func up, | ||
| 1555 | struct sched_domain *sd) | ||
| 1556 | { | ||
| 1557 | struct task_group *parent, *child; | ||
| 1558 | |||
| 1559 | rcu_read_lock(); | ||
| 1560 | parent = &root_task_group; | ||
| 1561 | down: | ||
| 1562 | (*down)(parent, sd); | ||
| 1563 | list_for_each_entry_rcu(child, &parent->children, siblings) { | ||
| 1564 | parent = child; | ||
| 1565 | goto down; | ||
| 1566 | |||
| 1567 | up: | ||
| 1568 | continue; | ||
| 1569 | } | ||
| 1570 | (*up)(parent, sd); | ||
| 1571 | |||
| 1572 | child = parent; | ||
| 1573 | parent = parent->parent; | ||
| 1574 | if (parent) | ||
| 1575 | goto up; | ||
| 1576 | rcu_read_unlock(); | ||
| 1577 | } | ||
| 1578 | |||
| 1579 | /* | ||
| 1580 | * Calculate the aggregate runqueue weight. | ||
| 1581 | */ | ||
| 1582 | static | ||
| 1583 | void aggregate_group_weight(struct task_group *tg, struct sched_domain *sd) | ||
| 1584 | { | ||
| 1585 | unsigned long rq_weight = 0; | ||
| 1586 | unsigned long task_weight = 0; | ||
| 1587 | int i; | ||
| 1588 | |||
| 1589 | for_each_cpu_mask(i, sd->span) { | ||
| 1590 | rq_weight += tg->cfs_rq[i]->load.weight; | ||
| 1591 | task_weight += tg->cfs_rq[i]->task_weight; | ||
| 1592 | } | ||
| 1593 | |||
| 1594 | aggregate(tg, sd)->rq_weight = rq_weight; | ||
| 1595 | aggregate(tg, sd)->task_weight = task_weight; | ||
| 1596 | } | ||
| 1597 | |||
| 1598 | /* | ||
| 1599 | * Compute the weight of this group on the given cpus. | ||
| 1600 | */ | ||
| 1601 | static | ||
| 1602 | void aggregate_group_shares(struct task_group *tg, struct sched_domain *sd) | ||
| 1603 | { | ||
| 1604 | unsigned long shares = 0; | ||
| 1605 | int i; | ||
| 1606 | |||
| 1607 | for_each_cpu_mask(i, sd->span) | ||
| 1608 | shares += tg->cfs_rq[i]->shares; | ||
| 1609 | |||
| 1610 | if ((!shares && aggregate(tg, sd)->rq_weight) || shares > tg->shares) | ||
| 1611 | shares = tg->shares; | ||
| 1612 | |||
| 1613 | aggregate(tg, sd)->shares = shares; | ||
| 1614 | } | ||
| 1615 | |||
| 1616 | /* | ||
| 1617 | * Compute the load fraction assigned to this group, relies on the aggregate | ||
| 1618 | * weight and this group's parent's load, i.e. top-down. | ||
| 1619 | */ | ||
| 1620 | static | ||
| 1621 | void aggregate_group_load(struct task_group *tg, struct sched_domain *sd) | ||
| 1622 | { | ||
| 1623 | unsigned long load; | ||
| 1624 | |||
| 1625 | if (!tg->parent) { | ||
| 1626 | int i; | ||
| 1627 | |||
| 1628 | load = 0; | ||
| 1629 | for_each_cpu_mask(i, sd->span) | ||
| 1630 | load += cpu_rq(i)->load.weight; | ||
| 1631 | |||
| 1632 | } else { | ||
| 1633 | load = aggregate(tg->parent, sd)->load; | ||
| 1634 | |||
| 1635 | /* | ||
| 1636 | * shares is our weight in the parent's rq so | ||
| 1637 | * shares/parent->rq_weight gives our fraction of the load | ||
| 1638 | */ | ||
| 1639 | load *= aggregate(tg, sd)->shares; | ||
| 1640 | load /= aggregate(tg->parent, sd)->rq_weight + 1; | ||
| 1641 | } | ||
| 1642 | |||
| 1643 | aggregate(tg, sd)->load = load; | ||
| 1644 | } | ||
| 1645 | |||
| 1646 | static void __set_se_shares(struct sched_entity *se, unsigned long shares); | ||
| 1647 | |||
| 1648 | /* | ||
| 1649 | * Calculate and set the cpu's group shares. | ||
| 1650 | */ | ||
| 1651 | static void | ||
| 1652 | __update_group_shares_cpu(struct task_group *tg, struct sched_domain *sd, | ||
| 1653 | int tcpu) | ||
| 1654 | { | ||
| 1655 | int boost = 0; | ||
| 1656 | unsigned long shares; | ||
| 1657 | unsigned long rq_weight; | ||
| 1658 | |||
| 1659 | if (!tg->se[tcpu]) | ||
| 1660 | return; | ||
| 1661 | |||
| 1662 | rq_weight = tg->cfs_rq[tcpu]->load.weight; | ||
| 1663 | |||
| 1664 | /* | ||
| 1665 | * If there are currently no tasks on the cpu pretend there is one of | ||
| 1666 | * average load so that when a new task gets to run here it will not | ||
| 1667 | * get delayed by group starvation. | ||
| 1668 | */ | ||
| 1669 | if (!rq_weight) { | ||
| 1670 | boost = 1; | ||
| 1671 | rq_weight = NICE_0_LOAD; | ||
| 1672 | } | ||
| 1673 | |||
| 1674 | /* | ||
| 1675 | * \Sum shares * rq_weight | ||
| 1676 | * shares = ----------------------- | ||
| 1677 | * \Sum rq_weight | ||
| 1678 | * | ||
| 1679 | */ | ||
| 1680 | shares = aggregate(tg, sd)->shares * rq_weight; | ||
| 1681 | shares /= aggregate(tg, sd)->rq_weight + 1; | ||
| 1682 | |||
| 1683 | /* | ||
| 1684 | * record the actual number of shares, not the boosted amount. | ||
| 1685 | */ | ||
| 1686 | tg->cfs_rq[tcpu]->shares = boost ? 0 : shares; | ||
| 1687 | |||
| 1688 | if (shares < MIN_SHARES) | ||
| 1689 | shares = MIN_SHARES; | ||
| 1690 | else if (shares > MAX_SHARES) | ||
| 1691 | shares = MAX_SHARES; | ||
| 1692 | |||
| 1693 | __set_se_shares(tg->se[tcpu], shares); | ||
| 1694 | } | ||
| 1695 | |||
| 1696 | /* | ||
| 1697 | * Re-adjust the weights on the cpu the task came from and on the cpu the | ||
| 1698 | * task went to. | ||
| 1699 | */ | ||
| 1700 | static void | ||
| 1701 | __move_group_shares(struct task_group *tg, struct sched_domain *sd, | ||
| 1702 | int scpu, int dcpu) | ||
| 1703 | { | ||
| 1704 | unsigned long shares; | ||
| 1705 | |||
| 1706 | shares = tg->cfs_rq[scpu]->shares + tg->cfs_rq[dcpu]->shares; | ||
| 1707 | |||
| 1708 | __update_group_shares_cpu(tg, sd, scpu); | ||
| 1709 | __update_group_shares_cpu(tg, sd, dcpu); | ||
| 1710 | |||
| 1711 | /* | ||
| 1712 | * ensure we never loose shares due to rounding errors in the | ||
| 1713 | * above redistribution. | ||
| 1714 | */ | ||
| 1715 | shares -= tg->cfs_rq[scpu]->shares + tg->cfs_rq[dcpu]->shares; | ||
| 1716 | if (shares) | ||
| 1717 | tg->cfs_rq[dcpu]->shares += shares; | ||
| 1718 | } | ||
| 1719 | |||
| 1720 | /* | ||
| 1721 | * Because changing a group's shares changes the weight of the super-group | ||
| 1722 | * we need to walk up the tree and change all shares until we hit the root. | ||
| 1723 | */ | ||
| 1724 | static void | ||
| 1725 | move_group_shares(struct task_group *tg, struct sched_domain *sd, | ||
| 1726 | int scpu, int dcpu) | ||
| 1727 | { | ||
| 1728 | while (tg) { | ||
| 1729 | __move_group_shares(tg, sd, scpu, dcpu); | ||
| 1730 | tg = tg->parent; | ||
| 1731 | } | ||
| 1732 | } | ||
| 1733 | |||
| 1734 | static | ||
| 1735 | void aggregate_group_set_shares(struct task_group *tg, struct sched_domain *sd) | ||
| 1736 | { | ||
| 1737 | unsigned long shares = aggregate(tg, sd)->shares; | ||
| 1738 | int i; | ||
| 1739 | |||
| 1740 | for_each_cpu_mask(i, sd->span) { | ||
| 1741 | struct rq *rq = cpu_rq(i); | ||
| 1742 | unsigned long flags; | ||
| 1743 | |||
| 1744 | spin_lock_irqsave(&rq->lock, flags); | ||
| 1745 | __update_group_shares_cpu(tg, sd, i); | ||
| 1746 | spin_unlock_irqrestore(&rq->lock, flags); | ||
| 1747 | } | ||
| 1748 | |||
| 1749 | aggregate_group_shares(tg, sd); | ||
| 1750 | |||
| 1751 | /* | ||
| 1752 | * ensure we never loose shares due to rounding errors in the | ||
| 1753 | * above redistribution. | ||
| 1754 | */ | ||
| 1755 | shares -= aggregate(tg, sd)->shares; | ||
| 1756 | if (shares) { | ||
| 1757 | tg->cfs_rq[sd->first_cpu]->shares += shares; | ||
| 1758 | aggregate(tg, sd)->shares += shares; | ||
| 1759 | } | ||
| 1760 | } | ||
| 1761 | |||
| 1762 | /* | ||
| 1763 | * Calculate the accumulative weight and recursive load of each task group | ||
| 1764 | * while walking down the tree. | ||
| 1765 | */ | ||
| 1766 | static | ||
| 1767 | void aggregate_get_down(struct task_group *tg, struct sched_domain *sd) | ||
| 1768 | { | ||
| 1769 | aggregate_group_weight(tg, sd); | ||
| 1770 | aggregate_group_shares(tg, sd); | ||
| 1771 | aggregate_group_load(tg, sd); | ||
| 1772 | } | ||
| 1773 | |||
| 1774 | /* | ||
| 1775 | * Rebalance the cpu shares while walking back up the tree. | ||
| 1776 | */ | ||
| 1777 | static | ||
| 1778 | void aggregate_get_up(struct task_group *tg, struct sched_domain *sd) | ||
| 1779 | { | ||
| 1780 | aggregate_group_set_shares(tg, sd); | ||
| 1781 | } | ||
| 1782 | |||
| 1783 | static DEFINE_PER_CPU(spinlock_t, aggregate_lock); | ||
| 1784 | |||
| 1785 | static void __init init_aggregate(void) | ||
| 1786 | { | ||
| 1787 | int i; | ||
| 1788 | |||
| 1789 | for_each_possible_cpu(i) | ||
| 1790 | spin_lock_init(&per_cpu(aggregate_lock, i)); | ||
| 1791 | } | ||
| 1792 | |||
| 1793 | static int get_aggregate(struct sched_domain *sd) | ||
| 1794 | { | ||
| 1795 | if (!spin_trylock(&per_cpu(aggregate_lock, sd->first_cpu))) | ||
| 1796 | return 0; | ||
| 1797 | |||
| 1798 | aggregate_walk_tree(aggregate_get_down, aggregate_get_up, sd); | ||
| 1799 | return 1; | ||
| 1800 | } | ||
| 1801 | |||
| 1802 | static void put_aggregate(struct sched_domain *sd) | ||
| 1803 | { | ||
| 1804 | spin_unlock(&per_cpu(aggregate_lock, sd->first_cpu)); | ||
| 1805 | } | ||
| 1806 | |||
| 1807 | static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares) | ||
| 1808 | { | ||
| 1809 | cfs_rq->shares = shares; | ||
| 1810 | } | ||
| 1811 | |||
| 1812 | #else | ||
| 1813 | |||
| 1814 | static inline void init_aggregate(void) | ||
| 1815 | { | ||
| 1816 | } | ||
| 1817 | |||
| 1818 | static inline int get_aggregate(struct sched_domain *sd) | ||
| 1819 | { | ||
| 1820 | return 0; | ||
| 1821 | } | ||
| 1822 | |||
| 1823 | static inline void put_aggregate(struct sched_domain *sd) | ||
| 1824 | { | ||
| 1825 | } | ||
| 1826 | #endif | ||
| 1827 | |||
| 1828 | #else /* CONFIG_SMP */ | 1474 | #else /* CONFIG_SMP */ |
| 1829 | 1475 | ||
| 1830 | #ifdef CONFIG_FAIR_GROUP_SCHED | 1476 | #ifdef CONFIG_FAIR_GROUP_SCHED |
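The block removed above is the sched_domain-wide group aggregation used by fair-group load balancing: the task-group tree walk (aggregate_walk_tree()), the per-domain rq_weight/shares/load computations, and the per-cpu aggregate_lock that load_balance() had to trylock via get_aggregate()/put_aggregate(). Its core per-cpu rule, visible in __update_group_shares_cpu(), distributed tg->shares in proportion to each cpu's runqueue weight. A numeric sketch of that rule only, with invented weights and without the MIN_SHARES/MAX_SHARES clamping or the idle-cpu boost:

        /*
         * A group with tg->shares = 1024 spread over two cpus, cpu0 holding
         * runqueue weight 2048 and cpu1 holding 1024, splits its shares 2:1.
         */
        unsigned long tg_shares    = 1024;
        unsigned long rq_weight[2] = { 2048, 1024 };
        unsigned long sum          = rq_weight[0] + rq_weight[1];     /* 3072 */

        unsigned long shares0 = tg_shares * rq_weight[0] / (sum + 1); /*  682 */
        unsigned long shares1 = tg_shares * rq_weight[1] / (sum + 1); /*  341 */

The later hunks follow from this removal: load_balance() loses its unlock_aggregate bookkeeping, sched_init() no longer calls init_aggregate(), and sd->first_cpu, which the aggregate code used to index its per-domain data, is dropped from __build_sched_domains().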
| @@ -1845,14 +1491,26 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares) | |||
| 1845 | 1491 | ||
| 1846 | #define sched_class_highest (&rt_sched_class) | 1492 | #define sched_class_highest (&rt_sched_class) |
| 1847 | 1493 | ||
| 1848 | static void inc_nr_running(struct rq *rq) | 1494 | static inline void inc_load(struct rq *rq, const struct task_struct *p) |
| 1495 | { | ||
| 1496 | update_load_add(&rq->load, p->se.load.weight); | ||
| 1497 | } | ||
| 1498 | |||
| 1499 | static inline void dec_load(struct rq *rq, const struct task_struct *p) | ||
| 1500 | { | ||
| 1501 | update_load_sub(&rq->load, p->se.load.weight); | ||
| 1502 | } | ||
| 1503 | |||
| 1504 | static void inc_nr_running(struct task_struct *p, struct rq *rq) | ||
| 1849 | { | 1505 | { |
| 1850 | rq->nr_running++; | 1506 | rq->nr_running++; |
| 1507 | inc_load(rq, p); | ||
| 1851 | } | 1508 | } |
| 1852 | 1509 | ||
| 1853 | static void dec_nr_running(struct rq *rq) | 1510 | static void dec_nr_running(struct task_struct *p, struct rq *rq) |
| 1854 | { | 1511 | { |
| 1855 | rq->nr_running--; | 1512 | rq->nr_running--; |
| 1513 | dec_load(rq, p); | ||
| 1856 | } | 1514 | } |
| 1857 | 1515 | ||
| 1858 | static void set_load_weight(struct task_struct *p) | 1516 | static void set_load_weight(struct task_struct *p) |
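inc_nr_running() and dec_nr_running() now take the task as well as the runqueue so the task's load weight is folded into rq->load at the same point the running count changes; inc_load()/dec_load() are thin wrappers around update_load_add()/update_load_sub() with p->se.load.weight. A toy model of the invariant this pairing maintains (structure and field names invented for the sketch):

        /* rq->load should always equal the summed weight of queued tasks. */
        struct toy_rq { unsigned long nr_running; unsigned long load; };

        static void toy_inc_nr_running(struct toy_rq *rq, unsigned long weight)
        {
                rq->nr_running++;
                rq->load += weight;     /* inc_load() */
        }

        static void toy_dec_nr_running(struct toy_rq *rq, unsigned long weight)
        {
                rq->nr_running--;
                rq->load -= weight;     /* dec_load() */
        }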
| @@ -1944,7 +1602,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup) | |||
| 1944 | rq->nr_uninterruptible--; | 1602 | rq->nr_uninterruptible--; |
| 1945 | 1603 | ||
| 1946 | enqueue_task(rq, p, wakeup); | 1604 | enqueue_task(rq, p, wakeup); |
| 1947 | inc_nr_running(rq); | 1605 | inc_nr_running(p, rq); |
| 1948 | } | 1606 | } |
| 1949 | 1607 | ||
| 1950 | /* | 1608 | /* |
| @@ -1956,7 +1614,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep) | |||
| 1956 | rq->nr_uninterruptible++; | 1614 | rq->nr_uninterruptible++; |
| 1957 | 1615 | ||
| 1958 | dequeue_task(rq, p, sleep); | 1616 | dequeue_task(rq, p, sleep); |
| 1959 | dec_nr_running(rq); | 1617 | dec_nr_running(p, rq); |
| 1960 | } | 1618 | } |
| 1961 | 1619 | ||
| 1962 | /** | 1620 | /** |
| @@ -2609,7 +2267,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags) | |||
| 2609 | * management (if any): | 2267 | * management (if any): |
| 2610 | */ | 2268 | */ |
| 2611 | p->sched_class->task_new(rq, p); | 2269 | p->sched_class->task_new(rq, p); |
| 2612 | inc_nr_running(rq); | 2270 | inc_nr_running(p, rq); |
| 2613 | } | 2271 | } |
| 2614 | check_preempt_curr(rq, p); | 2272 | check_preempt_curr(rq, p); |
| 2615 | #ifdef CONFIG_SMP | 2273 | #ifdef CONFIG_SMP |
| @@ -3600,12 +3258,9 @@ static int load_balance(int this_cpu, struct rq *this_rq, | |||
| 3600 | unsigned long imbalance; | 3258 | unsigned long imbalance; |
| 3601 | struct rq *busiest; | 3259 | struct rq *busiest; |
| 3602 | unsigned long flags; | 3260 | unsigned long flags; |
| 3603 | int unlock_aggregate; | ||
| 3604 | 3261 | ||
| 3605 | cpus_setall(*cpus); | 3262 | cpus_setall(*cpus); |
| 3606 | 3263 | ||
| 3607 | unlock_aggregate = get_aggregate(sd); | ||
| 3608 | |||
| 3609 | /* | 3264 | /* |
| 3610 | * When power savings policy is enabled for the parent domain, idle | 3265 | * When power savings policy is enabled for the parent domain, idle |
| 3611 | * sibling can pick up load irrespective of busy siblings. In this case, | 3266 | * sibling can pick up load irrespective of busy siblings. In this case, |
| @@ -3721,9 +3376,8 @@ redo: | |||
| 3721 | 3376 | ||
| 3722 | if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER && | 3377 | if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER && |
| 3723 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) | 3378 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) |
| 3724 | ld_moved = -1; | 3379 | return -1; |
| 3725 | 3380 | return ld_moved; | |
| 3726 | goto out; | ||
| 3727 | 3381 | ||
| 3728 | out_balanced: | 3382 | out_balanced: |
| 3729 | schedstat_inc(sd, lb_balanced[idle]); | 3383 | schedstat_inc(sd, lb_balanced[idle]); |
| @@ -3738,13 +3392,8 @@ out_one_pinned: | |||
| 3738 | 3392 | ||
| 3739 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && | 3393 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && |
| 3740 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) | 3394 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) |
| 3741 | ld_moved = -1; | 3395 | return -1; |
| 3742 | else | 3396 | return 0; |
| 3743 | ld_moved = 0; | ||
| 3744 | out: | ||
| 3745 | if (unlock_aggregate) | ||
| 3746 | put_aggregate(sd); | ||
| 3747 | return ld_moved; | ||
| 3748 | } | 3397 | } |
| 3749 | 3398 | ||
| 3750 | /* | 3399 | /* |
| @@ -4430,7 +4079,7 @@ static inline void schedule_debug(struct task_struct *prev) | |||
| 4430 | * schedule() atomically, we ignore that path for now. | 4079 | * schedule() atomically, we ignore that path for now. |
| 4431 | * Otherwise, whine if we are scheduling when we should not be. | 4080 | * Otherwise, whine if we are scheduling when we should not be. |
| 4432 | */ | 4081 | */ |
| 4433 | if (unlikely(in_atomic_preempt_off()) && unlikely(!prev->exit_state)) | 4082 | if (unlikely(in_atomic_preempt_off() && !prev->exit_state)) |
| 4434 | __schedule_bug(prev); | 4083 | __schedule_bug(prev); |
| 4435 | 4084 | ||
| 4436 | profile_hit(SCHED_PROFILING, __builtin_return_address(0)); | 4085 | profile_hit(SCHED_PROFILING, __builtin_return_address(0)); |
| @@ -4931,8 +4580,10 @@ void set_user_nice(struct task_struct *p, long nice) | |||
| 4931 | goto out_unlock; | 4580 | goto out_unlock; |
| 4932 | } | 4581 | } |
| 4933 | on_rq = p->se.on_rq; | 4582 | on_rq = p->se.on_rq; |
| 4934 | if (on_rq) | 4583 | if (on_rq) { |
| 4935 | dequeue_task(rq, p, 0); | 4584 | dequeue_task(rq, p, 0); |
| 4585 | dec_load(rq, p); | ||
| 4586 | } | ||
| 4936 | 4587 | ||
| 4937 | p->static_prio = NICE_TO_PRIO(nice); | 4588 | p->static_prio = NICE_TO_PRIO(nice); |
| 4938 | set_load_weight(p); | 4589 | set_load_weight(p); |
| @@ -4942,6 +4593,7 @@ void set_user_nice(struct task_struct *p, long nice) | |||
| 4942 | 4593 | ||
| 4943 | if (on_rq) { | 4594 | if (on_rq) { |
| 4944 | enqueue_task(rq, p, 0); | 4595 | enqueue_task(rq, p, 0); |
| 4596 | inc_load(rq, p); | ||
| 4945 | /* | 4597 | /* |
| 4946 | * If the task increased its priority or is running and | 4598 | * If the task increased its priority or is running and |
| 4947 | * lowered its priority, then reschedule its CPU: | 4599 | * lowered its priority, then reschedule its CPU: |
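set_user_nice() keeps rq->load consistent across the reweighting: the old weight is removed with dec_load() right after the dequeue, set_load_weight() recomputes p->se.load.weight from the new nice value, and inc_load() adds the new weight back once the task is enqueued again. A worked example, assuming the customary prio_to_weight[] values of 1024 for nice 0 and 335 for nice +5:

        unsigned long rq_load    = 3072;   /* e.g. three queued nice-0 tasks */
        unsigned long old_weight = 1024;   /* nice 0                         */
        unsigned long new_weight = 335;    /* nice +5                        */

        rq_load -= old_weight;             /* dec_load() after dequeue_task() */
        rq_load += new_weight;             /* inc_load() after enqueue_task() */
        /* rq_load is now 2383; the runqueue got lighter by 689.              */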
| @@ -7316,7 +6968,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
| 7316 | SD_INIT(sd, ALLNODES); | 6968 | SD_INIT(sd, ALLNODES); |
| 7317 | set_domain_attribute(sd, attr); | 6969 | set_domain_attribute(sd, attr); |
| 7318 | sd->span = *cpu_map; | 6970 | sd->span = *cpu_map; |
| 7319 | sd->first_cpu = first_cpu(sd->span); | ||
| 7320 | cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask); | 6971 | cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask); |
| 7321 | p = sd; | 6972 | p = sd; |
| 7322 | sd_allnodes = 1; | 6973 | sd_allnodes = 1; |
| @@ -7327,7 +6978,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
| 7327 | SD_INIT(sd, NODE); | 6978 | SD_INIT(sd, NODE); |
| 7328 | set_domain_attribute(sd, attr); | 6979 | set_domain_attribute(sd, attr); |
| 7329 | sched_domain_node_span(cpu_to_node(i), &sd->span); | 6980 | sched_domain_node_span(cpu_to_node(i), &sd->span); |
| 7330 | sd->first_cpu = first_cpu(sd->span); | ||
| 7331 | sd->parent = p; | 6981 | sd->parent = p; |
| 7332 | if (p) | 6982 | if (p) |
| 7333 | p->child = sd; | 6983 | p->child = sd; |
| @@ -7339,7 +6989,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
| 7339 | SD_INIT(sd, CPU); | 6989 | SD_INIT(sd, CPU); |
| 7340 | set_domain_attribute(sd, attr); | 6990 | set_domain_attribute(sd, attr); |
| 7341 | sd->span = *nodemask; | 6991 | sd->span = *nodemask; |
| 7342 | sd->first_cpu = first_cpu(sd->span); | ||
| 7343 | sd->parent = p; | 6992 | sd->parent = p; |
| 7344 | if (p) | 6993 | if (p) |
| 7345 | p->child = sd; | 6994 | p->child = sd; |
| @@ -7351,7 +7000,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
| 7351 | SD_INIT(sd, MC); | 7000 | SD_INIT(sd, MC); |
| 7352 | set_domain_attribute(sd, attr); | 7001 | set_domain_attribute(sd, attr); |
| 7353 | sd->span = cpu_coregroup_map(i); | 7002 | sd->span = cpu_coregroup_map(i); |
| 7354 | sd->first_cpu = first_cpu(sd->span); | ||
| 7355 | cpus_and(sd->span, sd->span, *cpu_map); | 7003 | cpus_and(sd->span, sd->span, *cpu_map); |
| 7356 | sd->parent = p; | 7004 | sd->parent = p; |
| 7357 | p->child = sd; | 7005 | p->child = sd; |
| @@ -7364,7 +7012,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
| 7364 | SD_INIT(sd, SIBLING); | 7012 | SD_INIT(sd, SIBLING); |
| 7365 | set_domain_attribute(sd, attr); | 7013 | set_domain_attribute(sd, attr); |
| 7366 | sd->span = per_cpu(cpu_sibling_map, i); | 7014 | sd->span = per_cpu(cpu_sibling_map, i); |
| 7367 | sd->first_cpu = first_cpu(sd->span); | ||
| 7368 | cpus_and(sd->span, sd->span, *cpu_map); | 7015 | cpus_and(sd->span, sd->span, *cpu_map); |
| 7369 | sd->parent = p; | 7016 | sd->parent = p; |
| 7370 | p->child = sd; | 7017 | p->child = sd; |
| @@ -7568,8 +7215,8 @@ static int build_sched_domains(const cpumask_t *cpu_map) | |||
| 7568 | 7215 | ||
| 7569 | static cpumask_t *doms_cur; /* current sched domains */ | 7216 | static cpumask_t *doms_cur; /* current sched domains */ |
| 7570 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ | 7217 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ |
| 7571 | static struct sched_domain_attr *dattr_cur; /* attribues of custom domains | 7218 | static struct sched_domain_attr *dattr_cur; |
| 7572 | in 'doms_cur' */ | 7219 | /* attribues of custom domains in 'doms_cur' */ |
| 7573 | 7220 | ||
| 7574 | /* | 7221 | /* |
| 7575 | * Special case: If a kmalloc of a doms_cur partition (array of | 7222 | * Special case: If a kmalloc of a doms_cur partition (array of |
| @@ -8034,7 +7681,6 @@ void __init sched_init(void) | |||
| 8034 | } | 7681 | } |
| 8035 | 7682 | ||
| 8036 | #ifdef CONFIG_SMP | 7683 | #ifdef CONFIG_SMP |
| 8037 | init_aggregate(); | ||
| 8038 | init_defrootdomain(); | 7684 | init_defrootdomain(); |
| 8039 | #endif | 7685 | #endif |
| 8040 | 7686 | ||
| @@ -8599,11 +8245,14 @@ void sched_move_task(struct task_struct *tsk) | |||
| 8599 | #endif | 8245 | #endif |
| 8600 | 8246 | ||
| 8601 | #ifdef CONFIG_FAIR_GROUP_SCHED | 8247 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 8602 | static void __set_se_shares(struct sched_entity *se, unsigned long shares) | 8248 | static void set_se_shares(struct sched_entity *se, unsigned long shares) |
| 8603 | { | 8249 | { |
| 8604 | struct cfs_rq *cfs_rq = se->cfs_rq; | 8250 | struct cfs_rq *cfs_rq = se->cfs_rq; |
| 8251 | struct rq *rq = cfs_rq->rq; | ||
| 8605 | int on_rq; | 8252 | int on_rq; |
| 8606 | 8253 | ||
| 8254 | spin_lock_irq(&rq->lock); | ||
| 8255 | |||
| 8607 | on_rq = se->on_rq; | 8256 | on_rq = se->on_rq; |
| 8608 | if (on_rq) | 8257 | if (on_rq) |
| 8609 | dequeue_entity(cfs_rq, se, 0); | 8258 | dequeue_entity(cfs_rq, se, 0); |
| @@ -8613,17 +8262,8 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares) | |||
| 8613 | 8262 | ||
| 8614 | if (on_rq) | 8263 | if (on_rq) |
| 8615 | enqueue_entity(cfs_rq, se, 0); | 8264 | enqueue_entity(cfs_rq, se, 0); |
| 8616 | } | ||
| 8617 | 8265 | ||
| 8618 | static void set_se_shares(struct sched_entity *se, unsigned long shares) | 8266 | spin_unlock_irq(&rq->lock); |
| 8619 | { | ||
| 8620 | struct cfs_rq *cfs_rq = se->cfs_rq; | ||
| 8621 | struct rq *rq = cfs_rq->rq; | ||
| 8622 | unsigned long flags; | ||
| 8623 | |||
| 8624 | spin_lock_irqsave(&rq->lock, flags); | ||
| 8625 | __set_se_shares(se, shares); | ||
| 8626 | spin_unlock_irqrestore(&rq->lock, flags); | ||
| 8627 | } | 8267 | } |
| 8628 | 8268 | ||
| 8629 | static DEFINE_MUTEX(shares_mutex); | 8269 | static DEFINE_MUTEX(shares_mutex); |
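__set_se_shares() and its locking wrapper are collapsed into a single set_se_shares() that takes rq->lock itself. The plain spin_lock_irq()/spin_unlock_irq() pair is sufficient because the remaining caller, sched_group_set_shares() below, runs in process context under shares_mutex with interrupts enabled, so there is no saved flags state to restore. A stubbed sketch of the merged flow (toy types and no-op lock helpers, purely illustrative):

        struct toy_entity { int on_rq; unsigned long weight; };

        static void toy_lock(void)   { /* stands in for spin_lock_irq(&rq->lock)   */ }
        static void toy_unlock(void) { /* stands in for spin_unlock_irq(&rq->lock) */ }

        static void toy_set_se_shares(struct toy_entity *se, unsigned long shares)
        {
                toy_lock();
                if (se->on_rq) {
                        /* dequeue_entity(cfs_rq, se, 0) would run here */
                }
                se->weight = shares;    /* apply the new weight to se->load */
                if (se->on_rq) {
                        /* enqueue_entity(cfs_rq, se, 0) would run here */
                }
                toy_unlock();
        }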
| @@ -8662,13 +8302,8 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares) | |||
| 8662 | * w/o tripping rebalance_share or load_balance_fair. | 8302 | * w/o tripping rebalance_share or load_balance_fair. |
| 8663 | */ | 8303 | */ |
| 8664 | tg->shares = shares; | 8304 | tg->shares = shares; |
| 8665 | for_each_possible_cpu(i) { | 8305 | for_each_possible_cpu(i) |
| 8666 | /* | ||
| 8667 | * force a rebalance | ||
| 8668 | */ | ||
| 8669 | cfs_rq_set_shares(tg->cfs_rq[i], 0); | ||
| 8670 | set_se_shares(tg->se[i], shares); | 8306 | set_se_shares(tg->se[i], shares); |
| 8671 | } | ||
| 8672 | 8307 | ||
| 8673 | /* | 8308 | /* |
| 8674 | * Enable load balance activity on this group, by inserting it back on | 8309 | * Enable load balance activity on this group, by inserting it back on |
