Diffstat (limited to 'kernel/sched.c')
| -rw-r--r-- | kernel/sched.c | 1171 |
1 file changed, 612 insertions, 559 deletions
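Most of the churn below comes from converting fixed-size on-stack cpumask_t variables to cpumask_var_t (with explicit alloc_cpumask_var()/free_cpumask_var() calls) and from switching to the struct cpumask accessors such as cpumask_test_cpu(), cpumask_weight(), sched_group_cpus() and sched_domain_span(). As a quick orientation, here is a minimal sketch of the allocate/use/free pattern the patched code follows; the function name and the mask walked are illustrative only and are not taken from the patch.

#include <linux/cpumask.h>
#include <linux/gfp.h>

/* Illustrative only: walk the online CPUs through a temporary mask. */
static int example_walk_online_cpus(void)
{
	cpumask_var_t tmp;
	int cpu;

	/* With CONFIG_CPUMASK_OFFSTACK this is a real allocation and can fail. */
	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(tmp, cpu_online_mask);
	for_each_cpu(cpu, tmp)
		; /* per-cpu work would go here */

	free_cpumask_var(tmp);
	return 0;
}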
diff --git a/kernel/sched.c b/kernel/sched.c
index 50a21f964679..b309027bf9e8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
| @@ -118,6 +118,12 @@ | |||
| 118 | */ | 118 | */ |
| 119 | #define RUNTIME_INF ((u64)~0ULL) | 119 | #define RUNTIME_INF ((u64)~0ULL) |
| 120 | 120 | ||
| 121 | DEFINE_TRACE(sched_wait_task); | ||
| 122 | DEFINE_TRACE(sched_wakeup); | ||
| 123 | DEFINE_TRACE(sched_wakeup_new); | ||
| 124 | DEFINE_TRACE(sched_switch); | ||
| 125 | DEFINE_TRACE(sched_migrate_task); | ||
| 126 | |||
| 121 | #ifdef CONFIG_SMP | 127 | #ifdef CONFIG_SMP |
| 122 | /* | 128 | /* |
| 123 | * Divide a load by a sched group cpu_power : (load / sg->__cpu_power) | 129 | * Divide a load by a sched group cpu_power : (load / sg->__cpu_power) |
| @@ -261,6 +267,10 @@ struct task_group { | |||
| 261 | struct cgroup_subsys_state css; | 267 | struct cgroup_subsys_state css; |
| 262 | #endif | 268 | #endif |
| 263 | 269 | ||
| 270 | #ifdef CONFIG_USER_SCHED | ||
| 271 | uid_t uid; | ||
| 272 | #endif | ||
| 273 | |||
| 264 | #ifdef CONFIG_FAIR_GROUP_SCHED | 274 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 265 | /* schedulable entities of this group on each cpu */ | 275 | /* schedulable entities of this group on each cpu */ |
| 266 | struct sched_entity **se; | 276 | struct sched_entity **se; |
| @@ -286,6 +296,12 @@ struct task_group { | |||
| 286 | 296 | ||
| 287 | #ifdef CONFIG_USER_SCHED | 297 | #ifdef CONFIG_USER_SCHED |
| 288 | 298 | ||
| 299 | /* Helper function to pass uid information to create_sched_user() */ | ||
| 300 | void set_tg_uid(struct user_struct *user) | ||
| 301 | { | ||
| 302 | user->tg->uid = user->uid; | ||
| 303 | } | ||
| 304 | |||
| 289 | /* | 305 | /* |
| 290 | * Root task group. | 306 | * Root task group. |
| 291 | * Every UID task group (including init_task_group aka UID-0) will | 307 | * Every UID task group (including init_task_group aka UID-0) will |
| @@ -481,14 +497,14 @@ struct rt_rq { | |||
| 481 | */ | 497 | */ |
| 482 | struct root_domain { | 498 | struct root_domain { |
| 483 | atomic_t refcount; | 499 | atomic_t refcount; |
| 484 | cpumask_t span; | 500 | cpumask_var_t span; |
| 485 | cpumask_t online; | 501 | cpumask_var_t online; |
| 486 | 502 | ||
| 487 | /* | 503 | /* |
| 488 | * The "RT overload" flag: it gets set if a CPU has more than | 504 | * The "RT overload" flag: it gets set if a CPU has more than |
| 489 | * one runnable RT task. | 505 | * one runnable RT task. |
| 490 | */ | 506 | */ |
| 491 | cpumask_t rto_mask; | 507 | cpumask_var_t rto_mask; |
| 492 | atomic_t rto_count; | 508 | atomic_t rto_count; |
| 493 | #ifdef CONFIG_SMP | 509 | #ifdef CONFIG_SMP |
| 494 | struct cpupri cpupri; | 510 | struct cpupri cpupri; |
| @@ -703,45 +719,18 @@ static __read_mostly char *sched_feat_names[] = { | |||
| 703 | 719 | ||
| 704 | #undef SCHED_FEAT | 720 | #undef SCHED_FEAT |
| 705 | 721 | ||
| 706 | static int sched_feat_open(struct inode *inode, struct file *filp) | 722 | static int sched_feat_show(struct seq_file *m, void *v) |
| 707 | { | ||
| 708 | filp->private_data = inode->i_private; | ||
| 709 | return 0; | ||
| 710 | } | ||
| 711 | |||
| 712 | static ssize_t | ||
| 713 | sched_feat_read(struct file *filp, char __user *ubuf, | ||
| 714 | size_t cnt, loff_t *ppos) | ||
| 715 | { | 723 | { |
| 716 | char *buf; | ||
| 717 | int r = 0; | ||
| 718 | int len = 0; | ||
| 719 | int i; | 724 | int i; |
| 720 | 725 | ||
| 721 | for (i = 0; sched_feat_names[i]; i++) { | 726 | for (i = 0; sched_feat_names[i]; i++) { |
| 722 | len += strlen(sched_feat_names[i]); | 727 | if (!(sysctl_sched_features & (1UL << i))) |
| 723 | len += 4; | 728 | seq_puts(m, "NO_"); |
| 729 | seq_printf(m, "%s ", sched_feat_names[i]); | ||
| 724 | } | 730 | } |
| 731 | seq_puts(m, "\n"); | ||
| 725 | 732 | ||
| 726 | buf = kmalloc(len + 2, GFP_KERNEL); | 733 | return 0; |
| 727 | if (!buf) | ||
| 728 | return -ENOMEM; | ||
| 729 | |||
| 730 | for (i = 0; sched_feat_names[i]; i++) { | ||
| 731 | if (sysctl_sched_features & (1UL << i)) | ||
| 732 | r += sprintf(buf + r, "%s ", sched_feat_names[i]); | ||
| 733 | else | ||
| 734 | r += sprintf(buf + r, "NO_%s ", sched_feat_names[i]); | ||
| 735 | } | ||
| 736 | |||
| 737 | r += sprintf(buf + r, "\n"); | ||
| 738 | WARN_ON(r >= len + 2); | ||
| 739 | |||
| 740 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
| 741 | |||
| 742 | kfree(buf); | ||
| 743 | |||
| 744 | return r; | ||
| 745 | } | 734 | } |
| 746 | 735 | ||
| 747 | static ssize_t | 736 | static ssize_t |
| @@ -786,10 +775,17 @@ sched_feat_write(struct file *filp, const char __user *ubuf, | |||
| 786 | return cnt; | 775 | return cnt; |
| 787 | } | 776 | } |
| 788 | 777 | ||
| 778 | static int sched_feat_open(struct inode *inode, struct file *filp) | ||
| 779 | { | ||
| 780 | return single_open(filp, sched_feat_show, NULL); | ||
| 781 | } | ||
| 782 | |||
| 789 | static struct file_operations sched_feat_fops = { | 783 | static struct file_operations sched_feat_fops = { |
| 790 | .open = sched_feat_open, | 784 | .open = sched_feat_open, |
| 791 | .read = sched_feat_read, | 785 | .write = sched_feat_write, |
| 792 | .write = sched_feat_write, | 786 | .read = seq_read, |
| 787 | .llseek = seq_lseek, | ||
| 788 | .release = single_release, | ||
| 793 | }; | 789 | }; |
| 794 | 790 | ||
| 795 | static __init int sched_init_debug(void) | 791 | static __init int sched_init_debug(void) |
| @@ -1453,9 +1449,12 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); | |||
| 1453 | static unsigned long cpu_avg_load_per_task(int cpu) | 1449 | static unsigned long cpu_avg_load_per_task(int cpu) |
| 1454 | { | 1450 | { |
| 1455 | struct rq *rq = cpu_rq(cpu); | 1451 | struct rq *rq = cpu_rq(cpu); |
| 1452 | unsigned long nr_running = ACCESS_ONCE(rq->nr_running); | ||
| 1456 | 1453 | ||
| 1457 | if (rq->nr_running) | 1454 | if (nr_running) |
| 1458 | rq->avg_load_per_task = rq->load.weight / rq->nr_running; | 1455 | rq->avg_load_per_task = rq->load.weight / nr_running; |
| 1456 | else | ||
| 1457 | rq->avg_load_per_task = 0; | ||
| 1459 | 1458 | ||
| 1460 | return rq->avg_load_per_task; | 1459 | return rq->avg_load_per_task; |
| 1461 | } | 1460 | } |
| @@ -1471,27 +1470,13 @@ static void | |||
| 1471 | update_group_shares_cpu(struct task_group *tg, int cpu, | 1470 | update_group_shares_cpu(struct task_group *tg, int cpu, |
| 1472 | unsigned long sd_shares, unsigned long sd_rq_weight) | 1471 | unsigned long sd_shares, unsigned long sd_rq_weight) |
| 1473 | { | 1472 | { |
| 1474 | int boost = 0; | ||
| 1475 | unsigned long shares; | 1473 | unsigned long shares; |
| 1476 | unsigned long rq_weight; | 1474 | unsigned long rq_weight; |
| 1477 | 1475 | ||
| 1478 | if (!tg->se[cpu]) | 1476 | if (!tg->se[cpu]) |
| 1479 | return; | 1477 | return; |
| 1480 | 1478 | ||
| 1481 | rq_weight = tg->cfs_rq[cpu]->load.weight; | 1479 | rq_weight = tg->cfs_rq[cpu]->rq_weight; |
| 1482 | |||
| 1483 | /* | ||
| 1484 | * If there are currently no tasks on the cpu pretend there is one of | ||
| 1485 | * average load so that when a new task gets to run here it will not | ||
| 1486 | * get delayed by group starvation. | ||
| 1487 | */ | ||
| 1488 | if (!rq_weight) { | ||
| 1489 | boost = 1; | ||
| 1490 | rq_weight = NICE_0_LOAD; | ||
| 1491 | } | ||
| 1492 | |||
| 1493 | if (unlikely(rq_weight > sd_rq_weight)) | ||
| 1494 | rq_weight = sd_rq_weight; | ||
| 1495 | 1480 | ||
| 1496 | /* | 1481 | /* |
| 1497 | * \Sum shares * rq_weight | 1482 | * \Sum shares * rq_weight |
| @@ -1499,7 +1484,7 @@ update_group_shares_cpu(struct task_group *tg, int cpu, | |||
| 1499 | * \Sum rq_weight | 1484 | * \Sum rq_weight |
| 1500 | * | 1485 | * |
| 1501 | */ | 1486 | */ |
| 1502 | shares = (sd_shares * rq_weight) / (sd_rq_weight + 1); | 1487 | shares = (sd_shares * rq_weight) / sd_rq_weight; |
| 1503 | shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES); | 1488 | shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES); |
| 1504 | 1489 | ||
| 1505 | if (abs(shares - tg->se[cpu]->load.weight) > | 1490 | if (abs(shares - tg->se[cpu]->load.weight) > |
| @@ -1508,11 +1493,7 @@ update_group_shares_cpu(struct task_group *tg, int cpu, | |||
| 1508 | unsigned long flags; | 1493 | unsigned long flags; |
| 1509 | 1494 | ||
| 1510 | spin_lock_irqsave(&rq->lock, flags); | 1495 | spin_lock_irqsave(&rq->lock, flags); |
| 1511 | /* | 1496 | tg->cfs_rq[cpu]->shares = shares; |
| 1512 | * record the actual number of shares, not the boosted amount. | ||
| 1513 | */ | ||
| 1514 | tg->cfs_rq[cpu]->shares = boost ? 0 : shares; | ||
| 1515 | tg->cfs_rq[cpu]->rq_weight = rq_weight; | ||
| 1516 | 1497 | ||
| 1517 | __set_se_shares(tg->se[cpu], shares); | 1498 | __set_se_shares(tg->se[cpu], shares); |
| 1518 | spin_unlock_irqrestore(&rq->lock, flags); | 1499 | spin_unlock_irqrestore(&rq->lock, flags); |
| @@ -1526,13 +1507,23 @@ update_group_shares_cpu(struct task_group *tg, int cpu, | |||
| 1526 | */ | 1507 | */ |
| 1527 | static int tg_shares_up(struct task_group *tg, void *data) | 1508 | static int tg_shares_up(struct task_group *tg, void *data) |
| 1528 | { | 1509 | { |
| 1529 | unsigned long rq_weight = 0; | 1510 | unsigned long weight, rq_weight = 0; |
| 1530 | unsigned long shares = 0; | 1511 | unsigned long shares = 0; |
| 1531 | struct sched_domain *sd = data; | 1512 | struct sched_domain *sd = data; |
| 1532 | int i; | 1513 | int i; |
| 1533 | 1514 | ||
| 1534 | for_each_cpu_mask(i, sd->span) { | 1515 | for_each_cpu(i, sched_domain_span(sd)) { |
| 1535 | rq_weight += tg->cfs_rq[i]->load.weight; | 1516 | /* |
| 1517 | * If there are currently no tasks on the cpu pretend there | ||
| 1518 | * is one of average load so that when a new task gets to | ||
| 1519 | * run here it will not get delayed by group starvation. | ||
| 1520 | */ | ||
| 1521 | weight = tg->cfs_rq[i]->load.weight; | ||
| 1522 | if (!weight) | ||
| 1523 | weight = NICE_0_LOAD; | ||
| 1524 | |||
| 1525 | tg->cfs_rq[i]->rq_weight = weight; | ||
| 1526 | rq_weight += weight; | ||
| 1536 | shares += tg->cfs_rq[i]->shares; | 1527 | shares += tg->cfs_rq[i]->shares; |
| 1537 | } | 1528 | } |
| 1538 | 1529 | ||
| @@ -1542,10 +1533,7 @@ static int tg_shares_up(struct task_group *tg, void *data) | |||
| 1542 | if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE)) | 1533 | if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE)) |
| 1543 | shares = tg->shares; | 1534 | shares = tg->shares; |
| 1544 | 1535 | ||
| 1545 | if (!rq_weight) | 1536 | for_each_cpu(i, sched_domain_span(sd)) |
| 1546 | rq_weight = cpus_weight(sd->span) * NICE_0_LOAD; | ||
| 1547 | |||
| 1548 | for_each_cpu_mask(i, sd->span) | ||
| 1549 | update_group_shares_cpu(tg, i, shares, rq_weight); | 1537 | update_group_shares_cpu(tg, i, shares, rq_weight); |
| 1550 | 1538 | ||
| 1551 | return 0; | 1539 | return 0; |
| @@ -1609,6 +1597,39 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd) | |||
| 1609 | 1597 | ||
| 1610 | #endif | 1598 | #endif |
| 1611 | 1599 | ||
| 1600 | /* | ||
| 1601 | * double_lock_balance - lock the busiest runqueue, this_rq is locked already. | ||
| 1602 | */ | ||
| 1603 | static int double_lock_balance(struct rq *this_rq, struct rq *busiest) | ||
| 1604 | __releases(this_rq->lock) | ||
| 1605 | __acquires(busiest->lock) | ||
| 1606 | __acquires(this_rq->lock) | ||
| 1607 | { | ||
| 1608 | int ret = 0; | ||
| 1609 | |||
| 1610 | if (unlikely(!irqs_disabled())) { | ||
| 1611 | /* printk() doesn't work good under rq->lock */ | ||
| 1612 | spin_unlock(&this_rq->lock); | ||
| 1613 | BUG_ON(1); | ||
| 1614 | } | ||
| 1615 | if (unlikely(!spin_trylock(&busiest->lock))) { | ||
| 1616 | if (busiest < this_rq) { | ||
| 1617 | spin_unlock(&this_rq->lock); | ||
| 1618 | spin_lock(&busiest->lock); | ||
| 1619 | spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING); | ||
| 1620 | ret = 1; | ||
| 1621 | } else | ||
| 1622 | spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING); | ||
| 1623 | } | ||
| 1624 | return ret; | ||
| 1625 | } | ||
| 1626 | |||
| 1627 | static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) | ||
| 1628 | __releases(busiest->lock) | ||
| 1629 | { | ||
| 1630 | spin_unlock(&busiest->lock); | ||
| 1631 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); | ||
| 1632 | } | ||
| 1612 | #endif | 1633 | #endif |
| 1613 | 1634 | ||
| 1614 | #ifdef CONFIG_FAIR_GROUP_SCHED | 1635 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| @@ -2076,15 +2097,17 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) | |||
| 2076 | int i; | 2097 | int i; |
| 2077 | 2098 | ||
| 2078 | /* Skip over this group if it has no CPUs allowed */ | 2099 | /* Skip over this group if it has no CPUs allowed */ |
| 2079 | if (!cpus_intersects(group->cpumask, p->cpus_allowed)) | 2100 | if (!cpumask_intersects(sched_group_cpus(group), |
| 2101 | &p->cpus_allowed)) | ||
| 2080 | continue; | 2102 | continue; |
| 2081 | 2103 | ||
| 2082 | local_group = cpu_isset(this_cpu, group->cpumask); | 2104 | local_group = cpumask_test_cpu(this_cpu, |
| 2105 | sched_group_cpus(group)); | ||
| 2083 | 2106 | ||
| 2084 | /* Tally up the load of all CPUs in the group */ | 2107 | /* Tally up the load of all CPUs in the group */ |
| 2085 | avg_load = 0; | 2108 | avg_load = 0; |
| 2086 | 2109 | ||
| 2087 | for_each_cpu_mask_nr(i, group->cpumask) { | 2110 | for_each_cpu(i, sched_group_cpus(group)) { |
| 2088 | /* Bias balancing toward cpus of our domain */ | 2111 | /* Bias balancing toward cpus of our domain */ |
| 2089 | if (local_group) | 2112 | if (local_group) |
| 2090 | load = source_load(i, load_idx); | 2113 | load = source_load(i, load_idx); |
| @@ -2116,17 +2139,14 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) | |||
| 2116 | * find_idlest_cpu - find the idlest cpu among the cpus in group. | 2139 | * find_idlest_cpu - find the idlest cpu among the cpus in group. |
| 2117 | */ | 2140 | */ |
| 2118 | static int | 2141 | static int |
| 2119 | find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu, | 2142 | find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) |
| 2120 | cpumask_t *tmp) | ||
| 2121 | { | 2143 | { |
| 2122 | unsigned long load, min_load = ULONG_MAX; | 2144 | unsigned long load, min_load = ULONG_MAX; |
| 2123 | int idlest = -1; | 2145 | int idlest = -1; |
| 2124 | int i; | 2146 | int i; |
| 2125 | 2147 | ||
| 2126 | /* Traverse only the allowed CPUs */ | 2148 | /* Traverse only the allowed CPUs */ |
| 2127 | cpus_and(*tmp, group->cpumask, p->cpus_allowed); | 2149 | for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) { |
| 2128 | |||
| 2129 | for_each_cpu_mask_nr(i, *tmp) { | ||
| 2130 | load = weighted_cpuload(i); | 2150 | load = weighted_cpuload(i); |
| 2131 | 2151 | ||
| 2132 | if (load < min_load || (load == min_load && i == this_cpu)) { | 2152 | if (load < min_load || (load == min_load && i == this_cpu)) { |
| @@ -2168,7 +2188,6 @@ static int sched_balance_self(int cpu, int flag) | |||
| 2168 | update_shares(sd); | 2188 | update_shares(sd); |
| 2169 | 2189 | ||
| 2170 | while (sd) { | 2190 | while (sd) { |
| 2171 | cpumask_t span, tmpmask; | ||
| 2172 | struct sched_group *group; | 2191 | struct sched_group *group; |
| 2173 | int new_cpu, weight; | 2192 | int new_cpu, weight; |
| 2174 | 2193 | ||
| @@ -2177,14 +2196,13 @@ static int sched_balance_self(int cpu, int flag) | |||
| 2177 | continue; | 2196 | continue; |
| 2178 | } | 2197 | } |
| 2179 | 2198 | ||
| 2180 | span = sd->span; | ||
| 2181 | group = find_idlest_group(sd, t, cpu); | 2199 | group = find_idlest_group(sd, t, cpu); |
| 2182 | if (!group) { | 2200 | if (!group) { |
| 2183 | sd = sd->child; | 2201 | sd = sd->child; |
| 2184 | continue; | 2202 | continue; |
| 2185 | } | 2203 | } |
| 2186 | 2204 | ||
| 2187 | new_cpu = find_idlest_cpu(group, t, cpu, &tmpmask); | 2205 | new_cpu = find_idlest_cpu(group, t, cpu); |
| 2188 | if (new_cpu == -1 || new_cpu == cpu) { | 2206 | if (new_cpu == -1 || new_cpu == cpu) { |
| 2189 | /* Now try balancing at a lower domain level of cpu */ | 2207 | /* Now try balancing at a lower domain level of cpu */ |
| 2190 | sd = sd->child; | 2208 | sd = sd->child; |
| @@ -2193,10 +2211,10 @@ static int sched_balance_self(int cpu, int flag) | |||
| 2193 | 2211 | ||
| 2194 | /* Now try balancing at a lower domain level of new_cpu */ | 2212 | /* Now try balancing at a lower domain level of new_cpu */ |
| 2195 | cpu = new_cpu; | 2213 | cpu = new_cpu; |
| 2214 | weight = cpumask_weight(sched_domain_span(sd)); | ||
| 2196 | sd = NULL; | 2215 | sd = NULL; |
| 2197 | weight = cpus_weight(span); | ||
| 2198 | for_each_domain(cpu, tmp) { | 2216 | for_each_domain(cpu, tmp) { |
| 2199 | if (weight <= cpus_weight(tmp->span)) | 2217 | if (weight <= cpumask_weight(sched_domain_span(tmp))) |
| 2200 | break; | 2218 | break; |
| 2201 | if (tmp->flags & flag) | 2219 | if (tmp->flags & flag) |
| 2202 | sd = tmp; | 2220 | sd = tmp; |
| @@ -2241,7 +2259,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) | |||
| 2241 | cpu = task_cpu(p); | 2259 | cpu = task_cpu(p); |
| 2242 | 2260 | ||
| 2243 | for_each_domain(this_cpu, sd) { | 2261 | for_each_domain(this_cpu, sd) { |
| 2244 | if (cpu_isset(cpu, sd->span)) { | 2262 | if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
| 2245 | update_shares(sd); | 2263 | update_shares(sd); |
| 2246 | break; | 2264 | break; |
| 2247 | } | 2265 | } |
| @@ -2289,7 +2307,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) | |||
| 2289 | else { | 2307 | else { |
| 2290 | struct sched_domain *sd; | 2308 | struct sched_domain *sd; |
| 2291 | for_each_domain(this_cpu, sd) { | 2309 | for_each_domain(this_cpu, sd) { |
| 2292 | if (cpu_isset(cpu, sd->span)) { | 2310 | if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
| 2293 | schedstat_inc(sd, ttwu_wake_remote); | 2311 | schedstat_inc(sd, ttwu_wake_remote); |
| 2294 | break; | 2312 | break; |
| 2295 | } | 2313 | } |
| @@ -2809,40 +2827,6 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2) | |||
| 2809 | } | 2827 | } |
| 2810 | 2828 | ||
| 2811 | /* | 2829 | /* |
| 2812 | * double_lock_balance - lock the busiest runqueue, this_rq is locked already. | ||
| 2813 | */ | ||
| 2814 | static int double_lock_balance(struct rq *this_rq, struct rq *busiest) | ||
| 2815 | __releases(this_rq->lock) | ||
| 2816 | __acquires(busiest->lock) | ||
| 2817 | __acquires(this_rq->lock) | ||
| 2818 | { | ||
| 2819 | int ret = 0; | ||
| 2820 | |||
| 2821 | if (unlikely(!irqs_disabled())) { | ||
| 2822 | /* printk() doesn't work good under rq->lock */ | ||
| 2823 | spin_unlock(&this_rq->lock); | ||
| 2824 | BUG_ON(1); | ||
| 2825 | } | ||
| 2826 | if (unlikely(!spin_trylock(&busiest->lock))) { | ||
| 2827 | if (busiest < this_rq) { | ||
| 2828 | spin_unlock(&this_rq->lock); | ||
| 2829 | spin_lock(&busiest->lock); | ||
| 2830 | spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING); | ||
| 2831 | ret = 1; | ||
| 2832 | } else | ||
| 2833 | spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING); | ||
| 2834 | } | ||
| 2835 | return ret; | ||
| 2836 | } | ||
| 2837 | |||
| 2838 | static void double_unlock_balance(struct rq *this_rq, struct rq *busiest) | ||
| 2839 | __releases(busiest->lock) | ||
| 2840 | { | ||
| 2841 | spin_unlock(&busiest->lock); | ||
| 2842 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); | ||
| 2843 | } | ||
| 2844 | |||
| 2845 | /* | ||
| 2846 | * If dest_cpu is allowed for this process, migrate the task to it. | 2830 | * If dest_cpu is allowed for this process, migrate the task to it. |
| 2847 | * This is accomplished by forcing the cpu_allowed mask to only | 2831 | * This is accomplished by forcing the cpu_allowed mask to only |
| 2848 | * allow dest_cpu, which will force the cpu onto dest_cpu. Then | 2832 | * allow dest_cpu, which will force the cpu onto dest_cpu. Then |
| @@ -2855,7 +2839,7 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu) | |||
| 2855 | struct rq *rq; | 2839 | struct rq *rq; |
| 2856 | 2840 | ||
| 2857 | rq = task_rq_lock(p, &flags); | 2841 | rq = task_rq_lock(p, &flags); |
| 2858 | if (!cpu_isset(dest_cpu, p->cpus_allowed) | 2842 | if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed) |
| 2859 | || unlikely(!cpu_active(dest_cpu))) | 2843 | || unlikely(!cpu_active(dest_cpu))) |
| 2860 | goto out; | 2844 | goto out; |
| 2861 | 2845 | ||
| @@ -2921,7 +2905,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, | |||
| 2921 | * 2) cannot be migrated to this CPU due to cpus_allowed, or | 2905 | * 2) cannot be migrated to this CPU due to cpus_allowed, or |
| 2922 | * 3) are cache-hot on their current CPU. | 2906 | * 3) are cache-hot on their current CPU. |
| 2923 | */ | 2907 | */ |
| 2924 | if (!cpu_isset(this_cpu, p->cpus_allowed)) { | 2908 | if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) { |
| 2925 | schedstat_inc(p, se.nr_failed_migrations_affine); | 2909 | schedstat_inc(p, se.nr_failed_migrations_affine); |
| 2926 | return 0; | 2910 | return 0; |
| 2927 | } | 2911 | } |
| @@ -3096,7 +3080,7 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
| 3096 | static struct sched_group * | 3080 | static struct sched_group * |
| 3097 | find_busiest_group(struct sched_domain *sd, int this_cpu, | 3081 | find_busiest_group(struct sched_domain *sd, int this_cpu, |
| 3098 | unsigned long *imbalance, enum cpu_idle_type idle, | 3082 | unsigned long *imbalance, enum cpu_idle_type idle, |
| 3099 | int *sd_idle, const cpumask_t *cpus, int *balance) | 3083 | int *sd_idle, const struct cpumask *cpus, int *balance) |
| 3100 | { | 3084 | { |
| 3101 | struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; | 3085 | struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; |
| 3102 | unsigned long max_load, avg_load, total_load, this_load, total_pwr; | 3086 | unsigned long max_load, avg_load, total_load, this_load, total_pwr; |
| @@ -3132,10 +3116,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
| 3132 | unsigned long sum_avg_load_per_task; | 3116 | unsigned long sum_avg_load_per_task; |
| 3133 | unsigned long avg_load_per_task; | 3117 | unsigned long avg_load_per_task; |
| 3134 | 3118 | ||
| 3135 | local_group = cpu_isset(this_cpu, group->cpumask); | 3119 | local_group = cpumask_test_cpu(this_cpu, |
| 3120 | sched_group_cpus(group)); | ||
| 3136 | 3121 | ||
| 3137 | if (local_group) | 3122 | if (local_group) |
| 3138 | balance_cpu = first_cpu(group->cpumask); | 3123 | balance_cpu = cpumask_first(sched_group_cpus(group)); |
| 3139 | 3124 | ||
| 3140 | /* Tally up the load of all CPUs in the group */ | 3125 | /* Tally up the load of all CPUs in the group */ |
| 3141 | sum_weighted_load = sum_nr_running = avg_load = 0; | 3126 | sum_weighted_load = sum_nr_running = avg_load = 0; |
| @@ -3144,13 +3129,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
| 3144 | max_cpu_load = 0; | 3129 | max_cpu_load = 0; |
| 3145 | min_cpu_load = ~0UL; | 3130 | min_cpu_load = ~0UL; |
| 3146 | 3131 | ||
| 3147 | for_each_cpu_mask_nr(i, group->cpumask) { | 3132 | for_each_cpu_and(i, sched_group_cpus(group), cpus) { |
| 3148 | struct rq *rq; | 3133 | struct rq *rq = cpu_rq(i); |
| 3149 | |||
| 3150 | if (!cpu_isset(i, *cpus)) | ||
| 3151 | continue; | ||
| 3152 | |||
| 3153 | rq = cpu_rq(i); | ||
| 3154 | 3134 | ||
| 3155 | if (*sd_idle && rq->nr_running) | 3135 | if (*sd_idle && rq->nr_running) |
| 3156 | *sd_idle = 0; | 3136 | *sd_idle = 0; |
| @@ -3261,8 +3241,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
| 3261 | */ | 3241 | */ |
| 3262 | if ((sum_nr_running < min_nr_running) || | 3242 | if ((sum_nr_running < min_nr_running) || |
| 3263 | (sum_nr_running == min_nr_running && | 3243 | (sum_nr_running == min_nr_running && |
| 3264 | first_cpu(group->cpumask) < | 3244 | cpumask_first(sched_group_cpus(group)) < |
| 3265 | first_cpu(group_min->cpumask))) { | 3245 | cpumask_first(sched_group_cpus(group_min)))) { |
| 3266 | group_min = group; | 3246 | group_min = group; |
| 3267 | min_nr_running = sum_nr_running; | 3247 | min_nr_running = sum_nr_running; |
| 3268 | min_load_per_task = sum_weighted_load / | 3248 | min_load_per_task = sum_weighted_load / |
| @@ -3277,8 +3257,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
| 3277 | if (sum_nr_running <= group_capacity - 1) { | 3257 | if (sum_nr_running <= group_capacity - 1) { |
| 3278 | if (sum_nr_running > leader_nr_running || | 3258 | if (sum_nr_running > leader_nr_running || |
| 3279 | (sum_nr_running == leader_nr_running && | 3259 | (sum_nr_running == leader_nr_running && |
| 3280 | first_cpu(group->cpumask) > | 3260 | cpumask_first(sched_group_cpus(group)) > |
| 3281 | first_cpu(group_leader->cpumask))) { | 3261 | cpumask_first(sched_group_cpus(group_leader)))) { |
| 3282 | group_leader = group; | 3262 | group_leader = group; |
| 3283 | leader_nr_running = sum_nr_running; | 3263 | leader_nr_running = sum_nr_running; |
| 3284 | } | 3264 | } |
| @@ -3417,16 +3397,16 @@ ret: | |||
| 3417 | */ | 3397 | */ |
| 3418 | static struct rq * | 3398 | static struct rq * |
| 3419 | find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, | 3399 | find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, |
| 3420 | unsigned long imbalance, const cpumask_t *cpus) | 3400 | unsigned long imbalance, const struct cpumask *cpus) |
| 3421 | { | 3401 | { |
| 3422 | struct rq *busiest = NULL, *rq; | 3402 | struct rq *busiest = NULL, *rq; |
| 3423 | unsigned long max_load = 0; | 3403 | unsigned long max_load = 0; |
| 3424 | int i; | 3404 | int i; |
| 3425 | 3405 | ||
| 3426 | for_each_cpu_mask_nr(i, group->cpumask) { | 3406 | for_each_cpu(i, sched_group_cpus(group)) { |
| 3427 | unsigned long wl; | 3407 | unsigned long wl; |
| 3428 | 3408 | ||
| 3429 | if (!cpu_isset(i, *cpus)) | 3409 | if (!cpumask_test_cpu(i, cpus)) |
| 3430 | continue; | 3410 | continue; |
| 3431 | 3411 | ||
| 3432 | rq = cpu_rq(i); | 3412 | rq = cpu_rq(i); |
| @@ -3456,7 +3436,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, | |||
| 3456 | */ | 3436 | */ |
| 3457 | static int load_balance(int this_cpu, struct rq *this_rq, | 3437 | static int load_balance(int this_cpu, struct rq *this_rq, |
| 3458 | struct sched_domain *sd, enum cpu_idle_type idle, | 3438 | struct sched_domain *sd, enum cpu_idle_type idle, |
| 3459 | int *balance, cpumask_t *cpus) | 3439 | int *balance, struct cpumask *cpus) |
| 3460 | { | 3440 | { |
| 3461 | int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; | 3441 | int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; |
| 3462 | struct sched_group *group; | 3442 | struct sched_group *group; |
| @@ -3464,7 +3444,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, | |||
| 3464 | struct rq *busiest; | 3444 | struct rq *busiest; |
| 3465 | unsigned long flags; | 3445 | unsigned long flags; |
| 3466 | 3446 | ||
| 3467 | cpus_setall(*cpus); | 3447 | cpumask_setall(cpus); |
| 3468 | 3448 | ||
| 3469 | /* | 3449 | /* |
| 3470 | * When power savings policy is enabled for the parent domain, idle | 3450 | * When power savings policy is enabled for the parent domain, idle |
| @@ -3524,8 +3504,8 @@ redo: | |||
| 3524 | 3504 | ||
| 3525 | /* All tasks on this runqueue were pinned by CPU affinity */ | 3505 | /* All tasks on this runqueue were pinned by CPU affinity */ |
| 3526 | if (unlikely(all_pinned)) { | 3506 | if (unlikely(all_pinned)) { |
| 3527 | cpu_clear(cpu_of(busiest), *cpus); | 3507 | cpumask_clear_cpu(cpu_of(busiest), cpus); |
| 3528 | if (!cpus_empty(*cpus)) | 3508 | if (!cpumask_empty(cpus)) |
| 3529 | goto redo; | 3509 | goto redo; |
| 3530 | goto out_balanced; | 3510 | goto out_balanced; |
| 3531 | } | 3511 | } |
| @@ -3542,7 +3522,8 @@ redo: | |||
| 3542 | /* don't kick the migration_thread, if the curr | 3522 | /* don't kick the migration_thread, if the curr |
| 3543 | * task on busiest cpu can't be moved to this_cpu | 3523 | * task on busiest cpu can't be moved to this_cpu |
| 3544 | */ | 3524 | */ |
| 3545 | if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) { | 3525 | if (!cpumask_test_cpu(this_cpu, |
| 3526 | &busiest->curr->cpus_allowed)) { | ||
| 3546 | spin_unlock_irqrestore(&busiest->lock, flags); | 3527 | spin_unlock_irqrestore(&busiest->lock, flags); |
| 3547 | all_pinned = 1; | 3528 | all_pinned = 1; |
| 3548 | goto out_one_pinned; | 3529 | goto out_one_pinned; |
| @@ -3617,7 +3598,7 @@ out: | |||
| 3617 | */ | 3598 | */ |
| 3618 | static int | 3599 | static int |
| 3619 | load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd, | 3600 | load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd, |
| 3620 | cpumask_t *cpus) | 3601 | struct cpumask *cpus) |
| 3621 | { | 3602 | { |
| 3622 | struct sched_group *group; | 3603 | struct sched_group *group; |
| 3623 | struct rq *busiest = NULL; | 3604 | struct rq *busiest = NULL; |
| @@ -3626,7 +3607,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd, | |||
| 3626 | int sd_idle = 0; | 3607 | int sd_idle = 0; |
| 3627 | int all_pinned = 0; | 3608 | int all_pinned = 0; |
| 3628 | 3609 | ||
| 3629 | cpus_setall(*cpus); | 3610 | cpumask_setall(cpus); |
| 3630 | 3611 | ||
| 3631 | /* | 3612 | /* |
| 3632 | * When power savings policy is enabled for the parent domain, idle | 3613 | * When power savings policy is enabled for the parent domain, idle |
| @@ -3670,8 +3651,8 @@ redo: | |||
| 3670 | double_unlock_balance(this_rq, busiest); | 3651 | double_unlock_balance(this_rq, busiest); |
| 3671 | 3652 | ||
| 3672 | if (unlikely(all_pinned)) { | 3653 | if (unlikely(all_pinned)) { |
| 3673 | cpu_clear(cpu_of(busiest), *cpus); | 3654 | cpumask_clear_cpu(cpu_of(busiest), cpus); |
| 3674 | if (!cpus_empty(*cpus)) | 3655 | if (!cpumask_empty(cpus)) |
| 3675 | goto redo; | 3656 | goto redo; |
| 3676 | } | 3657 | } |
| 3677 | } | 3658 | } |
| @@ -3704,9 +3685,12 @@ out_balanced: | |||
| 3704 | static void idle_balance(int this_cpu, struct rq *this_rq) | 3685 | static void idle_balance(int this_cpu, struct rq *this_rq) |
| 3705 | { | 3686 | { |
| 3706 | struct sched_domain *sd; | 3687 | struct sched_domain *sd; |
| 3707 | int pulled_task = -1; | 3688 | int pulled_task = 0; |
| 3708 | unsigned long next_balance = jiffies + HZ; | 3689 | unsigned long next_balance = jiffies + HZ; |
| 3709 | cpumask_t tmpmask; | 3690 | cpumask_var_t tmpmask; |
| 3691 | |||
| 3692 | if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC)) | ||
| 3693 | return; | ||
| 3710 | 3694 | ||
| 3711 | for_each_domain(this_cpu, sd) { | 3695 | for_each_domain(this_cpu, sd) { |
| 3712 | unsigned long interval; | 3696 | unsigned long interval; |
| @@ -3717,7 +3701,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq) | |||
| 3717 | if (sd->flags & SD_BALANCE_NEWIDLE) | 3701 | if (sd->flags & SD_BALANCE_NEWIDLE) |
| 3718 | /* If we've pulled tasks over stop searching: */ | 3702 | /* If we've pulled tasks over stop searching: */ |
| 3719 | pulled_task = load_balance_newidle(this_cpu, this_rq, | 3703 | pulled_task = load_balance_newidle(this_cpu, this_rq, |
| 3720 | sd, &tmpmask); | 3704 | sd, tmpmask); |
| 3721 | 3705 | ||
| 3722 | interval = msecs_to_jiffies(sd->balance_interval); | 3706 | interval = msecs_to_jiffies(sd->balance_interval); |
| 3723 | if (time_after(next_balance, sd->last_balance + interval)) | 3707 | if (time_after(next_balance, sd->last_balance + interval)) |
| @@ -3732,6 +3716,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq) | |||
| 3732 | */ | 3716 | */ |
| 3733 | this_rq->next_balance = next_balance; | 3717 | this_rq->next_balance = next_balance; |
| 3734 | } | 3718 | } |
| 3719 | free_cpumask_var(tmpmask); | ||
| 3735 | } | 3720 | } |
| 3736 | 3721 | ||
| 3737 | /* | 3722 | /* |
| @@ -3769,7 +3754,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) | |||
| 3769 | /* Search for an sd spanning us and the target CPU. */ | 3754 | /* Search for an sd spanning us and the target CPU. */ |
| 3770 | for_each_domain(target_cpu, sd) { | 3755 | for_each_domain(target_cpu, sd) { |
| 3771 | if ((sd->flags & SD_LOAD_BALANCE) && | 3756 | if ((sd->flags & SD_LOAD_BALANCE) && |
| 3772 | cpu_isset(busiest_cpu, sd->span)) | 3757 | cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) |
| 3773 | break; | 3758 | break; |
| 3774 | } | 3759 | } |
| 3775 | 3760 | ||
| @@ -3788,10 +3773,9 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) | |||
| 3788 | #ifdef CONFIG_NO_HZ | 3773 | #ifdef CONFIG_NO_HZ |
| 3789 | static struct { | 3774 | static struct { |
| 3790 | atomic_t load_balancer; | 3775 | atomic_t load_balancer; |
| 3791 | cpumask_t cpu_mask; | 3776 | cpumask_var_t cpu_mask; |
| 3792 | } nohz ____cacheline_aligned = { | 3777 | } nohz ____cacheline_aligned = { |
| 3793 | .load_balancer = ATOMIC_INIT(-1), | 3778 | .load_balancer = ATOMIC_INIT(-1), |
| 3794 | .cpu_mask = CPU_MASK_NONE, | ||
| 3795 | }; | 3779 | }; |
| 3796 | 3780 | ||
| 3797 | /* | 3781 | /* |
| @@ -3819,7 +3803,7 @@ int select_nohz_load_balancer(int stop_tick) | |||
| 3819 | int cpu = smp_processor_id(); | 3803 | int cpu = smp_processor_id(); |
| 3820 | 3804 | ||
| 3821 | if (stop_tick) { | 3805 | if (stop_tick) { |
| 3822 | cpu_set(cpu, nohz.cpu_mask); | 3806 | cpumask_set_cpu(cpu, nohz.cpu_mask); |
| 3823 | cpu_rq(cpu)->in_nohz_recently = 1; | 3807 | cpu_rq(cpu)->in_nohz_recently = 1; |
| 3824 | 3808 | ||
| 3825 | /* | 3809 | /* |
| @@ -3833,7 +3817,7 @@ int select_nohz_load_balancer(int stop_tick) | |||
| 3833 | } | 3817 | } |
| 3834 | 3818 | ||
| 3835 | /* time for ilb owner also to sleep */ | 3819 | /* time for ilb owner also to sleep */ |
| 3836 | if (cpus_weight(nohz.cpu_mask) == num_online_cpus()) { | 3820 | if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) { |
| 3837 | if (atomic_read(&nohz.load_balancer) == cpu) | 3821 | if (atomic_read(&nohz.load_balancer) == cpu) |
| 3838 | atomic_set(&nohz.load_balancer, -1); | 3822 | atomic_set(&nohz.load_balancer, -1); |
| 3839 | return 0; | 3823 | return 0; |
| @@ -3846,10 +3830,10 @@ int select_nohz_load_balancer(int stop_tick) | |||
| 3846 | } else if (atomic_read(&nohz.load_balancer) == cpu) | 3830 | } else if (atomic_read(&nohz.load_balancer) == cpu) |
| 3847 | return 1; | 3831 | return 1; |
| 3848 | } else { | 3832 | } else { |
| 3849 | if (!cpu_isset(cpu, nohz.cpu_mask)) | 3833 | if (!cpumask_test_cpu(cpu, nohz.cpu_mask)) |
| 3850 | return 0; | 3834 | return 0; |
| 3851 | 3835 | ||
| 3852 | cpu_clear(cpu, nohz.cpu_mask); | 3836 | cpumask_clear_cpu(cpu, nohz.cpu_mask); |
| 3853 | 3837 | ||
| 3854 | if (atomic_read(&nohz.load_balancer) == cpu) | 3838 | if (atomic_read(&nohz.load_balancer) == cpu) |
| 3855 | if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) | 3839 | if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) |
| @@ -3877,7 +3861,11 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle) | |||
| 3877 | unsigned long next_balance = jiffies + 60*HZ; | 3861 | unsigned long next_balance = jiffies + 60*HZ; |
| 3878 | int update_next_balance = 0; | 3862 | int update_next_balance = 0; |
| 3879 | int need_serialize; | 3863 | int need_serialize; |
| 3880 | cpumask_t tmp; | 3864 | cpumask_var_t tmp; |
| 3865 | |||
| 3866 | /* Fails alloc? Rebalancing probably not a priority right now. */ | ||
| 3867 | if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) | ||
| 3868 | return; | ||
| 3881 | 3869 | ||
| 3882 | for_each_domain(cpu, sd) { | 3870 | for_each_domain(cpu, sd) { |
| 3883 | if (!(sd->flags & SD_LOAD_BALANCE)) | 3871 | if (!(sd->flags & SD_LOAD_BALANCE)) |
| @@ -3902,7 +3890,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle) | |||
| 3902 | } | 3890 | } |
| 3903 | 3891 | ||
| 3904 | if (time_after_eq(jiffies, sd->last_balance + interval)) { | 3892 | if (time_after_eq(jiffies, sd->last_balance + interval)) { |
| 3905 | if (load_balance(cpu, rq, sd, idle, &balance, &tmp)) { | 3893 | if (load_balance(cpu, rq, sd, idle, &balance, tmp)) { |
| 3906 | /* | 3894 | /* |
| 3907 | * We've pulled tasks over so either we're no | 3895 | * We've pulled tasks over so either we're no |
| 3908 | * longer idle, or one of our SMT siblings is | 3896 | * longer idle, or one of our SMT siblings is |
| @@ -3936,6 +3924,8 @@ out: | |||
| 3936 | */ | 3924 | */ |
| 3937 | if (likely(update_next_balance)) | 3925 | if (likely(update_next_balance)) |
| 3938 | rq->next_balance = next_balance; | 3926 | rq->next_balance = next_balance; |
| 3927 | |||
| 3928 | free_cpumask_var(tmp); | ||
| 3939 | } | 3929 | } |
| 3940 | 3930 | ||
| 3941 | /* | 3931 | /* |
| @@ -3960,12 +3950,13 @@ static void run_rebalance_domains(struct softirq_action *h) | |||
| 3960 | */ | 3950 | */ |
| 3961 | if (this_rq->idle_at_tick && | 3951 | if (this_rq->idle_at_tick && |
| 3962 | atomic_read(&nohz.load_balancer) == this_cpu) { | 3952 | atomic_read(&nohz.load_balancer) == this_cpu) { |
| 3963 | cpumask_t cpus = nohz.cpu_mask; | ||
| 3964 | struct rq *rq; | 3953 | struct rq *rq; |
| 3965 | int balance_cpu; | 3954 | int balance_cpu; |
| 3966 | 3955 | ||
| 3967 | cpu_clear(this_cpu, cpus); | 3956 | for_each_cpu(balance_cpu, nohz.cpu_mask) { |
| 3968 | for_each_cpu_mask_nr(balance_cpu, cpus) { | 3957 | if (balance_cpu == this_cpu) |
| 3958 | continue; | ||
| 3959 | |||
| 3969 | /* | 3960 | /* |
| 3970 | * If this cpu gets work to do, stop the load balancing | 3961 | * If this cpu gets work to do, stop the load balancing |
| 3971 | * work being done for other cpus. Next load | 3962 | * work being done for other cpus. Next load |
| @@ -4003,7 +3994,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu) | |||
| 4003 | rq->in_nohz_recently = 0; | 3994 | rq->in_nohz_recently = 0; |
| 4004 | 3995 | ||
| 4005 | if (atomic_read(&nohz.load_balancer) == cpu) { | 3996 | if (atomic_read(&nohz.load_balancer) == cpu) { |
| 4006 | cpu_clear(cpu, nohz.cpu_mask); | 3997 | cpumask_clear_cpu(cpu, nohz.cpu_mask); |
| 4007 | atomic_set(&nohz.load_balancer, -1); | 3998 | atomic_set(&nohz.load_balancer, -1); |
| 4008 | } | 3999 | } |
| 4009 | 4000 | ||
| @@ -4016,7 +4007,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu) | |||
| 4016 | * TBD: Traverse the sched domains and nominate | 4007 | * TBD: Traverse the sched domains and nominate |
| 4017 | * the nearest cpu in the nohz.cpu_mask. | 4008 | * the nearest cpu in the nohz.cpu_mask. |
| 4018 | */ | 4009 | */ |
| 4019 | int ilb = first_cpu(nohz.cpu_mask); | 4010 | int ilb = cpumask_first(nohz.cpu_mask); |
| 4020 | 4011 | ||
| 4021 | if (ilb < nr_cpu_ids) | 4012 | if (ilb < nr_cpu_ids) |
| 4022 | resched_cpu(ilb); | 4013 | resched_cpu(ilb); |
| @@ -4028,7 +4019,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu) | |||
| 4028 | * cpus with ticks stopped, is it time for that to stop? | 4019 | * cpus with ticks stopped, is it time for that to stop? |
| 4029 | */ | 4020 | */ |
| 4030 | if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu && | 4021 | if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu && |
| 4031 | cpus_weight(nohz.cpu_mask) == num_online_cpus()) { | 4022 | cpumask_weight(nohz.cpu_mask) == num_online_cpus()) { |
| 4032 | resched_cpu(cpu); | 4023 | resched_cpu(cpu); |
| 4033 | return; | 4024 | return; |
| 4034 | } | 4025 | } |
| @@ -4038,7 +4029,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu) | |||
| 4038 | * someone else, then no need raise the SCHED_SOFTIRQ | 4029 | * someone else, then no need raise the SCHED_SOFTIRQ |
| 4039 | */ | 4030 | */ |
| 4040 | if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu && | 4031 | if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu && |
| 4041 | cpu_isset(cpu, nohz.cpu_mask)) | 4032 | cpumask_test_cpu(cpu, nohz.cpu_mask)) |
| 4042 | return; | 4033 | return; |
| 4043 | #endif | 4034 | #endif |
| 4044 | if (time_after_eq(jiffies, rq->next_balance)) | 4035 | if (time_after_eq(jiffies, rq->next_balance)) |
| @@ -4200,7 +4191,6 @@ void account_steal_time(struct task_struct *p, cputime_t steal) | |||
| 4200 | 4191 | ||
| 4201 | if (p == rq->idle) { | 4192 | if (p == rq->idle) { |
| 4202 | p->stime = cputime_add(p->stime, steal); | 4193 | p->stime = cputime_add(p->stime, steal); |
| 4203 | account_group_system_time(p, steal); | ||
| 4204 | if (atomic_read(&rq->nr_iowait) > 0) | 4194 | if (atomic_read(&rq->nr_iowait) > 0) |
| 4205 | cpustat->iowait = cputime64_add(cpustat->iowait, tmp); | 4195 | cpustat->iowait = cputime64_add(cpustat->iowait, tmp); |
| 4206 | else | 4196 | else |
| @@ -4336,7 +4326,7 @@ void __kprobes sub_preempt_count(int val) | |||
| 4336 | /* | 4326 | /* |
| 4337 | * Underflow? | 4327 | * Underflow? |
| 4338 | */ | 4328 | */ |
| 4339 | if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) | 4329 | if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked()))) |
| 4340 | return; | 4330 | return; |
| 4341 | /* | 4331 | /* |
| 4342 | * Is the spinlock portion underflowing? | 4332 | * Is the spinlock portion underflowing? |
| @@ -5397,10 +5387,9 @@ out_unlock: | |||
| 5397 | return retval; | 5387 | return retval; |
| 5398 | } | 5388 | } |
| 5399 | 5389 | ||
| 5400 | long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) | 5390 | long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) |
| 5401 | { | 5391 | { |
| 5402 | cpumask_t cpus_allowed; | 5392 | cpumask_var_t cpus_allowed, new_mask; |
| 5403 | cpumask_t new_mask = *in_mask; | ||
| 5404 | struct task_struct *p; | 5393 | struct task_struct *p; |
| 5405 | int retval; | 5394 | int retval; |
| 5406 | 5395 | ||
| @@ -5422,6 +5411,14 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) | |||
| 5422 | get_task_struct(p); | 5411 | get_task_struct(p); |
| 5423 | read_unlock(&tasklist_lock); | 5412 | read_unlock(&tasklist_lock); |
| 5424 | 5413 | ||
| 5414 | if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { | ||
| 5415 | retval = -ENOMEM; | ||
| 5416 | goto out_put_task; | ||
| 5417 | } | ||
| 5418 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { | ||
| 5419 | retval = -ENOMEM; | ||
| 5420 | goto out_free_cpus_allowed; | ||
| 5421 | } | ||
| 5425 | retval = -EPERM; | 5422 | retval = -EPERM; |
| 5426 | if ((current->euid != p->euid) && (current->euid != p->uid) && | 5423 | if ((current->euid != p->euid) && (current->euid != p->uid) && |
| 5427 | !capable(CAP_SYS_NICE)) | 5424 | !capable(CAP_SYS_NICE)) |
| @@ -5431,37 +5428,41 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) | |||
| 5431 | if (retval) | 5428 | if (retval) |
| 5432 | goto out_unlock; | 5429 | goto out_unlock; |
| 5433 | 5430 | ||
| 5434 | cpuset_cpus_allowed(p, &cpus_allowed); | 5431 | cpuset_cpus_allowed(p, cpus_allowed); |
| 5435 | cpus_and(new_mask, new_mask, cpus_allowed); | 5432 | cpumask_and(new_mask, in_mask, cpus_allowed); |
| 5436 | again: | 5433 | again: |
| 5437 | retval = set_cpus_allowed_ptr(p, &new_mask); | 5434 | retval = set_cpus_allowed_ptr(p, new_mask); |
| 5438 | 5435 | ||
| 5439 | if (!retval) { | 5436 | if (!retval) { |
| 5440 | cpuset_cpus_allowed(p, &cpus_allowed); | 5437 | cpuset_cpus_allowed(p, cpus_allowed); |
| 5441 | if (!cpus_subset(new_mask, cpus_allowed)) { | 5438 | if (!cpumask_subset(new_mask, cpus_allowed)) { |
| 5442 | /* | 5439 | /* |
| 5443 | * We must have raced with a concurrent cpuset | 5440 | * We must have raced with a concurrent cpuset |
| 5444 | * update. Just reset the cpus_allowed to the | 5441 | * update. Just reset the cpus_allowed to the |
| 5445 | * cpuset's cpus_allowed | 5442 | * cpuset's cpus_allowed |
| 5446 | */ | 5443 | */ |
| 5447 | new_mask = cpus_allowed; | 5444 | cpumask_copy(new_mask, cpus_allowed); |
| 5448 | goto again; | 5445 | goto again; |
| 5449 | } | 5446 | } |
| 5450 | } | 5447 | } |
| 5451 | out_unlock: | 5448 | out_unlock: |
| 5449 | free_cpumask_var(new_mask); | ||
| 5450 | out_free_cpus_allowed: | ||
| 5451 | free_cpumask_var(cpus_allowed); | ||
| 5452 | out_put_task: | ||
| 5452 | put_task_struct(p); | 5453 | put_task_struct(p); |
| 5453 | put_online_cpus(); | 5454 | put_online_cpus(); |
| 5454 | return retval; | 5455 | return retval; |
| 5455 | } | 5456 | } |
| 5456 | 5457 | ||
| 5457 | static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, | 5458 | static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, |
| 5458 | cpumask_t *new_mask) | 5459 | struct cpumask *new_mask) |
| 5459 | { | 5460 | { |
| 5460 | if (len < sizeof(cpumask_t)) { | 5461 | if (len < cpumask_size()) |
| 5461 | memset(new_mask, 0, sizeof(cpumask_t)); | 5462 | cpumask_clear(new_mask); |
| 5462 | } else if (len > sizeof(cpumask_t)) { | 5463 | else if (len > cpumask_size()) |
| 5463 | len = sizeof(cpumask_t); | 5464 | len = cpumask_size(); |
| 5464 | } | 5465 | |
| 5465 | return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; | 5466 | return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; |
| 5466 | } | 5467 | } |
| 5467 | 5468 | ||
| @@ -5474,17 +5475,20 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, | |||
| 5474 | asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, | 5475 | asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, |
| 5475 | unsigned long __user *user_mask_ptr) | 5476 | unsigned long __user *user_mask_ptr) |
| 5476 | { | 5477 | { |
| 5477 | cpumask_t new_mask; | 5478 | cpumask_var_t new_mask; |
| 5478 | int retval; | 5479 | int retval; |
| 5479 | 5480 | ||
| 5480 | retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask); | 5481 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) |
| 5481 | if (retval) | 5482 | return -ENOMEM; |
| 5482 | return retval; | ||
| 5483 | 5483 | ||
| 5484 | return sched_setaffinity(pid, &new_mask); | 5484 | retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); |
| 5485 | if (retval == 0) | ||
| 5486 | retval = sched_setaffinity(pid, new_mask); | ||
| 5487 | free_cpumask_var(new_mask); | ||
| 5488 | return retval; | ||
| 5485 | } | 5489 | } |
| 5486 | 5490 | ||
| 5487 | long sched_getaffinity(pid_t pid, cpumask_t *mask) | 5491 | long sched_getaffinity(pid_t pid, struct cpumask *mask) |
| 5488 | { | 5492 | { |
| 5489 | struct task_struct *p; | 5493 | struct task_struct *p; |
| 5490 | int retval; | 5494 | int retval; |
| @@ -5501,7 +5505,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask) | |||
| 5501 | if (retval) | 5505 | if (retval) |
| 5502 | goto out_unlock; | 5506 | goto out_unlock; |
| 5503 | 5507 | ||
| 5504 | cpus_and(*mask, p->cpus_allowed, cpu_online_map); | 5508 | cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); |
| 5505 | 5509 | ||
| 5506 | out_unlock: | 5510 | out_unlock: |
| 5507 | read_unlock(&tasklist_lock); | 5511 | read_unlock(&tasklist_lock); |
| @@ -5520,19 +5524,24 @@ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len, | |||
| 5520 | unsigned long __user *user_mask_ptr) | 5524 | unsigned long __user *user_mask_ptr) |
| 5521 | { | 5525 | { |
| 5522 | int ret; | 5526 | int ret; |
| 5523 | cpumask_t mask; | 5527 | cpumask_var_t mask; |
| 5524 | 5528 | ||
| 5525 | if (len < sizeof(cpumask_t)) | 5529 | if (len < cpumask_size()) |
| 5526 | return -EINVAL; | 5530 | return -EINVAL; |
| 5527 | 5531 | ||
| 5528 | ret = sched_getaffinity(pid, &mask); | 5532 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) |
| 5529 | if (ret < 0) | 5533 | return -ENOMEM; |
| 5530 | return ret; | ||
| 5531 | 5534 | ||
| 5532 | if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t))) | 5535 | ret = sched_getaffinity(pid, mask); |
| 5533 | return -EFAULT; | 5536 | if (ret == 0) { |
| 5537 | if (copy_to_user(user_mask_ptr, mask, cpumask_size())) | ||
| 5538 | ret = -EFAULT; | ||
| 5539 | else | ||
| 5540 | ret = cpumask_size(); | ||
| 5541 | } | ||
| 5542 | free_cpumask_var(mask); | ||
| 5534 | 5543 | ||
| 5535 | return sizeof(cpumask_t); | 5544 | return ret; |
| 5536 | } | 5545 | } |
| 5537 | 5546 | ||
| 5538 | /** | 5547 | /** |
| @@ -5868,14 +5877,15 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
| 5868 | struct rq *rq = cpu_rq(cpu); | 5877 | struct rq *rq = cpu_rq(cpu); |
| 5869 | unsigned long flags; | 5878 | unsigned long flags; |
| 5870 | 5879 | ||
| 5880 | spin_lock_irqsave(&rq->lock, flags); | ||
| 5881 | |||
| 5871 | __sched_fork(idle); | 5882 | __sched_fork(idle); |
| 5872 | idle->se.exec_start = sched_clock(); | 5883 | idle->se.exec_start = sched_clock(); |
| 5873 | 5884 | ||
| 5874 | idle->prio = idle->normal_prio = MAX_PRIO; | 5885 | idle->prio = idle->normal_prio = MAX_PRIO; |
| 5875 | idle->cpus_allowed = cpumask_of_cpu(cpu); | 5886 | cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); |
| 5876 | __set_task_cpu(idle, cpu); | 5887 | __set_task_cpu(idle, cpu); |
| 5877 | 5888 | ||
| 5878 | spin_lock_irqsave(&rq->lock, flags); | ||
| 5879 | rq->curr = rq->idle = idle; | 5889 | rq->curr = rq->idle = idle; |
| 5880 | #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) | 5890 | #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) |
| 5881 | idle->oncpu = 1; | 5891 | idle->oncpu = 1; |
| @@ -5892,6 +5902,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
| 5892 | * The idle tasks have their own, simple scheduling class: | 5902 | * The idle tasks have their own, simple scheduling class: |
| 5893 | */ | 5903 | */ |
| 5894 | idle->sched_class = &idle_sched_class; | 5904 | idle->sched_class = &idle_sched_class; |
| 5905 | ftrace_graph_init_task(idle); | ||
| 5895 | } | 5906 | } |
| 5896 | 5907 | ||
| 5897 | /* | 5908 | /* |
| @@ -5899,9 +5910,9 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
| 5899 | * indicates which cpus entered this state. This is used | 5910 | * indicates which cpus entered this state. This is used |
| 5900 | * in the rcu update to wait only for active cpus. For system | 5911 | * in the rcu update to wait only for active cpus. For system |
| 5901 | * which do not switch off the HZ timer nohz_cpu_mask should | 5912 | * which do not switch off the HZ timer nohz_cpu_mask should |
| 5902 | * always be CPU_MASK_NONE. | 5913 | * always be CPU_BITS_NONE. |
| 5903 | */ | 5914 | */ |
| 5904 | cpumask_t nohz_cpu_mask = CPU_MASK_NONE; | 5915 | cpumask_var_t nohz_cpu_mask; |
| 5905 | 5916 | ||
| 5906 | /* | 5917 | /* |
| 5907 | * Increase the granularity value when there are more CPUs, | 5918 | * Increase the granularity value when there are more CPUs, |
| @@ -5956,7 +5967,7 @@ static inline void sched_init_granularity(void) | |||
| 5956 | * task must not exit() & deallocate itself prematurely. The | 5967 | * task must not exit() & deallocate itself prematurely. The |
| 5957 | * call is not atomic; no spinlocks may be held. | 5968 | * call is not atomic; no spinlocks may be held. |
| 5958 | */ | 5969 | */ |
| 5959 | int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) | 5970 | int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) |
| 5960 | { | 5971 | { |
| 5961 | struct migration_req req; | 5972 | struct migration_req req; |
| 5962 | unsigned long flags; | 5973 | unsigned long flags; |
| @@ -5964,13 +5975,13 @@ int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) | |||
| 5964 | int ret = 0; | 5975 | int ret = 0; |
| 5965 | 5976 | ||
| 5966 | rq = task_rq_lock(p, &flags); | 5977 | rq = task_rq_lock(p, &flags); |
| 5967 | if (!cpus_intersects(*new_mask, cpu_online_map)) { | 5978 | if (!cpumask_intersects(new_mask, cpu_online_mask)) { |
| 5968 | ret = -EINVAL; | 5979 | ret = -EINVAL; |
| 5969 | goto out; | 5980 | goto out; |
| 5970 | } | 5981 | } |
| 5971 | 5982 | ||
| 5972 | if (unlikely((p->flags & PF_THREAD_BOUND) && p != current && | 5983 | if (unlikely((p->flags & PF_THREAD_BOUND) && p != current && |
| 5973 | !cpus_equal(p->cpus_allowed, *new_mask))) { | 5984 | !cpumask_equal(&p->cpus_allowed, new_mask))) { |
| 5974 | ret = -EINVAL; | 5985 | ret = -EINVAL; |
| 5975 | goto out; | 5986 | goto out; |
| 5976 | } | 5987 | } |
| @@ -5978,15 +5989,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) | |||
| 5978 | if (p->sched_class->set_cpus_allowed) | 5989 | if (p->sched_class->set_cpus_allowed) |
| 5979 | p->sched_class->set_cpus_allowed(p, new_mask); | 5990 | p->sched_class->set_cpus_allowed(p, new_mask); |
| 5980 | else { | 5991 | else { |
| 5981 | p->cpus_allowed = *new_mask; | 5992 | cpumask_copy(&p->cpus_allowed, new_mask); |
| 5982 | p->rt.nr_cpus_allowed = cpus_weight(*new_mask); | 5993 | p->rt.nr_cpus_allowed = cpumask_weight(new_mask); |
| 5983 | } | 5994 | } |
| 5984 | 5995 | ||
| 5985 | /* Can the task run on the task's current CPU? If so, we're done */ | 5996 | /* Can the task run on the task's current CPU? If so, we're done */ |
| 5986 | if (cpu_isset(task_cpu(p), *new_mask)) | 5997 | if (cpumask_test_cpu(task_cpu(p), new_mask)) |
| 5987 | goto out; | 5998 | goto out; |
| 5988 | 5999 | ||
| 5989 | if (migrate_task(p, any_online_cpu(*new_mask), &req)) { | 6000 | if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) { |
| 5990 | /* Need help from migration thread: drop lock and wait. */ | 6001 | /* Need help from migration thread: drop lock and wait. */ |
| 5991 | task_rq_unlock(rq, &flags); | 6002 | task_rq_unlock(rq, &flags); |
| 5992 | wake_up_process(rq->migration_thread); | 6003 | wake_up_process(rq->migration_thread); |
| @@ -6028,7 +6039,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) | |||
| 6028 | if (task_cpu(p) != src_cpu) | 6039 | if (task_cpu(p) != src_cpu) |
| 6029 | goto done; | 6040 | goto done; |
| 6030 | /* Affinity changed (again). */ | 6041 | /* Affinity changed (again). */ |
| 6031 | if (!cpu_isset(dest_cpu, p->cpus_allowed)) | 6042 | if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) |
| 6032 | goto fail; | 6043 | goto fail; |
| 6033 | 6044 | ||
| 6034 | on_rq = p->se.on_rq; | 6045 | on_rq = p->se.on_rq; |
| @@ -6122,54 +6133,46 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu) | |||
| 6122 | 6133 | ||
| 6123 | /* | 6134 | /* |
| 6124 | * Figure out where task on dead CPU should go, use force if necessary. | 6135 | * Figure out where task on dead CPU should go, use force if necessary. |
| 6125 | * NOTE: interrupts should be disabled by the caller | ||
| 6126 | */ | 6136 | */ |
| 6127 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) | 6137 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) |
| 6128 | { | 6138 | { |
| 6129 | unsigned long flags; | ||
| 6130 | cpumask_t mask; | ||
| 6131 | struct rq *rq; | ||
| 6132 | int dest_cpu; | 6139 | int dest_cpu; |
| 6140 | /* FIXME: Use cpumask_of_node here. */ | ||
| 6141 | cpumask_t _nodemask = node_to_cpumask(cpu_to_node(dead_cpu)); | ||
| 6142 | const struct cpumask *nodemask = &_nodemask; | ||
| 6143 | |||
| 6144 | again: | ||
| 6145 | /* Look for allowed, online CPU in same node. */ | ||
| 6146 | for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask) | ||
| 6147 | if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) | ||
| 6148 | goto move; | ||
| 6149 | |||
| 6150 | /* Any allowed, online CPU? */ | ||
| 6151 | dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask); | ||
| 6152 | if (dest_cpu < nr_cpu_ids) | ||
| 6153 | goto move; | ||
| 6154 | |||
| 6155 | /* No more Mr. Nice Guy. */ | ||
| 6156 | if (dest_cpu >= nr_cpu_ids) { | ||
| 6157 | cpuset_cpus_allowed_locked(p, &p->cpus_allowed); | ||
| 6158 | dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed); | ||
| 6133 | 6159 | ||
| 6134 | do { | 6160 | /* |
| 6135 | /* On same node? */ | 6161 | * Don't tell them about moving exiting tasks or |
| 6136 | mask = node_to_cpumask(cpu_to_node(dead_cpu)); | 6162 | * kernel threads (both mm NULL), since they never |
| 6137 | cpus_and(mask, mask, p->cpus_allowed); | 6163 | * leave kernel. |
| 6138 | dest_cpu = any_online_cpu(mask); | 6164 | */ |
| 6139 | 6165 | if (p->mm && printk_ratelimit()) { | |
| 6140 | /* On any allowed CPU? */ | 6166 | printk(KERN_INFO "process %d (%s) no " |
| 6141 | if (dest_cpu >= nr_cpu_ids) | 6167 | "longer affine to cpu%d\n", |
| 6142 | dest_cpu = any_online_cpu(p->cpus_allowed); | 6168 | task_pid_nr(p), p->comm, dead_cpu); |
| 6143 | |||
| 6144 | /* No more Mr. Nice Guy. */ | ||
| 6145 | if (dest_cpu >= nr_cpu_ids) { | ||
| 6146 | cpumask_t cpus_allowed; | ||
| 6147 | |||
| 6148 | cpuset_cpus_allowed_locked(p, &cpus_allowed); | ||
| 6149 | /* | ||
| 6150 | * Try to stay on the same cpuset, where the | ||
| 6151 | * current cpuset may be a subset of all cpus. | ||
| 6152 | * The cpuset_cpus_allowed_locked() variant of | ||
| 6153 | * cpuset_cpus_allowed() will not block. It must be | ||
| 6154 | * called within calls to cpuset_lock/cpuset_unlock. | ||
| 6155 | */ | ||
| 6156 | rq = task_rq_lock(p, &flags); | ||
| 6157 | p->cpus_allowed = cpus_allowed; | ||
| 6158 | dest_cpu = any_online_cpu(p->cpus_allowed); | ||
| 6159 | task_rq_unlock(rq, &flags); | ||
| 6160 | |||
| 6161 | /* | ||
| 6162 | * Don't tell them about moving exiting tasks or | ||
| 6163 | * kernel threads (both mm NULL), since they never | ||
| 6164 | * leave kernel. | ||
| 6165 | */ | ||
| 6166 | if (p->mm && printk_ratelimit()) { | ||
| 6167 | printk(KERN_INFO "process %d (%s) no " | ||
| 6168 | "longer affine to cpu%d\n", | ||
| 6169 | task_pid_nr(p), p->comm, dead_cpu); | ||
| 6170 | } | ||
| 6171 | } | 6169 | } |
| 6172 | } while (!__migrate_task_irq(p, dead_cpu, dest_cpu)); | 6170 | } |
| 6171 | |||
| 6172 | move: | ||
| 6173 | /* It can have affinity changed while we were choosing. */ | ||
| 6174 | if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu))) | ||
| 6175 | goto again; | ||
| 6173 | } | 6176 | } |
| 6174 | 6177 | ||
| 6175 | /* | 6178 | /* |
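The rewritten move_task_off_dead_cpu() above picks a destination in widening order - an allowed, online CPU on the same node, then any allowed online CPU, then a cpuset-forced mask - and jumps back to the again: label if the task's affinity changed while the target was being chosen. A minimal user-space sketch of that select-then-retry shape; pick_same_node(), pick_any_allowed(), force_allowed() and try_migrate() are stand-ins invented for the example, not kernel APIs:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the real selection helpers; each returns a CPU id or -1. */
    static int pick_same_node(int dead_cpu) { (void)dead_cpu; return -1; }
    static int pick_any_allowed(void)       { return 2; }  /* pretend CPU 2 is allowed */
    static int force_allowed(void)          { return 0; }  /* last resort: widen the mask */

    static bool try_migrate(int dead_cpu, int dest_cpu)
    {
        (void)dead_cpu; (void)dest_cpu;
        /* In the kernel this can fail if affinity changed meanwhile. */
        return true;
    }

    static void move_off_dead_cpu(int dead_cpu)
    {
        int dest;
    again:
        dest = pick_same_node(dead_cpu);      /* 1. prefer the same node */
        if (dest < 0)
            dest = pick_any_allowed();        /* 2. any allowed, online CPU */
        if (dest < 0)
            dest = force_allowed();           /* 3. no more Mr. Nice Guy */

        /* Affinity may have changed while we were choosing: start over. */
        if (!try_migrate(dead_cpu, dest))
            goto again;
        printf("moved task off cpu%d to cpu%d\n", dead_cpu, dest);
    }

    int main(void)
    {
        move_off_dead_cpu(1);
        return 0;
    }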
| @@ -6181,7 +6184,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) | |||
| 6181 | */ | 6184 | */ |
| 6182 | static void migrate_nr_uninterruptible(struct rq *rq_src) | 6185 | static void migrate_nr_uninterruptible(struct rq *rq_src) |
| 6183 | { | 6186 | { |
| 6184 | struct rq *rq_dest = cpu_rq(any_online_cpu(*CPU_MASK_ALL_PTR)); | 6187 | struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask)); |
| 6185 | unsigned long flags; | 6188 | unsigned long flags; |
| 6186 | 6189 | ||
| 6187 | local_irq_save(flags); | 6190 | local_irq_save(flags); |
| @@ -6471,7 +6474,7 @@ static void set_rq_online(struct rq *rq) | |||
| 6471 | if (!rq->online) { | 6474 | if (!rq->online) { |
| 6472 | const struct sched_class *class; | 6475 | const struct sched_class *class; |
| 6473 | 6476 | ||
| 6474 | cpu_set(rq->cpu, rq->rd->online); | 6477 | cpumask_set_cpu(rq->cpu, rq->rd->online); |
| 6475 | rq->online = 1; | 6478 | rq->online = 1; |
| 6476 | 6479 | ||
| 6477 | for_each_class(class) { | 6480 | for_each_class(class) { |
| @@ -6491,7 +6494,7 @@ static void set_rq_offline(struct rq *rq) | |||
| 6491 | class->rq_offline(rq); | 6494 | class->rq_offline(rq); |
| 6492 | } | 6495 | } |
| 6493 | 6496 | ||
| 6494 | cpu_clear(rq->cpu, rq->rd->online); | 6497 | cpumask_clear_cpu(rq->cpu, rq->rd->online); |
| 6495 | rq->online = 0; | 6498 | rq->online = 0; |
| 6496 | } | 6499 | } |
| 6497 | } | 6500 | } |
| @@ -6532,7 +6535,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
| 6532 | rq = cpu_rq(cpu); | 6535 | rq = cpu_rq(cpu); |
| 6533 | spin_lock_irqsave(&rq->lock, flags); | 6536 | spin_lock_irqsave(&rq->lock, flags); |
| 6534 | if (rq->rd) { | 6537 | if (rq->rd) { |
| 6535 | BUG_ON(!cpu_isset(cpu, rq->rd->span)); | 6538 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
| 6536 | 6539 | ||
| 6537 | set_rq_online(rq); | 6540 | set_rq_online(rq); |
| 6538 | } | 6541 | } |
| @@ -6546,7 +6549,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
| 6546 | break; | 6549 | break; |
| 6547 | /* Unbind it from offline cpu so it can run. Fall thru. */ | 6550 | /* Unbind it from offline cpu so it can run. Fall thru. */ |
| 6548 | kthread_bind(cpu_rq(cpu)->migration_thread, | 6551 | kthread_bind(cpu_rq(cpu)->migration_thread, |
| 6549 | any_online_cpu(cpu_online_map)); | 6552 | cpumask_any(cpu_online_mask)); |
| 6550 | kthread_stop(cpu_rq(cpu)->migration_thread); | 6553 | kthread_stop(cpu_rq(cpu)->migration_thread); |
| 6551 | cpu_rq(cpu)->migration_thread = NULL; | 6554 | cpu_rq(cpu)->migration_thread = NULL; |
| 6552 | break; | 6555 | break; |
| @@ -6583,7 +6586,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
| 6583 | req = list_entry(rq->migration_queue.next, | 6586 | req = list_entry(rq->migration_queue.next, |
| 6584 | struct migration_req, list); | 6587 | struct migration_req, list); |
| 6585 | list_del_init(&req->list); | 6588 | list_del_init(&req->list); |
| 6589 | spin_unlock_irq(&rq->lock); | ||
| 6586 | complete(&req->done); | 6590 | complete(&req->done); |
| 6591 | spin_lock_irq(&rq->lock); | ||
| 6587 | } | 6592 | } |
| 6588 | spin_unlock_irq(&rq->lock); | 6593 | spin_unlock_irq(&rq->lock); |
| 6589 | break; | 6594 | break; |
| @@ -6594,7 +6599,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
| 6594 | rq = cpu_rq(cpu); | 6599 | rq = cpu_rq(cpu); |
| 6595 | spin_lock_irqsave(&rq->lock, flags); | 6600 | spin_lock_irqsave(&rq->lock, flags); |
| 6596 | if (rq->rd) { | 6601 | if (rq->rd) { |
| 6597 | BUG_ON(!cpu_isset(cpu, rq->rd->span)); | 6602 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
| 6598 | set_rq_offline(rq); | 6603 | set_rq_offline(rq); |
| 6599 | } | 6604 | } |
| 6600 | spin_unlock_irqrestore(&rq->lock, flags); | 6605 | spin_unlock_irqrestore(&rq->lock, flags); |
| @@ -6632,36 +6637,14 @@ early_initcall(migration_init); | |||
| 6632 | 6637 | ||
| 6633 | #ifdef CONFIG_SCHED_DEBUG | 6638 | #ifdef CONFIG_SCHED_DEBUG |
| 6634 | 6639 | ||
| 6635 | static inline const char *sd_level_to_string(enum sched_domain_level lvl) | ||
| 6636 | { | ||
| 6637 | switch (lvl) { | ||
| 6638 | case SD_LV_NONE: | ||
| 6639 | return "NONE"; | ||
| 6640 | case SD_LV_SIBLING: | ||
| 6641 | return "SIBLING"; | ||
| 6642 | case SD_LV_MC: | ||
| 6643 | return "MC"; | ||
| 6644 | case SD_LV_CPU: | ||
| 6645 | return "CPU"; | ||
| 6646 | case SD_LV_NODE: | ||
| 6647 | return "NODE"; | ||
| 6648 | case SD_LV_ALLNODES: | ||
| 6649 | return "ALLNODES"; | ||
| 6650 | case SD_LV_MAX: | ||
| 6651 | return "MAX"; | ||
| 6652 | |||
| 6653 | } | ||
| 6654 | return "MAX"; | ||
| 6655 | } | ||
| 6656 | |||
| 6657 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | 6640 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, |
| 6658 | cpumask_t *groupmask) | 6641 | struct cpumask *groupmask) |
| 6659 | { | 6642 | { |
| 6660 | struct sched_group *group = sd->groups; | 6643 | struct sched_group *group = sd->groups; |
| 6661 | char str[256]; | 6644 | char str[256]; |
| 6662 | 6645 | ||
| 6663 | cpulist_scnprintf(str, sizeof(str), sd->span); | 6646 | cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd)); |
| 6664 | cpus_clear(*groupmask); | 6647 | cpumask_clear(groupmask); |
| 6665 | 6648 | ||
| 6666 | printk(KERN_DEBUG "%*s domain %d: ", level, "", level); | 6649 | printk(KERN_DEBUG "%*s domain %d: ", level, "", level); |
| 6667 | 6650 | ||
| @@ -6673,14 +6656,13 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
| 6673 | return -1; | 6656 | return -1; |
| 6674 | } | 6657 | } |
| 6675 | 6658 | ||
| 6676 | printk(KERN_CONT "span %s level %s\n", | 6659 | printk(KERN_CONT "span %s level %s\n", str, sd->name); |
| 6677 | str, sd_level_to_string(sd->level)); | ||
| 6678 | 6660 | ||
| 6679 | if (!cpu_isset(cpu, sd->span)) { | 6661 | if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
| 6680 | printk(KERN_ERR "ERROR: domain->span does not contain " | 6662 | printk(KERN_ERR "ERROR: domain->span does not contain " |
| 6681 | "CPU%d\n", cpu); | 6663 | "CPU%d\n", cpu); |
| 6682 | } | 6664 | } |
| 6683 | if (!cpu_isset(cpu, group->cpumask)) { | 6665 | if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { |
| 6684 | printk(KERN_ERR "ERROR: domain->groups does not contain" | 6666 | printk(KERN_ERR "ERROR: domain->groups does not contain" |
| 6685 | " CPU%d\n", cpu); | 6667 | " CPU%d\n", cpu); |
| 6686 | } | 6668 | } |
| @@ -6700,31 +6682,32 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
| 6700 | break; | 6682 | break; |
| 6701 | } | 6683 | } |
| 6702 | 6684 | ||
| 6703 | if (!cpus_weight(group->cpumask)) { | 6685 | if (!cpumask_weight(sched_group_cpus(group))) { |
| 6704 | printk(KERN_CONT "\n"); | 6686 | printk(KERN_CONT "\n"); |
| 6705 | printk(KERN_ERR "ERROR: empty group\n"); | 6687 | printk(KERN_ERR "ERROR: empty group\n"); |
| 6706 | break; | 6688 | break; |
| 6707 | } | 6689 | } |
| 6708 | 6690 | ||
| 6709 | if (cpus_intersects(*groupmask, group->cpumask)) { | 6691 | if (cpumask_intersects(groupmask, sched_group_cpus(group))) { |
| 6710 | printk(KERN_CONT "\n"); | 6692 | printk(KERN_CONT "\n"); |
| 6711 | printk(KERN_ERR "ERROR: repeated CPUs\n"); | 6693 | printk(KERN_ERR "ERROR: repeated CPUs\n"); |
| 6712 | break; | 6694 | break; |
| 6713 | } | 6695 | } |
| 6714 | 6696 | ||
| 6715 | cpus_or(*groupmask, *groupmask, group->cpumask); | 6697 | cpumask_or(groupmask, groupmask, sched_group_cpus(group)); |
| 6716 | 6698 | ||
| 6717 | cpulist_scnprintf(str, sizeof(str), group->cpumask); | 6699 | cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group)); |
| 6718 | printk(KERN_CONT " %s", str); | 6700 | printk(KERN_CONT " %s", str); |
| 6719 | 6701 | ||
| 6720 | group = group->next; | 6702 | group = group->next; |
| 6721 | } while (group != sd->groups); | 6703 | } while (group != sd->groups); |
| 6722 | printk(KERN_CONT "\n"); | 6704 | printk(KERN_CONT "\n"); |
| 6723 | 6705 | ||
| 6724 | if (!cpus_equal(sd->span, *groupmask)) | 6706 | if (!cpumask_equal(sched_domain_span(sd), groupmask)) |
| 6725 | printk(KERN_ERR "ERROR: groups don't span domain->span\n"); | 6707 | printk(KERN_ERR "ERROR: groups don't span domain->span\n"); |
| 6726 | 6708 | ||
| 6727 | if (sd->parent && !cpus_subset(*groupmask, sd->parent->span)) | 6709 | if (sd->parent && |
| 6710 | !cpumask_subset(groupmask, sched_domain_span(sd->parent))) | ||
| 6728 | printk(KERN_ERR "ERROR: parent span is not a superset " | 6711 | printk(KERN_ERR "ERROR: parent span is not a superset " |
| 6729 | "of domain->span\n"); | 6712 | "of domain->span\n"); |
| 6730 | return 0; | 6713 | return 0; |
| @@ -6732,7 +6715,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
| 6732 | 6715 | ||
| 6733 | static void sched_domain_debug(struct sched_domain *sd, int cpu) | 6716 | static void sched_domain_debug(struct sched_domain *sd, int cpu) |
| 6734 | { | 6717 | { |
| 6735 | cpumask_t *groupmask; | 6718 | cpumask_var_t groupmask; |
| 6736 | int level = 0; | 6719 | int level = 0; |
| 6737 | 6720 | ||
| 6738 | if (!sd) { | 6721 | if (!sd) { |
| @@ -6742,8 +6725,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
| 6742 | 6725 | ||
| 6743 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); | 6726 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); |
| 6744 | 6727 | ||
| 6745 | groupmask = kmalloc(sizeof(cpumask_t), GFP_KERNEL); | 6728 | if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) { |
| 6746 | if (!groupmask) { | ||
| 6747 | printk(KERN_DEBUG "Cannot load-balance (out of memory)\n"); | 6729 | printk(KERN_DEBUG "Cannot load-balance (out of memory)\n"); |
| 6748 | return; | 6730 | return; |
| 6749 | } | 6731 | } |
| @@ -6756,7 +6738,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
| 6756 | if (!sd) | 6738 | if (!sd) |
| 6757 | break; | 6739 | break; |
| 6758 | } | 6740 | } |
| 6759 | kfree(groupmask); | 6741 | free_cpumask_var(groupmask); |
| 6760 | } | 6742 | } |
| 6761 | #else /* !CONFIG_SCHED_DEBUG */ | 6743 | #else /* !CONFIG_SCHED_DEBUG */ |
| 6762 | # define sched_domain_debug(sd, cpu) do { } while (0) | 6744 | # define sched_domain_debug(sd, cpu) do { } while (0) |
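sched_domain_debug() above now obtains its scratch mask with alloc_cpumask_var()/free_cpumask_var() instead of an open-coded kmalloc of a cpumask_t. The point of cpumask_var_t is that one API can back the variable with a heap allocation on very large configurations or with plain on-stack storage on small ones. A rough user-space emulation of that split; CONFIG_BIG, mask_var_t, alloc_mask_var() and free_mask_var() are invented names for the sketch, not the kernel's CONFIG_CPUMASK_OFFSTACK machinery:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define NBITS  64
    #define NWORDS ((NBITS + 63) / 64)

    #ifdef CONFIG_BIG
    /* "Off-stack" flavour: the variable is a pointer and must be allocated. */
    typedef unsigned long *mask_var_t;
    static bool alloc_mask_var(mask_var_t *m)
    {
        *m = calloc(NWORDS, sizeof(unsigned long));
        return *m != NULL;
    }
    static void free_mask_var(mask_var_t m) { free(m); }
    #else
    /* "On-stack" flavour: the variable is the storage; alloc/free cost nothing. */
    typedef unsigned long mask_var_t[NWORDS];
    static bool alloc_mask_var(mask_var_t *m)
    {
        memset(*m, 0, sizeof(*m));
        return true;
    }
    static void free_mask_var(unsigned long *m) { (void)m; }
    #endif

    int main(void)
    {
        mask_var_t mask;

        if (!alloc_mask_var(&mask)) {
            fprintf(stderr, "out of memory\n");
            return 1;
        }
        mask[0] |= 1UL << 3;              /* "set CPU 3" */
        printf("word0 = %#lx\n", mask[0]);
        free_mask_var(mask);
        return 0;
    }

Callers only ever see the alloc/free pair, which is why conversions like the one above are mostly mechanical.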
| @@ -6764,7 +6746,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
| 6764 | 6746 | ||
| 6765 | static int sd_degenerate(struct sched_domain *sd) | 6747 | static int sd_degenerate(struct sched_domain *sd) |
| 6766 | { | 6748 | { |
| 6767 | if (cpus_weight(sd->span) == 1) | 6749 | if (cpumask_weight(sched_domain_span(sd)) == 1) |
| 6768 | return 1; | 6750 | return 1; |
| 6769 | 6751 | ||
| 6770 | /* Following flags need at least 2 groups */ | 6752 | /* Following flags need at least 2 groups */ |
| @@ -6795,7 +6777,7 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) | |||
| 6795 | if (sd_degenerate(parent)) | 6777 | if (sd_degenerate(parent)) |
| 6796 | return 1; | 6778 | return 1; |
| 6797 | 6779 | ||
| 6798 | if (!cpus_equal(sd->span, parent->span)) | 6780 | if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) |
| 6799 | return 0; | 6781 | return 0; |
| 6800 | 6782 | ||
| 6801 | /* Does parent contain flags not in child? */ | 6783 | /* Does parent contain flags not in child? */ |
| @@ -6810,6 +6792,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) | |||
| 6810 | SD_BALANCE_EXEC | | 6792 | SD_BALANCE_EXEC | |
| 6811 | SD_SHARE_CPUPOWER | | 6793 | SD_SHARE_CPUPOWER | |
| 6812 | SD_SHARE_PKG_RESOURCES); | 6794 | SD_SHARE_PKG_RESOURCES); |
| 6795 | if (nr_node_ids == 1) | ||
| 6796 | pflags &= ~SD_SERIALIZE; | ||
| 6813 | } | 6797 | } |
| 6814 | if (~cflags & pflags) | 6798 | if (~cflags & pflags) |
| 6815 | return 0; | 6799 | return 0; |
| @@ -6817,6 +6801,16 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) | |||
| 6817 | return 1; | 6801 | return 1; |
| 6818 | } | 6802 | } |
| 6819 | 6803 | ||
| 6804 | static void free_rootdomain(struct root_domain *rd) | ||
| 6805 | { | ||
| 6806 | cpupri_cleanup(&rd->cpupri); | ||
| 6807 | |||
| 6808 | free_cpumask_var(rd->rto_mask); | ||
| 6809 | free_cpumask_var(rd->online); | ||
| 6810 | free_cpumask_var(rd->span); | ||
| 6811 | kfree(rd); | ||
| 6812 | } | ||
| 6813 | |||
| 6820 | static void rq_attach_root(struct rq *rq, struct root_domain *rd) | 6814 | static void rq_attach_root(struct rq *rq, struct root_domain *rd) |
| 6821 | { | 6815 | { |
| 6822 | unsigned long flags; | 6816 | unsigned long flags; |
| @@ -6826,38 +6820,63 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) | |||
| 6826 | if (rq->rd) { | 6820 | if (rq->rd) { |
| 6827 | struct root_domain *old_rd = rq->rd; | 6821 | struct root_domain *old_rd = rq->rd; |
| 6828 | 6822 | ||
| 6829 | if (cpu_isset(rq->cpu, old_rd->online)) | 6823 | if (cpumask_test_cpu(rq->cpu, old_rd->online)) |
| 6830 | set_rq_offline(rq); | 6824 | set_rq_offline(rq); |
| 6831 | 6825 | ||
| 6832 | cpu_clear(rq->cpu, old_rd->span); | 6826 | cpumask_clear_cpu(rq->cpu, old_rd->span); |
| 6833 | 6827 | ||
| 6834 | if (atomic_dec_and_test(&old_rd->refcount)) | 6828 | if (atomic_dec_and_test(&old_rd->refcount)) |
| 6835 | kfree(old_rd); | 6829 | free_rootdomain(old_rd); |
| 6836 | } | 6830 | } |
| 6837 | 6831 | ||
| 6838 | atomic_inc(&rd->refcount); | 6832 | atomic_inc(&rd->refcount); |
| 6839 | rq->rd = rd; | 6833 | rq->rd = rd; |
| 6840 | 6834 | ||
| 6841 | cpu_set(rq->cpu, rd->span); | 6835 | cpumask_set_cpu(rq->cpu, rd->span); |
| 6842 | if (cpu_isset(rq->cpu, cpu_online_map)) | 6836 | if (cpumask_test_cpu(rq->cpu, cpu_online_mask)) |
| 6843 | set_rq_online(rq); | 6837 | set_rq_online(rq); |
| 6844 | 6838 | ||
| 6845 | spin_unlock_irqrestore(&rq->lock, flags); | 6839 | spin_unlock_irqrestore(&rq->lock, flags); |
| 6846 | } | 6840 | } |
| 6847 | 6841 | ||
| 6848 | static void init_rootdomain(struct root_domain *rd) | 6842 | static int init_rootdomain(struct root_domain *rd, bool bootmem) |
| 6849 | { | 6843 | { |
| 6850 | memset(rd, 0, sizeof(*rd)); | 6844 | memset(rd, 0, sizeof(*rd)); |
| 6851 | 6845 | ||
| 6852 | cpus_clear(rd->span); | 6846 | if (bootmem) { |
| 6853 | cpus_clear(rd->online); | 6847 | alloc_bootmem_cpumask_var(&def_root_domain.span); |
| 6848 | alloc_bootmem_cpumask_var(&def_root_domain.online); | ||
| 6849 | alloc_bootmem_cpumask_var(&def_root_domain.rto_mask); | ||
| 6850 | cpupri_init(&rd->cpupri, true); | ||
| 6851 | return 0; | ||
| 6852 | } | ||
| 6853 | |||
| 6854 | if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) | ||
| 6855 | goto free_rd; | ||
| 6856 | if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) | ||
| 6857 | goto free_span; | ||
| 6858 | if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) | ||
| 6859 | goto free_online; | ||
| 6854 | 6860 | ||
| 6855 | cpupri_init(&rd->cpupri); | 6861 | if (cpupri_init(&rd->cpupri, false) != 0) |
| 6862 | goto free_rto_mask; | ||
| 6863 | return 0; | ||
| 6864 | |||
| 6865 | free_rto_mask: | ||
| 6866 | free_cpumask_var(rd->rto_mask); | ||
| 6867 | free_online: | ||
| 6868 | free_cpumask_var(rd->online); | ||
| 6869 | free_span: | ||
| 6870 | free_cpumask_var(rd->span); | ||
| 6871 | free_rd: | ||
| 6872 | kfree(rd); | ||
| 6873 | return -ENOMEM; | ||
| 6856 | } | 6874 | } |
| 6857 | 6875 | ||
| 6858 | static void init_defrootdomain(void) | 6876 | static void init_defrootdomain(void) |
| 6859 | { | 6877 | { |
| 6860 | init_rootdomain(&def_root_domain); | 6878 | init_rootdomain(&def_root_domain, true); |
| 6879 | |||
| 6861 | atomic_set(&def_root_domain.refcount, 1); | 6880 | atomic_set(&def_root_domain.refcount, 1); |
| 6862 | } | 6881 | } |
| 6863 | 6882 | ||
| @@ -6869,7 +6888,10 @@ static struct root_domain *alloc_rootdomain(void) | |||
| 6869 | if (!rd) | 6888 | if (!rd) |
| 6870 | return NULL; | 6889 | return NULL; |
| 6871 | 6890 | ||
| 6872 | init_rootdomain(rd); | 6891 | if (init_rootdomain(rd, false) != 0) { |
| 6892 | kfree(rd); | ||
| 6893 | return NULL; | ||
| 6894 | } | ||
| 6873 | 6895 | ||
| 6874 | return rd; | 6896 | return rd; |
| 6875 | } | 6897 | } |
| @@ -6911,19 +6933,12 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) | |||
| 6911 | } | 6933 | } |
| 6912 | 6934 | ||
| 6913 | /* cpus with isolated domains */ | 6935 | /* cpus with isolated domains */ |
| 6914 | static cpumask_t cpu_isolated_map = CPU_MASK_NONE; | 6936 | static cpumask_var_t cpu_isolated_map; |
| 6915 | 6937 | ||
| 6916 | /* Setup the mask of cpus configured for isolated domains */ | 6938 | /* Setup the mask of cpus configured for isolated domains */ |
| 6917 | static int __init isolated_cpu_setup(char *str) | 6939 | static int __init isolated_cpu_setup(char *str) |
| 6918 | { | 6940 | { |
| 6919 | static int __initdata ints[NR_CPUS]; | 6941 | cpulist_parse(str, cpu_isolated_map); |
| 6920 | int i; | ||
| 6921 | |||
| 6922 | str = get_options(str, ARRAY_SIZE(ints), ints); | ||
| 6923 | cpus_clear(cpu_isolated_map); | ||
| 6924 | for (i = 1; i <= ints[0]; i++) | ||
| 6925 | if (ints[i] < NR_CPUS) | ||
| 6926 | cpu_set(ints[i], cpu_isolated_map); | ||
| 6927 | return 1; | 6942 | return 1; |
| 6928 | } | 6943 | } |
| 6929 | 6944 | ||
| @@ -6932,42 +6947,43 @@ __setup("isolcpus=", isolated_cpu_setup); | |||
| 6932 | /* | 6947 | /* |
| 6933 | * init_sched_build_groups takes the cpumask we wish to span, and a pointer | 6948 | * init_sched_build_groups takes the cpumask we wish to span, and a pointer |
| 6934 | * to a function which identifies what group(along with sched group) a CPU | 6949 | * to a function which identifies what group(along with sched group) a CPU |
| 6935 | * belongs to. The return value of group_fn must be a >= 0 and < NR_CPUS | 6950 | * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids |
| 6936 | * (due to the fact that we keep track of groups covered with a cpumask_t). | 6951 | * (due to the fact that we keep track of groups covered with a struct cpumask). |
| 6937 | * | 6952 | * |
| 6938 | * init_sched_build_groups will build a circular linked list of the groups | 6953 | * init_sched_build_groups will build a circular linked list of the groups |
| 6939 | * covered by the given span, and will set each group's ->cpumask correctly, | 6954 | * covered by the given span, and will set each group's ->cpumask correctly, |
| 6940 | * and ->cpu_power to 0. | 6955 | * and ->cpu_power to 0. |
| 6941 | */ | 6956 | */ |
| 6942 | static void | 6957 | static void |
| 6943 | init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map, | 6958 | init_sched_build_groups(const struct cpumask *span, |
| 6944 | int (*group_fn)(int cpu, const cpumask_t *cpu_map, | 6959 | const struct cpumask *cpu_map, |
| 6960 | int (*group_fn)(int cpu, const struct cpumask *cpu_map, | ||
| 6945 | struct sched_group **sg, | 6961 | struct sched_group **sg, |
| 6946 | cpumask_t *tmpmask), | 6962 | struct cpumask *tmpmask), |
| 6947 | cpumask_t *covered, cpumask_t *tmpmask) | 6963 | struct cpumask *covered, struct cpumask *tmpmask) |
| 6948 | { | 6964 | { |
| 6949 | struct sched_group *first = NULL, *last = NULL; | 6965 | struct sched_group *first = NULL, *last = NULL; |
| 6950 | int i; | 6966 | int i; |
| 6951 | 6967 | ||
| 6952 | cpus_clear(*covered); | 6968 | cpumask_clear(covered); |
| 6953 | 6969 | ||
| 6954 | for_each_cpu_mask_nr(i, *span) { | 6970 | for_each_cpu(i, span) { |
| 6955 | struct sched_group *sg; | 6971 | struct sched_group *sg; |
| 6956 | int group = group_fn(i, cpu_map, &sg, tmpmask); | 6972 | int group = group_fn(i, cpu_map, &sg, tmpmask); |
| 6957 | int j; | 6973 | int j; |
| 6958 | 6974 | ||
| 6959 | if (cpu_isset(i, *covered)) | 6975 | if (cpumask_test_cpu(i, covered)) |
| 6960 | continue; | 6976 | continue; |
| 6961 | 6977 | ||
| 6962 | cpus_clear(sg->cpumask); | 6978 | cpumask_clear(sched_group_cpus(sg)); |
| 6963 | sg->__cpu_power = 0; | 6979 | sg->__cpu_power = 0; |
| 6964 | 6980 | ||
| 6965 | for_each_cpu_mask_nr(j, *span) { | 6981 | for_each_cpu(j, span) { |
| 6966 | if (group_fn(j, cpu_map, NULL, tmpmask) != group) | 6982 | if (group_fn(j, cpu_map, NULL, tmpmask) != group) |
| 6967 | continue; | 6983 | continue; |
| 6968 | 6984 | ||
| 6969 | cpu_set(j, *covered); | 6985 | cpumask_set_cpu(j, covered); |
| 6970 | cpu_set(j, sg->cpumask); | 6986 | cpumask_set_cpu(j, sched_group_cpus(sg)); |
| 6971 | } | 6987 | } |
| 6972 | if (!first) | 6988 | if (!first) |
| 6973 | first = sg; | 6989 | first = sg; |
| @@ -7031,9 +7047,10 @@ static int find_next_best_node(int node, nodemask_t *used_nodes) | |||
| 7031 | * should be one that prevents unnecessary balancing, but also spreads tasks | 7047 | * should be one that prevents unnecessary balancing, but also spreads tasks |
| 7032 | * out optimally. | 7048 | * out optimally. |
| 7033 | */ | 7049 | */ |
| 7034 | static void sched_domain_node_span(int node, cpumask_t *span) | 7050 | static void sched_domain_node_span(int node, struct cpumask *span) |
| 7035 | { | 7051 | { |
| 7036 | nodemask_t used_nodes; | 7052 | nodemask_t used_nodes; |
| 7053 | /* FIXME: use cpumask_of_node() */ | ||
| 7037 | node_to_cpumask_ptr(nodemask, node); | 7054 | node_to_cpumask_ptr(nodemask, node); |
| 7038 | int i; | 7055 | int i; |
| 7039 | 7056 | ||
| @@ -7055,18 +7072,33 @@ static void sched_domain_node_span(int node, cpumask_t *span) | |||
| 7055 | int sched_smt_power_savings = 0, sched_mc_power_savings = 0; | 7072 | int sched_smt_power_savings = 0, sched_mc_power_savings = 0; |
| 7056 | 7073 | ||
| 7057 | /* | 7074 | /* |
| 7075 | * The cpus mask in sched_group and sched_domain hangs off the end. | ||
| 7076 | * FIXME: use cpumask_var_t or dynamic percpu alloc to avoid wasting space | ||
| 7077 | * for nr_cpu_ids < CONFIG_NR_CPUS. | ||
| 7078 | */ | ||
| 7079 | struct static_sched_group { | ||
| 7080 | struct sched_group sg; | ||
| 7081 | DECLARE_BITMAP(cpus, CONFIG_NR_CPUS); | ||
| 7082 | }; | ||
| 7083 | |||
| 7084 | struct static_sched_domain { | ||
| 7085 | struct sched_domain sd; | ||
| 7086 | DECLARE_BITMAP(span, CONFIG_NR_CPUS); | ||
| 7087 | }; | ||
| 7088 | |||
| 7089 | /* | ||
| 7058 | * SMT sched-domains: | 7090 | * SMT sched-domains: |
| 7059 | */ | 7091 | */ |
| 7060 | #ifdef CONFIG_SCHED_SMT | 7092 | #ifdef CONFIG_SCHED_SMT |
| 7061 | static DEFINE_PER_CPU(struct sched_domain, cpu_domains); | 7093 | static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); |
| 7062 | static DEFINE_PER_CPU(struct sched_group, sched_group_cpus); | 7094 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus); |
| 7063 | 7095 | ||
| 7064 | static int | 7096 | static int |
| 7065 | cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7097 | cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, |
| 7066 | cpumask_t *unused) | 7098 | struct sched_group **sg, struct cpumask *unused) |
| 7067 | { | 7099 | { |
| 7068 | if (sg) | 7100 | if (sg) |
| 7069 | *sg = &per_cpu(sched_group_cpus, cpu); | 7101 | *sg = &per_cpu(sched_group_cpus, cpu).sg; |
| 7070 | return cpu; | 7102 | return cpu; |
| 7071 | } | 7103 | } |
| 7072 | #endif /* CONFIG_SCHED_SMT */ | 7104 | #endif /* CONFIG_SCHED_SMT */ |
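The static_sched_group and static_sched_domain wrappers above exist because, per the FIXME comment, the cpus mask in sched_group and sched_domain now hangs off the end of the structure, so statically defined instances have to reserve that trailing space themselves with DECLARE_BITMAP. A small user-space model of the same layout; struct group, struct static_group and group_cpus() are illustrative names, and the zero-length array is the GNU extension the kernel itself relies on:

    #include <stdio.h>
    #include <string.h>

    #define MAX_CPUS      64
    #define BITS_PER_LONG (8 * sizeof(unsigned long))
    #define BITMAP_WORDS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    /* The "real" structure ends with zero-sized storage for its cpumask... */
    struct group {
        unsigned int power;
        unsigned long cpus[0];          /* mask hangs off the end */
    };

    /* ...so a statically allocated instance wraps it and reserves the space,
     * much as struct static_sched_group does with DECLARE_BITMAP(). */
    struct static_group {
        struct group g;
        unsigned long cpus[BITMAP_WORDS(MAX_CPUS)];
    };

    static unsigned long *group_cpus(struct group *g)
    {
        return g->cpus;                 /* points into the trailing storage */
    }

    static struct static_group sg;

    int main(void)
    {
        memset(&sg, 0, sizeof(sg));
        group_cpus(&sg.g)[0] |= 1UL << 2;       /* "CPU 2 is in this group" */
        printf("group mask word0 = %#lx\n", group_cpus(&sg.g)[0]);
        return 0;
    }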
| @@ -7075,56 +7107,55 @@ cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | |||
| 7075 | * multi-core sched-domains: | 7107 | * multi-core sched-domains: |
| 7076 | */ | 7108 | */ |
| 7077 | #ifdef CONFIG_SCHED_MC | 7109 | #ifdef CONFIG_SCHED_MC |
| 7078 | static DEFINE_PER_CPU(struct sched_domain, core_domains); | 7110 | static DEFINE_PER_CPU(struct static_sched_domain, core_domains); |
| 7079 | static DEFINE_PER_CPU(struct sched_group, sched_group_core); | 7111 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_core); |
| 7080 | #endif /* CONFIG_SCHED_MC */ | 7112 | #endif /* CONFIG_SCHED_MC */ |
| 7081 | 7113 | ||
| 7082 | #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) | 7114 | #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) |
| 7083 | static int | 7115 | static int |
| 7084 | cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7116 | cpu_to_core_group(int cpu, const struct cpumask *cpu_map, |
| 7085 | cpumask_t *mask) | 7117 | struct sched_group **sg, struct cpumask *mask) |
| 7086 | { | 7118 | { |
| 7087 | int group; | 7119 | int group; |
| 7088 | 7120 | ||
| 7089 | *mask = per_cpu(cpu_sibling_map, cpu); | 7121 | cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map); |
| 7090 | cpus_and(*mask, *mask, *cpu_map); | 7122 | group = cpumask_first(mask); |
| 7091 | group = first_cpu(*mask); | ||
| 7092 | if (sg) | 7123 | if (sg) |
| 7093 | *sg = &per_cpu(sched_group_core, group); | 7124 | *sg = &per_cpu(sched_group_core, group).sg; |
| 7094 | return group; | 7125 | return group; |
| 7095 | } | 7126 | } |
| 7096 | #elif defined(CONFIG_SCHED_MC) | 7127 | #elif defined(CONFIG_SCHED_MC) |
| 7097 | static int | 7128 | static int |
| 7098 | cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7129 | cpu_to_core_group(int cpu, const struct cpumask *cpu_map, |
| 7099 | cpumask_t *unused) | 7130 | struct sched_group **sg, struct cpumask *unused) |
| 7100 | { | 7131 | { |
| 7101 | if (sg) | 7132 | if (sg) |
| 7102 | *sg = &per_cpu(sched_group_core, cpu); | 7133 | *sg = &per_cpu(sched_group_core, cpu).sg; |
| 7103 | return cpu; | 7134 | return cpu; |
| 7104 | } | 7135 | } |
| 7105 | #endif | 7136 | #endif |
| 7106 | 7137 | ||
| 7107 | static DEFINE_PER_CPU(struct sched_domain, phys_domains); | 7138 | static DEFINE_PER_CPU(struct static_sched_domain, phys_domains); |
| 7108 | static DEFINE_PER_CPU(struct sched_group, sched_group_phys); | 7139 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys); |
| 7109 | 7140 | ||
| 7110 | static int | 7141 | static int |
| 7111 | cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7142 | cpu_to_phys_group(int cpu, const struct cpumask *cpu_map, |
| 7112 | cpumask_t *mask) | 7143 | struct sched_group **sg, struct cpumask *mask) |
| 7113 | { | 7144 | { |
| 7114 | int group; | 7145 | int group; |
| 7115 | #ifdef CONFIG_SCHED_MC | 7146 | #ifdef CONFIG_SCHED_MC |
| 7147 | /* FIXME: Use cpu_coregroup_mask. */ | ||
| 7116 | *mask = cpu_coregroup_map(cpu); | 7148 | *mask = cpu_coregroup_map(cpu); |
| 7117 | cpus_and(*mask, *mask, *cpu_map); | 7149 | cpus_and(*mask, *mask, *cpu_map); |
| 7118 | group = first_cpu(*mask); | 7150 | group = cpumask_first(mask); |
| 7119 | #elif defined(CONFIG_SCHED_SMT) | 7151 | #elif defined(CONFIG_SCHED_SMT) |
| 7120 | *mask = per_cpu(cpu_sibling_map, cpu); | 7152 | cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map); |
| 7121 | cpus_and(*mask, *mask, *cpu_map); | 7153 | group = cpumask_first(mask); |
| 7122 | group = first_cpu(*mask); | ||
| 7123 | #else | 7154 | #else |
| 7124 | group = cpu; | 7155 | group = cpu; |
| 7125 | #endif | 7156 | #endif |
| 7126 | if (sg) | 7157 | if (sg) |
| 7127 | *sg = &per_cpu(sched_group_phys, group); | 7158 | *sg = &per_cpu(sched_group_phys, group).sg; |
| 7128 | return group; | 7159 | return group; |
| 7129 | } | 7160 | } |
| 7130 | 7161 | ||
| @@ -7138,19 +7169,21 @@ static DEFINE_PER_CPU(struct sched_domain, node_domains); | |||
| 7138 | static struct sched_group ***sched_group_nodes_bycpu; | 7169 | static struct sched_group ***sched_group_nodes_bycpu; |
| 7139 | 7170 | ||
| 7140 | static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); | 7171 | static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); |
| 7141 | static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes); | 7172 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes); |
| 7142 | 7173 | ||
| 7143 | static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map, | 7174 | static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map, |
| 7144 | struct sched_group **sg, cpumask_t *nodemask) | 7175 | struct sched_group **sg, |
| 7176 | struct cpumask *nodemask) | ||
| 7145 | { | 7177 | { |
| 7146 | int group; | 7178 | int group; |
| 7179 | /* FIXME: use cpumask_of_node */ | ||
| 7180 | node_to_cpumask_ptr(pnodemask, cpu_to_node(cpu)); | ||
| 7147 | 7181 | ||
| 7148 | *nodemask = node_to_cpumask(cpu_to_node(cpu)); | 7182 | cpumask_and(nodemask, pnodemask, cpu_map); |
| 7149 | cpus_and(*nodemask, *nodemask, *cpu_map); | 7183 | group = cpumask_first(nodemask); |
| 7150 | group = first_cpu(*nodemask); | ||
| 7151 | 7184 | ||
| 7152 | if (sg) | 7185 | if (sg) |
| 7153 | *sg = &per_cpu(sched_group_allnodes, group); | 7186 | *sg = &per_cpu(sched_group_allnodes, group).sg; |
| 7154 | return group; | 7187 | return group; |
| 7155 | } | 7188 | } |
| 7156 | 7189 | ||
| @@ -7162,11 +7195,11 @@ static void init_numa_sched_groups_power(struct sched_group *group_head) | |||
| 7162 | if (!sg) | 7195 | if (!sg) |
| 7163 | return; | 7196 | return; |
| 7164 | do { | 7197 | do { |
| 7165 | for_each_cpu_mask_nr(j, sg->cpumask) { | 7198 | for_each_cpu(j, sched_group_cpus(sg)) { |
| 7166 | struct sched_domain *sd; | 7199 | struct sched_domain *sd; |
| 7167 | 7200 | ||
| 7168 | sd = &per_cpu(phys_domains, j); | 7201 | sd = &per_cpu(phys_domains, j).sd; |
| 7169 | if (j != first_cpu(sd->groups->cpumask)) { | 7202 | if (j != cpumask_first(sched_group_cpus(sd->groups))) { |
| 7170 | /* | 7203 | /* |
| 7171 | * Only add "power" once for each | 7204 | * Only add "power" once for each |
| 7172 | * physical package. | 7205 | * physical package. |
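init_numa_sched_groups_power() above switches from for_each_cpu_mask_nr() over an embedded cpumask_t to for_each_cpu() over sched_group_cpus(sg); either way the loop visits each set bit of the mask in ascending CPU order. The same iteration over a single-word bitmap in plain C, with for_each_set_cpu() as an ad-hoc helper rather than a kernel macro:

    #include <stdio.h>

    #define MAX_CPUS 64

    /* Walk every set bit of a one-word "cpumask", lowest CPU first. */
    static void for_each_set_cpu(unsigned long mask, void (*fn)(int cpu))
    {
        for (int cpu = 0; cpu < MAX_CPUS; cpu++)
            if (mask & (1UL << cpu))
                fn(cpu);
    }

    static void report(int cpu)
    {
        printf("visiting cpu%d\n", cpu);
    }

    int main(void)
    {
        unsigned long mask = (1UL << 1) | (1UL << 4) | (1UL << 5);

        for_each_set_cpu(mask, report);
        return 0;
    }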
| @@ -7183,11 +7216,12 @@ static void init_numa_sched_groups_power(struct sched_group *group_head) | |||
| 7183 | 7216 | ||
| 7184 | #ifdef CONFIG_NUMA | 7217 | #ifdef CONFIG_NUMA |
| 7185 | /* Free memory allocated for various sched_group structures */ | 7218 | /* Free memory allocated for various sched_group structures */ |
| 7186 | static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) | 7219 | static void free_sched_groups(const struct cpumask *cpu_map, |
| 7220 | struct cpumask *nodemask) | ||
| 7187 | { | 7221 | { |
| 7188 | int cpu, i; | 7222 | int cpu, i; |
| 7189 | 7223 | ||
| 7190 | for_each_cpu_mask_nr(cpu, *cpu_map) { | 7224 | for_each_cpu(cpu, cpu_map) { |
| 7191 | struct sched_group **sched_group_nodes | 7225 | struct sched_group **sched_group_nodes |
| 7192 | = sched_group_nodes_bycpu[cpu]; | 7226 | = sched_group_nodes_bycpu[cpu]; |
| 7193 | 7227 | ||
| @@ -7196,10 +7230,11 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) | |||
| 7196 | 7230 | ||
| 7197 | for (i = 0; i < nr_node_ids; i++) { | 7231 | for (i = 0; i < nr_node_ids; i++) { |
| 7198 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; | 7232 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; |
| 7233 | /* FIXME: Use cpumask_of_node */ | ||
| 7234 | node_to_cpumask_ptr(pnodemask, i); | ||
| 7199 | 7235 | ||
| 7200 | *nodemask = node_to_cpumask(i); | 7236 | cpus_and(*nodemask, *pnodemask, *cpu_map); |
| 7201 | cpus_and(*nodemask, *nodemask, *cpu_map); | 7237 | if (cpumask_empty(nodemask)) |
| 7202 | if (cpus_empty(*nodemask)) | ||
| 7203 | continue; | 7238 | continue; |
| 7204 | 7239 | ||
| 7205 | if (sg == NULL) | 7240 | if (sg == NULL) |
| @@ -7217,7 +7252,8 @@ next_sg: | |||
| 7217 | } | 7252 | } |
| 7218 | } | 7253 | } |
| 7219 | #else /* !CONFIG_NUMA */ | 7254 | #else /* !CONFIG_NUMA */ |
| 7220 | static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) | 7255 | static void free_sched_groups(const struct cpumask *cpu_map, |
| 7256 | struct cpumask *nodemask) | ||
| 7221 | { | 7257 | { |
| 7222 | } | 7258 | } |
| 7223 | #endif /* CONFIG_NUMA */ | 7259 | #endif /* CONFIG_NUMA */ |
| @@ -7243,7 +7279,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) | |||
| 7243 | 7279 | ||
| 7244 | WARN_ON(!sd || !sd->groups); | 7280 | WARN_ON(!sd || !sd->groups); |
| 7245 | 7281 | ||
| 7246 | if (cpu != first_cpu(sd->groups->cpumask)) | 7282 | if (cpu != cpumask_first(sched_group_cpus(sd->groups))) |
| 7247 | return; | 7283 | return; |
| 7248 | 7284 | ||
| 7249 | child = sd->child; | 7285 | child = sd->child; |
| @@ -7308,40 +7344,6 @@ SD_INIT_FUNC(CPU) | |||
| 7308 | SD_INIT_FUNC(MC) | 7344 | SD_INIT_FUNC(MC) |
| 7309 | #endif | 7345 | #endif |
| 7310 | 7346 | ||
| 7311 | /* | ||
| 7312 | * To minimize stack usage kmalloc room for cpumasks and share the | ||
| 7313 | * space as the usage in build_sched_domains() dictates. Used only | ||
| 7314 | * if the amount of space is significant. | ||
| 7315 | */ | ||
| 7316 | struct allmasks { | ||
| 7317 | cpumask_t tmpmask; /* make this one first */ | ||
| 7318 | union { | ||
| 7319 | cpumask_t nodemask; | ||
| 7320 | cpumask_t this_sibling_map; | ||
| 7321 | cpumask_t this_core_map; | ||
| 7322 | }; | ||
| 7323 | cpumask_t send_covered; | ||
| 7324 | |||
| 7325 | #ifdef CONFIG_NUMA | ||
| 7326 | cpumask_t domainspan; | ||
| 7327 | cpumask_t covered; | ||
| 7328 | cpumask_t notcovered; | ||
| 7329 | #endif | ||
| 7330 | }; | ||
| 7331 | |||
| 7332 | #if NR_CPUS > 128 | ||
| 7333 | #define SCHED_CPUMASK_ALLOC 1 | ||
| 7334 | #define SCHED_CPUMASK_FREE(v) kfree(v) | ||
| 7335 | #define SCHED_CPUMASK_DECLARE(v) struct allmasks *v | ||
| 7336 | #else | ||
| 7337 | #define SCHED_CPUMASK_ALLOC 0 | ||
| 7338 | #define SCHED_CPUMASK_FREE(v) | ||
| 7339 | #define SCHED_CPUMASK_DECLARE(v) struct allmasks _v, *v = &_v | ||
| 7340 | #endif | ||
| 7341 | |||
| 7342 | #define SCHED_CPUMASK_VAR(v, a) cpumask_t *v = (cpumask_t *) \ | ||
| 7343 | ((unsigned long)(a) + offsetof(struct allmasks, v)) | ||
| 7344 | |||
| 7345 | static int default_relax_domain_level = -1; | 7347 | static int default_relax_domain_level = -1; |
| 7346 | 7348 | ||
| 7347 | static int __init setup_relax_domain_level(char *str) | 7349 | static int __init setup_relax_domain_level(char *str) |
| @@ -7381,17 +7383,38 @@ static void set_domain_attribute(struct sched_domain *sd, | |||
| 7381 | * Build sched domains for a given set of cpus and attach the sched domains | 7383 | * Build sched domains for a given set of cpus and attach the sched domains |
| 7382 | * to the individual cpus | 7384 | * to the individual cpus |
| 7383 | */ | 7385 | */ |
| 7384 | static int __build_sched_domains(const cpumask_t *cpu_map, | 7386 | static int __build_sched_domains(const struct cpumask *cpu_map, |
| 7385 | struct sched_domain_attr *attr) | 7387 | struct sched_domain_attr *attr) |
| 7386 | { | 7388 | { |
| 7387 | int i; | 7389 | int i, err = -ENOMEM; |
| 7388 | struct root_domain *rd; | 7390 | struct root_domain *rd; |
| 7389 | SCHED_CPUMASK_DECLARE(allmasks); | 7391 | cpumask_var_t nodemask, this_sibling_map, this_core_map, send_covered, |
| 7390 | cpumask_t *tmpmask; | 7392 | tmpmask; |
| 7391 | #ifdef CONFIG_NUMA | 7393 | #ifdef CONFIG_NUMA |
| 7394 | cpumask_var_t domainspan, covered, notcovered; | ||
| 7392 | struct sched_group **sched_group_nodes = NULL; | 7395 | struct sched_group **sched_group_nodes = NULL; |
| 7393 | int sd_allnodes = 0; | 7396 | int sd_allnodes = 0; |
| 7394 | 7397 | ||
| 7398 | if (!alloc_cpumask_var(&domainspan, GFP_KERNEL)) | ||
| 7399 | goto out; | ||
| 7400 | if (!alloc_cpumask_var(&covered, GFP_KERNEL)) | ||
| 7401 | goto free_domainspan; | ||
| 7402 | if (!alloc_cpumask_var(¬covered, GFP_KERNEL)) | ||
| 7403 | goto free_covered; | ||
| 7404 | #endif | ||
| 7405 | |||
| 7406 | if (!alloc_cpumask_var(&nodemask, GFP_KERNEL)) | ||
| 7407 | goto free_notcovered; | ||
| 7408 | if (!alloc_cpumask_var(&this_sibling_map, GFP_KERNEL)) | ||
| 7409 | goto free_nodemask; | ||
| 7410 | if (!alloc_cpumask_var(&this_core_map, GFP_KERNEL)) | ||
| 7411 | goto free_this_sibling_map; | ||
| 7412 | if (!alloc_cpumask_var(&send_covered, GFP_KERNEL)) | ||
| 7413 | goto free_this_core_map; | ||
| 7414 | if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) | ||
| 7415 | goto free_send_covered; | ||
| 7416 | |||
| 7417 | #ifdef CONFIG_NUMA | ||
| 7395 | /* | 7418 | /* |
| 7396 | * Allocate the per-node list of sched groups | 7419 | * Allocate the per-node list of sched groups |
| 7397 | */ | 7420 | */ |
| @@ -7399,55 +7422,37 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
| 7399 | GFP_KERNEL); | 7422 | GFP_KERNEL); |
| 7400 | if (!sched_group_nodes) { | 7423 | if (!sched_group_nodes) { |
| 7401 | printk(KERN_WARNING "Can not alloc sched group node list\n"); | 7424 | printk(KERN_WARNING "Can not alloc sched group node list\n"); |
| 7402 | return -ENOMEM; | 7425 | goto free_tmpmask; |
| 7403 | } | 7426 | } |
| 7404 | #endif | 7427 | #endif |
| 7405 | 7428 | ||
| 7406 | rd = alloc_rootdomain(); | 7429 | rd = alloc_rootdomain(); |
| 7407 | if (!rd) { | 7430 | if (!rd) { |
| 7408 | printk(KERN_WARNING "Cannot alloc root domain\n"); | 7431 | printk(KERN_WARNING "Cannot alloc root domain\n"); |
| 7409 | #ifdef CONFIG_NUMA | 7432 | goto free_sched_groups; |
| 7410 | kfree(sched_group_nodes); | ||
| 7411 | #endif | ||
| 7412 | return -ENOMEM; | ||
| 7413 | } | 7433 | } |
| 7414 | 7434 | ||
| 7415 | #if SCHED_CPUMASK_ALLOC | ||
| 7416 | /* get space for all scratch cpumask variables */ | ||
| 7417 | allmasks = kmalloc(sizeof(*allmasks), GFP_KERNEL); | ||
| 7418 | if (!allmasks) { | ||
| 7419 | printk(KERN_WARNING "Cannot alloc cpumask array\n"); | ||
| 7420 | kfree(rd); | ||
| 7421 | #ifdef CONFIG_NUMA | ||
| 7422 | kfree(sched_group_nodes); | ||
| 7423 | #endif | ||
| 7424 | return -ENOMEM; | ||
| 7425 | } | ||
| 7426 | #endif | ||
| 7427 | tmpmask = (cpumask_t *)allmasks; | ||
| 7428 | |||
| 7429 | |||
| 7430 | #ifdef CONFIG_NUMA | 7435 | #ifdef CONFIG_NUMA |
| 7431 | sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes; | 7436 | sched_group_nodes_bycpu[cpumask_first(cpu_map)] = sched_group_nodes; |
| 7432 | #endif | 7437 | #endif |
| 7433 | 7438 | ||
| 7434 | /* | 7439 | /* |
| 7435 | * Set up domains for cpus specified by the cpu_map. | 7440 | * Set up domains for cpus specified by the cpu_map. |
| 7436 | */ | 7441 | */ |
| 7437 | for_each_cpu_mask_nr(i, *cpu_map) { | 7442 | for_each_cpu(i, cpu_map) { |
| 7438 | struct sched_domain *sd = NULL, *p; | 7443 | struct sched_domain *sd = NULL, *p; |
| 7439 | SCHED_CPUMASK_VAR(nodemask, allmasks); | ||
| 7440 | 7444 | ||
| 7445 | /* FIXME: use cpumask_of_node */ | ||
| 7441 | *nodemask = node_to_cpumask(cpu_to_node(i)); | 7446 | *nodemask = node_to_cpumask(cpu_to_node(i)); |
| 7442 | cpus_and(*nodemask, *nodemask, *cpu_map); | 7447 | cpus_and(*nodemask, *nodemask, *cpu_map); |
| 7443 | 7448 | ||
| 7444 | #ifdef CONFIG_NUMA | 7449 | #ifdef CONFIG_NUMA |
| 7445 | if (cpus_weight(*cpu_map) > | 7450 | if (cpumask_weight(cpu_map) > |
| 7446 | SD_NODES_PER_DOMAIN*cpus_weight(*nodemask)) { | 7451 | SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) { |
| 7447 | sd = &per_cpu(allnodes_domains, i); | 7452 | sd = &per_cpu(allnodes_domains, i); |
| 7448 | SD_INIT(sd, ALLNODES); | 7453 | SD_INIT(sd, ALLNODES); |
| 7449 | set_domain_attribute(sd, attr); | 7454 | set_domain_attribute(sd, attr); |
| 7450 | sd->span = *cpu_map; | 7455 | cpumask_copy(sched_domain_span(sd), cpu_map); |
| 7451 | cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask); | 7456 | cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask); |
| 7452 | p = sd; | 7457 | p = sd; |
| 7453 | sd_allnodes = 1; | 7458 | sd_allnodes = 1; |
| @@ -7457,18 +7462,19 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
| 7457 | sd = &per_cpu(node_domains, i); | 7462 | sd = &per_cpu(node_domains, i); |
| 7458 | SD_INIT(sd, NODE); | 7463 | SD_INIT(sd, NODE); |
| 7459 | set_domain_attribute(sd, attr); | 7464 | set_domain_attribute(sd, attr); |
| 7460 | sched_domain_node_span(cpu_to_node(i), &sd->span); | 7465 | sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd)); |
| 7461 | sd->parent = p; | 7466 | sd->parent = p; |
| 7462 | if (p) | 7467 | if (p) |
| 7463 | p->child = sd; | 7468 | p->child = sd; |
| 7464 | cpus_and(sd->span, sd->span, *cpu_map); | 7469 | cpumask_and(sched_domain_span(sd), |
| 7470 | sched_domain_span(sd), cpu_map); | ||
| 7465 | #endif | 7471 | #endif |
| 7466 | 7472 | ||
| 7467 | p = sd; | 7473 | p = sd; |
| 7468 | sd = &per_cpu(phys_domains, i); | 7474 | sd = &per_cpu(phys_domains, i).sd; |
| 7469 | SD_INIT(sd, CPU); | 7475 | SD_INIT(sd, CPU); |
| 7470 | set_domain_attribute(sd, attr); | 7476 | set_domain_attribute(sd, attr); |
| 7471 | sd->span = *nodemask; | 7477 | cpumask_copy(sched_domain_span(sd), nodemask); |
| 7472 | sd->parent = p; | 7478 | sd->parent = p; |
| 7473 | if (p) | 7479 | if (p) |
| 7474 | p->child = sd; | 7480 | p->child = sd; |
| @@ -7476,11 +7482,12 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
| 7476 | 7482 | ||
| 7477 | #ifdef CONFIG_SCHED_MC | 7483 | #ifdef CONFIG_SCHED_MC |
| 7478 | p = sd; | 7484 | p = sd; |
| 7479 | sd = &per_cpu(core_domains, i); | 7485 | sd = &per_cpu(core_domains, i).sd; |
| 7480 | SD_INIT(sd, MC); | 7486 | SD_INIT(sd, MC); |
| 7481 | set_domain_attribute(sd, attr); | 7487 | set_domain_attribute(sd, attr); |
| 7482 | sd->span = cpu_coregroup_map(i); | 7488 | *sched_domain_span(sd) = cpu_coregroup_map(i); |
| 7483 | cpus_and(sd->span, sd->span, *cpu_map); | 7489 | cpumask_and(sched_domain_span(sd), |
| 7490 | sched_domain_span(sd), cpu_map); | ||
| 7484 | sd->parent = p; | 7491 | sd->parent = p; |
| 7485 | p->child = sd; | 7492 | p->child = sd; |
| 7486 | cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask); | 7493 | cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask); |
| @@ -7488,11 +7495,11 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
| 7488 | 7495 | ||
| 7489 | #ifdef CONFIG_SCHED_SMT | 7496 | #ifdef CONFIG_SCHED_SMT |
| 7490 | p = sd; | 7497 | p = sd; |
| 7491 | sd = &per_cpu(cpu_domains, i); | 7498 | sd = &per_cpu(cpu_domains, i).sd; |
| 7492 | SD_INIT(sd, SIBLING); | 7499 | SD_INIT(sd, SIBLING); |
| 7493 | set_domain_attribute(sd, attr); | 7500 | set_domain_attribute(sd, attr); |
| 7494 | sd->span = per_cpu(cpu_sibling_map, i); | 7501 | cpumask_and(sched_domain_span(sd), |
| 7495 | cpus_and(sd->span, sd->span, *cpu_map); | 7502 | &per_cpu(cpu_sibling_map, i), cpu_map); |
| 7496 | sd->parent = p; | 7503 | sd->parent = p; |
| 7497 | p->child = sd; | 7504 | p->child = sd; |
| 7498 | cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask); | 7505 | cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask); |
| @@ -7501,13 +7508,10 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
| 7501 | 7508 | ||
| 7502 | #ifdef CONFIG_SCHED_SMT | 7509 | #ifdef CONFIG_SCHED_SMT |
| 7503 | /* Set up CPU (sibling) groups */ | 7510 | /* Set up CPU (sibling) groups */ |
| 7504 | for_each_cpu_mask_nr(i, *cpu_map) { | 7511 | for_each_cpu(i, cpu_map) { |
| 7505 | SCHED_CPUMASK_VAR(this_sibling_map, allmasks); | 7512 | cpumask_and(this_sibling_map, |
| 7506 | SCHED_CPUMASK_VAR(send_covered, allmasks); | 7513 | &per_cpu(cpu_sibling_map, i), cpu_map); |
| 7507 | 7514 | if (i != cpumask_first(this_sibling_map)) | |
| 7508 | *this_sibling_map = per_cpu(cpu_sibling_map, i); | ||
| 7509 | cpus_and(*this_sibling_map, *this_sibling_map, *cpu_map); | ||
| 7510 | if (i != first_cpu(*this_sibling_map)) | ||
| 7511 | continue; | 7515 | continue; |
| 7512 | 7516 | ||
| 7513 | init_sched_build_groups(this_sibling_map, cpu_map, | 7517 | init_sched_build_groups(this_sibling_map, cpu_map, |
| @@ -7518,13 +7522,11 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
| 7518 | 7522 | ||
| 7519 | #ifdef CONFIG_SCHED_MC | 7523 | #ifdef CONFIG_SCHED_MC |
| 7520 | /* Set up multi-core groups */ | 7524 | /* Set up multi-core groups */ |
| 7521 | for_each_cpu_mask_nr(i, *cpu_map) { | 7525 | for_each_cpu(i, cpu_map) { |
| 7522 | SCHED_CPUMASK_VAR(this_core_map, allmasks); | 7526 | /* FIXME: Use cpu_coregroup_mask */ |
| 7523 | SCHED_CPUMASK_VAR(send_covered, allmasks); | ||
| 7524 | |||
| 7525 | *this_core_map = cpu_coregroup_map(i); | 7527 | *this_core_map = cpu_coregroup_map(i); |
| 7526 | cpus_and(*this_core_map, *this_core_map, *cpu_map); | 7528 | cpus_and(*this_core_map, *this_core_map, *cpu_map); |
| 7527 | if (i != first_cpu(*this_core_map)) | 7529 | if (i != cpumask_first(this_core_map)) |
| 7528 | continue; | 7530 | continue; |
| 7529 | 7531 | ||
| 7530 | init_sched_build_groups(this_core_map, cpu_map, | 7532 | init_sched_build_groups(this_core_map, cpu_map, |
| @@ -7535,12 +7537,10 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
| 7535 | 7537 | ||
| 7536 | /* Set up physical groups */ | 7538 | /* Set up physical groups */ |
| 7537 | for (i = 0; i < nr_node_ids; i++) { | 7539 | for (i = 0; i < nr_node_ids; i++) { |
| 7538 | SCHED_CPUMASK_VAR(nodemask, allmasks); | 7540 | /* FIXME: Use cpumask_of_node */ |
| 7539 | SCHED_CPUMASK_VAR(send_covered, allmasks); | ||
| 7540 | |||
| 7541 | *nodemask = node_to_cpumask(i); | 7541 | *nodemask = node_to_cpumask(i); |
| 7542 | cpus_and(*nodemask, *nodemask, *cpu_map); | 7542 | cpus_and(*nodemask, *nodemask, *cpu_map); |
| 7543 | if (cpus_empty(*nodemask)) | 7543 | if (cpumask_empty(nodemask)) |
| 7544 | continue; | 7544 | continue; |
| 7545 | 7545 | ||
| 7546 | init_sched_build_groups(nodemask, cpu_map, | 7546 | init_sched_build_groups(nodemask, cpu_map, |
| @@ -7551,8 +7551,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
| 7551 | #ifdef CONFIG_NUMA | 7551 | #ifdef CONFIG_NUMA |
| 7552 | /* Set up node groups */ | 7552 | /* Set up node groups */ |
| 7553 | if (sd_allnodes) { | 7553 | if (sd_allnodes) { |
| 7554 | SCHED_CPUMASK_VAR(send_covered, allmasks); | ||
| 7555 | |||
| 7556 | init_sched_build_groups(cpu_map, cpu_map, | 7554 | init_sched_build_groups(cpu_map, cpu_map, |
| 7557 | &cpu_to_allnodes_group, | 7555 | &cpu_to_allnodes_group, |
| 7558 | send_covered, tmpmask); | 7556 | send_covered, tmpmask); |
| @@ -7561,58 +7559,58 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
| 7561 | for (i = 0; i < nr_node_ids; i++) { | 7559 | for (i = 0; i < nr_node_ids; i++) { |
| 7562 | /* Set up node groups */ | 7560 | /* Set up node groups */ |
| 7563 | struct sched_group *sg, *prev; | 7561 | struct sched_group *sg, *prev; |
| 7564 | SCHED_CPUMASK_VAR(nodemask, allmasks); | ||
| 7565 | SCHED_CPUMASK_VAR(domainspan, allmasks); | ||
| 7566 | SCHED_CPUMASK_VAR(covered, allmasks); | ||
| 7567 | int j; | 7562 | int j; |
| 7568 | 7563 | ||
| 7564 | /* FIXME: Use cpumask_of_node */ | ||
| 7569 | *nodemask = node_to_cpumask(i); | 7565 | *nodemask = node_to_cpumask(i); |
| 7570 | cpus_clear(*covered); | 7566 | cpumask_clear(covered); |
| 7571 | 7567 | ||
| 7572 | cpus_and(*nodemask, *nodemask, *cpu_map); | 7568 | cpus_and(*nodemask, *nodemask, *cpu_map); |
| 7573 | if (cpus_empty(*nodemask)) { | 7569 | if (cpumask_empty(nodemask)) { |
| 7574 | sched_group_nodes[i] = NULL; | 7570 | sched_group_nodes[i] = NULL; |
| 7575 | continue; | 7571 | continue; |
| 7576 | } | 7572 | } |
| 7577 | 7573 | ||
| 7578 | sched_domain_node_span(i, domainspan); | 7574 | sched_domain_node_span(i, domainspan); |
| 7579 | cpus_and(*domainspan, *domainspan, *cpu_map); | 7575 | cpumask_and(domainspan, domainspan, cpu_map); |
| 7580 | 7576 | ||
| 7581 | sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i); | 7577 | sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), |
| 7578 | GFP_KERNEL, i); | ||
| 7582 | if (!sg) { | 7579 | if (!sg) { |
| 7583 | printk(KERN_WARNING "Can not alloc domain group for " | 7580 | printk(KERN_WARNING "Can not alloc domain group for " |
| 7584 | "node %d\n", i); | 7581 | "node %d\n", i); |
| 7585 | goto error; | 7582 | goto error; |
| 7586 | } | 7583 | } |
| 7587 | sched_group_nodes[i] = sg; | 7584 | sched_group_nodes[i] = sg; |
| 7588 | for_each_cpu_mask_nr(j, *nodemask) { | 7585 | for_each_cpu(j, nodemask) { |
| 7589 | struct sched_domain *sd; | 7586 | struct sched_domain *sd; |
| 7590 | 7587 | ||
| 7591 | sd = &per_cpu(node_domains, j); | 7588 | sd = &per_cpu(node_domains, j); |
| 7592 | sd->groups = sg; | 7589 | sd->groups = sg; |
| 7593 | } | 7590 | } |
| 7594 | sg->__cpu_power = 0; | 7591 | sg->__cpu_power = 0; |
| 7595 | sg->cpumask = *nodemask; | 7592 | cpumask_copy(sched_group_cpus(sg), nodemask); |
| 7596 | sg->next = sg; | 7593 | sg->next = sg; |
| 7597 | cpus_or(*covered, *covered, *nodemask); | 7594 | cpumask_or(covered, covered, nodemask); |
| 7598 | prev = sg; | 7595 | prev = sg; |
| 7599 | 7596 | ||
| 7600 | for (j = 0; j < nr_node_ids; j++) { | 7597 | for (j = 0; j < nr_node_ids; j++) { |
| 7601 | SCHED_CPUMASK_VAR(notcovered, allmasks); | ||
| 7602 | int n = (i + j) % nr_node_ids; | 7598 | int n = (i + j) % nr_node_ids; |
| 7599 | /* FIXME: Use cpumask_of_node */ | ||
| 7603 | node_to_cpumask_ptr(pnodemask, n); | 7600 | node_to_cpumask_ptr(pnodemask, n); |
| 7604 | 7601 | ||
| 7605 | cpus_complement(*notcovered, *covered); | 7602 | cpumask_complement(notcovered, covered); |
| 7606 | cpus_and(*tmpmask, *notcovered, *cpu_map); | 7603 | cpumask_and(tmpmask, notcovered, cpu_map); |
| 7607 | cpus_and(*tmpmask, *tmpmask, *domainspan); | 7604 | cpumask_and(tmpmask, tmpmask, domainspan); |
| 7608 | if (cpus_empty(*tmpmask)) | 7605 | if (cpumask_empty(tmpmask)) |
| 7609 | break; | 7606 | break; |
| 7610 | 7607 | ||
| 7611 | cpus_and(*tmpmask, *tmpmask, *pnodemask); | 7608 | cpumask_and(tmpmask, tmpmask, pnodemask); |
| 7612 | if (cpus_empty(*tmpmask)) | 7609 | if (cpumask_empty(tmpmask)) |
| 7613 | continue; | 7610 | continue; |
| 7614 | 7611 | ||
| 7615 | sg = kmalloc_node(sizeof(struct sched_group), | 7612 | sg = kmalloc_node(sizeof(struct sched_group) + |
| 7613 | cpumask_size(), | ||
| 7616 | GFP_KERNEL, i); | 7614 | GFP_KERNEL, i); |
| 7617 | if (!sg) { | 7615 | if (!sg) { |
| 7618 | printk(KERN_WARNING | 7616 | printk(KERN_WARNING |
| @@ -7620,9 +7618,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
| 7620 | goto error; | 7618 | goto error; |
| 7621 | } | 7619 | } |
| 7622 | sg->__cpu_power = 0; | 7620 | sg->__cpu_power = 0; |
| 7623 | sg->cpumask = *tmpmask; | 7621 | cpumask_copy(sched_group_cpus(sg), tmpmask); |
| 7624 | sg->next = prev->next; | 7622 | sg->next = prev->next; |
| 7625 | cpus_or(*covered, *covered, *tmpmask); | 7623 | cpumask_or(covered, covered, tmpmask); |
| 7626 | prev->next = sg; | 7624 | prev->next = sg; |
| 7627 | prev = sg; | 7625 | prev = sg; |
| 7628 | } | 7626 | } |
| @@ -7631,22 +7629,22 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
| 7631 | 7629 | ||
| 7632 | /* Calculate CPU power for physical packages and nodes */ | 7630 | /* Calculate CPU power for physical packages and nodes */ |
| 7633 | #ifdef CONFIG_SCHED_SMT | 7631 | #ifdef CONFIG_SCHED_SMT |
| 7634 | for_each_cpu_mask_nr(i, *cpu_map) { | 7632 | for_each_cpu(i, cpu_map) { |
| 7635 | struct sched_domain *sd = &per_cpu(cpu_domains, i); | 7633 | struct sched_domain *sd = &per_cpu(cpu_domains, i).sd; |
| 7636 | 7634 | ||
| 7637 | init_sched_groups_power(i, sd); | 7635 | init_sched_groups_power(i, sd); |
| 7638 | } | 7636 | } |
| 7639 | #endif | 7637 | #endif |
| 7640 | #ifdef CONFIG_SCHED_MC | 7638 | #ifdef CONFIG_SCHED_MC |
| 7641 | for_each_cpu_mask_nr(i, *cpu_map) { | 7639 | for_each_cpu(i, cpu_map) { |
| 7642 | struct sched_domain *sd = &per_cpu(core_domains, i); | 7640 | struct sched_domain *sd = &per_cpu(core_domains, i).sd; |
| 7643 | 7641 | ||
| 7644 | init_sched_groups_power(i, sd); | 7642 | init_sched_groups_power(i, sd); |
| 7645 | } | 7643 | } |
| 7646 | #endif | 7644 | #endif |
| 7647 | 7645 | ||
| 7648 | for_each_cpu_mask_nr(i, *cpu_map) { | 7646 | for_each_cpu(i, cpu_map) { |
| 7649 | struct sched_domain *sd = &per_cpu(phys_domains, i); | 7647 | struct sched_domain *sd = &per_cpu(phys_domains, i).sd; |
| 7650 | 7648 | ||
| 7651 | init_sched_groups_power(i, sd); | 7649 | init_sched_groups_power(i, sd); |
| 7652 | } | 7650 | } |
| @@ -7658,56 +7656,87 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
| 7658 | if (sd_allnodes) { | 7656 | if (sd_allnodes) { |
| 7659 | struct sched_group *sg; | 7657 | struct sched_group *sg; |
| 7660 | 7658 | ||
| 7661 | cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg, | 7659 | cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg, |
| 7662 | tmpmask); | 7660 | tmpmask); |
| 7663 | init_numa_sched_groups_power(sg); | 7661 | init_numa_sched_groups_power(sg); |
| 7664 | } | 7662 | } |
| 7665 | #endif | 7663 | #endif |
| 7666 | 7664 | ||
| 7667 | /* Attach the domains */ | 7665 | /* Attach the domains */ |
| 7668 | for_each_cpu_mask_nr(i, *cpu_map) { | 7666 | for_each_cpu(i, cpu_map) { |
| 7669 | struct sched_domain *sd; | 7667 | struct sched_domain *sd; |
| 7670 | #ifdef CONFIG_SCHED_SMT | 7668 | #ifdef CONFIG_SCHED_SMT |
| 7671 | sd = &per_cpu(cpu_domains, i); | 7669 | sd = &per_cpu(cpu_domains, i).sd; |
| 7672 | #elif defined(CONFIG_SCHED_MC) | 7670 | #elif defined(CONFIG_SCHED_MC) |
| 7673 | sd = &per_cpu(core_domains, i); | 7671 | sd = &per_cpu(core_domains, i).sd; |
| 7674 | #else | 7672 | #else |
| 7675 | sd = &per_cpu(phys_domains, i); | 7673 | sd = &per_cpu(phys_domains, i).sd; |
| 7676 | #endif | 7674 | #endif |
| 7677 | cpu_attach_domain(sd, rd, i); | 7675 | cpu_attach_domain(sd, rd, i); |
| 7678 | } | 7676 | } |
| 7679 | 7677 | ||
| 7680 | SCHED_CPUMASK_FREE((void *)allmasks); | 7678 | err = 0; |
| 7681 | return 0; | 7679 | |
| 7680 | free_tmpmask: | ||
| 7681 | free_cpumask_var(tmpmask); | ||
| 7682 | free_send_covered: | ||
| 7683 | free_cpumask_var(send_covered); | ||
| 7684 | free_this_core_map: | ||
| 7685 | free_cpumask_var(this_core_map); | ||
| 7686 | free_this_sibling_map: | ||
| 7687 | free_cpumask_var(this_sibling_map); | ||
| 7688 | free_nodemask: | ||
| 7689 | free_cpumask_var(nodemask); | ||
| 7690 | free_notcovered: | ||
| 7691 | #ifdef CONFIG_NUMA | ||
| 7692 | free_cpumask_var(notcovered); | ||
| 7693 | free_covered: | ||
| 7694 | free_cpumask_var(covered); | ||
| 7695 | free_domainspan: | ||
| 7696 | free_cpumask_var(domainspan); | ||
| 7697 | out: | ||
| 7698 | #endif | ||
| 7699 | return err; | ||
| 7700 | |||
| 7701 | free_sched_groups: | ||
| 7702 | #ifdef CONFIG_NUMA | ||
| 7703 | kfree(sched_group_nodes); | ||
| 7704 | #endif | ||
| 7705 | goto free_tmpmask; | ||
| 7682 | 7706 | ||
| 7683 | #ifdef CONFIG_NUMA | 7707 | #ifdef CONFIG_NUMA |
| 7684 | error: | 7708 | error: |
| 7685 | free_sched_groups(cpu_map, tmpmask); | 7709 | free_sched_groups(cpu_map, tmpmask); |
| 7686 | SCHED_CPUMASK_FREE((void *)allmasks); | 7710 | free_rootdomain(rd); |
| 7687 | kfree(rd); | 7711 | goto free_tmpmask; |
| 7688 | return -ENOMEM; | ||
| 7689 | #endif | 7712 | #endif |
| 7690 | } | 7713 | } |
| 7691 | 7714 | ||
| 7692 | static int build_sched_domains(const cpumask_t *cpu_map) | 7715 | static int build_sched_domains(const struct cpumask *cpu_map) |
| 7693 | { | 7716 | { |
| 7694 | return __build_sched_domains(cpu_map, NULL); | 7717 | return __build_sched_domains(cpu_map, NULL); |
| 7695 | } | 7718 | } |
| 7696 | 7719 | ||
| 7697 | static cpumask_t *doms_cur; /* current sched domains */ | 7720 | static struct cpumask *doms_cur; /* current sched domains */ |
| 7698 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ | 7721 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ |
| 7699 | static struct sched_domain_attr *dattr_cur; | 7722 | static struct sched_domain_attr *dattr_cur; |
| 7700 | /* attributes of custom domains in 'doms_cur' */ | 7723 | /* attributes of custom domains in 'doms_cur' */ |
| 7701 | 7724 | ||
| 7702 | /* | 7725 | /* |
| 7703 | * Special case: If a kmalloc of a doms_cur partition (array of | 7726 | * Special case: If a kmalloc of a doms_cur partition (array of |
| 7704 | * cpumask_t) fails, then fall back to a single sched domain, | 7727 | * cpumask) fails, then fall back to a single sched domain, |
| 7705 | * as determined by the single cpumask_t fallback_doms. | 7728 | * as determined by the single cpumask fallback_doms. |
| 7706 | */ | 7729 | */ |
| 7707 | static cpumask_t fallback_doms; | 7730 | static cpumask_var_t fallback_doms; |
| 7708 | 7731 | ||
| 7709 | void __attribute__((weak)) arch_update_cpu_topology(void) | 7732 | /* |
| 7733 | * arch_update_cpu_topology lets virtualized architectures update the | ||
| 7734 | * cpu core maps. It is supposed to return 1 if the topology changed | ||
| 7735 | * or 0 if it stayed the same. | ||
| 7736 | */ | ||
| 7737 | int __attribute__((weak)) arch_update_cpu_topology(void) | ||
| 7710 | { | 7738 | { |
| 7739 | return 0; | ||
| 7711 | } | 7740 | } |
| 7712 | 7741 | ||
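The hunk above turns arch_update_cpu_topology() from an empty weak stub into a weak default that reports whether the topology changed. For readers unfamiliar with the idiom, here is a minimal standalone sketch of how a __attribute__((weak)) default is overridden at link time by any strong definition an architecture supplies; update_topology() and its caller are invented stand-ins, not kernel code:

    #include <stdio.h>

    /* Weak default: any strong definition linked in from another object
     * file silently replaces it, just as an architecture can override
     * arch_update_cpu_topology(). */
    int __attribute__((weak)) update_topology(void)
    {
        return 0;   /* default: "topology unchanged" */
    }

    int main(void)
    {
        if (update_topology())
            printf("topology changed, rebuild sched domains\n");
        else
            printf("topology unchanged\n");
        return 0;
    }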
| 7713 | /* | 7742 | /* |
| @@ -7715,16 +7744,16 @@ void __attribute__((weak)) arch_update_cpu_topology(void) | |||
| 7715 | * For now this just excludes isolated cpus, but could be used to | 7744 | * For now this just excludes isolated cpus, but could be used to |
| 7716 | * exclude other special cases in the future. | 7745 | * exclude other special cases in the future. |
| 7717 | */ | 7746 | */ |
| 7718 | static int arch_init_sched_domains(const cpumask_t *cpu_map) | 7747 | static int arch_init_sched_domains(const struct cpumask *cpu_map) |
| 7719 | { | 7748 | { |
| 7720 | int err; | 7749 | int err; |
| 7721 | 7750 | ||
| 7722 | arch_update_cpu_topology(); | 7751 | arch_update_cpu_topology(); |
| 7723 | ndoms_cur = 1; | 7752 | ndoms_cur = 1; |
| 7724 | doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL); | 7753 | doms_cur = kmalloc(cpumask_size(), GFP_KERNEL); |
| 7725 | if (!doms_cur) | 7754 | if (!doms_cur) |
| 7726 | doms_cur = &fallback_doms; | 7755 | doms_cur = fallback_doms; |
| 7727 | cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map); | 7756 | cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map); |
| 7728 | dattr_cur = NULL; | 7757 | dattr_cur = NULL; |
| 7729 | err = build_sched_domains(doms_cur); | 7758 | err = build_sched_domains(doms_cur); |
| 7730 | register_sched_domain_sysctl(); | 7759 | register_sched_domain_sysctl(); |
| @@ -7732,8 +7761,8 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map) | |||
| 7732 | return err; | 7761 | return err; |
| 7733 | } | 7762 | } |
| 7734 | 7763 | ||
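arch_init_sched_domains() now kmallocs a single cpumask for doms_cur, falls back to the static fallback_doms if the allocation fails, and strips the isolated CPUs with cpumask_andnot(). A rough standalone sketch of that allocate-or-fall-back idiom, using an unsigned long as a crude stand-in for struct cpumask (all names and bit values here are invented for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    /* One bit per CPU; a crude stand-in for struct cpumask. */
    typedef unsigned long mask_t;

    static mask_t fallback_doms;   /* static fallback, like the kernel's fallback_doms */

    int main(void)
    {
        mask_t online   = 0xffUL;  /* CPUs 0-7 online (made-up)   */
        mask_t isolated = 0x03UL;  /* CPUs 0-1 isolated (made-up) */
        mask_t *doms_cur;

        /* Try to allocate; fall back to the static mask if that fails. */
        doms_cur = malloc(sizeof(*doms_cur));
        if (!doms_cur)
            doms_cur = &fallback_doms;

        /* The moral equivalent of cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map). */
        *doms_cur = online & ~isolated;

        printf("load-balanced CPUs: 0x%lx\n", *doms_cur);

        if (doms_cur != &fallback_doms)
            free(doms_cur);
        return 0;
    }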
| 7735 | static void arch_destroy_sched_domains(const cpumask_t *cpu_map, | 7764 | static void arch_destroy_sched_domains(const struct cpumask *cpu_map, |
| 7736 | cpumask_t *tmpmask) | 7765 | struct cpumask *tmpmask) |
| 7737 | { | 7766 | { |
| 7738 | free_sched_groups(cpu_map, tmpmask); | 7767 | free_sched_groups(cpu_map, tmpmask); |
| 7739 | } | 7768 | } |
| @@ -7742,17 +7771,16 @@ static void arch_destroy_sched_domains(const cpumask_t *cpu_map, | |||
| 7742 | * Detach sched domains from a group of cpus specified in cpu_map | 7771 | * Detach sched domains from a group of cpus specified in cpu_map |
| 7743 | * These cpus will now be attached to the NULL domain | 7772 | * These cpus will now be attached to the NULL domain |
| 7744 | */ | 7773 | */ |
| 7745 | static void detach_destroy_domains(const cpumask_t *cpu_map) | 7774 | static void detach_destroy_domains(const struct cpumask *cpu_map) |
| 7746 | { | 7775 | { |
| 7747 | cpumask_t tmpmask; | 7776 | /* Save because hotplug lock held. */ |
| 7777 | static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS); | ||
| 7748 | int i; | 7778 | int i; |
| 7749 | 7779 | ||
| 7750 | unregister_sched_domain_sysctl(); | 7780 | for_each_cpu(i, cpu_map) |
| 7751 | |||
| 7752 | for_each_cpu_mask_nr(i, *cpu_map) | ||
| 7753 | cpu_attach_domain(NULL, &def_root_domain, i); | 7781 | cpu_attach_domain(NULL, &def_root_domain, i); |
| 7754 | synchronize_sched(); | 7782 | synchronize_sched(); |
| 7755 | arch_destroy_sched_domains(cpu_map, &tmpmask); | 7783 | arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask)); |
| 7756 | } | 7784 | } |
| 7757 | 7785 | ||
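detach_destroy_domains() replaces its on-stack cpumask_t with a static DECLARE_BITMAP passed through to_cpumask(), which is essentially a checked cast of the long array to struct cpumask *. A small standalone sketch of what that buys, assuming an example NR_CPUS of 4096 (the real size comes from CONFIG_NR_CPUS):

    #include <stdio.h>
    #include <limits.h>

    #define NR_CPUS          4096   /* example value; the kernel uses CONFIG_NR_CPUS */
    #define BITS_PER_LONG    (CHAR_BIT * sizeof(unsigned long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    /* Roughly what DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS) expands to:
     * enough longs to hold one bit per possible CPU.  Making it static
     * keeps it off the small kernel stack; that is safe here only because
     * callers of detach_destroy_domains() hold the hotplug lock, which
     * serializes use of the buffer. */
    static unsigned long tmpmask[BITS_TO_LONGS(NR_CPUS)];

    int main(void)
    {
        printf("a %d-CPU mask needs %zu bytes\n", NR_CPUS, sizeof(tmpmask));
        return 0;
    }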
| 7758 | /* handle null as "default" */ | 7786 | /* handle null as "default" */ |
| @@ -7777,7 +7805,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, | |||
| 7777 | * doms_new[] to the current sched domain partitioning, doms_cur[]. | 7805 | * doms_new[] to the current sched domain partitioning, doms_cur[]. |
| 7778 | * It destroys each deleted domain and builds each new domain. | 7806 | * It destroys each deleted domain and builds each new domain. |
| 7779 | * | 7807 | * |
| 7780 | * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'. | 7808 | * 'doms_new' is an array of cpumask's of length 'ndoms_new'. |
| 7781 | * The masks don't intersect (don't overlap.) We should setup one | 7809 | * The masks don't intersect (don't overlap.) We should setup one |
| 7782 | * sched domain for each mask. CPUs not in any of the cpumasks will | 7810 | * sched domain for each mask. CPUs not in any of the cpumasks will |
| 7783 | * not be load balanced. If the same cpumask appears both in the | 7811 | * not be load balanced. If the same cpumask appears both in the |
| @@ -7786,32 +7814,38 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, | |||
| 7786 | * | 7814 | * |
| 7787 | * The passed in 'doms_new' should be kmalloc'd. This routine takes | 7815 | * The passed in 'doms_new' should be kmalloc'd. This routine takes |
| 7788 | * ownership of it and will kfree it when done with it. If the caller | 7816 | * ownership of it and will kfree it when done with it. If the caller |
| 7789 | * failed the kmalloc call, then it can pass in doms_new == NULL, | 7817 | * failed the kmalloc call, then it can pass in doms_new == NULL && |
| 7790 | * and partition_sched_domains() will fallback to the single partition | 7818 | * ndoms_new == 1, and partition_sched_domains() will fallback to |
| 7791 | * 'fallback_doms', it also forces the domains to be rebuilt. | 7819 | * the single partition 'fallback_doms', it also forces the domains |
| 7820 | * to be rebuilt. | ||
| 7792 | * | 7821 | * |
| 7793 | * If doms_new==NULL it will be replaced with cpu_online_map. | 7822 | * If doms_new == NULL it will be replaced with cpu_online_mask. |
| 7794 | * ndoms_new==0 is a special case for destroying existing domains. | 7823 | * ndoms_new == 0 is a special case for destroying existing domains, |
| 7795 | * It will not create the default domain. | 7824 | * and it will not create the default domain. |
| 7796 | * | 7825 | * |
| 7797 | * Call with hotplug lock held | 7826 | * Call with hotplug lock held |
| 7798 | */ | 7827 | */ |
| 7799 | void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, | 7828 | /* FIXME: Change to struct cpumask *doms_new[] */ |
| 7829 | void partition_sched_domains(int ndoms_new, struct cpumask *doms_new, | ||
| 7800 | struct sched_domain_attr *dattr_new) | 7830 | struct sched_domain_attr *dattr_new) |
| 7801 | { | 7831 | { |
| 7802 | int i, j, n; | 7832 | int i, j, n; |
| 7833 | int new_topology; | ||
| 7803 | 7834 | ||
| 7804 | mutex_lock(&sched_domains_mutex); | 7835 | mutex_lock(&sched_domains_mutex); |
| 7805 | 7836 | ||
| 7806 | /* always unregister in case we don't destroy any domains */ | 7837 | /* always unregister in case we don't destroy any domains */ |
| 7807 | unregister_sched_domain_sysctl(); | 7838 | unregister_sched_domain_sysctl(); |
| 7808 | 7839 | ||
| 7840 | /* Let architecture update cpu core mappings. */ | ||
| 7841 | new_topology = arch_update_cpu_topology(); | ||
| 7842 | |||
| 7809 | n = doms_new ? ndoms_new : 0; | 7843 | n = doms_new ? ndoms_new : 0; |
| 7810 | 7844 | ||
| 7811 | /* Destroy deleted domains */ | 7845 | /* Destroy deleted domains */ |
| 7812 | for (i = 0; i < ndoms_cur; i++) { | 7846 | for (i = 0; i < ndoms_cur; i++) { |
| 7813 | for (j = 0; j < n; j++) { | 7847 | for (j = 0; j < n && !new_topology; j++) { |
| 7814 | if (cpus_equal(doms_cur[i], doms_new[j]) | 7848 | if (cpumask_equal(&doms_cur[i], &doms_new[j]) |
| 7815 | && dattrs_equal(dattr_cur, i, dattr_new, j)) | 7849 | && dattrs_equal(dattr_cur, i, dattr_new, j)) |
| 7816 | goto match1; | 7850 | goto match1; |
| 7817 | } | 7851 | } |
| @@ -7823,15 +7857,15 @@ match1: | |||
| 7823 | 7857 | ||
| 7824 | if (doms_new == NULL) { | 7858 | if (doms_new == NULL) { |
| 7825 | ndoms_cur = 0; | 7859 | ndoms_cur = 0; |
| 7826 | doms_new = &fallback_doms; | 7860 | doms_new = fallback_doms; |
| 7827 | cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); | 7861 | cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map); |
| 7828 | dattr_new = NULL; | 7862 | WARN_ON_ONCE(dattr_new); |
| 7829 | } | 7863 | } |
| 7830 | 7864 | ||
| 7831 | /* Build new domains */ | 7865 | /* Build new domains */ |
| 7832 | for (i = 0; i < ndoms_new; i++) { | 7866 | for (i = 0; i < ndoms_new; i++) { |
| 7833 | for (j = 0; j < ndoms_cur; j++) { | 7867 | for (j = 0; j < ndoms_cur && !new_topology; j++) { |
| 7834 | if (cpus_equal(doms_new[i], doms_cur[j]) | 7868 | if (cpumask_equal(&doms_new[i], &doms_cur[j]) |
| 7835 | && dattrs_equal(dattr_new, i, dattr_cur, j)) | 7869 | && dattrs_equal(dattr_new, i, dattr_cur, j)) |
| 7836 | goto match2; | 7870 | goto match2; |
| 7837 | } | 7871 | } |
| @@ -7843,7 +7877,7 @@ match2: | |||
| 7843 | } | 7877 | } |
| 7844 | 7878 | ||
| 7845 | /* Remember the new sched domains */ | 7879 | /* Remember the new sched domains */ |
| 7846 | if (doms_cur != &fallback_doms) | 7880 | if (doms_cur != fallback_doms) |
| 7847 | kfree(doms_cur); | 7881 | kfree(doms_cur); |
| 7848 | kfree(dattr_cur); /* kfree(NULL) is safe */ | 7882 | kfree(dattr_cur); /* kfree(NULL) is safe */ |
| 7849 | doms_cur = doms_new; | 7883 | doms_cur = doms_new; |
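The match1/match2 loops above compute a set difference between the current and requested partitionings: domains present only in doms_cur are destroyed, domains present only in doms_new are built, identical ones are reused, and a topology change reported by arch_update_cpu_topology() disables the reuse so everything is rebuilt. A standalone sketch of that reuse logic, with integers standing in for cpumasks and printouts standing in for detach/build (sync_domains and all values are invented):

    #include <stdio.h>
    #include <stdbool.h>

    /* Integers stand in for cpumask partitions; equal value == equal mask. */
    static void sync_domains(const int *cur, int ncur,
                             const int *want, int nwant, bool new_topology)
    {
        int i, j;

        /* Destroy deleted domains (cf. the match1 loop above). */
        for (i = 0; i < ncur; i++) {
            for (j = 0; j < nwant && !new_topology; j++)
                if (cur[i] == want[j])
                    goto match1;
            printf("detach_destroy %d\n", cur[i]);
    match1:
            ;
        }

        /* Build new domains (cf. the match2 loop above). */
        for (i = 0; i < nwant; i++) {
            for (j = 0; j < ncur && !new_topology; j++)
                if (want[i] == cur[j])
                    goto match2;
            printf("build %d\n", want[i]);
    match2:
            ;
        }
    }

    int main(void)
    {
        int cur[]  = { 1, 2, 3 };
        int want[] = { 2, 4 };

        /* Detaches 1 and 3, builds 4, leaves 2 untouched. */
        sync_domains(cur, 3, want, 2, false);
        return 0;
    }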
| @@ -7983,7 +8017,9 @@ static int update_runtime(struct notifier_block *nfb, | |||
| 7983 | 8017 | ||
| 7984 | void __init sched_init_smp(void) | 8018 | void __init sched_init_smp(void) |
| 7985 | { | 8019 | { |
| 7986 | cpumask_t non_isolated_cpus; | 8020 | cpumask_var_t non_isolated_cpus; |
| 8021 | |||
| 8022 | alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); | ||
| 7987 | 8023 | ||
| 7988 | #if defined(CONFIG_NUMA) | 8024 | #if defined(CONFIG_NUMA) |
| 7989 | sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), | 8025 | sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), |
| @@ -7992,10 +8028,10 @@ void __init sched_init_smp(void) | |||
| 7992 | #endif | 8028 | #endif |
| 7993 | get_online_cpus(); | 8029 | get_online_cpus(); |
| 7994 | mutex_lock(&sched_domains_mutex); | 8030 | mutex_lock(&sched_domains_mutex); |
| 7995 | arch_init_sched_domains(&cpu_online_map); | 8031 | arch_init_sched_domains(cpu_online_mask); |
| 7996 | cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map); | 8032 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); |
| 7997 | if (cpus_empty(non_isolated_cpus)) | 8033 | if (cpumask_empty(non_isolated_cpus)) |
| 7998 | cpu_set(smp_processor_id(), non_isolated_cpus); | 8034 | cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); |
| 7999 | mutex_unlock(&sched_domains_mutex); | 8035 | mutex_unlock(&sched_domains_mutex); |
| 8000 | put_online_cpus(); | 8036 | put_online_cpus(); |
| 8001 | 8037 | ||
| @@ -8010,9 +8046,13 @@ void __init sched_init_smp(void) | |||
| 8010 | init_hrtick(); | 8046 | init_hrtick(); |
| 8011 | 8047 | ||
| 8012 | /* Move init over to a non-isolated CPU */ | 8048 | /* Move init over to a non-isolated CPU */ |
| 8013 | if (set_cpus_allowed_ptr(current, &non_isolated_cpus) < 0) | 8049 | if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) |
| 8014 | BUG(); | 8050 | BUG(); |
| 8015 | sched_init_granularity(); | 8051 | sched_init_granularity(); |
| 8052 | free_cpumask_var(non_isolated_cpus); | ||
| 8053 | |||
| 8054 | alloc_cpumask_var(&fallback_doms, GFP_KERNEL); | ||
| 8055 | init_sched_rt_class(); | ||
| 8016 | } | 8056 | } |
| 8017 | #else | 8057 | #else |
| 8018 | void __init sched_init_smp(void) | 8058 | void __init sched_init_smp(void) |
| @@ -8327,6 +8367,15 @@ void __init sched_init(void) | |||
| 8327 | */ | 8367 | */ |
| 8328 | current->sched_class = &fair_sched_class; | 8368 | current->sched_class = &fair_sched_class; |
| 8329 | 8369 | ||
| 8370 | /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ | ||
| 8371 | alloc_bootmem_cpumask_var(&nohz_cpu_mask); | ||
| 8372 | #ifdef CONFIG_SMP | ||
| 8373 | #ifdef CONFIG_NO_HZ | ||
| 8374 | alloc_bootmem_cpumask_var(&nohz.cpu_mask); | ||
| 8375 | #endif | ||
| 8376 | alloc_bootmem_cpumask_var(&cpu_isolated_map); | ||
| 8377 | #endif /* SMP */ | ||
| 8378 | |||
| 8330 | scheduler_running = 1; | 8379 | scheduler_running = 1; |
| 8331 | } | 8380 | } |
| 8332 | 8381 | ||
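The bootmem allocations added to sched_init() only do real work when CONFIG_CPUMASK_OFFSTACK=y; otherwise cpumask_var_t degenerates to an ordinary one-element array and the alloc/free helpers are no-ops. Roughly, and simplified from the <linux/cpumask.h> of this era (alloc_bootmem_cpumask_var is the boot-time counterpart of alloc_cpumask_var):

    /* Simplified excerpt, approximately what <linux/cpumask.h> provides. */
    #ifdef CONFIG_CPUMASK_OFFSTACK
    /* Mask lives off-stack; allocation can really fail. */
    typedef struct cpumask *cpumask_var_t;
    bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
    void free_cpumask_var(cpumask_var_t mask);
    #else
    /* Mask is a plain one-element array; alloc/free are no-ops. */
    typedef struct cpumask cpumask_var_t[1];
    static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
    {
        return true;
    }
    static inline void free_cpumask_var(cpumask_var_t mask)
    {
    }
    #endif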
| @@ -8485,7 +8534,7 @@ static | |||
| 8485 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) | 8534 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) |
| 8486 | { | 8535 | { |
| 8487 | struct cfs_rq *cfs_rq; | 8536 | struct cfs_rq *cfs_rq; |
| 8488 | struct sched_entity *se, *parent_se; | 8537 | struct sched_entity *se; |
| 8489 | struct rq *rq; | 8538 | struct rq *rq; |
| 8490 | int i; | 8539 | int i; |
| 8491 | 8540 | ||
| @@ -8501,18 +8550,17 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) | |||
| 8501 | for_each_possible_cpu(i) { | 8550 | for_each_possible_cpu(i) { |
| 8502 | rq = cpu_rq(i); | 8551 | rq = cpu_rq(i); |
| 8503 | 8552 | ||
| 8504 | cfs_rq = kmalloc_node(sizeof(struct cfs_rq), | 8553 | cfs_rq = kzalloc_node(sizeof(struct cfs_rq), |
| 8505 | GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); | 8554 | GFP_KERNEL, cpu_to_node(i)); |
| 8506 | if (!cfs_rq) | 8555 | if (!cfs_rq) |
| 8507 | goto err; | 8556 | goto err; |
| 8508 | 8557 | ||
| 8509 | se = kmalloc_node(sizeof(struct sched_entity), | 8558 | se = kzalloc_node(sizeof(struct sched_entity), |
| 8510 | GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); | 8559 | GFP_KERNEL, cpu_to_node(i)); |
| 8511 | if (!se) | 8560 | if (!se) |
| 8512 | goto err; | 8561 | goto err; |
| 8513 | 8562 | ||
| 8514 | parent_se = parent ? parent->se[i] : NULL; | 8563 | init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]); |
| 8515 | init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent_se); | ||
| 8516 | } | 8564 | } |
| 8517 | 8565 | ||
| 8518 | return 1; | 8566 | return 1; |
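The allocator change in this hunk is cosmetic: kzalloc_node(size, flags, node) is shorthand for kmalloc_node(size, flags | __GFP_ZERO, node), so the per-cpu cfs_rq and sched_entity are still zero-initialized on the requested NUMA node. Simplified from <linux/slab.h>:

    /* Simplified; the real helper lives in <linux/slab.h>. */
    static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
    {
        return kmalloc_node(size, flags | __GFP_ZERO, node);
    }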
| @@ -8573,7 +8621,7 @@ static | |||
| 8573 | int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) | 8621 | int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) |
| 8574 | { | 8622 | { |
| 8575 | struct rt_rq *rt_rq; | 8623 | struct rt_rq *rt_rq; |
| 8576 | struct sched_rt_entity *rt_se, *parent_se; | 8624 | struct sched_rt_entity *rt_se; |
| 8577 | struct rq *rq; | 8625 | struct rq *rq; |
| 8578 | int i; | 8626 | int i; |
| 8579 | 8627 | ||
| @@ -8590,18 +8638,17 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) | |||
| 8590 | for_each_possible_cpu(i) { | 8638 | for_each_possible_cpu(i) { |
| 8591 | rq = cpu_rq(i); | 8639 | rq = cpu_rq(i); |
| 8592 | 8640 | ||
| 8593 | rt_rq = kmalloc_node(sizeof(struct rt_rq), | 8641 | rt_rq = kzalloc_node(sizeof(struct rt_rq), |
| 8594 | GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); | 8642 | GFP_KERNEL, cpu_to_node(i)); |
| 8595 | if (!rt_rq) | 8643 | if (!rt_rq) |
| 8596 | goto err; | 8644 | goto err; |
| 8597 | 8645 | ||
| 8598 | rt_se = kmalloc_node(sizeof(struct sched_rt_entity), | 8646 | rt_se = kzalloc_node(sizeof(struct sched_rt_entity), |
| 8599 | GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); | 8647 | GFP_KERNEL, cpu_to_node(i)); |
| 8600 | if (!rt_se) | 8648 | if (!rt_se) |
| 8601 | goto err; | 8649 | goto err; |
| 8602 | 8650 | ||
| 8603 | parent_se = parent ? parent->rt_se[i] : NULL; | 8651 | init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]); |
| 8604 | init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent_se); | ||
| 8605 | } | 8652 | } |
| 8606 | 8653 | ||
| 8607 | return 1; | 8654 | return 1; |
| @@ -9244,11 +9291,12 @@ struct cgroup_subsys cpu_cgroup_subsys = { | |||
| 9244 | * (balbir@in.ibm.com). | 9291 | * (balbir@in.ibm.com). |
| 9245 | */ | 9292 | */ |
| 9246 | 9293 | ||
| 9247 | /* track cpu usage of a group of tasks */ | 9294 | /* track cpu usage of a group of tasks and its child groups */ |
| 9248 | struct cpuacct { | 9295 | struct cpuacct { |
| 9249 | struct cgroup_subsys_state css; | 9296 | struct cgroup_subsys_state css; |
| 9250 | /* cpuusage holds pointer to a u64-type object on every cpu */ | 9297 | /* cpuusage holds pointer to a u64-type object on every cpu */ |
| 9251 | u64 *cpuusage; | 9298 | u64 *cpuusage; |
| 9299 | struct cpuacct *parent; | ||
| 9252 | }; | 9300 | }; |
| 9253 | 9301 | ||
| 9254 | struct cgroup_subsys cpuacct_subsys; | 9302 | struct cgroup_subsys cpuacct_subsys; |
| @@ -9282,6 +9330,9 @@ static struct cgroup_subsys_state *cpuacct_create( | |||
| 9282 | return ERR_PTR(-ENOMEM); | 9330 | return ERR_PTR(-ENOMEM); |
| 9283 | } | 9331 | } |
| 9284 | 9332 | ||
| 9333 | if (cgrp->parent) | ||
| 9334 | ca->parent = cgroup_ca(cgrp->parent); | ||
| 9335 | |||
| 9285 | return &ca->css; | 9336 | return &ca->css; |
| 9286 | } | 9337 | } |
| 9287 | 9338 | ||
| @@ -9361,14 +9412,16 @@ static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) | |||
| 9361 | static void cpuacct_charge(struct task_struct *tsk, u64 cputime) | 9412 | static void cpuacct_charge(struct task_struct *tsk, u64 cputime) |
| 9362 | { | 9413 | { |
| 9363 | struct cpuacct *ca; | 9414 | struct cpuacct *ca; |
| 9415 | int cpu; | ||
| 9364 | 9416 | ||
| 9365 | if (!cpuacct_subsys.active) | 9417 | if (!cpuacct_subsys.active) |
| 9366 | return; | 9418 | return; |
| 9367 | 9419 | ||
| 9420 | cpu = task_cpu(tsk); | ||
| 9368 | ca = task_ca(tsk); | 9421 | ca = task_ca(tsk); |
| 9369 | if (ca) { | ||
| 9370 | u64 *cpuusage = percpu_ptr(ca->cpuusage, task_cpu(tsk)); | ||
| 9371 | 9422 | ||
| 9423 | for (; ca; ca = ca->parent) { | ||
| 9424 | u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); | ||
| 9372 | *cpuusage += cputime; | 9425 | *cpuusage += cputime; |
| 9373 | } | 9426 | } |
| 9374 | } | 9427 | } |
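With the new parent pointer, cpuacct_charge() walks up the group hierarchy so a tick is billed to the task's group and to every ancestor, rather than to the task's group alone. A standalone sketch of that accumulation, using a single counter instead of the per-cpu cpuusage array and invented group names:

    #include <stdio.h>

    struct acct_group {
        const char *name;
        unsigned long long usage;   /* stands in for the per-cpu cpuusage counter */
        struct acct_group *parent;
    };

    /* Mirror of the new loop in cpuacct_charge(): charge the group and
     * all of its ancestors. */
    static void charge(struct acct_group *ca, unsigned long long cputime)
    {
        for (; ca; ca = ca->parent)
            ca->usage += cputime;
    }

    int main(void)
    {
        struct acct_group root  = { "root",  0, NULL   };
        struct acct_group users = { "users", 0, &root  };
        struct acct_group alice = { "alice", 0, &users };

        charge(&alice, 100);   /* one task in "alice" runs for 100 units */

        printf("%s=%llu %s=%llu %s=%llu\n",
               root.name, root.usage,
               users.name, users.usage,
               alice.name, alice.usage);   /* all three report 100 */
        return 0;
    }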
