Diffstat (limited to 'kernel/sched.c')

 -rw-r--r--   kernel/sched.c   1468
 1 file changed, 833 insertions, 635 deletions

diff --git a/kernel/sched.c b/kernel/sched.c
index b7480fb5c3dc..545c6fccd1dc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -118,6 +118,12 @@
  */
 #define RUNTIME_INF	((u64)~0ULL)
 
+DEFINE_TRACE(sched_wait_task);
+DEFINE_TRACE(sched_wakeup);
+DEFINE_TRACE(sched_wakeup_new);
+DEFINE_TRACE(sched_switch);
+DEFINE_TRACE(sched_migrate_task);
+
 #ifdef CONFIG_SMP
 /*
  * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
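Note: DEFINE_TRACE() only instantiates the tracepoints; a consumer still has to attach a probe. Below is a minimal, hedged sketch of attaching a probe to the sched_migrate_task point added above. The probe signature follows the trace_sched_migrate_task(p, task_cpu(p), new_cpu) call site later in this patch; probe_migrate, the init/exit names, and the exact header path are illustrative assumptions, not part of the patch.

	/* Illustrative only: attach a probe to the sched_migrate_task tracepoint. */
	#include <linux/module.h>
	#include <linux/sched.h>
	#include <trace/sched.h>	/* DECLARE_TRACE() prototypes, as in this tree (assumed path) */

	static void probe_migrate(struct task_struct *p, int orig_cpu, int dest_cpu)
	{
		/* Runs in the migration path: must not sleep, keep it cheap. */
		pr_debug("migrate: pid=%d %d -> %d\n", p->pid, orig_cpu, dest_cpu);
	}

	static int __init migrate_probe_init(void)
	{
		/* register_trace_<name>() is generated by DECLARE_TRACE(). */
		return register_trace_sched_migrate_task(probe_migrate);
	}
	module_init(migrate_probe_init);

	static void __exit migrate_probe_exit(void)
	{
		unregister_trace_sched_migrate_task(probe_migrate);
		tracepoint_synchronize_unregister();
	}
	module_exit(migrate_probe_exit);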
@@ -203,7 +209,6 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
 	hrtimer_init(&rt_b->rt_period_timer,
 			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	rt_b->rt_period_timer.function = sched_rt_period_timer;
-	rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
 }
 
 static inline int rt_bandwidth_enabled(void)
@@ -261,6 +266,10 @@ struct task_group {
 	struct cgroup_subsys_state css;
 #endif
 
+#ifdef CONFIG_USER_SCHED
+	uid_t uid;
+#endif
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* schedulable entities of this group on each cpu */
 	struct sched_entity **se;
@@ -286,6 +295,12 @@ struct task_group {
 
 #ifdef CONFIG_USER_SCHED
 
+/* Helper function to pass uid information to create_sched_user() */
+void set_tg_uid(struct user_struct *user)
+{
+	user->tg->uid = user->uid;
+}
+
 /*
  * Root task group.
  *	Every UID task group (including init_task_group aka UID-0) will
@@ -345,7 +360,9 @@ static inline struct task_group *task_group(struct task_struct *p)
 	struct task_group *tg;
 
 #ifdef CONFIG_USER_SCHED
-	tg = p->user->tg;
+	rcu_read_lock();
+	tg = __task_cred(p)->user->tg;
+	rcu_read_unlock();
 #elif defined(CONFIG_CGROUP_SCHED)
 	tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
 			  struct task_group, css);
@@ -481,18 +498,26 @@ struct rt_rq {
  */
 struct root_domain {
 	atomic_t refcount;
-	cpumask_t span;
-	cpumask_t online;
+	cpumask_var_t span;
+	cpumask_var_t online;
 
 	/*
 	 * The "RT overload" flag: it gets set if a CPU has more than
 	 * one runnable RT task.
 	 */
-	cpumask_t rto_mask;
+	cpumask_var_t rto_mask;
 	atomic_t rto_count;
 #ifdef CONFIG_SMP
 	struct cpupri cpupri;
 #endif
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+	/*
+	 * Preferred wake up cpu nominated by sched_mc balance that will be
+	 * used when most cpus are idle in the system indicating overall very
+	 * low system utilisation. Triggered at POWERSAVINGS_BALANCE_WAKEUP(2)
+	 */
+	unsigned int sched_mc_preferred_wakeup_cpu;
+#endif
 };
 
 /*
@@ -586,6 +611,8 @@ struct rq {
 #ifdef CONFIG_SCHEDSTATS
 	/* latency stats */
 	struct sched_info rq_sched_info;
+	unsigned long long rq_cpu_time;
+	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
 
 	/* sys_sched_yield() stats */
 	unsigned int yld_exp_empty;
@@ -703,45 +730,18 @@ static __read_mostly char *sched_feat_names[] = {
 
 #undef SCHED_FEAT
 
-static int sched_feat_open(struct inode *inode, struct file *filp)
+static int sched_feat_show(struct seq_file *m, void *v)
 {
-	filp->private_data = inode->i_private;
-	return 0;
-}
-
-static ssize_t
-sched_feat_read(struct file *filp, char __user *ubuf,
-		size_t cnt, loff_t *ppos)
-{
-	char *buf;
-	int r = 0;
-	int len = 0;
 	int i;
 
 	for (i = 0; sched_feat_names[i]; i++) {
-		len += strlen(sched_feat_names[i]);
-		len += 4;
+		if (!(sysctl_sched_features & (1UL << i)))
+			seq_puts(m, "NO_");
+		seq_printf(m, "%s ", sched_feat_names[i]);
 	}
+	seq_puts(m, "\n");
 
-	buf = kmalloc(len + 2, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	for (i = 0; sched_feat_names[i]; i++) {
-		if (sysctl_sched_features & (1UL << i))
-			r += sprintf(buf + r, "%s ", sched_feat_names[i]);
-		else
-			r += sprintf(buf + r, "NO_%s ", sched_feat_names[i]);
-	}
-
-	r += sprintf(buf + r, "\n");
-	WARN_ON(r >= len + 2);
-
-	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-
-	kfree(buf);
-
-	return r;
+	return 0;
 }
 
 static ssize_t
@@ -786,10 +786,17 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static int sched_feat_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, sched_feat_show, NULL);
+}
+
 static struct file_operations sched_feat_fops = {
 	.open = sched_feat_open,
-	.read = sched_feat_read,
-	.write = sched_feat_write,
+	.write = sched_feat_write,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
 };
 
 static __init int sched_init_debug(void)
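Note: the two hunks above are the standard single_open() conversion: one show() callback renders the whole file, while seq_read()/seq_lseek()/single_release() take over the read side and the hand-rolled buffer management goes away. A minimal sketch of the same pattern for a hypothetical debugfs file (my_show, my_open and the "my_status" name are illustrative, not from this patch):

	#include <linux/debugfs.h>
	#include <linux/fs.h>
	#include <linux/seq_file.h>

	static int my_show(struct seq_file *m, void *v)
	{
		/* Emit the whole file in one go; seq_file handles buffering. */
		seq_printf(m, "value: %d\n", 42);
		return 0;
	}

	static int my_open(struct inode *inode, struct file *filp)
	{
		return single_open(filp, my_show, inode->i_private);
	}

	static const struct file_operations my_fops = {
		.open    = my_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};

	static int __init my_debugfs_init(void)
	{
		debugfs_create_file("my_status", 0444, NULL, NULL, &my_fops);
		return 0;
	}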
@@ -1139,7 +1146,6 @@ static void init_rq_hrtick(struct rq *rq)
 
 	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	rq->hrtick_timer.function = hrtick;
-	rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
 }
 #else	/* CONFIG_SCHED_HRTICK */
 static inline void hrtick_clear(struct rq *rq)
@@ -1474,27 +1480,13 @@ static void
 update_group_shares_cpu(struct task_group *tg, int cpu,
 			unsigned long sd_shares, unsigned long sd_rq_weight)
 {
-	int boost = 0;
 	unsigned long shares;
 	unsigned long rq_weight;
 
 	if (!tg->se[cpu])
 		return;
 
-	rq_weight = tg->cfs_rq[cpu]->load.weight;
-
-	/*
-	 * If there are currently no tasks on the cpu pretend there is one of
-	 * average load so that when a new task gets to run here it will not
-	 * get delayed by group starvation.
-	 */
-	if (!rq_weight) {
-		boost = 1;
-		rq_weight = NICE_0_LOAD;
-	}
-
-	if (unlikely(rq_weight > sd_rq_weight))
-		rq_weight = sd_rq_weight;
+	rq_weight = tg->cfs_rq[cpu]->rq_weight;
 
 	/*
 	 *		\Sum shares * rq_weight
@@ -1502,7 +1494,7 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
 	 *		    \Sum rq_weight
 	 *
 	 */
-	shares = (sd_shares * rq_weight) / (sd_rq_weight + 1);
+	shares = (sd_shares * rq_weight) / sd_rq_weight;
 	shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
 
 	if (abs(shares - tg->se[cpu]->load.weight) >
@@ -1511,11 +1503,7 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
 		unsigned long flags;
 
 		spin_lock_irqsave(&rq->lock, flags);
-		/*
-		 * record the actual number of shares, not the boosted amount.
-		 */
-		tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
-		tg->cfs_rq[cpu]->rq_weight = rq_weight;
+		tg->cfs_rq[cpu]->shares = shares;
 
 		__set_se_shares(tg->se[cpu], shares);
 		spin_unlock_irqrestore(&rq->lock, flags);
@@ -1529,13 +1517,23 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
  */
 static int tg_shares_up(struct task_group *tg, void *data)
 {
-	unsigned long rq_weight = 0;
+	unsigned long weight, rq_weight = 0;
 	unsigned long shares = 0;
 	struct sched_domain *sd = data;
 	int i;
 
-	for_each_cpu_mask(i, sd->span) {
-		rq_weight += tg->cfs_rq[i]->load.weight;
+	for_each_cpu(i, sched_domain_span(sd)) {
+		/*
+		 * If there are currently no tasks on the cpu pretend there
+		 * is one of average load so that when a new task gets to
+		 * run here it will not get delayed by group starvation.
+		 */
+		weight = tg->cfs_rq[i]->load.weight;
+		if (!weight)
+			weight = NICE_0_LOAD;
+
+		tg->cfs_rq[i]->rq_weight = weight;
+		rq_weight += weight;
 		shares += tg->cfs_rq[i]->shares;
 	}
 
@@ -1545,10 +1543,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
 	if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
 		shares = tg->shares;
 
-	if (!rq_weight)
-		rq_weight = cpus_weight(sd->span) * NICE_0_LOAD;
-
-	for_each_cpu_mask(i, sd->span)
+	for_each_cpu(i, sched_domain_span(sd))
 		update_group_shares_cpu(tg, i, shares, rq_weight);
 
 	return 0;
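Note: with the boost logic gone, the per-cpu share is the plain proportional split from the comment, shares_i = sd_shares * rq_weight_i / Σ rq_weight, and empty runqueues contribute NICE_0_LOAD so the denominator can no longer be zero. A small user-space C sketch of that arithmetic; the weights are made up and the MIN/MAX constants mirror the defaults of this era, used here only to show the clamp:

	#include <stdio.h>

	#define NICE_0_LOAD	1024UL
	#define MIN_SHARES	2UL
	#define MAX_SHARES	(1UL << 18)

	static unsigned long clampul(unsigned long v, unsigned long lo, unsigned long hi)
	{
		return v < lo ? lo : (v > hi ? hi : v);
	}

	int main(void)
	{
		/* Hypothetical per-cpu cfs_rq weights for one task group. */
		unsigned long weight[4] = { 2048, 0, 1024, 0 };
		unsigned long tg_shares = 1024;	/* group's total shares */
		unsigned long rq_weight = 0;
		int i;

		for (i = 0; i < 4; i++) {
			if (!weight[i])
				weight[i] = NICE_0_LOAD;	/* pretend one average task */
			rq_weight += weight[i];
		}

		for (i = 0; i < 4; i++) {
			unsigned long shares = tg_shares * weight[i] / rq_weight;
			shares = clampul(shares, MIN_SHARES, MAX_SHARES);
			printf("cpu%d: shares=%lu\n", i, shares);
		}
		return 0;
	}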
@@ -1612,6 +1607,39 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 
 #endif
 
+/*
+ * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
+ */
+static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(this_rq->lock)
+	__acquires(busiest->lock)
+	__acquires(this_rq->lock)
+{
+	int ret = 0;
+
+	if (unlikely(!irqs_disabled())) {
+		/* printk() doesn't work good under rq->lock */
+		spin_unlock(&this_rq->lock);
+		BUG_ON(1);
+	}
+	if (unlikely(!spin_trylock(&busiest->lock))) {
+		if (busiest < this_rq) {
+			spin_unlock(&this_rq->lock);
+			spin_lock(&busiest->lock);
+			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
+			ret = 1;
+		} else
+			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
+	}
+	return ret;
+}
+
+static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
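Note: moving these helpers up makes them visible to the newidle power-savings code added later in this patch. The contract is easy to get wrong: a non-zero return from double_lock_balance() means this_rq->lock was dropped and retaken to respect the address-ordered locking, so anything derived from it must be revalidated. A hedged sketch of a caller; pull_one_task and the field it rechecks are illustrative, not from this patch:

	/* Illustrative caller: this_rq->lock is already held, IRQs disabled. */
	static int pull_one_task(struct rq *this_rq, struct rq *busiest)
	{
		int pulled = 0;

		if (double_lock_balance(this_rq, busiest)) {
			/*
			 * this_rq->lock was released to take the lower-addressed
			 * lock first; recheck anything read before the call.
			 */
			if (unlikely(busiest->nr_running < 2))
				goto out;
		}

		/* ... move a task from busiest to this_rq here ... */
		pulled = 1;
	out:
		double_unlock_balance(this_rq, busiest);
		return pulled;
	}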
@@ -1845,6 +1873,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 
 	clock_offset = old_rq->clock - new_rq->clock;
 
+	trace_sched_migrate_task(p, task_cpu(p), new_cpu);
+
 #ifdef CONFIG_SCHEDSTATS
 	if (p->se.wait_start)
 		p->se.wait_start -= clock_offset;
@@ -2079,15 +2109,17 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 		int i;
 
 		/* Skip over this group if it has no CPUs allowed */
-		if (!cpus_intersects(group->cpumask, p->cpus_allowed))
+		if (!cpumask_intersects(sched_group_cpus(group),
+					&p->cpus_allowed))
 			continue;
 
-		local_group = cpu_isset(this_cpu, group->cpumask);
+		local_group = cpumask_test_cpu(this_cpu,
+					       sched_group_cpus(group));
 
 		/* Tally up the load of all CPUs in the group */
 		avg_load = 0;
 
-		for_each_cpu_mask_nr(i, group->cpumask) {
+		for_each_cpu(i, sched_group_cpus(group)) {
 			/* Bias balancing toward cpus of our domain */
 			if (local_group)
 				load = source_load(i, load_idx);
@@ -2119,17 +2151,14 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
  * find_idlest_cpu - find the idlest cpu among the cpus in group.
  */
 static int
-find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
-		cpumask_t *tmp)
+find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 {
 	unsigned long load, min_load = ULONG_MAX;
 	int idlest = -1;
 	int i;
 
 	/* Traverse only the allowed CPUs */
-	cpus_and(*tmp, group->cpumask, p->cpus_allowed);
-
-	for_each_cpu_mask_nr(i, *tmp) {
+	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
 		load = weighted_cpuload(i);
 
 		if (load < min_load || (load == min_load && i == this_cpu)) {
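Note: for_each_cpu_and() walks the logical AND of two masks per iteration, which is what lets the on-stack cpumask_t temporary disappear here. A minimal sketch of the same idiom; pick_allowed_cpu and the masks are illustrative:

	#include <linux/cpumask.h>

	/* Return the first online CPU the task is allowed on, or -1. */
	static int pick_allowed_cpu(const struct cpumask *allowed)
	{
		int cpu;

		/* No cpumask_t temporary; the AND is evaluated per iteration. */
		for_each_cpu_and(cpu, cpu_online_mask, allowed)
			return cpu;

		return -1;
	}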
@@ -2171,7 +2200,6 @@ static int sched_balance_self(int cpu, int flag)
 		update_shares(sd);
 
 	while (sd) {
-		cpumask_t span, tmpmask;
 		struct sched_group *group;
 		int new_cpu, weight;
 
@@ -2180,14 +2208,13 @@ static int sched_balance_self(int cpu, int flag)
 			continue;
 		}
 
-		span = sd->span;
 		group = find_idlest_group(sd, t, cpu);
 		if (!group) {
 			sd = sd->child;
 			continue;
 		}
 
-		new_cpu = find_idlest_cpu(group, t, cpu, &tmpmask);
+		new_cpu = find_idlest_cpu(group, t, cpu);
 		if (new_cpu == -1 || new_cpu == cpu) {
 			/* Now try balancing at a lower domain level of cpu */
 			sd = sd->child;
@@ -2196,10 +2223,10 @@ static int sched_balance_self(int cpu, int flag)
 
 		/* Now try balancing at a lower domain level of new_cpu */
 		cpu = new_cpu;
+		weight = cpumask_weight(sched_domain_span(sd));
 		sd = NULL;
-		weight = cpus_weight(span);
 		for_each_domain(cpu, tmp) {
-			if (weight <= cpus_weight(tmp->span))
+			if (weight <= cpumask_weight(sched_domain_span(tmp)))
 				break;
 			if (tmp->flags & flag)
 				sd = tmp;
@@ -2244,7 +2271,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 	cpu = task_cpu(p);
 
 	for_each_domain(this_cpu, sd) {
-		if (cpu_isset(cpu, sd->span)) {
+		if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
 			update_shares(sd);
 			break;
 		}
@@ -2254,6 +2281,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 
 	smp_wmb();
 	rq = task_rq_lock(p, &flags);
+	update_rq_clock(rq);
 	old_state = p->state;
 	if (!(old_state & state))
 		goto out;
@@ -2292,7 +2320,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 	else {
 		struct sched_domain *sd;
 		for_each_domain(this_cpu, sd) {
-			if (cpu_isset(cpu, sd->span)) {
+			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
 				schedstat_inc(sd, ttwu_wake_remote);
 				break;
 			}
@@ -2311,12 +2339,11 @@ out_activate:
 		schedstat_inc(p, se.nr_wakeups_local);
 	else
 		schedstat_inc(p, se.nr_wakeups_remote);
-	update_rq_clock(rq);
 	activate_task(rq, p, 1);
 	success = 1;
 
 out_running:
-	trace_sched_wakeup(rq, p);
+	trace_sched_wakeup(rq, p, success);
 	check_preempt_curr(rq, p, sync);
 
 	p->state = TASK_RUNNING;
@@ -2449,7 +2476,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		p->sched_class->task_new(rq, p);
 		inc_nr_running(rq);
 	}
-	trace_sched_wakeup_new(rq, p);
+	trace_sched_wakeup_new(rq, p, 1);
 	check_preempt_curr(rq, p, 0);
 #ifdef CONFIG_SMP
 	if (p->sched_class->task_wake_up)
@@ -2812,40 +2839,6 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 }
 
 /*
- * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
- */
-static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
-	__releases(this_rq->lock)
-	__acquires(busiest->lock)
-	__acquires(this_rq->lock)
-{
-	int ret = 0;
-
-	if (unlikely(!irqs_disabled())) {
-		/* printk() doesn't work good under rq->lock */
-		spin_unlock(&this_rq->lock);
-		BUG_ON(1);
-	}
-	if (unlikely(!spin_trylock(&busiest->lock))) {
-		if (busiest < this_rq) {
-			spin_unlock(&this_rq->lock);
-			spin_lock(&busiest->lock);
-			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
-			ret = 1;
-		} else
-			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
-	}
-	return ret;
-}
-
-static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
-	__releases(busiest->lock)
-{
-	spin_unlock(&busiest->lock);
-	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
-}
-
-/*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
  * allow dest_cpu, which will force the cpu onto dest_cpu. Then
@@ -2858,11 +2851,10 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
 	struct rq *rq;
 
 	rq = task_rq_lock(p, &flags);
-	if (!cpu_isset(dest_cpu, p->cpus_allowed)
+	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
 	    || unlikely(!cpu_active(dest_cpu)))
 		goto out;
 
-	trace_sched_migrate_task(rq, p, dest_cpu);
 	/* force the process onto the specified CPU */
 	if (migrate_task(p, dest_cpu, &req)) {
 		/* Need to wait for migration thread (might exit: take ref). */
@@ -2924,7 +2916,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
 	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
 	 * 3) are cache-hot on their current CPU.
 	 */
-	if (!cpu_isset(this_cpu, p->cpus_allowed)) {
+	if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
 		schedstat_inc(p, se.nr_failed_migrations_affine);
 		return 0;
 	}
@@ -3099,7 +3091,7 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
 static struct sched_group *
 find_busiest_group(struct sched_domain *sd, int this_cpu,
 		   unsigned long *imbalance, enum cpu_idle_type idle,
-		   int *sd_idle, const cpumask_t *cpus, int *balance)
+		   int *sd_idle, const struct cpumask *cpus, int *balance)
 {
 	struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
 	unsigned long max_load, avg_load, total_load, this_load, total_pwr;
@@ -3135,10 +3127,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		unsigned long sum_avg_load_per_task;
 		unsigned long avg_load_per_task;
 
-		local_group = cpu_isset(this_cpu, group->cpumask);
+		local_group = cpumask_test_cpu(this_cpu,
+					       sched_group_cpus(group));
 
 		if (local_group)
-			balance_cpu = first_cpu(group->cpumask);
+			balance_cpu = cpumask_first(sched_group_cpus(group));
 
 		/* Tally up the load of all CPUs in the group */
 		sum_weighted_load = sum_nr_running = avg_load = 0;
@@ -3147,13 +3140,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		max_cpu_load = 0;
 		min_cpu_load = ~0UL;
 
-		for_each_cpu_mask_nr(i, group->cpumask) {
-			struct rq *rq;
-
-			if (!cpu_isset(i, *cpus))
-				continue;
-
-			rq = cpu_rq(i);
+		for_each_cpu_and(i, sched_group_cpus(group), cpus) {
+			struct rq *rq = cpu_rq(i);
 
 			if (*sd_idle && rq->nr_running)
 				*sd_idle = 0;
@@ -3264,8 +3252,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		 */
 		if ((sum_nr_running < min_nr_running) ||
 		    (sum_nr_running == min_nr_running &&
-		     first_cpu(group->cpumask) <
-		     first_cpu(group_min->cpumask))) {
+		     cpumask_first(sched_group_cpus(group)) >
+		     cpumask_first(sched_group_cpus(group_min)))) {
 			group_min = group;
 			min_nr_running = sum_nr_running;
 			min_load_per_task = sum_weighted_load /
@@ -3280,8 +3268,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		if (sum_nr_running <= group_capacity - 1) {
 			if (sum_nr_running > leader_nr_running ||
 			    (sum_nr_running == leader_nr_running &&
-			     first_cpu(group->cpumask) >
-			     first_cpu(group_leader->cpumask))) {
+			     cpumask_first(sched_group_cpus(group)) <
+			     cpumask_first(sched_group_cpus(group_leader)))) {
 				group_leader = group;
 				leader_nr_running = sum_nr_running;
 			}
@@ -3407,6 +3395,10 @@ out_balanced:
 
 	if (this == group_leader && group_leader != group_min) {
 		*imbalance = min_load_per_task;
+		if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
+			cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
+				cpumask_first(sched_group_cpus(group_leader));
+		}
 		return group_min;
 	}
 #endif
@@ -3420,16 +3412,16 @@ ret:
  */
 static struct rq *
 find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
-		   unsigned long imbalance, const cpumask_t *cpus)
+		   unsigned long imbalance, const struct cpumask *cpus)
 {
 	struct rq *busiest = NULL, *rq;
 	unsigned long max_load = 0;
 	int i;
 
-	for_each_cpu_mask_nr(i, group->cpumask) {
+	for_each_cpu(i, sched_group_cpus(group)) {
 		unsigned long wl;
 
-		if (!cpu_isset(i, *cpus))
+		if (!cpumask_test_cpu(i, cpus))
 			continue;
 
 		rq = cpu_rq(i);
@@ -3459,7 +3451,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
  */
 static int load_balance(int this_cpu, struct rq *this_rq,
 			struct sched_domain *sd, enum cpu_idle_type idle,
-			int *balance, cpumask_t *cpus)
+			int *balance, struct cpumask *cpus)
 {
 	int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
 	struct sched_group *group;
@@ -3467,7 +3459,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	struct rq *busiest;
 	unsigned long flags;
 
-	cpus_setall(*cpus);
+	cpumask_setall(cpus);
 
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
@@ -3527,8 +3519,8 @@ redo:
 
 		/* All tasks on this runqueue were pinned by CPU affinity */
 		if (unlikely(all_pinned)) {
-			cpu_clear(cpu_of(busiest), *cpus);
-			if (!cpus_empty(*cpus))
+			cpumask_clear_cpu(cpu_of(busiest), cpus);
+			if (!cpumask_empty(cpus))
 				goto redo;
 			goto out_balanced;
 		}
@@ -3545,7 +3537,8 @@ redo:
 			/* don't kick the migration_thread, if the curr
 			 * task on busiest cpu can't be moved to this_cpu
 			 */
-			if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
+			if (!cpumask_test_cpu(this_cpu,
+					      &busiest->curr->cpus_allowed)) {
 				spin_unlock_irqrestore(&busiest->lock, flags);
 				all_pinned = 1;
 				goto out_one_pinned;
@@ -3620,7 +3613,7 @@ out:
  */
 static int
 load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
-			cpumask_t *cpus)
+			struct cpumask *cpus)
 {
 	struct sched_group *group;
 	struct rq *busiest = NULL;
@@ -3629,7 +3622,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
 	int sd_idle = 0;
 	int all_pinned = 0;
 
-	cpus_setall(*cpus);
+	cpumask_setall(cpus);
 
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
@@ -3673,17 +3666,71 @@ redo:
 		double_unlock_balance(this_rq, busiest);
 
 		if (unlikely(all_pinned)) {
-			cpu_clear(cpu_of(busiest), *cpus);
-			if (!cpus_empty(*cpus))
+			cpumask_clear_cpu(cpu_of(busiest), cpus);
+			if (!cpumask_empty(cpus))
 				goto redo;
 		}
 	}
 
 	if (!ld_moved) {
+		int active_balance = 0;
+
 		schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]);
 		if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
 		    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
 			return -1;
+
+		if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
+			return -1;
+
+		if (sd->nr_balance_failed++ < 2)
+			return -1;
+
+		/*
+		 * The only task running in a non-idle cpu can be moved to this
+		 * cpu in an attempt to completely freeup the other CPU
+		 * package. The same method used to move task in load_balance()
+		 * have been extended for load_balance_newidle() to speedup
+		 * consolidation at sched_mc=POWERSAVINGS_BALANCE_WAKEUP (2)
+		 *
+		 * The package power saving logic comes from
+		 * find_busiest_group(). If there are no imbalance, then
+		 * f_b_g() will return NULL. However when sched_mc={1,2} then
+		 * f_b_g() will select a group from which a running task may be
+		 * pulled to this cpu in order to make the other package idle.
+		 * If there is no opportunity to make a package idle and if
+		 * there are no imbalance, then f_b_g() will return NULL and no
+		 * action will be taken in load_balance_newidle().
+		 *
+		 * Under normal task pull operation due to imbalance, there
+		 * will be more than one task in the source run queue and
+		 * move_tasks() will succeed. ld_moved will be true and this
+		 * active balance code will not be triggered.
+		 */
+
+		/* Lock busiest in correct order while this_rq is held */
+		double_lock_balance(this_rq, busiest);
+
+		/*
+		 * don't kick the migration_thread, if the curr
+		 * task on busiest cpu can't be moved to this_cpu
+		 */
+		if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
+			double_unlock_balance(this_rq, busiest);
+			all_pinned = 1;
+			return ld_moved;
+		}
+
+		if (!busiest->active_balance) {
+			busiest->active_balance = 1;
+			busiest->push_cpu = this_cpu;
+			active_balance = 1;
+		}
+
+		double_unlock_balance(this_rq, busiest);
+		if (active_balance)
+			wake_up_process(busiest->migration_thread);
+
 	} else
 		sd->nr_balance_failed = 0;
 
@@ -3707,9 +3754,12 @@ out_balanced:
 static void idle_balance(int this_cpu, struct rq *this_rq)
 {
 	struct sched_domain *sd;
-	int pulled_task = -1;
+	int pulled_task = 0;
 	unsigned long next_balance = jiffies + HZ;
-	cpumask_t tmpmask;
+	cpumask_var_t tmpmask;
+
+	if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC))
+		return;
 
 	for_each_domain(this_cpu, sd) {
 		unsigned long interval;
@@ -3720,7 +3770,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 		if (sd->flags & SD_BALANCE_NEWIDLE)
 			/* If we've pulled tasks over stop searching: */
 			pulled_task = load_balance_newidle(this_cpu, this_rq,
-							   sd, &tmpmask);
+							   sd, tmpmask);
 
 		interval = msecs_to_jiffies(sd->balance_interval);
 		if (time_after(next_balance, sd->last_balance + interval))
@@ -3735,6 +3785,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 	 */
 		this_rq->next_balance = next_balance;
 	}
+	free_cpumask_var(tmpmask);
 }
 
 /*
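Note: this hunk shows the cpumask_var_t pattern the whole patch is moving toward: with CONFIG_CPUMASK_OFFSTACK the mask lives on the heap, so every on-stack cpumask_t becomes an alloc_cpumask_var()/free_cpumask_var() pair and allocation failure has to be tolerated. A hedged sketch of the same idiom outside the scheduler; my_count_online is an illustrative function, not from this patch:

	#include <linux/cpumask.h>
	#include <linux/errno.h>
	#include <linux/slab.h>

	static int my_count_online(const struct cpumask *allowed)
	{
		cpumask_var_t tmp;
		int n;

		/* May be a real allocation when CONFIG_CPUMASK_OFFSTACK=y. */
		if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
			return -ENOMEM;

		cpumask_and(tmp, cpu_online_mask, allowed);
		n = cpumask_weight(tmp);

		free_cpumask_var(tmp);
		return n;
	}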
@@ -3772,7 +3823,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 	/* Search for an sd spanning us and the target CPU. */
 	for_each_domain(target_cpu, sd) {
 		if ((sd->flags & SD_LOAD_BALANCE) &&
-		    cpu_isset(busiest_cpu, sd->span))
+		    cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
 				break;
 	}
 
@@ -3791,10 +3842,9 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 #ifdef CONFIG_NO_HZ
 static struct {
 	atomic_t load_balancer;
-	cpumask_t cpu_mask;
+	cpumask_var_t cpu_mask;
 } nohz ____cacheline_aligned = {
 	.load_balancer = ATOMIC_INIT(-1),
-	.cpu_mask = CPU_MASK_NONE,
 };
 
 /*
@@ -3822,7 +3872,7 @@ int select_nohz_load_balancer(int stop_tick)
 	int cpu = smp_processor_id();
 
 	if (stop_tick) {
-		cpu_set(cpu, nohz.cpu_mask);
+		cpumask_set_cpu(cpu, nohz.cpu_mask);
 		cpu_rq(cpu)->in_nohz_recently = 1;
 
 		/*
@@ -3836,7 +3886,7 @@ int select_nohz_load_balancer(int stop_tick)
 		}
 
 		/* time for ilb owner also to sleep */
-		if (cpus_weight(nohz.cpu_mask) == num_online_cpus()) {
+		if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
 			if (atomic_read(&nohz.load_balancer) == cpu)
 				atomic_set(&nohz.load_balancer, -1);
 			return 0;
@@ -3849,10 +3899,10 @@ int select_nohz_load_balancer(int stop_tick)
 		} else if (atomic_read(&nohz.load_balancer) == cpu)
 			return 1;
 	} else {
-		if (!cpu_isset(cpu, nohz.cpu_mask))
+		if (!cpumask_test_cpu(cpu, nohz.cpu_mask))
 			return 0;
 
-		cpu_clear(cpu, nohz.cpu_mask);
+		cpumask_clear_cpu(cpu, nohz.cpu_mask);
 
 		if (atomic_read(&nohz.load_balancer) == cpu)
 			if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
@@ -3880,7 +3930,11 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 	unsigned long next_balance = jiffies + 60*HZ;
 	int update_next_balance = 0;
 	int need_serialize;
-	cpumask_t tmp;
+	cpumask_var_t tmp;
+
+	/* Fails alloc? Rebalancing probably not a priority right now. */
+	if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
+		return;
 
 	for_each_domain(cpu, sd) {
 		if (!(sd->flags & SD_LOAD_BALANCE))
@@ -3905,7 +3959,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 		}
 
 		if (time_after_eq(jiffies, sd->last_balance + interval)) {
-			if (load_balance(cpu, rq, sd, idle, &balance, &tmp)) {
+			if (load_balance(cpu, rq, sd, idle, &balance, tmp)) {
 				/*
 				 * We've pulled tasks over so either we're no
 				 * longer idle, or one of our SMT siblings is
@@ -3939,6 +3993,8 @@ out:
 	 */
 	if (likely(update_next_balance))
 		rq->next_balance = next_balance;
+
+	free_cpumask_var(tmp);
 }
 
 /*
@@ -3963,12 +4019,13 @@ static void run_rebalance_domains(struct softirq_action *h)
 	 */
 	if (this_rq->idle_at_tick &&
 	    atomic_read(&nohz.load_balancer) == this_cpu) {
-		cpumask_t cpus = nohz.cpu_mask;
 		struct rq *rq;
 		int balance_cpu;
 
-		cpu_clear(this_cpu, cpus);
-		for_each_cpu_mask_nr(balance_cpu, cpus) {
+		for_each_cpu(balance_cpu, nohz.cpu_mask) {
+			if (balance_cpu == this_cpu)
+				continue;
+
 			/*
 			 * If this cpu gets work to do, stop the load balancing
 			 * work being done for other cpus. Next load
@@ -4006,7 +4063,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu)
 		rq->in_nohz_recently = 0;
 
 		if (atomic_read(&nohz.load_balancer) == cpu) {
-			cpu_clear(cpu, nohz.cpu_mask);
+			cpumask_clear_cpu(cpu, nohz.cpu_mask);
 			atomic_set(&nohz.load_balancer, -1);
 		}
 
@@ -4019,7 +4076,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu)
 			 * TBD: Traverse the sched domains and nominate
 			 * the nearest cpu in the nohz.cpu_mask.
 			 */
-			int ilb = first_cpu(nohz.cpu_mask);
+			int ilb = cpumask_first(nohz.cpu_mask);
 
 			if (ilb < nr_cpu_ids)
 				resched_cpu(ilb);
@@ -4031,7 +4088,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu)
 	 * cpus with ticks stopped, is it time for that to stop?
 	 */
 	if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu &&
-	    cpus_weight(nohz.cpu_mask) == num_online_cpus()) {
+	    cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
 		resched_cpu(cpu);
 		return;
 	}
@@ -4041,7 +4098,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu)
 	 * someone else, then no need raise the SCHED_SOFTIRQ
 	 */
 	if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu &&
-	    cpu_isset(cpu, nohz.cpu_mask))
+	    cpumask_test_cpu(cpu, nohz.cpu_mask))
 		return;
 #endif
 	if (time_after_eq(jiffies, rq->next_balance))
@@ -4093,13 +4150,17 @@ unsigned long long task_delta_exec(struct task_struct *p) | |||
4093 | * Account user cpu time to a process. | 4150 | * Account user cpu time to a process. |
4094 | * @p: the process that the cpu time gets accounted to | 4151 | * @p: the process that the cpu time gets accounted to |
4095 | * @cputime: the cpu time spent in user space since the last update | 4152 | * @cputime: the cpu time spent in user space since the last update |
4153 | * @cputime_scaled: cputime scaled by cpu frequency | ||
4096 | */ | 4154 | */ |
4097 | void account_user_time(struct task_struct *p, cputime_t cputime) | 4155 | void account_user_time(struct task_struct *p, cputime_t cputime, |
4156 | cputime_t cputime_scaled) | ||
4098 | { | 4157 | { |
4099 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 4158 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
4100 | cputime64_t tmp; | 4159 | cputime64_t tmp; |
4101 | 4160 | ||
4161 | /* Add user time to process. */ | ||
4102 | p->utime = cputime_add(p->utime, cputime); | 4162 | p->utime = cputime_add(p->utime, cputime); |
4163 | p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); | ||
4103 | account_group_user_time(p, cputime); | 4164 | account_group_user_time(p, cputime); |
4104 | 4165 | ||
4105 | /* Add user time to cpustat. */ | 4166 | /* Add user time to cpustat. */ |
@@ -4116,51 +4177,48 @@ void account_user_time(struct task_struct *p, cputime_t cputime) | |||
4116 | * Account guest cpu time to a process. | 4177 | * Account guest cpu time to a process. |
4117 | * @p: the process that the cpu time gets accounted to | 4178 | * @p: the process that the cpu time gets accounted to |
4118 | * @cputime: the cpu time spent in virtual machine since the last update | 4179 | * @cputime: the cpu time spent in virtual machine since the last update |
4180 | * @cputime_scaled: cputime scaled by cpu frequency | ||
4119 | */ | 4181 | */ |
4120 | static void account_guest_time(struct task_struct *p, cputime_t cputime) | 4182 | static void account_guest_time(struct task_struct *p, cputime_t cputime, |
4183 | cputime_t cputime_scaled) | ||
4121 | { | 4184 | { |
4122 | cputime64_t tmp; | 4185 | cputime64_t tmp; |
4123 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 4186 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
4124 | 4187 | ||
4125 | tmp = cputime_to_cputime64(cputime); | 4188 | tmp = cputime_to_cputime64(cputime); |
4126 | 4189 | ||
4190 | /* Add guest time to process. */ | ||
4127 | p->utime = cputime_add(p->utime, cputime); | 4191 | p->utime = cputime_add(p->utime, cputime); |
4192 | p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); | ||
4128 | account_group_user_time(p, cputime); | 4193 | account_group_user_time(p, cputime); |
4129 | p->gtime = cputime_add(p->gtime, cputime); | 4194 | p->gtime = cputime_add(p->gtime, cputime); |
4130 | 4195 | ||
4196 | /* Add guest time to cpustat. */ | ||
4131 | cpustat->user = cputime64_add(cpustat->user, tmp); | 4197 | cpustat->user = cputime64_add(cpustat->user, tmp); |
4132 | cpustat->guest = cputime64_add(cpustat->guest, tmp); | 4198 | cpustat->guest = cputime64_add(cpustat->guest, tmp); |
4133 | } | 4199 | } |
4134 | 4200 | ||
4135 | /* | 4201 | /* |
4136 | * Account scaled user cpu time to a process. | ||
4137 | * @p: the process that the cpu time gets accounted to | ||
4138 | * @cputime: the cpu time spent in user space since the last update | ||
4139 | */ | ||
4140 | void account_user_time_scaled(struct task_struct *p, cputime_t cputime) | ||
4141 | { | ||
4142 | p->utimescaled = cputime_add(p->utimescaled, cputime); | ||
4143 | } | ||
4144 | |||
4145 | /* | ||
4146 | * Account system cpu time to a process. | 4202 | * Account system cpu time to a process. |
4147 | * @p: the process that the cpu time gets accounted to | 4203 | * @p: the process that the cpu time gets accounted to |
4148 | * @hardirq_offset: the offset to subtract from hardirq_count() | 4204 | * @hardirq_offset: the offset to subtract from hardirq_count() |
4149 | * @cputime: the cpu time spent in kernel space since the last update | 4205 | * @cputime: the cpu time spent in kernel space since the last update |
4206 | * @cputime_scaled: cputime scaled by cpu frequency | ||
4150 | */ | 4207 | */ |
4151 | void account_system_time(struct task_struct *p, int hardirq_offset, | 4208 | void account_system_time(struct task_struct *p, int hardirq_offset, |
4152 | cputime_t cputime) | 4209 | cputime_t cputime, cputime_t cputime_scaled) |
4153 | { | 4210 | { |
4154 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 4211 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
4155 | struct rq *rq = this_rq(); | ||
4156 | cputime64_t tmp; | 4212 | cputime64_t tmp; |
4157 | 4213 | ||
4158 | if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { | 4214 | if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { |
4159 | account_guest_time(p, cputime); | 4215 | account_guest_time(p, cputime, cputime_scaled); |
4160 | return; | 4216 | return; |
4161 | } | 4217 | } |
4162 | 4218 | ||
4219 | /* Add system time to process. */ | ||
4163 | p->stime = cputime_add(p->stime, cputime); | 4220 | p->stime = cputime_add(p->stime, cputime); |
4221 | p->stimescaled = cputime_add(p->stimescaled, cputime_scaled); | ||
4164 | account_group_system_time(p, cputime); | 4222 | account_group_system_time(p, cputime); |
4165 | 4223 | ||
4166 | /* Add system time to cpustat. */ | 4224 | /* Add system time to cpustat. */ |
@@ -4169,50 +4227,85 @@ void account_system_time(struct task_struct *p, int hardirq_offset, | |||
4169 | cpustat->irq = cputime64_add(cpustat->irq, tmp); | 4227 | cpustat->irq = cputime64_add(cpustat->irq, tmp); |
4170 | else if (softirq_count()) | 4228 | else if (softirq_count()) |
4171 | cpustat->softirq = cputime64_add(cpustat->softirq, tmp); | 4229 | cpustat->softirq = cputime64_add(cpustat->softirq, tmp); |
4172 | else if (p != rq->idle) | ||
4173 | cpustat->system = cputime64_add(cpustat->system, tmp); | ||
4174 | else if (atomic_read(&rq->nr_iowait) > 0) | ||
4175 | cpustat->iowait = cputime64_add(cpustat->iowait, tmp); | ||
4176 | else | 4230 | else |
4177 | cpustat->idle = cputime64_add(cpustat->idle, tmp); | 4231 | cpustat->system = cputime64_add(cpustat->system, tmp); |
4232 | |||
4178 | /* Account for system time used */ | 4233 | /* Account for system time used */ |
4179 | acct_update_integrals(p); | 4234 | acct_update_integrals(p); |
4180 | } | 4235 | } |
4181 | 4236 | ||
4182 | /* | 4237 | /* |
4183 | * Account scaled system cpu time to a process. | 4238 | * Account for involuntary wait time. |
4184 | * @p: the process that the cpu time gets accounted to | 4239 | * @steal: the cpu time spent in involuntary wait |
4185 | * @hardirq_offset: the offset to subtract from hardirq_count() | ||
4186 | * @cputime: the cpu time spent in kernel space since the last update | ||
4187 | */ | 4240 | */ |
4188 | void account_system_time_scaled(struct task_struct *p, cputime_t cputime) | 4241 | void account_steal_time(cputime_t cputime) |
4189 | { | 4242 | { |
4190 | p->stimescaled = cputime_add(p->stimescaled, cputime); | 4243 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
4244 | cputime64_t cputime64 = cputime_to_cputime64(cputime); | ||
4245 | |||
4246 | cpustat->steal = cputime64_add(cpustat->steal, cputime64); | ||
4191 | } | 4247 | } |
4192 | 4248 | ||
4193 | /* | 4249 | /* |
4194 | * Account for involuntary wait time. | 4250 | * Account for idle time. |
4195 | * @p: the process from which the cpu time has been stolen | 4251 | * @cputime: the cpu time spent in idle wait |
4196 | * @steal: the cpu time spent in involuntary wait | ||
4197 | */ | 4252 | */ |
4198 | void account_steal_time(struct task_struct *p, cputime_t steal) | 4253 | void account_idle_time(cputime_t cputime) |
4199 | { | 4254 | { |
4200 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 4255 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
4201 | cputime64_t tmp = cputime_to_cputime64(steal); | 4256 | cputime64_t cputime64 = cputime_to_cputime64(cputime); |
4202 | struct rq *rq = this_rq(); | 4257 | struct rq *rq = this_rq(); |
4203 | 4258 | ||
4204 | if (p == rq->idle) { | 4259 | if (atomic_read(&rq->nr_iowait) > 0) |
4205 | p->stime = cputime_add(p->stime, steal); | 4260 | cpustat->iowait = cputime64_add(cpustat->iowait, cputime64); |
4206 | account_group_system_time(p, steal); | 4261 | else |
4207 | if (atomic_read(&rq->nr_iowait) > 0) | 4262 | cpustat->idle = cputime64_add(cpustat->idle, cputime64); |
4208 | cpustat->iowait = cputime64_add(cpustat->iowait, tmp); | 4263 | } |
4209 | else | 4264 | |
4210 | cpustat->idle = cputime64_add(cpustat->idle, tmp); | 4265 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING |
4211 | } else | 4266 | |
4212 | cpustat->steal = cputime64_add(cpustat->steal, tmp); | 4267 | /* |
4268 | * Account a single tick of cpu time. | ||
4269 | * @p: the process that the cpu time gets accounted to | ||
4270 | * @user_tick: indicates if the tick is a user or a system tick | ||
4271 | */ | ||
4272 | void account_process_tick(struct task_struct *p, int user_tick) | ||
4273 | { | ||
4274 | cputime_t one_jiffy = jiffies_to_cputime(1); | ||
4275 | cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy); | ||
4276 | struct rq *rq = this_rq(); | ||
4277 | |||
4278 | if (user_tick) | ||
4279 | account_user_time(p, one_jiffy, one_jiffy_scaled); | ||
4280 | else if (p != rq->idle) | ||
4281 | account_system_time(p, HARDIRQ_OFFSET, one_jiffy, | ||
4282 | one_jiffy_scaled); | ||
4283 | else | ||
4284 | account_idle_time(one_jiffy); | ||
4285 | } | ||
4286 | |||
4287 | /* | ||
4288 | * Account multiple ticks of steal time. | ||
4289 | * @p: the process from which the cpu time has been stolen | ||
4290 | * @ticks: number of stolen ticks | ||
4291 | */ | ||
4292 | void account_steal_ticks(unsigned long ticks) | ||
4293 | { | ||
4294 | account_steal_time(jiffies_to_cputime(ticks)); | ||
4213 | } | 4295 | } |
4214 | 4296 | ||
4215 | /* | 4297 | /* |
4298 | * Account multiple ticks of idle time. | ||
4299 | * @ticks: number of stolen ticks | ||
4300 | */ | ||
4301 | void account_idle_ticks(unsigned long ticks) | ||
4302 | { | ||
4303 | account_idle_time(jiffies_to_cputime(ticks)); | ||
4304 | } | ||
4305 | |||
4306 | #endif | ||
4307 | |||
4308 | /* | ||
4216 | * Use precise platform statistics if available: | 4309 | * Use precise platform statistics if available: |
4217 | */ | 4310 | */ |
4218 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 4311 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
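The reworked accounting above funnels every tick into one of the cpustat buckets: account_process_tick() decides between user, system and idle, and account_idle_time() further splits idle time into iowait vs. idle depending on rq->nr_iowait. A rough user-space model of that decision tree (the struct and function names below are invented for illustration, not kernel API):

#include <stdio.h>

/* Illustrative stand-in for the per-cpu cpustat buckets. */
struct cpu_stat {
    unsigned long long user, system, idle, iowait, steal;
};

/* Mirrors the account_process_tick() decision tree, one jiffy at a time. */
static void model_tick(struct cpu_stat *st, int user_tick, int is_idle_task,
                       int nr_iowait)
{
    if (user_tick)
        st->user++;
    else if (!is_idle_task)
        st->system++;
    else if (nr_iowait > 0)     /* idle task, but someone is waiting on I/O */
        st->iowait++;
    else
        st->idle++;
}

int main(void)
{
    struct cpu_stat st = { 0 };

    model_tick(&st, 1, 0, 0);   /* user tick */
    model_tick(&st, 0, 1, 2);   /* idle task while I/O is pending */
    model_tick(&st, 0, 1, 0);   /* plain idle */
    printf("user=%llu system=%llu iowait=%llu idle=%llu\n",
           st.user, st.system, st.iowait, st.idle);
    return 0;
}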
@@ -4339,7 +4432,7 @@ void __kprobes sub_preempt_count(int val) | |||
4339 | /* | 4432 | /* |
4340 | * Underflow? | 4433 | * Underflow? |
4341 | */ | 4434 | */ |
4342 | if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) | 4435 | if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked()))) |
4343 | return; | 4436 | return; |
4344 | /* | 4437 | /* |
4345 | * Is the spinlock portion underflowing? | 4438 | * Is the spinlock portion underflowing? |
@@ -5134,6 +5227,22 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) | |||
5134 | set_load_weight(p); | 5227 | set_load_weight(p); |
5135 | } | 5228 | } |
5136 | 5229 | ||
5230 | /* | ||
5231 | * check the target process has a UID that matches the current process's | ||
5232 | */ | ||
5233 | static bool check_same_owner(struct task_struct *p) | ||
5234 | { | ||
5235 | const struct cred *cred = current_cred(), *pcred; | ||
5236 | bool match; | ||
5237 | |||
5238 | rcu_read_lock(); | ||
5239 | pcred = __task_cred(p); | ||
5240 | match = (cred->euid == pcred->euid || | ||
5241 | cred->euid == pcred->uid); | ||
5242 | rcu_read_unlock(); | ||
5243 | return match; | ||
5244 | } | ||
5245 | |||
5137 | static int __sched_setscheduler(struct task_struct *p, int policy, | 5246 | static int __sched_setscheduler(struct task_struct *p, int policy, |
5138 | struct sched_param *param, bool user) | 5247 | struct sched_param *param, bool user) |
5139 | { | 5248 | { |
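check_same_owner() folds the two open-coded euid comparisons that used to live in __sched_setscheduler() and sched_setaffinity() into one helper, sampling the target's credentials under rcu_read_lock() because they can be swapped concurrently. Stripped of the RCU and struct cred plumbing, the ownership rule it implements is just this (the flattened type below is hypothetical):

#include <stdbool.h>
#include <sys/types.h>

/* Hypothetical flattened credential view, not the kernel's struct cred. */
struct cred_view {
    uid_t uid;      /* real UID */
    uid_t euid;     /* effective UID */
};

/* Same predicate __sched_setscheduler() and sched_setaffinity() now share:
 * the caller may act on the target if its effective UID matches either the
 * target's effective or real UID. */
bool same_owner(const struct cred_view *caller, const struct cred_view *target)
{
    return caller->euid == target->euid || caller->euid == target->uid;
}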
@@ -5193,8 +5302,7 @@ recheck: | |||
5193 | return -EPERM; | 5302 | return -EPERM; |
5194 | 5303 | ||
5195 | /* can't change other user's priorities */ | 5304 | /* can't change other user's priorities */ |
5196 | if ((current->euid != p->euid) && | 5305 | if (!check_same_owner(p)) |
5197 | (current->euid != p->uid)) | ||
5198 | return -EPERM; | 5306 | return -EPERM; |
5199 | } | 5307 | } |
5200 | 5308 | ||
@@ -5400,10 +5508,9 @@ out_unlock: | |||
5400 | return retval; | 5508 | return retval; |
5401 | } | 5509 | } |
5402 | 5510 | ||
5403 | long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) | 5511 | long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) |
5404 | { | 5512 | { |
5405 | cpumask_t cpus_allowed; | 5513 | cpumask_var_t cpus_allowed, new_mask; |
5406 | cpumask_t new_mask = *in_mask; | ||
5407 | struct task_struct *p; | 5514 | struct task_struct *p; |
5408 | int retval; | 5515 | int retval; |
5409 | 5516 | ||
@@ -5425,46 +5532,57 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) | |||
5425 | get_task_struct(p); | 5532 | get_task_struct(p); |
5426 | read_unlock(&tasklist_lock); | 5533 | read_unlock(&tasklist_lock); |
5427 | 5534 | ||
5535 | if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { | ||
5536 | retval = -ENOMEM; | ||
5537 | goto out_put_task; | ||
5538 | } | ||
5539 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { | ||
5540 | retval = -ENOMEM; | ||
5541 | goto out_free_cpus_allowed; | ||
5542 | } | ||
5428 | retval = -EPERM; | 5543 | retval = -EPERM; |
5429 | if ((current->euid != p->euid) && (current->euid != p->uid) && | 5544 | if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) |
5430 | !capable(CAP_SYS_NICE)) | ||
5431 | goto out_unlock; | 5545 | goto out_unlock; |
5432 | 5546 | ||
5433 | retval = security_task_setscheduler(p, 0, NULL); | 5547 | retval = security_task_setscheduler(p, 0, NULL); |
5434 | if (retval) | 5548 | if (retval) |
5435 | goto out_unlock; | 5549 | goto out_unlock; |
5436 | 5550 | ||
5437 | cpuset_cpus_allowed(p, &cpus_allowed); | 5551 | cpuset_cpus_allowed(p, cpus_allowed); |
5438 | cpus_and(new_mask, new_mask, cpus_allowed); | 5552 | cpumask_and(new_mask, in_mask, cpus_allowed); |
5439 | again: | 5553 | again: |
5440 | retval = set_cpus_allowed_ptr(p, &new_mask); | 5554 | retval = set_cpus_allowed_ptr(p, new_mask); |
5441 | 5555 | ||
5442 | if (!retval) { | 5556 | if (!retval) { |
5443 | cpuset_cpus_allowed(p, &cpus_allowed); | 5557 | cpuset_cpus_allowed(p, cpus_allowed); |
5444 | if (!cpus_subset(new_mask, cpus_allowed)) { | 5558 | if (!cpumask_subset(new_mask, cpus_allowed)) { |
5445 | /* | 5559 | /* |
5446 | * We must have raced with a concurrent cpuset | 5560 | * We must have raced with a concurrent cpuset |
5447 | * update. Just reset the cpus_allowed to the | 5561 | * update. Just reset the cpus_allowed to the |
5448 | * cpuset's cpus_allowed | 5562 | * cpuset's cpus_allowed |
5449 | */ | 5563 | */ |
5450 | new_mask = cpus_allowed; | 5564 | cpumask_copy(new_mask, cpus_allowed); |
5451 | goto again; | 5565 | goto again; |
5452 | } | 5566 | } |
5453 | } | 5567 | } |
5454 | out_unlock: | 5568 | out_unlock: |
5569 | free_cpumask_var(new_mask); | ||
5570 | out_free_cpus_allowed: | ||
5571 | free_cpumask_var(cpus_allowed); | ||
5572 | out_put_task: | ||
5455 | put_task_struct(p); | 5573 | put_task_struct(p); |
5456 | put_online_cpus(); | 5574 | put_online_cpus(); |
5457 | return retval; | 5575 | return retval; |
5458 | } | 5576 | } |
5459 | 5577 | ||
5460 | static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, | 5578 | static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, |
5461 | cpumask_t *new_mask) | 5579 | struct cpumask *new_mask) |
5462 | { | 5580 | { |
5463 | if (len < sizeof(cpumask_t)) { | 5581 | if (len < cpumask_size()) |
5464 | memset(new_mask, 0, sizeof(cpumask_t)); | 5582 | cpumask_clear(new_mask); |
5465 | } else if (len > sizeof(cpumask_t)) { | 5583 | else if (len > cpumask_size()) |
5466 | len = sizeof(cpumask_t); | 5584 | len = cpumask_size(); |
5467 | } | 5585 | |
5468 | return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; | 5586 | return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; |
5469 | } | 5587 | } |
5470 | 5588 | ||
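Because cpus_allowed and new_mask are now cpumask_var_t, sched_setaffinity() picks up the usual staged-cleanup shape: each allocation failure branches to a label that releases only what was obtained so far, in reverse order. A plain user-space sketch of the same pattern, with malloc standing in for alloc_cpumask_var() and arbitrary names:

#include <stdlib.h>
#include <errno.h>

/* Staged allocation with reverse-order unwind, as in sched_setaffinity(). */
int do_work(size_t nbytes)
{
    void *cpus_allowed, *new_mask;
    int retval;

    cpus_allowed = malloc(nbytes);
    if (!cpus_allowed)
        return -ENOMEM;

    new_mask = malloc(nbytes);
    if (!new_mask) {
        retval = -ENOMEM;
        goto out_free_cpus_allowed;
    }

    retval = 0;                 /* ... the real work would go here ... */

    free(new_mask);
out_free_cpus_allowed:
    free(cpus_allowed);
    return retval;
}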
@@ -5477,17 +5595,20 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, | |||
5477 | asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, | 5595 | asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, |
5478 | unsigned long __user *user_mask_ptr) | 5596 | unsigned long __user *user_mask_ptr) |
5479 | { | 5597 | { |
5480 | cpumask_t new_mask; | 5598 | cpumask_var_t new_mask; |
5481 | int retval; | 5599 | int retval; |
5482 | 5600 | ||
5483 | retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask); | 5601 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) |
5484 | if (retval) | 5602 | return -ENOMEM; |
5485 | return retval; | ||
5486 | 5603 | ||
5487 | return sched_setaffinity(pid, &new_mask); | 5604 | retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); |
5605 | if (retval == 0) | ||
5606 | retval = sched_setaffinity(pid, new_mask); | ||
5607 | free_cpumask_var(new_mask); | ||
5608 | return retval; | ||
5488 | } | 5609 | } |
5489 | 5610 | ||
5490 | long sched_getaffinity(pid_t pid, cpumask_t *mask) | 5611 | long sched_getaffinity(pid_t pid, struct cpumask *mask) |
5491 | { | 5612 | { |
5492 | struct task_struct *p; | 5613 | struct task_struct *p; |
5493 | int retval; | 5614 | int retval; |
@@ -5504,7 +5625,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask) | |||
5504 | if (retval) | 5625 | if (retval) |
5505 | goto out_unlock; | 5626 | goto out_unlock; |
5506 | 5627 | ||
5507 | cpus_and(*mask, p->cpus_allowed, cpu_online_map); | 5628 | cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); |
5508 | 5629 | ||
5509 | out_unlock: | 5630 | out_unlock: |
5510 | read_unlock(&tasklist_lock); | 5631 | read_unlock(&tasklist_lock); |
@@ -5523,19 +5644,24 @@ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len, | |||
5523 | unsigned long __user *user_mask_ptr) | 5644 | unsigned long __user *user_mask_ptr) |
5524 | { | 5645 | { |
5525 | int ret; | 5646 | int ret; |
5526 | cpumask_t mask; | 5647 | cpumask_var_t mask; |
5527 | 5648 | ||
5528 | if (len < sizeof(cpumask_t)) | 5649 | if (len < cpumask_size()) |
5529 | return -EINVAL; | 5650 | return -EINVAL; |
5530 | 5651 | ||
5531 | ret = sched_getaffinity(pid, &mask); | 5652 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) |
5532 | if (ret < 0) | 5653 | return -ENOMEM; |
5533 | return ret; | ||
5534 | 5654 | ||
5535 | if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t))) | 5655 | ret = sched_getaffinity(pid, mask); |
5536 | return -EFAULT; | 5656 | if (ret == 0) { |
5657 | if (copy_to_user(user_mask_ptr, mask, cpumask_size())) | ||
5658 | ret = -EFAULT; | ||
5659 | else | ||
5660 | ret = cpumask_size(); | ||
5661 | } | ||
5662 | free_cpumask_var(mask); | ||
5537 | 5663 | ||
5538 | return sizeof(cpumask_t); | 5664 | return ret; |
5539 | } | 5665 | } |
5540 | 5666 | ||
5541 | /** | 5667 | /** |
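With this change the syscall copies out and returns cpumask_size() bytes, i.e. the size of the kernel's configured mask, instead of a fixed sizeof(cpumask_t). Callers that want to cope with large nr_cpu_ids can size their buffer dynamically, roughly like this with glibc's CPU_ALLOC family (error handling trimmed; the 1024 bound is arbitrary):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    int ncpus = 1024;                   /* upper bound we are willing to handle */
    cpu_set_t *set = CPU_ALLOC(ncpus);
    size_t setsize = CPU_ALLOC_SIZE(ncpus);

    if (!set)
        return 1;
    CPU_ZERO_S(setsize, set);

    /* pid 0 == calling thread; the kernel fills in up to setsize bytes */
    if (sched_getaffinity(0, setsize, set) == 0)
        printf("CPU0 allowed: %d\n", CPU_ISSET_S(0, setsize, set));

    CPU_FREE(set);
    return 0;
}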
@@ -5877,7 +6003,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
5877 | idle->se.exec_start = sched_clock(); | 6003 | idle->se.exec_start = sched_clock(); |
5878 | 6004 | ||
5879 | idle->prio = idle->normal_prio = MAX_PRIO; | 6005 | idle->prio = idle->normal_prio = MAX_PRIO; |
5880 | idle->cpus_allowed = cpumask_of_cpu(cpu); | 6006 | cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); |
5881 | __set_task_cpu(idle, cpu); | 6007 | __set_task_cpu(idle, cpu); |
5882 | 6008 | ||
5883 | rq->curr = rq->idle = idle; | 6009 | rq->curr = rq->idle = idle; |
@@ -5896,6 +6022,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
5896 | * The idle tasks have their own, simple scheduling class: | 6022 | * The idle tasks have their own, simple scheduling class: |
5897 | */ | 6023 | */ |
5898 | idle->sched_class = &idle_sched_class; | 6024 | idle->sched_class = &idle_sched_class; |
6025 | ftrace_graph_init_task(idle); | ||
5899 | } | 6026 | } |
5900 | 6027 | ||
5901 | /* | 6028 | /* |
@@ -5903,9 +6030,9 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
5903 | * indicates which cpus entered this state. This is used | 6030 | * indicates which cpus entered this state. This is used |
5904 | * in the rcu update to wait only for active cpus. For system | 6031 | * in the rcu update to wait only for active cpus. For system |
5905 | * which do not switch off the HZ timer nohz_cpu_mask should | 6032 | * which do not switch off the HZ timer nohz_cpu_mask should |
5906 | * always be CPU_MASK_NONE. | 6033 | * always be CPU_BITS_NONE. |
5907 | */ | 6034 | */ |
5908 | cpumask_t nohz_cpu_mask = CPU_MASK_NONE; | 6035 | cpumask_var_t nohz_cpu_mask; |
5909 | 6036 | ||
5910 | /* | 6037 | /* |
5911 | * Increase the granularity value when there are more CPUs, | 6038 | * Increase the granularity value when there are more CPUs, |
@@ -5960,7 +6087,7 @@ static inline void sched_init_granularity(void) | |||
5960 | * task must not exit() & deallocate itself prematurely. The | 6087 | * task must not exit() & deallocate itself prematurely. The |
5961 | * call is not atomic; no spinlocks may be held. | 6088 | * call is not atomic; no spinlocks may be held. |
5962 | */ | 6089 | */ |
5963 | int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) | 6090 | int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) |
5964 | { | 6091 | { |
5965 | struct migration_req req; | 6092 | struct migration_req req; |
5966 | unsigned long flags; | 6093 | unsigned long flags; |
@@ -5968,13 +6095,13 @@ int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) | |||
5968 | int ret = 0; | 6095 | int ret = 0; |
5969 | 6096 | ||
5970 | rq = task_rq_lock(p, &flags); | 6097 | rq = task_rq_lock(p, &flags); |
5971 | if (!cpus_intersects(*new_mask, cpu_online_map)) { | 6098 | if (!cpumask_intersects(new_mask, cpu_online_mask)) { |
5972 | ret = -EINVAL; | 6099 | ret = -EINVAL; |
5973 | goto out; | 6100 | goto out; |
5974 | } | 6101 | } |
5975 | 6102 | ||
5976 | if (unlikely((p->flags & PF_THREAD_BOUND) && p != current && | 6103 | if (unlikely((p->flags & PF_THREAD_BOUND) && p != current && |
5977 | !cpus_equal(p->cpus_allowed, *new_mask))) { | 6104 | !cpumask_equal(&p->cpus_allowed, new_mask))) { |
5978 | ret = -EINVAL; | 6105 | ret = -EINVAL; |
5979 | goto out; | 6106 | goto out; |
5980 | } | 6107 | } |
@@ -5982,15 +6109,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) | |||
5982 | if (p->sched_class->set_cpus_allowed) | 6109 | if (p->sched_class->set_cpus_allowed) |
5983 | p->sched_class->set_cpus_allowed(p, new_mask); | 6110 | p->sched_class->set_cpus_allowed(p, new_mask); |
5984 | else { | 6111 | else { |
5985 | p->cpus_allowed = *new_mask; | 6112 | cpumask_copy(&p->cpus_allowed, new_mask); |
5986 | p->rt.nr_cpus_allowed = cpus_weight(*new_mask); | 6113 | p->rt.nr_cpus_allowed = cpumask_weight(new_mask); |
5987 | } | 6114 | } |
5988 | 6115 | ||
5989 | /* Can the task run on the task's current CPU? If so, we're done */ | 6116 | /* Can the task run on the task's current CPU? If so, we're done */ |
5990 | if (cpu_isset(task_cpu(p), *new_mask)) | 6117 | if (cpumask_test_cpu(task_cpu(p), new_mask)) |
5991 | goto out; | 6118 | goto out; |
5992 | 6119 | ||
5993 | if (migrate_task(p, any_online_cpu(*new_mask), &req)) { | 6120 | if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) { |
5994 | /* Need help from migration thread: drop lock and wait. */ | 6121 | /* Need help from migration thread: drop lock and wait. */ |
5995 | task_rq_unlock(rq, &flags); | 6122 | task_rq_unlock(rq, &flags); |
5996 | wake_up_process(rq->migration_thread); | 6123 | wake_up_process(rq->migration_thread); |
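The conversion in this hunk is mechanical: cpus_intersects(), cpu_isset() and any_online_cpu() become cpumask_intersects(), cpumask_test_cpu() and cpumask_any_and(), all thin wrappers over bitmap operations on nr_cpu_ids bits. A toy single-word model of the three helpers used here (real cpumasks span multiple longs; __builtin_ctzl is a GCC builtin):

#include <stdio.h>

typedef unsigned long mask_t;   /* one word stands in for a cpumask */

static int mask_intersects(mask_t a, mask_t b) { return (a & b) != 0; }
static int mask_test_cpu(int cpu, mask_t m)    { return (m >> cpu) & 1; }

/* cpumask_any_and(): index of a set bit in (a & b), here the lowest. */
static int mask_any_and(mask_t a, mask_t b)
{
    mask_t both = a & b;
    return both ? __builtin_ctzl(both) : -1;    /* -1 plays nr_cpu_ids */
}

int main(void)
{
    mask_t online = 0xful;      /* CPUs 0-3 online */
    mask_t new_mask = 0xcul;    /* task allowed on CPUs 2-3 */

    printf("intersects=%d dest=%d on_cpu2=%d\n",
           mask_intersects(new_mask, online),
           mask_any_and(online, new_mask),
           mask_test_cpu(2, new_mask));
    return 0;
}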
@@ -6032,7 +6159,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) | |||
6032 | if (task_cpu(p) != src_cpu) | 6159 | if (task_cpu(p) != src_cpu) |
6033 | goto done; | 6160 | goto done; |
6034 | /* Affinity changed (again). */ | 6161 | /* Affinity changed (again). */ |
6035 | if (!cpu_isset(dest_cpu, p->cpus_allowed)) | 6162 | if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) |
6036 | goto fail; | 6163 | goto fail; |
6037 | 6164 | ||
6038 | on_rq = p->se.on_rq; | 6165 | on_rq = p->se.on_rq; |
@@ -6126,54 +6253,44 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu) | |||
6126 | 6253 | ||
6127 | /* | 6254 | /* |
6128 | * Figure out where task on dead CPU should go, use force if necessary. | 6255 | * Figure out where task on dead CPU should go, use force if necessary. |
6129 | * NOTE: interrupts should be disabled by the caller | ||
6130 | */ | 6256 | */ |
6131 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) | 6257 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) |
6132 | { | 6258 | { |
6133 | unsigned long flags; | ||
6134 | cpumask_t mask; | ||
6135 | struct rq *rq; | ||
6136 | int dest_cpu; | 6259 | int dest_cpu; |
6260 | const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu)); | ||
6137 | 6261 | ||
6138 | do { | 6262 | again: |
6139 | /* On same node? */ | 6263 | /* Look for allowed, online CPU in same node. */ |
6140 | mask = node_to_cpumask(cpu_to_node(dead_cpu)); | 6264 | for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask) |
6141 | cpus_and(mask, mask, p->cpus_allowed); | 6265 | if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) |
6142 | dest_cpu = any_online_cpu(mask); | 6266 | goto move; |
6143 | 6267 | ||
6144 | /* On any allowed CPU? */ | 6268 | /* Any allowed, online CPU? */ |
6145 | if (dest_cpu >= nr_cpu_ids) | 6269 | dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask); |
6146 | dest_cpu = any_online_cpu(p->cpus_allowed); | 6270 | if (dest_cpu < nr_cpu_ids) |
6271 | goto move; | ||
6147 | 6272 | ||
6148 | /* No more Mr. Nice Guy. */ | 6273 | /* No more Mr. Nice Guy. */ |
6149 | if (dest_cpu >= nr_cpu_ids) { | 6274 | if (dest_cpu >= nr_cpu_ids) { |
6150 | cpumask_t cpus_allowed; | 6275 | cpuset_cpus_allowed_locked(p, &p->cpus_allowed); |
6276 | dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed); | ||
6151 | 6277 | ||
6152 | cpuset_cpus_allowed_locked(p, &cpus_allowed); | 6278 | /* |
6153 | /* | 6279 | * Don't tell them about moving exiting tasks or |
6154 | * Try to stay on the same cpuset, where the | 6280 | * kernel threads (both mm NULL), since they never |
6155 | * current cpuset may be a subset of all cpus. | 6281 | * leave kernel. |
6156 | * The cpuset_cpus_allowed_locked() variant of | 6282 | */ |
6157 | * cpuset_cpus_allowed() will not block. It must be | 6283 | if (p->mm && printk_ratelimit()) { |
6158 | * called within calls to cpuset_lock/cpuset_unlock. | 6284 | printk(KERN_INFO "process %d (%s) no " |
6159 | */ | 6285 | "longer affine to cpu%d\n", |
6160 | rq = task_rq_lock(p, &flags); | 6286 | task_pid_nr(p), p->comm, dead_cpu); |
6161 | p->cpus_allowed = cpus_allowed; | ||
6162 | dest_cpu = any_online_cpu(p->cpus_allowed); | ||
6163 | task_rq_unlock(rq, &flags); | ||
6164 | |||
6165 | /* | ||
6166 | * Don't tell them about moving exiting tasks or | ||
6167 | * kernel threads (both mm NULL), since they never | ||
6168 | * leave kernel. | ||
6169 | */ | ||
6170 | if (p->mm && printk_ratelimit()) { | ||
6171 | printk(KERN_INFO "process %d (%s) no " | ||
6172 | "longer affine to cpu%d\n", | ||
6173 | task_pid_nr(p), p->comm, dead_cpu); | ||
6174 | } | ||
6175 | } | 6287 | } |
6176 | } while (!__migrate_task_irq(p, dead_cpu, dest_cpu)); | 6288 | } |
6289 | |||
6290 | move: | ||
6291 | /* It can have affinity changed while we were choosing. */ | ||
6292 | if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu))) | ||
6293 | goto again; | ||
6177 | } | 6294 | } |
6178 | 6295 | ||
6179 | /* | 6296 | /* |
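The rewritten move_task_off_dead_cpu() spells out its fallback order: first an allowed online CPU on the dead CPU's node, then any allowed online CPU, and only as a last resort the wider set granted by the task's cpuset, retrying if the affinity changes underneath it. A sketch of that preference chain over single-word masks (mask values and the helper are illustrative only):

#include <stdio.h>

typedef unsigned long mask_t;

static int first_in(mask_t m) { return m ? __builtin_ctzl(m) : -1; }

/* Preference order used when evacuating tasks from a dead CPU. */
static int pick_dest_cpu(mask_t allowed, mask_t online, mask_t node,
                         mask_t cpuset_fallback)
{
    int cpu;

    if ((cpu = first_in(allowed & online & node)) >= 0)
        return cpu;                             /* same node */
    if ((cpu = first_in(allowed & online)) >= 0)
        return cpu;                             /* any allowed, online CPU */
    return first_in(cpuset_fallback & online);  /* no more Mr. Nice Guy */
}

int main(void)
{
    /* CPUs 0-3 online, node holds CPUs 0-1, task allowed only on CPU 1. */
    printf("dest=%d\n", pick_dest_cpu(0x2, 0xf, 0x3, 0xf));
    return 0;
}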
@@ -6185,7 +6302,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) | |||
6185 | */ | 6302 | */ |
6186 | static void migrate_nr_uninterruptible(struct rq *rq_src) | 6303 | static void migrate_nr_uninterruptible(struct rq *rq_src) |
6187 | { | 6304 | { |
6188 | struct rq *rq_dest = cpu_rq(any_online_cpu(*CPU_MASK_ALL_PTR)); | 6305 | struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask)); |
6189 | unsigned long flags; | 6306 | unsigned long flags; |
6190 | 6307 | ||
6191 | local_irq_save(flags); | 6308 | local_irq_save(flags); |
@@ -6475,7 +6592,7 @@ static void set_rq_online(struct rq *rq) | |||
6475 | if (!rq->online) { | 6592 | if (!rq->online) { |
6476 | const struct sched_class *class; | 6593 | const struct sched_class *class; |
6477 | 6594 | ||
6478 | cpu_set(rq->cpu, rq->rd->online); | 6595 | cpumask_set_cpu(rq->cpu, rq->rd->online); |
6479 | rq->online = 1; | 6596 | rq->online = 1; |
6480 | 6597 | ||
6481 | for_each_class(class) { | 6598 | for_each_class(class) { |
@@ -6495,7 +6612,7 @@ static void set_rq_offline(struct rq *rq) | |||
6495 | class->rq_offline(rq); | 6612 | class->rq_offline(rq); |
6496 | } | 6613 | } |
6497 | 6614 | ||
6498 | cpu_clear(rq->cpu, rq->rd->online); | 6615 | cpumask_clear_cpu(rq->cpu, rq->rd->online); |
6499 | rq->online = 0; | 6616 | rq->online = 0; |
6500 | } | 6617 | } |
6501 | } | 6618 | } |
@@ -6536,7 +6653,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
6536 | rq = cpu_rq(cpu); | 6653 | rq = cpu_rq(cpu); |
6537 | spin_lock_irqsave(&rq->lock, flags); | 6654 | spin_lock_irqsave(&rq->lock, flags); |
6538 | if (rq->rd) { | 6655 | if (rq->rd) { |
6539 | BUG_ON(!cpu_isset(cpu, rq->rd->span)); | 6656 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
6540 | 6657 | ||
6541 | set_rq_online(rq); | 6658 | set_rq_online(rq); |
6542 | } | 6659 | } |
@@ -6550,7 +6667,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
6550 | break; | 6667 | break; |
6551 | /* Unbind it from offline cpu so it can run. Fall thru. */ | 6668 | /* Unbind it from offline cpu so it can run. Fall thru. */ |
6552 | kthread_bind(cpu_rq(cpu)->migration_thread, | 6669 | kthread_bind(cpu_rq(cpu)->migration_thread, |
6553 | any_online_cpu(cpu_online_map)); | 6670 | cpumask_any(cpu_online_mask)); |
6554 | kthread_stop(cpu_rq(cpu)->migration_thread); | 6671 | kthread_stop(cpu_rq(cpu)->migration_thread); |
6555 | cpu_rq(cpu)->migration_thread = NULL; | 6672 | cpu_rq(cpu)->migration_thread = NULL; |
6556 | break; | 6673 | break; |
@@ -6587,7 +6704,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
6587 | req = list_entry(rq->migration_queue.next, | 6704 | req = list_entry(rq->migration_queue.next, |
6588 | struct migration_req, list); | 6705 | struct migration_req, list); |
6589 | list_del_init(&req->list); | 6706 | list_del_init(&req->list); |
6707 | spin_unlock_irq(&rq->lock); | ||
6590 | complete(&req->done); | 6708 | complete(&req->done); |
6709 | spin_lock_irq(&rq->lock); | ||
6591 | } | 6710 | } |
6592 | spin_unlock_irq(&rq->lock); | 6711 | spin_unlock_irq(&rq->lock); |
6593 | break; | 6712 | break; |
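Here the pending migration request's completion is now signalled with rq->lock temporarily dropped, presumably because the woken waiter may itself need that lock, or free the request, before the waker is finished. A rough pthread analogue of "don't signal a completion while holding the lock the waiter needs" (the struct below is a toy, not the kernel's struct completion):

#include <pthread.h>

/* Toy completion: the waiter will want rq_lock as soon as it wakes, so the
 * waker drops it around the signal, as the hunk above does with rq->lock
 * around complete(). */
struct toy_completion {
    pthread_mutex_t lock;
    pthread_cond_t  wait;
    int done;
};

void complete_outside_lock(struct toy_completion *c, pthread_mutex_t *rq_lock)
{
    pthread_mutex_unlock(rq_lock);      /* drop rq->lock ... */

    pthread_mutex_lock(&c->lock);
    c->done = 1;
    pthread_cond_signal(&c->wait);
    pthread_mutex_unlock(&c->lock);

    pthread_mutex_lock(rq_lock);        /* ... and retake it afterwards */
}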
@@ -6598,7 +6717,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
6598 | rq = cpu_rq(cpu); | 6717 | rq = cpu_rq(cpu); |
6599 | spin_lock_irqsave(&rq->lock, flags); | 6718 | spin_lock_irqsave(&rq->lock, flags); |
6600 | if (rq->rd) { | 6719 | if (rq->rd) { |
6601 | BUG_ON(!cpu_isset(cpu, rq->rd->span)); | 6720 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
6602 | set_rq_offline(rq); | 6721 | set_rq_offline(rq); |
6603 | } | 6722 | } |
6604 | spin_unlock_irqrestore(&rq->lock, flags); | 6723 | spin_unlock_irqrestore(&rq->lock, flags); |
@@ -6636,36 +6755,14 @@ early_initcall(migration_init); | |||
6636 | 6755 | ||
6637 | #ifdef CONFIG_SCHED_DEBUG | 6756 | #ifdef CONFIG_SCHED_DEBUG |
6638 | 6757 | ||
6639 | static inline const char *sd_level_to_string(enum sched_domain_level lvl) | ||
6640 | { | ||
6641 | switch (lvl) { | ||
6642 | case SD_LV_NONE: | ||
6643 | return "NONE"; | ||
6644 | case SD_LV_SIBLING: | ||
6645 | return "SIBLING"; | ||
6646 | case SD_LV_MC: | ||
6647 | return "MC"; | ||
6648 | case SD_LV_CPU: | ||
6649 | return "CPU"; | ||
6650 | case SD_LV_NODE: | ||
6651 | return "NODE"; | ||
6652 | case SD_LV_ALLNODES: | ||
6653 | return "ALLNODES"; | ||
6654 | case SD_LV_MAX: | ||
6655 | return "MAX"; | ||
6656 | |||
6657 | } | ||
6658 | return "MAX"; | ||
6659 | } | ||
6660 | |||
6661 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | 6758 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, |
6662 | cpumask_t *groupmask) | 6759 | struct cpumask *groupmask) |
6663 | { | 6760 | { |
6664 | struct sched_group *group = sd->groups; | 6761 | struct sched_group *group = sd->groups; |
6665 | char str[256]; | 6762 | char str[256]; |
6666 | 6763 | ||
6667 | cpulist_scnprintf(str, sizeof(str), sd->span); | 6764 | cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd)); |
6668 | cpus_clear(*groupmask); | 6765 | cpumask_clear(groupmask); |
6669 | 6766 | ||
6670 | printk(KERN_DEBUG "%*s domain %d: ", level, "", level); | 6767 | printk(KERN_DEBUG "%*s domain %d: ", level, "", level); |
6671 | 6768 | ||
@@ -6677,14 +6774,13 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
6677 | return -1; | 6774 | return -1; |
6678 | } | 6775 | } |
6679 | 6776 | ||
6680 | printk(KERN_CONT "span %s level %s\n", | 6777 | printk(KERN_CONT "span %s level %s\n", str, sd->name); |
6681 | str, sd_level_to_string(sd->level)); | ||
6682 | 6778 | ||
6683 | if (!cpu_isset(cpu, sd->span)) { | 6779 | if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
6684 | printk(KERN_ERR "ERROR: domain->span does not contain " | 6780 | printk(KERN_ERR "ERROR: domain->span does not contain " |
6685 | "CPU%d\n", cpu); | 6781 | "CPU%d\n", cpu); |
6686 | } | 6782 | } |
6687 | if (!cpu_isset(cpu, group->cpumask)) { | 6783 | if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { |
6688 | printk(KERN_ERR "ERROR: domain->groups does not contain" | 6784 | printk(KERN_ERR "ERROR: domain->groups does not contain" |
6689 | " CPU%d\n", cpu); | 6785 | " CPU%d\n", cpu); |
6690 | } | 6786 | } |
@@ -6704,31 +6800,32 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
6704 | break; | 6800 | break; |
6705 | } | 6801 | } |
6706 | 6802 | ||
6707 | if (!cpus_weight(group->cpumask)) { | 6803 | if (!cpumask_weight(sched_group_cpus(group))) { |
6708 | printk(KERN_CONT "\n"); | 6804 | printk(KERN_CONT "\n"); |
6709 | printk(KERN_ERR "ERROR: empty group\n"); | 6805 | printk(KERN_ERR "ERROR: empty group\n"); |
6710 | break; | 6806 | break; |
6711 | } | 6807 | } |
6712 | 6808 | ||
6713 | if (cpus_intersects(*groupmask, group->cpumask)) { | 6809 | if (cpumask_intersects(groupmask, sched_group_cpus(group))) { |
6714 | printk(KERN_CONT "\n"); | 6810 | printk(KERN_CONT "\n"); |
6715 | printk(KERN_ERR "ERROR: repeated CPUs\n"); | 6811 | printk(KERN_ERR "ERROR: repeated CPUs\n"); |
6716 | break; | 6812 | break; |
6717 | } | 6813 | } |
6718 | 6814 | ||
6719 | cpus_or(*groupmask, *groupmask, group->cpumask); | 6815 | cpumask_or(groupmask, groupmask, sched_group_cpus(group)); |
6720 | 6816 | ||
6721 | cpulist_scnprintf(str, sizeof(str), group->cpumask); | 6817 | cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group)); |
6722 | printk(KERN_CONT " %s", str); | 6818 | printk(KERN_CONT " %s", str); |
6723 | 6819 | ||
6724 | group = group->next; | 6820 | group = group->next; |
6725 | } while (group != sd->groups); | 6821 | } while (group != sd->groups); |
6726 | printk(KERN_CONT "\n"); | 6822 | printk(KERN_CONT "\n"); |
6727 | 6823 | ||
6728 | if (!cpus_equal(sd->span, *groupmask)) | 6824 | if (!cpumask_equal(sched_domain_span(sd), groupmask)) |
6729 | printk(KERN_ERR "ERROR: groups don't span domain->span\n"); | 6825 | printk(KERN_ERR "ERROR: groups don't span domain->span\n"); |
6730 | 6826 | ||
6731 | if (sd->parent && !cpus_subset(*groupmask, sd->parent->span)) | 6827 | if (sd->parent && |
6828 | !cpumask_subset(groupmask, sched_domain_span(sd->parent))) | ||
6732 | printk(KERN_ERR "ERROR: parent span is not a superset " | 6829 | printk(KERN_ERR "ERROR: parent span is not a superset " |
6733 | "of domain->span\n"); | 6830 | "of domain->span\n"); |
6734 | return 0; | 6831 | return 0; |
@@ -6736,7 +6833,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
6736 | 6833 | ||
6737 | static void sched_domain_debug(struct sched_domain *sd, int cpu) | 6834 | static void sched_domain_debug(struct sched_domain *sd, int cpu) |
6738 | { | 6835 | { |
6739 | cpumask_t *groupmask; | 6836 | cpumask_var_t groupmask; |
6740 | int level = 0; | 6837 | int level = 0; |
6741 | 6838 | ||
6742 | if (!sd) { | 6839 | if (!sd) { |
@@ -6746,8 +6843,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
6746 | 6843 | ||
6747 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); | 6844 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); |
6748 | 6845 | ||
6749 | groupmask = kmalloc(sizeof(cpumask_t), GFP_KERNEL); | 6846 | if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) { |
6750 | if (!groupmask) { | ||
6751 | printk(KERN_DEBUG "Cannot load-balance (out of memory)\n"); | 6847 | printk(KERN_DEBUG "Cannot load-balance (out of memory)\n"); |
6752 | return; | 6848 | return; |
6753 | } | 6849 | } |
@@ -6760,7 +6856,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
6760 | if (!sd) | 6856 | if (!sd) |
6761 | break; | 6857 | break; |
6762 | } | 6858 | } |
6763 | kfree(groupmask); | 6859 | free_cpumask_var(groupmask); |
6764 | } | 6860 | } |
6765 | #else /* !CONFIG_SCHED_DEBUG */ | 6861 | #else /* !CONFIG_SCHED_DEBUG */ |
6766 | # define sched_domain_debug(sd, cpu) do { } while (0) | 6862 | # define sched_domain_debug(sd, cpu) do { } while (0) |
@@ -6768,7 +6864,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
6768 | 6864 | ||
6769 | static int sd_degenerate(struct sched_domain *sd) | 6865 | static int sd_degenerate(struct sched_domain *sd) |
6770 | { | 6866 | { |
6771 | if (cpus_weight(sd->span) == 1) | 6867 | if (cpumask_weight(sched_domain_span(sd)) == 1) |
6772 | return 1; | 6868 | return 1; |
6773 | 6869 | ||
6774 | /* Following flags need at least 2 groups */ | 6870 | /* Following flags need at least 2 groups */ |
@@ -6799,7 +6895,7 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) | |||
6799 | if (sd_degenerate(parent)) | 6895 | if (sd_degenerate(parent)) |
6800 | return 1; | 6896 | return 1; |
6801 | 6897 | ||
6802 | if (!cpus_equal(sd->span, parent->span)) | 6898 | if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) |
6803 | return 0; | 6899 | return 0; |
6804 | 6900 | ||
6805 | /* Does parent contain flags not in child? */ | 6901 | /* Does parent contain flags not in child? */ |
@@ -6814,6 +6910,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) | |||
6814 | SD_BALANCE_EXEC | | 6910 | SD_BALANCE_EXEC | |
6815 | SD_SHARE_CPUPOWER | | 6911 | SD_SHARE_CPUPOWER | |
6816 | SD_SHARE_PKG_RESOURCES); | 6912 | SD_SHARE_PKG_RESOURCES); |
6913 | if (nr_node_ids == 1) | ||
6914 | pflags &= ~SD_SERIALIZE; | ||
6817 | } | 6915 | } |
6818 | if (~cflags & pflags) | 6916 | if (~cflags & pflags) |
6819 | return 0; | 6917 | return 0; |
@@ -6821,6 +6919,16 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) | |||
6821 | return 1; | 6919 | return 1; |
6822 | } | 6920 | } |
6823 | 6921 | ||
6922 | static void free_rootdomain(struct root_domain *rd) | ||
6923 | { | ||
6924 | cpupri_cleanup(&rd->cpupri); | ||
6925 | |||
6926 | free_cpumask_var(rd->rto_mask); | ||
6927 | free_cpumask_var(rd->online); | ||
6928 | free_cpumask_var(rd->span); | ||
6929 | kfree(rd); | ||
6930 | } | ||
6931 | |||
6824 | static void rq_attach_root(struct rq *rq, struct root_domain *rd) | 6932 | static void rq_attach_root(struct rq *rq, struct root_domain *rd) |
6825 | { | 6933 | { |
6826 | unsigned long flags; | 6934 | unsigned long flags; |
@@ -6830,38 +6938,63 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) | |||
6830 | if (rq->rd) { | 6938 | if (rq->rd) { |
6831 | struct root_domain *old_rd = rq->rd; | 6939 | struct root_domain *old_rd = rq->rd; |
6832 | 6940 | ||
6833 | if (cpu_isset(rq->cpu, old_rd->online)) | 6941 | if (cpumask_test_cpu(rq->cpu, old_rd->online)) |
6834 | set_rq_offline(rq); | 6942 | set_rq_offline(rq); |
6835 | 6943 | ||
6836 | cpu_clear(rq->cpu, old_rd->span); | 6944 | cpumask_clear_cpu(rq->cpu, old_rd->span); |
6837 | 6945 | ||
6838 | if (atomic_dec_and_test(&old_rd->refcount)) | 6946 | if (atomic_dec_and_test(&old_rd->refcount)) |
6839 | kfree(old_rd); | 6947 | free_rootdomain(old_rd); |
6840 | } | 6948 | } |
6841 | 6949 | ||
6842 | atomic_inc(&rd->refcount); | 6950 | atomic_inc(&rd->refcount); |
6843 | rq->rd = rd; | 6951 | rq->rd = rd; |
6844 | 6952 | ||
6845 | cpu_set(rq->cpu, rd->span); | 6953 | cpumask_set_cpu(rq->cpu, rd->span); |
6846 | if (cpu_isset(rq->cpu, cpu_online_map)) | 6954 | if (cpumask_test_cpu(rq->cpu, cpu_online_mask)) |
6847 | set_rq_online(rq); | 6955 | set_rq_online(rq); |
6848 | 6956 | ||
6849 | spin_unlock_irqrestore(&rq->lock, flags); | 6957 | spin_unlock_irqrestore(&rq->lock, flags); |
6850 | } | 6958 | } |
6851 | 6959 | ||
6852 | static void init_rootdomain(struct root_domain *rd) | 6960 | static int init_rootdomain(struct root_domain *rd, bool bootmem) |
6853 | { | 6961 | { |
6854 | memset(rd, 0, sizeof(*rd)); | 6962 | memset(rd, 0, sizeof(*rd)); |
6855 | 6963 | ||
6856 | cpus_clear(rd->span); | 6964 | if (bootmem) { |
6857 | cpus_clear(rd->online); | 6965 | alloc_bootmem_cpumask_var(&def_root_domain.span); |
6966 | alloc_bootmem_cpumask_var(&def_root_domain.online); | ||
6967 | alloc_bootmem_cpumask_var(&def_root_domain.rto_mask); | ||
6968 | cpupri_init(&rd->cpupri, true); | ||
6969 | return 0; | ||
6970 | } | ||
6858 | 6971 | ||
6859 | cpupri_init(&rd->cpupri); | 6972 | if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) |
6973 | goto free_rd; | ||
6974 | if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) | ||
6975 | goto free_span; | ||
6976 | if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) | ||
6977 | goto free_online; | ||
6978 | |||
6979 | if (cpupri_init(&rd->cpupri, false) != 0) | ||
6980 | goto free_rto_mask; | ||
6981 | return 0; | ||
6982 | |||
6983 | free_rto_mask: | ||
6984 | free_cpumask_var(rd->rto_mask); | ||
6985 | free_online: | ||
6986 | free_cpumask_var(rd->online); | ||
6987 | free_span: | ||
6988 | free_cpumask_var(rd->span); | ||
6989 | free_rd: | ||
6990 | kfree(rd); | ||
6991 | return -ENOMEM; | ||
6860 | } | 6992 | } |
6861 | 6993 | ||
6862 | static void init_defrootdomain(void) | 6994 | static void init_defrootdomain(void) |
6863 | { | 6995 | { |
6864 | init_rootdomain(&def_root_domain); | 6996 | init_rootdomain(&def_root_domain, true); |
6997 | |||
6865 | atomic_set(&def_root_domain.refcount, 1); | 6998 | atomic_set(&def_root_domain.refcount, 1); |
6866 | } | 6999 | } |
6867 | 7000 | ||
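The bootmem branch of init_rootdomain() exists because the default root domain is initialised before the regular allocators are up, while cpumask_var_t itself compiles to either a plain array (small NR_CPUS) or a pointer that genuinely needs allocating (CONFIG_CPUMASK_OFFSTACK). A stripped-down model of the two flavours; the OFFSTACK macro and all names here are stand-ins, not kernel headers:

#include <stdlib.h>
#include <stdbool.h>

#define MY_NR_CPUS 4096
#define WORDS ((MY_NR_CPUS + 63) / 64)

#ifdef OFFSTACK                 /* large NR_CPUS: the mask lives on the heap */
typedef unsigned long *mask_var_t;
static bool alloc_mask_var(mask_var_t *m)
{
    *m = calloc(WORDS, sizeof(unsigned long));
    return *m != NULL;
}
static void free_mask_var(mask_var_t m) { free(m); }
#else                           /* small NR_CPUS: the mask is just an array */
typedef unsigned long mask_var_t[WORDS];
static bool alloc_mask_var(mask_var_t *m) { (void)m; return true; }
static void free_mask_var(mask_var_t m)   { (void)m; }
#endif

int main(void)
{
    mask_var_t span;

    if (!alloc_mask_var(&span))
        return 1;
    span[0] |= 1;               /* callers look the same in both flavours */
    free_mask_var(span);
    return 0;
}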
@@ -6873,7 +7006,10 @@ static struct root_domain *alloc_rootdomain(void) | |||
6873 | if (!rd) | 7006 | if (!rd) |
6874 | return NULL; | 7007 | return NULL; |
6875 | 7008 | ||
6876 | init_rootdomain(rd); | 7009 | if (init_rootdomain(rd, false) != 0) { |
7010 | kfree(rd); | ||
7011 | return NULL; | ||
7012 | } | ||
6877 | 7013 | ||
6878 | return rd; | 7014 | return rd; |
6879 | } | 7015 | } |
@@ -6915,19 +7051,12 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) | |||
6915 | } | 7051 | } |
6916 | 7052 | ||
6917 | /* cpus with isolated domains */ | 7053 | /* cpus with isolated domains */ |
6918 | static cpumask_t cpu_isolated_map = CPU_MASK_NONE; | 7054 | static cpumask_var_t cpu_isolated_map; |
6919 | 7055 | ||
6920 | /* Setup the mask of cpus configured for isolated domains */ | 7056 | /* Setup the mask of cpus configured for isolated domains */ |
6921 | static int __init isolated_cpu_setup(char *str) | 7057 | static int __init isolated_cpu_setup(char *str) |
6922 | { | 7058 | { |
6923 | static int __initdata ints[NR_CPUS]; | 7059 | cpulist_parse(str, cpu_isolated_map); |
6924 | int i; | ||
6925 | |||
6926 | str = get_options(str, ARRAY_SIZE(ints), ints); | ||
6927 | cpus_clear(cpu_isolated_map); | ||
6928 | for (i = 1; i <= ints[0]; i++) | ||
6929 | if (ints[i] < NR_CPUS) | ||
6930 | cpu_set(ints[i], cpu_isolated_map); | ||
6931 | return 1; | 7060 | return 1; |
6932 | } | 7061 | } |
6933 | 7062 | ||
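isolated_cpu_setup() now defers to cpulist_parse() rather than hand-rolling get_options(), so "isolcpus=" takes the usual list syntax such as 1,3-5. A minimal user-space model of what such a parser does, limited to 64 CPUs and assuming well-formed input:

#include <stdio.h>
#include <stdlib.h>

typedef unsigned long mask_t;   /* toy cpumask, CPUs 0..63 only */

/* Minimal model of cpulist_parse(): accepts "N" and "N-M", comma separated. */
static mask_t parse_cpulist(const char *s)
{
    mask_t mask = 0;
    char *end;

    while (*s) {
        long a = strtol(s, &end, 10), b = a;

        if (end == s)           /* not a digit: stop on malformed input */
            break;
        if (*end == '-')
            b = strtol(end + 1, &end, 10);
        for (long cpu = a; cpu <= b; cpu++)
            mask |= 1ul << cpu;
        s = (*end == ',') ? end + 1 : end;
    }
    return mask;
}

int main(void)
{
    printf("isolcpus=1,3-5 -> 0x%lx\n", parse_cpulist("1,3-5"));  /* 0x3a */
    return 0;
}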
@@ -6936,42 +7065,43 @@ __setup("isolcpus=", isolated_cpu_setup); | |||
6936 | /* | 7065 | /* |
6937 | * init_sched_build_groups takes the cpumask we wish to span, and a pointer | 7066 | * init_sched_build_groups takes the cpumask we wish to span, and a pointer |
6938 | * to a function which identifies what group(along with sched group) a CPU | 7067 | * to a function which identifies what group(along with sched group) a CPU |
6939 | * belongs to. The return value of group_fn must be a >= 0 and < NR_CPUS | 7068 | * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids |
6940 | * (due to the fact that we keep track of groups covered with a cpumask_t). | 7069 | * (due to the fact that we keep track of groups covered with a struct cpumask). |
6941 | * | 7070 | * |
6942 | * init_sched_build_groups will build a circular linked list of the groups | 7071 | * init_sched_build_groups will build a circular linked list of the groups |
6943 | * covered by the given span, and will set each group's ->cpumask correctly, | 7072 | * covered by the given span, and will set each group's ->cpumask correctly, |
6944 | * and ->cpu_power to 0. | 7073 | * and ->cpu_power to 0. |
6945 | */ | 7074 | */ |
6946 | static void | 7075 | static void |
6947 | init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map, | 7076 | init_sched_build_groups(const struct cpumask *span, |
6948 | int (*group_fn)(int cpu, const cpumask_t *cpu_map, | 7077 | const struct cpumask *cpu_map, |
7078 | int (*group_fn)(int cpu, const struct cpumask *cpu_map, | ||
6949 | struct sched_group **sg, | 7079 | struct sched_group **sg, |
6950 | cpumask_t *tmpmask), | 7080 | struct cpumask *tmpmask), |
6951 | cpumask_t *covered, cpumask_t *tmpmask) | 7081 | struct cpumask *covered, struct cpumask *tmpmask) |
6952 | { | 7082 | { |
6953 | struct sched_group *first = NULL, *last = NULL; | 7083 | struct sched_group *first = NULL, *last = NULL; |
6954 | int i; | 7084 | int i; |
6955 | 7085 | ||
6956 | cpus_clear(*covered); | 7086 | cpumask_clear(covered); |
6957 | 7087 | ||
6958 | for_each_cpu_mask_nr(i, *span) { | 7088 | for_each_cpu(i, span) { |
6959 | struct sched_group *sg; | 7089 | struct sched_group *sg; |
6960 | int group = group_fn(i, cpu_map, &sg, tmpmask); | 7090 | int group = group_fn(i, cpu_map, &sg, tmpmask); |
6961 | int j; | 7091 | int j; |
6962 | 7092 | ||
6963 | if (cpu_isset(i, *covered)) | 7093 | if (cpumask_test_cpu(i, covered)) |
6964 | continue; | 7094 | continue; |
6965 | 7095 | ||
6966 | cpus_clear(sg->cpumask); | 7096 | cpumask_clear(sched_group_cpus(sg)); |
6967 | sg->__cpu_power = 0; | 7097 | sg->__cpu_power = 0; |
6968 | 7098 | ||
6969 | for_each_cpu_mask_nr(j, *span) { | 7099 | for_each_cpu(j, span) { |
6970 | if (group_fn(j, cpu_map, NULL, tmpmask) != group) | 7100 | if (group_fn(j, cpu_map, NULL, tmpmask) != group) |
6971 | continue; | 7101 | continue; |
6972 | 7102 | ||
6973 | cpu_set(j, *covered); | 7103 | cpumask_set_cpu(j, covered); |
6974 | cpu_set(j, sg->cpumask); | 7104 | cpumask_set_cpu(j, sched_group_cpus(sg)); |
6975 | } | 7105 | } |
6976 | if (!first) | 7106 | if (!first) |
6977 | first = sg; | 7107 | first = sg; |
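The loop in init_sched_build_groups() keeps its original shape: walk the span, skip CPUs already covered, pull every CPU that maps to the same group into that group's mask, and link the groups into a ring. A compact user-space rendition of that shape (eight "CPUs", pairs forming groups, single-word masks; everything here is illustrative):

#include <stdio.h>

#define N 8

struct group {
    unsigned long mask;         /* members of this group (toy cpumask) */
    struct group *next;         /* circular list, as in sched_group */
};

/* Stand-in for group_fn(): pack pairs of CPUs into one group. */
static int group_of(int cpu) { return cpu / 2; }

static struct group *build_groups(struct group *groups)
{
    unsigned long covered = 0;
    struct group *first = NULL, *last = NULL;

    for (int i = 0; i < N; i++) {
        struct group *sg = &groups[group_of(i)];

        if (covered & (1ul << i))
            continue;
        sg->mask = 0;
        for (int j = 0; j < N; j++) {
            if (group_of(j) != group_of(i))
                continue;
            covered |= 1ul << j;
            sg->mask |= 1ul << j;
        }
        if (!first)
            first = sg;
        if (last)
            last->next = sg;
        last = sg;
    }
    if (last)
        last->next = first;     /* close the ring */
    return first;
}

int main(void)
{
    struct group groups[N / 2] = {{ 0 }};
    struct group *first = build_groups(groups), *g = first;

    do {
        printf("group mask 0x%lx\n", g->mask);
        g = g->next;
    } while (g != first);
    return 0;
}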
@@ -7035,23 +7165,21 @@ static int find_next_best_node(int node, nodemask_t *used_nodes) | |||
7035 | * should be one that prevents unnecessary balancing, but also spreads tasks | 7165 | * should be one that prevents unnecessary balancing, but also spreads tasks |
7036 | * out optimally. | 7166 | * out optimally. |
7037 | */ | 7167 | */ |
7038 | static void sched_domain_node_span(int node, cpumask_t *span) | 7168 | static void sched_domain_node_span(int node, struct cpumask *span) |
7039 | { | 7169 | { |
7040 | nodemask_t used_nodes; | 7170 | nodemask_t used_nodes; |
7041 | node_to_cpumask_ptr(nodemask, node); | ||
7042 | int i; | 7171 | int i; |
7043 | 7172 | ||
7044 | cpus_clear(*span); | 7173 | cpumask_clear(span); |
7045 | nodes_clear(used_nodes); | 7174 | nodes_clear(used_nodes); |
7046 | 7175 | ||
7047 | cpus_or(*span, *span, *nodemask); | 7176 | cpumask_or(span, span, cpumask_of_node(node)); |
7048 | node_set(node, used_nodes); | 7177 | node_set(node, used_nodes); |
7049 | 7178 | ||
7050 | for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { | 7179 | for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { |
7051 | int next_node = find_next_best_node(node, &used_nodes); | 7180 | int next_node = find_next_best_node(node, &used_nodes); |
7052 | 7181 | ||
7053 | node_to_cpumask_ptr_next(nodemask, next_node); | 7182 | cpumask_or(span, span, cpumask_of_node(next_node)); |
7054 | cpus_or(*span, *span, *nodemask); | ||
7055 | } | 7183 | } |
7056 | } | 7184 | } |
7057 | #endif /* CONFIG_NUMA */ | 7185 | #endif /* CONFIG_NUMA */ |
@@ -7059,18 +7187,33 @@ static void sched_domain_node_span(int node, cpumask_t *span) | |||
7059 | int sched_smt_power_savings = 0, sched_mc_power_savings = 0; | 7187 | int sched_smt_power_savings = 0, sched_mc_power_savings = 0; |
7060 | 7188 | ||
7061 | /* | 7189 | /* |
7190 | * The cpus mask in sched_group and sched_domain hangs off the end. | ||
7191 | * FIXME: use cpumask_var_t or dynamic percpu alloc to avoid wasting space | ||
7192 | * for nr_cpu_ids < CONFIG_NR_CPUS. | ||
7193 | */ | ||
7194 | struct static_sched_group { | ||
7195 | struct sched_group sg; | ||
7196 | DECLARE_BITMAP(cpus, CONFIG_NR_CPUS); | ||
7197 | }; | ||
7198 | |||
7199 | struct static_sched_domain { | ||
7200 | struct sched_domain sd; | ||
7201 | DECLARE_BITMAP(span, CONFIG_NR_CPUS); | ||
7202 | }; | ||
7203 | |||
7204 | /* | ||
7062 | * SMT sched-domains: | 7205 | * SMT sched-domains: |
7063 | */ | 7206 | */ |
7064 | #ifdef CONFIG_SCHED_SMT | 7207 | #ifdef CONFIG_SCHED_SMT |
7065 | static DEFINE_PER_CPU(struct sched_domain, cpu_domains); | 7208 | static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); |
7066 | static DEFINE_PER_CPU(struct sched_group, sched_group_cpus); | 7209 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus); |
7067 | 7210 | ||
7068 | static int | 7211 | static int |
7069 | cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7212 | cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, |
7070 | cpumask_t *unused) | 7213 | struct sched_group **sg, struct cpumask *unused) |
7071 | { | 7214 | { |
7072 | if (sg) | 7215 | if (sg) |
7073 | *sg = &per_cpu(sched_group_cpus, cpu); | 7216 | *sg = &per_cpu(sched_group_cpus, cpu).sg; |
7074 | return cpu; | 7217 | return cpu; |
7075 | } | 7218 | } |
7076 | #endif /* CONFIG_SCHED_SMT */ | 7219 | #endif /* CONFIG_SCHED_SMT */ |
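static_sched_group and static_sched_domain exist because the per-cpu sched_group/sched_domain cpumasks now live in storage that trails the structure; sched_group_cpus() and sched_domain_span() simply return a pointer into that trailing bitmap. A self-contained sketch of the idea (names are made up; the kernel reserves the bits with DECLARE_BITMAP inside these wrapper structs):

#include <stdio.h>
#include <limits.h>

#define MY_NR_CPUS 64           /* stand-in for CONFIG_NR_CPUS */
#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
#define BITMAP_WORDS ((MY_NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Toy sched_group: the member bitmap lives right after the struct. */
struct group {
    unsigned long power;        /* keeps the trailing bitmap aligned */
};

struct static_group {           /* static instances reserve the bits */
    struct group g;
    unsigned long cpus[BITMAP_WORDS];
};

/* Like sched_group_cpus(): hand back the storage that follows the struct. */
unsigned long *group_cpus(struct group *g)
{
    return (unsigned long *)(g + 1);
}

int main(void)
{
    static struct static_group sg;

    group_cpus(&sg.g)[0] |= 1ul << 3;   /* CPU 3 joins the group */
    printf("cpu3: %lu, words: %zu\n",
           (sg.cpus[0] >> 3) & 1, (size_t)BITMAP_WORDS);
    return 0;
}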
@@ -7079,56 +7222,53 @@ cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | |||
7079 | * multi-core sched-domains: | 7222 | * multi-core sched-domains: |
7080 | */ | 7223 | */ |
7081 | #ifdef CONFIG_SCHED_MC | 7224 | #ifdef CONFIG_SCHED_MC |
7082 | static DEFINE_PER_CPU(struct sched_domain, core_domains); | 7225 | static DEFINE_PER_CPU(struct static_sched_domain, core_domains); |
7083 | static DEFINE_PER_CPU(struct sched_group, sched_group_core); | 7226 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_core); |
7084 | #endif /* CONFIG_SCHED_MC */ | 7227 | #endif /* CONFIG_SCHED_MC */ |
7085 | 7228 | ||
7086 | #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) | 7229 | #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) |
7087 | static int | 7230 | static int |
7088 | cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7231 | cpu_to_core_group(int cpu, const struct cpumask *cpu_map, |
7089 | cpumask_t *mask) | 7232 | struct sched_group **sg, struct cpumask *mask) |
7090 | { | 7233 | { |
7091 | int group; | 7234 | int group; |
7092 | 7235 | ||
7093 | *mask = per_cpu(cpu_sibling_map, cpu); | 7236 | cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map); |
7094 | cpus_and(*mask, *mask, *cpu_map); | 7237 | group = cpumask_first(mask); |
7095 | group = first_cpu(*mask); | ||
7096 | if (sg) | 7238 | if (sg) |
7097 | *sg = &per_cpu(sched_group_core, group); | 7239 | *sg = &per_cpu(sched_group_core, group).sg; |
7098 | return group; | 7240 | return group; |
7099 | } | 7241 | } |
7100 | #elif defined(CONFIG_SCHED_MC) | 7242 | #elif defined(CONFIG_SCHED_MC) |
7101 | static int | 7243 | static int |
7102 | cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7244 | cpu_to_core_group(int cpu, const struct cpumask *cpu_map, |
7103 | cpumask_t *unused) | 7245 | struct sched_group **sg, struct cpumask *unused) |
7104 | { | 7246 | { |
7105 | if (sg) | 7247 | if (sg) |
7106 | *sg = &per_cpu(sched_group_core, cpu); | 7248 | *sg = &per_cpu(sched_group_core, cpu).sg; |
7107 | return cpu; | 7249 | return cpu; |
7108 | } | 7250 | } |
7109 | #endif | 7251 | #endif |
7110 | 7252 | ||
7111 | static DEFINE_PER_CPU(struct sched_domain, phys_domains); | 7253 | static DEFINE_PER_CPU(struct static_sched_domain, phys_domains); |
7112 | static DEFINE_PER_CPU(struct sched_group, sched_group_phys); | 7254 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys); |
7113 | 7255 | ||
7114 | static int | 7256 | static int |
7115 | cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7257 | cpu_to_phys_group(int cpu, const struct cpumask *cpu_map, |
7116 | cpumask_t *mask) | 7258 | struct sched_group **sg, struct cpumask *mask) |
7117 | { | 7259 | { |
7118 | int group; | 7260 | int group; |
7119 | #ifdef CONFIG_SCHED_MC | 7261 | #ifdef CONFIG_SCHED_MC |
7120 | *mask = cpu_coregroup_map(cpu); | 7262 | cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map); |
7121 | cpus_and(*mask, *mask, *cpu_map); | 7263 | group = cpumask_first(mask); |
7122 | group = first_cpu(*mask); | ||
7123 | #elif defined(CONFIG_SCHED_SMT) | 7264 | #elif defined(CONFIG_SCHED_SMT) |
7124 | *mask = per_cpu(cpu_sibling_map, cpu); | 7265 | cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map); |
7125 | cpus_and(*mask, *mask, *cpu_map); | 7266 | group = cpumask_first(mask); |
7126 | group = first_cpu(*mask); | ||
7127 | #else | 7267 | #else |
7128 | group = cpu; | 7268 | group = cpu; |
7129 | #endif | 7269 | #endif |
7130 | if (sg) | 7270 | if (sg) |
7131 | *sg = &per_cpu(sched_group_phys, group); | 7271 | *sg = &per_cpu(sched_group_phys, group).sg; |
7132 | return group; | 7272 | return group; |
7133 | } | 7273 | } |
7134 | 7274 | ||
@@ -7142,19 +7282,19 @@ static DEFINE_PER_CPU(struct sched_domain, node_domains); | |||
7142 | static struct sched_group ***sched_group_nodes_bycpu; | 7282 | static struct sched_group ***sched_group_nodes_bycpu; |
7143 | 7283 | ||
7144 | static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); | 7284 | static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); |
7145 | static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes); | 7285 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes); |
7146 | 7286 | ||
7147 | static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map, | 7287 | static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map, |
7148 | struct sched_group **sg, cpumask_t *nodemask) | 7288 | struct sched_group **sg, |
7289 | struct cpumask *nodemask) | ||
7149 | { | 7290 | { |
7150 | int group; | 7291 | int group; |
7151 | 7292 | ||
7152 | *nodemask = node_to_cpumask(cpu_to_node(cpu)); | 7293 | cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map); |
7153 | cpus_and(*nodemask, *nodemask, *cpu_map); | 7294 | group = cpumask_first(nodemask); |
7154 | group = first_cpu(*nodemask); | ||
7155 | 7295 | ||
7156 | if (sg) | 7296 | if (sg) |
7157 | *sg = &per_cpu(sched_group_allnodes, group); | 7297 | *sg = &per_cpu(sched_group_allnodes, group).sg; |
7158 | return group; | 7298 | return group; |
7159 | } | 7299 | } |
7160 | 7300 | ||
@@ -7166,11 +7306,11 @@ static void init_numa_sched_groups_power(struct sched_group *group_head) | |||
7166 | if (!sg) | 7306 | if (!sg) |
7167 | return; | 7307 | return; |
7168 | do { | 7308 | do { |
7169 | for_each_cpu_mask_nr(j, sg->cpumask) { | 7309 | for_each_cpu(j, sched_group_cpus(sg)) { |
7170 | struct sched_domain *sd; | 7310 | struct sched_domain *sd; |
7171 | 7311 | ||
7172 | sd = &per_cpu(phys_domains, j); | 7312 | sd = &per_cpu(phys_domains, j).sd; |
7173 | if (j != first_cpu(sd->groups->cpumask)) { | 7313 | if (j != cpumask_first(sched_group_cpus(sd->groups))) { |
7174 | /* | 7314 | /* |
7175 | * Only add "power" once for each | 7315 | * Only add "power" once for each |
7176 | * physical package. | 7316 | * physical package. |
@@ -7187,11 +7327,12 @@ static void init_numa_sched_groups_power(struct sched_group *group_head) | |||
7187 | 7327 | ||
7188 | #ifdef CONFIG_NUMA | 7328 | #ifdef CONFIG_NUMA |
7189 | /* Free memory allocated for various sched_group structures */ | 7329 | /* Free memory allocated for various sched_group structures */ |
7190 | static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) | 7330 | static void free_sched_groups(const struct cpumask *cpu_map, |
7331 | struct cpumask *nodemask) | ||
7191 | { | 7332 | { |
7192 | int cpu, i; | 7333 | int cpu, i; |
7193 | 7334 | ||
7194 | for_each_cpu_mask_nr(cpu, *cpu_map) { | 7335 | for_each_cpu(cpu, cpu_map) { |
7195 | struct sched_group **sched_group_nodes | 7336 | struct sched_group **sched_group_nodes |
7196 | = sched_group_nodes_bycpu[cpu]; | 7337 | = sched_group_nodes_bycpu[cpu]; |
7197 | 7338 | ||
@@ -7201,9 +7342,8 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) | |||
7201 | for (i = 0; i < nr_node_ids; i++) { | 7342 | for (i = 0; i < nr_node_ids; i++) { |
7202 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; | 7343 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; |
7203 | 7344 | ||
7204 | *nodemask = node_to_cpumask(i); | 7345 | cpumask_and(nodemask, cpumask_of_node(i), cpu_map); |
7205 | cpus_and(*nodemask, *nodemask, *cpu_map); | 7346 | if (cpumask_empty(nodemask)) |
7206 | if (cpus_empty(*nodemask)) | ||
7207 | continue; | 7347 | continue; |
7208 | 7348 | ||
7209 | if (sg == NULL) | 7349 | if (sg == NULL) |
@@ -7221,7 +7361,8 @@ next_sg: | |||
7221 | } | 7361 | } |
7222 | } | 7362 | } |
7223 | #else /* !CONFIG_NUMA */ | 7363 | #else /* !CONFIG_NUMA */ |
7224 | static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) | 7364 | static void free_sched_groups(const struct cpumask *cpu_map, |
7365 | struct cpumask *nodemask) | ||
7225 | { | 7366 | { |
7226 | } | 7367 | } |
7227 | #endif /* CONFIG_NUMA */ | 7368 | #endif /* CONFIG_NUMA */ |
@@ -7247,7 +7388,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) | |||
7247 | 7388 | ||
7248 | WARN_ON(!sd || !sd->groups); | 7389 | WARN_ON(!sd || !sd->groups); |
7249 | 7390 | ||
7250 | if (cpu != first_cpu(sd->groups->cpumask)) | 7391 | if (cpu != cpumask_first(sched_group_cpus(sd->groups))) |
7251 | return; | 7392 | return; |
7252 | 7393 | ||
7253 | child = sd->child; | 7394 | child = sd->child; |
@@ -7312,40 +7453,6 @@ SD_INIT_FUNC(CPU) | |||
7312 | SD_INIT_FUNC(MC) | 7453 | SD_INIT_FUNC(MC) |
7313 | #endif | 7454 | #endif |
7314 | 7455 | ||
7315 | /* | ||
7316 | * To minimize stack usage kmalloc room for cpumasks and share the | ||
7317 | * space as the usage in build_sched_domains() dictates. Used only | ||
7318 | * if the amount of space is significant. | ||
7319 | */ | ||
7320 | struct allmasks { | ||
7321 | cpumask_t tmpmask; /* make this one first */ | ||
7322 | union { | ||
7323 | cpumask_t nodemask; | ||
7324 | cpumask_t this_sibling_map; | ||
7325 | cpumask_t this_core_map; | ||
7326 | }; | ||
7327 | cpumask_t send_covered; | ||
7328 | |||
7329 | #ifdef CONFIG_NUMA | ||
7330 | cpumask_t domainspan; | ||
7331 | cpumask_t covered; | ||
7332 | cpumask_t notcovered; | ||
7333 | #endif | ||
7334 | }; | ||
7335 | |||
7336 | #if NR_CPUS > 128 | ||
7337 | #define SCHED_CPUMASK_ALLOC 1 | ||
7338 | #define SCHED_CPUMASK_FREE(v) kfree(v) | ||
7339 | #define SCHED_CPUMASK_DECLARE(v) struct allmasks *v | ||
7340 | #else | ||
7341 | #define SCHED_CPUMASK_ALLOC 0 | ||
7342 | #define SCHED_CPUMASK_FREE(v) | ||
7343 | #define SCHED_CPUMASK_DECLARE(v) struct allmasks _v, *v = &_v | ||
7344 | #endif | ||
7345 | |||
7346 | #define SCHED_CPUMASK_VAR(v, a) cpumask_t *v = (cpumask_t *) \ | ||
7347 | ((unsigned long)(a) + offsetof(struct allmasks, v)) | ||
7348 | |||
7349 | static int default_relax_domain_level = -1; | 7456 | static int default_relax_domain_level = -1; |
7350 | 7457 | ||
7351 | static int __init setup_relax_domain_level(char *str) | 7458 | static int __init setup_relax_domain_level(char *str) |
@@ -7385,17 +7492,38 @@ static void set_domain_attribute(struct sched_domain *sd, | |||
7385 | * Build sched domains for a given set of cpus and attach the sched domains | 7492 | * Build sched domains for a given set of cpus and attach the sched domains |
7386 | * to the individual cpus | 7493 | * to the individual cpus |
7387 | */ | 7494 | */ |
7388 | static int __build_sched_domains(const cpumask_t *cpu_map, | 7495 | static int __build_sched_domains(const struct cpumask *cpu_map, |
7389 | struct sched_domain_attr *attr) | 7496 | struct sched_domain_attr *attr) |
7390 | { | 7497 | { |
7391 | int i; | 7498 | int i, err = -ENOMEM; |
7392 | struct root_domain *rd; | 7499 | struct root_domain *rd; |
7393 | SCHED_CPUMASK_DECLARE(allmasks); | 7500 | cpumask_var_t nodemask, this_sibling_map, this_core_map, send_covered, |
7394 | cpumask_t *tmpmask; | 7501 | tmpmask; |
7395 | #ifdef CONFIG_NUMA | 7502 | #ifdef CONFIG_NUMA |
7503 | cpumask_var_t domainspan, covered, notcovered; | ||
7396 | struct sched_group **sched_group_nodes = NULL; | 7504 | struct sched_group **sched_group_nodes = NULL; |
7397 | int sd_allnodes = 0; | 7505 | int sd_allnodes = 0; |
7398 | 7506 | ||
7507 | if (!alloc_cpumask_var(&domainspan, GFP_KERNEL)) | ||
7508 | goto out; | ||
7509 | if (!alloc_cpumask_var(&covered, GFP_KERNEL)) | ||
7510 | goto free_domainspan; | ||
7511 | if (!alloc_cpumask_var(¬covered, GFP_KERNEL)) | ||
7512 | goto free_covered; | ||
7513 | #endif | ||
7514 | |||
7515 | if (!alloc_cpumask_var(&nodemask, GFP_KERNEL)) | ||
7516 | goto free_notcovered; | ||
7517 | if (!alloc_cpumask_var(&this_sibling_map, GFP_KERNEL)) | ||
7518 | goto free_nodemask; | ||
7519 | if (!alloc_cpumask_var(&this_core_map, GFP_KERNEL)) | ||
7520 | goto free_this_sibling_map; | ||
7521 | if (!alloc_cpumask_var(&send_covered, GFP_KERNEL)) | ||
7522 | goto free_this_core_map; | ||
7523 | if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) | ||
7524 | goto free_send_covered; | ||
7525 | |||
7526 | #ifdef CONFIG_NUMA | ||
7399 | /* | 7527 | /* |
7400 | * Allocate the per-node list of sched groups | 7528 | * Allocate the per-node list of sched groups |
7401 | */ | 7529 | */ |
@@ -7403,55 +7531,35 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7403 | GFP_KERNEL); | 7531 | GFP_KERNEL); |
7404 | if (!sched_group_nodes) { | 7532 | if (!sched_group_nodes) { |
7405 | printk(KERN_WARNING "Can not alloc sched group node list\n"); | 7533 | printk(KERN_WARNING "Can not alloc sched group node list\n"); |
7406 | return -ENOMEM; | 7534 | goto free_tmpmask; |
7407 | } | 7535 | } |
7408 | #endif | 7536 | #endif |
7409 | 7537 | ||
7410 | rd = alloc_rootdomain(); | 7538 | rd = alloc_rootdomain(); |
7411 | if (!rd) { | 7539 | if (!rd) { |
7412 | printk(KERN_WARNING "Cannot alloc root domain\n"); | 7540 | printk(KERN_WARNING "Cannot alloc root domain\n"); |
7413 | #ifdef CONFIG_NUMA | 7541 | goto free_sched_groups; |
7414 | kfree(sched_group_nodes); | ||
7415 | #endif | ||
7416 | return -ENOMEM; | ||
7417 | } | 7542 | } |
7418 | 7543 | ||
7419 | #if SCHED_CPUMASK_ALLOC | ||
7420 | /* get space for all scratch cpumask variables */ | ||
7421 | allmasks = kmalloc(sizeof(*allmasks), GFP_KERNEL); | ||
7422 | if (!allmasks) { | ||
7423 | printk(KERN_WARNING "Cannot alloc cpumask array\n"); | ||
7424 | kfree(rd); | ||
7425 | #ifdef CONFIG_NUMA | 7544 | #ifdef CONFIG_NUMA |
7426 | kfree(sched_group_nodes); | 7545 | sched_group_nodes_bycpu[cpumask_first(cpu_map)] = sched_group_nodes; |
7427 | #endif | ||
7428 | return -ENOMEM; | ||
7429 | } | ||
7430 | #endif | ||
7431 | tmpmask = (cpumask_t *)allmasks; | ||
7432 | |||
7433 | |||
7434 | #ifdef CONFIG_NUMA | ||
7435 | sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes; | ||
7436 | #endif | 7546 | #endif |
7437 | 7547 | ||
7438 | /* | 7548 | /* |
7439 | * Set up domains for cpus specified by the cpu_map. | 7549 | * Set up domains for cpus specified by the cpu_map. |
7440 | */ | 7550 | */ |
7441 | for_each_cpu_mask_nr(i, *cpu_map) { | 7551 | for_each_cpu(i, cpu_map) { |
7442 | struct sched_domain *sd = NULL, *p; | 7552 | struct sched_domain *sd = NULL, *p; |
7443 | SCHED_CPUMASK_VAR(nodemask, allmasks); | ||
7444 | 7553 | ||
7445 | *nodemask = node_to_cpumask(cpu_to_node(i)); | 7554 | cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map); |
7446 | cpus_and(*nodemask, *nodemask, *cpu_map); | ||
7447 | 7555 | ||
7448 | #ifdef CONFIG_NUMA | 7556 | #ifdef CONFIG_NUMA |
7449 | if (cpus_weight(*cpu_map) > | 7557 | if (cpumask_weight(cpu_map) > |
7450 | SD_NODES_PER_DOMAIN*cpus_weight(*nodemask)) { | 7558 | SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) { |
7451 | sd = &per_cpu(allnodes_domains, i); | 7559 | sd = &per_cpu(allnodes_domains, i); |
7452 | SD_INIT(sd, ALLNODES); | 7560 | SD_INIT(sd, ALLNODES); |
7453 | set_domain_attribute(sd, attr); | 7561 | set_domain_attribute(sd, attr); |
7454 | sd->span = *cpu_map; | 7562 | cpumask_copy(sched_domain_span(sd), cpu_map); |
7455 | cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask); | 7563 | cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask); |
7456 | p = sd; | 7564 | p = sd; |
7457 | sd_allnodes = 1; | 7565 | sd_allnodes = 1; |
@@ -7461,18 +7569,19 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7461 | sd = &per_cpu(node_domains, i); | 7569 | sd = &per_cpu(node_domains, i); |
7462 | SD_INIT(sd, NODE); | 7570 | SD_INIT(sd, NODE); |
7463 | set_domain_attribute(sd, attr); | 7571 | set_domain_attribute(sd, attr); |
7464 | sched_domain_node_span(cpu_to_node(i), &sd->span); | 7572 | sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd)); |
7465 | sd->parent = p; | 7573 | sd->parent = p; |
7466 | if (p) | 7574 | if (p) |
7467 | p->child = sd; | 7575 | p->child = sd; |
7468 | cpus_and(sd->span, sd->span, *cpu_map); | 7576 | cpumask_and(sched_domain_span(sd), |
7577 | sched_domain_span(sd), cpu_map); | ||
7469 | #endif | 7578 | #endif |
7470 | 7579 | ||
7471 | p = sd; | 7580 | p = sd; |
7472 | sd = &per_cpu(phys_domains, i); | 7581 | sd = &per_cpu(phys_domains, i).sd; |
7473 | SD_INIT(sd, CPU); | 7582 | SD_INIT(sd, CPU); |
7474 | set_domain_attribute(sd, attr); | 7583 | set_domain_attribute(sd, attr); |
7475 | sd->span = *nodemask; | 7584 | cpumask_copy(sched_domain_span(sd), nodemask); |
7476 | sd->parent = p; | 7585 | sd->parent = p; |
7477 | if (p) | 7586 | if (p) |
7478 | p->child = sd; | 7587 | p->child = sd; |
@@ -7480,11 +7589,11 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7480 | 7589 | ||
7481 | #ifdef CONFIG_SCHED_MC | 7590 | #ifdef CONFIG_SCHED_MC |
7482 | p = sd; | 7591 | p = sd; |
7483 | sd = &per_cpu(core_domains, i); | 7592 | sd = &per_cpu(core_domains, i).sd; |
7484 | SD_INIT(sd, MC); | 7593 | SD_INIT(sd, MC); |
7485 | set_domain_attribute(sd, attr); | 7594 | set_domain_attribute(sd, attr); |
7486 | sd->span = cpu_coregroup_map(i); | 7595 | cpumask_and(sched_domain_span(sd), cpu_map, |
7487 | cpus_and(sd->span, sd->span, *cpu_map); | 7596 | cpu_coregroup_mask(i)); |
7488 | sd->parent = p; | 7597 | sd->parent = p; |
7489 | p->child = sd; | 7598 | p->child = sd; |
7490 | cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask); | 7599 | cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask); |
@@ -7492,11 +7601,11 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7492 | 7601 | ||
7493 | #ifdef CONFIG_SCHED_SMT | 7602 | #ifdef CONFIG_SCHED_SMT |
7494 | p = sd; | 7603 | p = sd; |
7495 | sd = &per_cpu(cpu_domains, i); | 7604 | sd = &per_cpu(cpu_domains, i).sd; |
7496 | SD_INIT(sd, SIBLING); | 7605 | SD_INIT(sd, SIBLING); |
7497 | set_domain_attribute(sd, attr); | 7606 | set_domain_attribute(sd, attr); |
7498 | sd->span = per_cpu(cpu_sibling_map, i); | 7607 | cpumask_and(sched_domain_span(sd), |
7499 | cpus_and(sd->span, sd->span, *cpu_map); | 7608 | &per_cpu(cpu_sibling_map, i), cpu_map); |
7500 | sd->parent = p; | 7609 | sd->parent = p; |
7501 | p->child = sd; | 7610 | p->child = sd; |
7502 | cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask); | 7611 | cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask); |
@@ -7505,13 +7614,10 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7505 | 7614 | ||
7506 | #ifdef CONFIG_SCHED_SMT | 7615 | #ifdef CONFIG_SCHED_SMT |
7507 | /* Set up CPU (sibling) groups */ | 7616 | /* Set up CPU (sibling) groups */ |
7508 | for_each_cpu_mask_nr(i, *cpu_map) { | 7617 | for_each_cpu(i, cpu_map) { |
7509 | SCHED_CPUMASK_VAR(this_sibling_map, allmasks); | 7618 | cpumask_and(this_sibling_map, |
7510 | SCHED_CPUMASK_VAR(send_covered, allmasks); | 7619 | &per_cpu(cpu_sibling_map, i), cpu_map); |
7511 | 7620 | if (i != cpumask_first(this_sibling_map)) | |
7512 | *this_sibling_map = per_cpu(cpu_sibling_map, i); | ||
7513 | cpus_and(*this_sibling_map, *this_sibling_map, *cpu_map); | ||
7514 | if (i != first_cpu(*this_sibling_map)) | ||
7515 | continue; | 7621 | continue; |
7516 | 7622 | ||
7517 | init_sched_build_groups(this_sibling_map, cpu_map, | 7623 | init_sched_build_groups(this_sibling_map, cpu_map, |
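In the sibling-group loop above, every CPU computes its sibling mask intersected with cpu_map, but only the CPU that is first in that mask calls init_sched_build_groups(), so each group is constructed exactly once. A small sketch of that de-duplication over plain integer bitmasks; the topology and masks here are invented for illustration.

```c
#include <stdio.h>
#include <strings.h>	/* ffs() */

#define NR_CPUS 8

int main(void)
{
	/* Hypothetical topology: {0,1}, {2,3}, {4,5}, {6,7} are SMT siblings. */
	unsigned int sibling_map[NR_CPUS] = {
		0x03, 0x03, 0x0c, 0x0c, 0x30, 0x30, 0xc0, 0xc0
	};
	unsigned int cpu_map = 0xfd;	/* pretend CPU 1 is isolated/offline */

	for (int i = 0; i < NR_CPUS; i++) {
		unsigned int this_sibling_map;

		if (!(cpu_map & (1u << i)))
			continue;	/* CPU not part of the map being built */

		/* like cpumask_and(this_sibling_map, sibling_map, cpu_map) */
		this_sibling_map = sibling_map[i] & cpu_map;

		/* Only the first CPU of each sibling mask builds the group. */
		if (i != ffs(this_sibling_map) - 1)
			continue;

		printf("cpu %d builds sibling group mask 0x%02x\n",
		       i, this_sibling_map);
	}
	return 0;
}
```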
@@ -7522,13 +7628,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7522 | 7628 | ||
7523 | #ifdef CONFIG_SCHED_MC | 7629 | #ifdef CONFIG_SCHED_MC |
7524 | /* Set up multi-core groups */ | 7630 | /* Set up multi-core groups */ |
7525 | for_each_cpu_mask_nr(i, *cpu_map) { | 7631 | for_each_cpu(i, cpu_map) { |
7526 | SCHED_CPUMASK_VAR(this_core_map, allmasks); | 7632 | cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map); |
7527 | SCHED_CPUMASK_VAR(send_covered, allmasks); | 7633 | if (i != cpumask_first(this_core_map)) |
7528 | |||
7529 | *this_core_map = cpu_coregroup_map(i); | ||
7530 | cpus_and(*this_core_map, *this_core_map, *cpu_map); | ||
7531 | if (i != first_cpu(*this_core_map)) | ||
7532 | continue; | 7634 | continue; |
7533 | 7635 | ||
7534 | init_sched_build_groups(this_core_map, cpu_map, | 7636 | init_sched_build_groups(this_core_map, cpu_map, |
@@ -7539,12 +7641,8 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7539 | 7641 | ||
7540 | /* Set up physical groups */ | 7642 | /* Set up physical groups */ |
7541 | for (i = 0; i < nr_node_ids; i++) { | 7643 | for (i = 0; i < nr_node_ids; i++) { |
7542 | SCHED_CPUMASK_VAR(nodemask, allmasks); | 7644 | cpumask_and(nodemask, cpumask_of_node(i), cpu_map); |
7543 | SCHED_CPUMASK_VAR(send_covered, allmasks); | 7645 | if (cpumask_empty(nodemask)) |
7544 | |||
7545 | *nodemask = node_to_cpumask(i); | ||
7546 | cpus_and(*nodemask, *nodemask, *cpu_map); | ||
7547 | if (cpus_empty(*nodemask)) | ||
7548 | continue; | 7646 | continue; |
7549 | 7647 | ||
7550 | init_sched_build_groups(nodemask, cpu_map, | 7648 | init_sched_build_groups(nodemask, cpu_map, |
@@ -7555,8 +7653,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7555 | #ifdef CONFIG_NUMA | 7653 | #ifdef CONFIG_NUMA |
7556 | /* Set up node groups */ | 7654 | /* Set up node groups */ |
7557 | if (sd_allnodes) { | 7655 | if (sd_allnodes) { |
7558 | SCHED_CPUMASK_VAR(send_covered, allmasks); | ||
7559 | |||
7560 | init_sched_build_groups(cpu_map, cpu_map, | 7656 | init_sched_build_groups(cpu_map, cpu_map, |
7561 | &cpu_to_allnodes_group, | 7657 | &cpu_to_allnodes_group, |
7562 | send_covered, tmpmask); | 7658 | send_covered, tmpmask); |
@@ -7565,58 +7661,53 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7565 | for (i = 0; i < nr_node_ids; i++) { | 7661 | for (i = 0; i < nr_node_ids; i++) { |
7566 | /* Set up node groups */ | 7662 | /* Set up node groups */ |
7567 | struct sched_group *sg, *prev; | 7663 | struct sched_group *sg, *prev; |
7568 | SCHED_CPUMASK_VAR(nodemask, allmasks); | ||
7569 | SCHED_CPUMASK_VAR(domainspan, allmasks); | ||
7570 | SCHED_CPUMASK_VAR(covered, allmasks); | ||
7571 | int j; | 7664 | int j; |
7572 | 7665 | ||
7573 | *nodemask = node_to_cpumask(i); | 7666 | cpumask_clear(covered); |
7574 | cpus_clear(*covered); | 7667 | cpumask_and(nodemask, cpumask_of_node(i), cpu_map); |
7575 | 7668 | if (cpumask_empty(nodemask)) { | |
7576 | cpus_and(*nodemask, *nodemask, *cpu_map); | ||
7577 | if (cpus_empty(*nodemask)) { | ||
7578 | sched_group_nodes[i] = NULL; | 7669 | sched_group_nodes[i] = NULL; |
7579 | continue; | 7670 | continue; |
7580 | } | 7671 | } |
7581 | 7672 | ||
7582 | sched_domain_node_span(i, domainspan); | 7673 | sched_domain_node_span(i, domainspan); |
7583 | cpus_and(*domainspan, *domainspan, *cpu_map); | 7674 | cpumask_and(domainspan, domainspan, cpu_map); |
7584 | 7675 | ||
7585 | sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i); | 7676 | sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), |
7677 | GFP_KERNEL, i); | ||
7586 | if (!sg) { | 7678 | if (!sg) { |
7587 | printk(KERN_WARNING "Can not alloc domain group for " | 7679 | printk(KERN_WARNING "Can not alloc domain group for " |
7588 | "node %d\n", i); | 7680 | "node %d\n", i); |
7589 | goto error; | 7681 | goto error; |
7590 | } | 7682 | } |
7591 | sched_group_nodes[i] = sg; | 7683 | sched_group_nodes[i] = sg; |
7592 | for_each_cpu_mask_nr(j, *nodemask) { | 7684 | for_each_cpu(j, nodemask) { |
7593 | struct sched_domain *sd; | 7685 | struct sched_domain *sd; |
7594 | 7686 | ||
7595 | sd = &per_cpu(node_domains, j); | 7687 | sd = &per_cpu(node_domains, j); |
7596 | sd->groups = sg; | 7688 | sd->groups = sg; |
7597 | } | 7689 | } |
7598 | sg->__cpu_power = 0; | 7690 | sg->__cpu_power = 0; |
7599 | sg->cpumask = *nodemask; | 7691 | cpumask_copy(sched_group_cpus(sg), nodemask); |
7600 | sg->next = sg; | 7692 | sg->next = sg; |
7601 | cpus_or(*covered, *covered, *nodemask); | 7693 | cpumask_or(covered, covered, nodemask); |
7602 | prev = sg; | 7694 | prev = sg; |
7603 | 7695 | ||
7604 | for (j = 0; j < nr_node_ids; j++) { | 7696 | for (j = 0; j < nr_node_ids; j++) { |
7605 | SCHED_CPUMASK_VAR(notcovered, allmasks); | ||
7606 | int n = (i + j) % nr_node_ids; | 7697 | int n = (i + j) % nr_node_ids; |
7607 | node_to_cpumask_ptr(pnodemask, n); | ||
7608 | 7698 | ||
7609 | cpus_complement(*notcovered, *covered); | 7699 | cpumask_complement(notcovered, covered); |
7610 | cpus_and(*tmpmask, *notcovered, *cpu_map); | 7700 | cpumask_and(tmpmask, notcovered, cpu_map); |
7611 | cpus_and(*tmpmask, *tmpmask, *domainspan); | 7701 | cpumask_and(tmpmask, tmpmask, domainspan); |
7612 | if (cpus_empty(*tmpmask)) | 7702 | if (cpumask_empty(tmpmask)) |
7613 | break; | 7703 | break; |
7614 | 7704 | ||
7615 | cpus_and(*tmpmask, *tmpmask, *pnodemask); | 7705 | cpumask_and(tmpmask, tmpmask, cpumask_of_node(n)); |
7616 | if (cpus_empty(*tmpmask)) | 7706 | if (cpumask_empty(tmpmask)) |
7617 | continue; | 7707 | continue; |
7618 | 7708 | ||
7619 | sg = kmalloc_node(sizeof(struct sched_group), | 7709 | sg = kmalloc_node(sizeof(struct sched_group) + |
7710 | cpumask_size(), | ||
7620 | GFP_KERNEL, i); | 7711 | GFP_KERNEL, i); |
7621 | if (!sg) { | 7712 | if (!sg) { |
7622 | printk(KERN_WARNING | 7713 | printk(KERN_WARNING |
@@ -7624,9 +7715,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7624 | goto error; | 7715 | goto error; |
7625 | } | 7716 | } |
7626 | sg->__cpu_power = 0; | 7717 | sg->__cpu_power = 0; |
7627 | sg->cpumask = *tmpmask; | 7718 | cpumask_copy(sched_group_cpus(sg), tmpmask); |
7628 | sg->next = prev->next; | 7719 | sg->next = prev->next; |
7629 | cpus_or(*covered, *covered, *tmpmask); | 7720 | cpumask_or(covered, covered, tmpmask); |
7630 | prev->next = sg; | 7721 | prev->next = sg; |
7631 | prev = sg; | 7722 | prev = sg; |
7632 | } | 7723 | } |
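The NUMA section above strings sched_groups into a circular list per node: the node's own CPUs form the first group, then one extra group is added for each other node whose CPUs lie inside the domain span and are not yet covered. A compact sketch of the covered/notcovered bookkeeping using integer bitmasks; the node-to-CPU layout is made up for the example.

```c
#include <stdio.h>

#define NR_NODES 3

int main(void)
{
	/* Hypothetical mapping: node n owns CPUs n*2 and n*2+1. */
	unsigned int node_cpus[NR_NODES] = { 0x03, 0x0c, 0x30 };
	unsigned int cpu_map = 0x3f;		/* all six CPUs online */
	unsigned int domainspan = 0x3f;		/* span of node 0's domain */

	int i = 0;				/* build the groups for node 0 */
	unsigned int covered = 0;
	unsigned int nodemask = node_cpus[i] & cpu_map;

	printf("node %d: first group 0x%02x\n", i, nodemask);
	covered |= nodemask;

	for (int j = 0; j < NR_NODES; j++) {
		int n = (i + j) % NR_NODES;	/* walk nodes starting at i */
		unsigned int notcovered = ~covered;
		unsigned int tmpmask = notcovered & cpu_map & domainspan;

		if (!tmpmask)
			break;			/* whole span already covered */

		tmpmask &= node_cpus[n];
		if (!tmpmask)
			continue;		/* this node adds nothing new */

		printf("node %d: extra group 0x%02x (cpus of node %d)\n",
		       i, tmpmask, n);
		covered |= tmpmask;
	}
	return 0;
}
```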
@@ -7635,22 +7726,22 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7635 | 7726 | ||
7636 | /* Calculate CPU power for physical packages and nodes */ | 7727 | /* Calculate CPU power for physical packages and nodes */ |
7637 | #ifdef CONFIG_SCHED_SMT | 7728 | #ifdef CONFIG_SCHED_SMT |
7638 | for_each_cpu_mask_nr(i, *cpu_map) { | 7729 | for_each_cpu(i, cpu_map) { |
7639 | struct sched_domain *sd = &per_cpu(cpu_domains, i); | 7730 | struct sched_domain *sd = &per_cpu(cpu_domains, i).sd; |
7640 | 7731 | ||
7641 | init_sched_groups_power(i, sd); | 7732 | init_sched_groups_power(i, sd); |
7642 | } | 7733 | } |
7643 | #endif | 7734 | #endif |
7644 | #ifdef CONFIG_SCHED_MC | 7735 | #ifdef CONFIG_SCHED_MC |
7645 | for_each_cpu_mask_nr(i, *cpu_map) { | 7736 | for_each_cpu(i, cpu_map) { |
7646 | struct sched_domain *sd = &per_cpu(core_domains, i); | 7737 | struct sched_domain *sd = &per_cpu(core_domains, i).sd; |
7647 | 7738 | ||
7648 | init_sched_groups_power(i, sd); | 7739 | init_sched_groups_power(i, sd); |
7649 | } | 7740 | } |
7650 | #endif | 7741 | #endif |
7651 | 7742 | ||
7652 | for_each_cpu_mask_nr(i, *cpu_map) { | 7743 | for_each_cpu(i, cpu_map) { |
7653 | struct sched_domain *sd = &per_cpu(phys_domains, i); | 7744 | struct sched_domain *sd = &per_cpu(phys_domains, i).sd; |
7654 | 7745 | ||
7655 | init_sched_groups_power(i, sd); | 7746 | init_sched_groups_power(i, sd); |
7656 | } | 7747 | } |
@@ -7662,56 +7753,87 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7662 | if (sd_allnodes) { | 7753 | if (sd_allnodes) { |
7663 | struct sched_group *sg; | 7754 | struct sched_group *sg; |
7664 | 7755 | ||
7665 | cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg, | 7756 | cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg, |
7666 | tmpmask); | 7757 | tmpmask); |
7667 | init_numa_sched_groups_power(sg); | 7758 | init_numa_sched_groups_power(sg); |
7668 | } | 7759 | } |
7669 | #endif | 7760 | #endif |
7670 | 7761 | ||
7671 | /* Attach the domains */ | 7762 | /* Attach the domains */ |
7672 | for_each_cpu_mask_nr(i, *cpu_map) { | 7763 | for_each_cpu(i, cpu_map) { |
7673 | struct sched_domain *sd; | 7764 | struct sched_domain *sd; |
7674 | #ifdef CONFIG_SCHED_SMT | 7765 | #ifdef CONFIG_SCHED_SMT |
7675 | sd = &per_cpu(cpu_domains, i); | 7766 | sd = &per_cpu(cpu_domains, i).sd; |
7676 | #elif defined(CONFIG_SCHED_MC) | 7767 | #elif defined(CONFIG_SCHED_MC) |
7677 | sd = &per_cpu(core_domains, i); | 7768 | sd = &per_cpu(core_domains, i).sd; |
7678 | #else | 7769 | #else |
7679 | sd = &per_cpu(phys_domains, i); | 7770 | sd = &per_cpu(phys_domains, i).sd; |
7680 | #endif | 7771 | #endif |
7681 | cpu_attach_domain(sd, rd, i); | 7772 | cpu_attach_domain(sd, rd, i); |
7682 | } | 7773 | } |
7683 | 7774 | ||
7684 | SCHED_CPUMASK_FREE((void *)allmasks); | 7775 | err = 0; |
7685 | return 0; | 7776 | |
7777 | free_tmpmask: | ||
7778 | free_cpumask_var(tmpmask); | ||
7779 | free_send_covered: | ||
7780 | free_cpumask_var(send_covered); | ||
7781 | free_this_core_map: | ||
7782 | free_cpumask_var(this_core_map); | ||
7783 | free_this_sibling_map: | ||
7784 | free_cpumask_var(this_sibling_map); | ||
7785 | free_nodemask: | ||
7786 | free_cpumask_var(nodemask); | ||
7787 | free_notcovered: | ||
7788 | #ifdef CONFIG_NUMA | ||
7789 | free_cpumask_var(notcovered); | ||
7790 | free_covered: | ||
7791 | free_cpumask_var(covered); | ||
7792 | free_domainspan: | ||
7793 | free_cpumask_var(domainspan); | ||
7794 | out: | ||
7795 | #endif | ||
7796 | return err; | ||
7797 | |||
7798 | free_sched_groups: | ||
7799 | #ifdef CONFIG_NUMA | ||
7800 | kfree(sched_group_nodes); | ||
7801 | #endif | ||
7802 | goto free_tmpmask; | ||
7686 | 7803 | ||
7687 | #ifdef CONFIG_NUMA | 7804 | #ifdef CONFIG_NUMA |
7688 | error: | 7805 | error: |
7689 | free_sched_groups(cpu_map, tmpmask); | 7806 | free_sched_groups(cpu_map, tmpmask); |
7690 | SCHED_CPUMASK_FREE((void *)allmasks); | 7807 | free_rootdomain(rd); |
7691 | kfree(rd); | 7808 | goto free_tmpmask; |
7692 | return -ENOMEM; | ||
7693 | #endif | 7809 | #endif |
7694 | } | 7810 | } |
7695 | 7811 | ||
7696 | static int build_sched_domains(const cpumask_t *cpu_map) | 7812 | static int build_sched_domains(const struct cpumask *cpu_map) |
7697 | { | 7813 | { |
7698 | return __build_sched_domains(cpu_map, NULL); | 7814 | return __build_sched_domains(cpu_map, NULL); |
7699 | } | 7815 | } |
7700 | 7816 | ||
7701 | static cpumask_t *doms_cur; /* current sched domains */ | 7817 | static struct cpumask *doms_cur; /* current sched domains */ |
7702 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ | 7818 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ |
7703 | static struct sched_domain_attr *dattr_cur; | 7819 | static struct sched_domain_attr *dattr_cur; |
7704 | /* attributes of custom domains in 'doms_cur' */ | 7820 | /* attributes of custom domains in 'doms_cur' */ |
7705 | 7821 | ||
7706 | /* | 7822 | /* |
7707 | * Special case: If a kmalloc of a doms_cur partition (array of | 7823 | * Special case: If a kmalloc of a doms_cur partition (array of |
7708 | * cpumask_t) fails, then fallback to a single sched domain, | 7824 | * cpumask) fails, then fallback to a single sched domain, |
7709 | * as determined by the single cpumask_t fallback_doms. | 7825 | * as determined by the single cpumask fallback_doms. |
7710 | */ | 7826 | */ |
7711 | static cpumask_t fallback_doms; | 7827 | static cpumask_var_t fallback_doms; |
7712 | 7828 | ||
7713 | void __attribute__((weak)) arch_update_cpu_topology(void) | 7829 | /* |
7830 | * arch_update_cpu_topology lets virtualized architectures update the | ||
7831 | * cpu core maps. It is supposed to return 1 if the topology changed | ||
7832 | * or 0 if it stayed the same. | ||
7833 | */ | ||
7834 | int __attribute__((weak)) arch_update_cpu_topology(void) | ||
7714 | { | 7835 | { |
7836 | return 0; | ||
7715 | } | 7837 | } |
7716 | 7838 | ||
7717 | /* | 7839 | /* |
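arch_update_cpu_topology() is now a weak function that returns 0 ("topology unchanged") by default; an architecture may provide a strong definition that returns 1 when its core maps changed, which partition_sched_domains() uses to force a full rebuild. A single-file illustration of the weak-symbol mechanism (userspace, GCC/Clang):

```c
#include <stdio.h>

/*
 * Default, overridable implementation: reports "topology unchanged".
 * If another object file linked into the program provides a non-weak
 * arch_update_cpu_topology(), the linker picks that one instead.
 */
int __attribute__((weak)) arch_update_cpu_topology(void)
{
	return 0;
}

int main(void)
{
	printf("topology changed: %d\n", arch_update_cpu_topology());
	return 0;
}
```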
@@ -7719,16 +7841,16 @@ void __attribute__((weak)) arch_update_cpu_topology(void) | |||
7719 | * For now this just excludes isolated cpus, but could be used to | 7841 | * For now this just excludes isolated cpus, but could be used to |
7720 | * exclude other special cases in the future. | 7842 | * exclude other special cases in the future. |
7721 | */ | 7843 | */ |
7722 | static int arch_init_sched_domains(const cpumask_t *cpu_map) | 7844 | static int arch_init_sched_domains(const struct cpumask *cpu_map) |
7723 | { | 7845 | { |
7724 | int err; | 7846 | int err; |
7725 | 7847 | ||
7726 | arch_update_cpu_topology(); | 7848 | arch_update_cpu_topology(); |
7727 | ndoms_cur = 1; | 7849 | ndoms_cur = 1; |
7728 | doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL); | 7850 | doms_cur = kmalloc(cpumask_size(), GFP_KERNEL); |
7729 | if (!doms_cur) | 7851 | if (!doms_cur) |
7730 | doms_cur = &fallback_doms; | 7852 | doms_cur = fallback_doms; |
7731 | cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map); | 7853 | cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map); |
7732 | dattr_cur = NULL; | 7854 | dattr_cur = NULL; |
7733 | err = build_sched_domains(doms_cur); | 7855 | err = build_sched_domains(doms_cur); |
7734 | register_sched_domain_sysctl(); | 7856 | register_sched_domain_sysctl(); |
@@ -7736,8 +7858,8 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map) | |||
7736 | return err; | 7858 | return err; |
7737 | } | 7859 | } |
7738 | 7860 | ||
7739 | static void arch_destroy_sched_domains(const cpumask_t *cpu_map, | 7861 | static void arch_destroy_sched_domains(const struct cpumask *cpu_map, |
7740 | cpumask_t *tmpmask) | 7862 | struct cpumask *tmpmask) |
7741 | { | 7863 | { |
7742 | free_sched_groups(cpu_map, tmpmask); | 7864 | free_sched_groups(cpu_map, tmpmask); |
7743 | } | 7865 | } |
@@ -7746,17 +7868,16 @@ static void arch_destroy_sched_domains(const cpumask_t *cpu_map, | |||
7746 | * Detach sched domains from a group of cpus specified in cpu_map | 7868 | * Detach sched domains from a group of cpus specified in cpu_map |
7747 | * These cpus will now be attached to the NULL domain | 7869 | * These cpus will now be attached to the NULL domain |
7748 | */ | 7870 | */ |
7749 | static void detach_destroy_domains(const cpumask_t *cpu_map) | 7871 | static void detach_destroy_domains(const struct cpumask *cpu_map) |
7750 | { | 7872 | { |
7751 | cpumask_t tmpmask; | 7873 | /* Save because hotplug lock held. */ |
7874 | static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS); | ||
7752 | int i; | 7875 | int i; |
7753 | 7876 | ||
7754 | unregister_sched_domain_sysctl(); | 7877 | for_each_cpu(i, cpu_map) |
7755 | |||
7756 | for_each_cpu_mask_nr(i, *cpu_map) | ||
7757 | cpu_attach_domain(NULL, &def_root_domain, i); | 7878 | cpu_attach_domain(NULL, &def_root_domain, i); |
7758 | synchronize_sched(); | 7879 | synchronize_sched(); |
7759 | arch_destroy_sched_domains(cpu_map, &tmpmask); | 7880 | arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask)); |
7760 | } | 7881 | } |
7761 | 7882 | ||
7762 | /* handle null as "default" */ | 7883 | /* handle null as "default" */ |
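detach_destroy_domains() now keeps its scratch mask in a function-local static bitmap instead of on the stack; that is only safe because every caller holds the CPU-hotplug lock, which serializes use of the shared buffer. A small sketch of the same trade-off, with a pthread mutex standing in for that lock; all names here are invented.

```c
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define NR_CPUS 1024	/* large enough that an on-stack copy would be costly */

static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;

static void detach_domains(const unsigned char *cpu_map)
{
	/*
	 * Static scratch buffer: no stack usage, but it is shared state,
	 * so every caller must hold hotplug_lock around this function.
	 */
	static unsigned char tmpmask[NR_CPUS / 8];

	memcpy(tmpmask, cpu_map, sizeof(tmpmask));
	/* ... tear down domains for the CPUs set in tmpmask ... */
	printf("detached domains for mask starting 0x%02x\n",
	       (unsigned int)tmpmask[0]);
}

int main(void)
{
	unsigned char map[NR_CPUS / 8] = { 0x0f };

	pthread_mutex_lock(&hotplug_lock);
	detach_domains(map);
	pthread_mutex_unlock(&hotplug_lock);
	return 0;
}
```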
@@ -7781,7 +7902,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, | |||
7781 | * doms_new[] to the current sched domain partitioning, doms_cur[]. | 7902 | * doms_new[] to the current sched domain partitioning, doms_cur[]. |
7782 | * It destroys each deleted domain and builds each new domain. | 7903 | * It destroys each deleted domain and builds each new domain. |
7783 | * | 7904 | * |
7784 | * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'. | 7905 | * 'doms_new' is an array of cpumasks of length 'ndoms_new'. |
7785 | * The masks don't intersect (don't overlap.) We should setup one | 7906 | * The masks don't intersect (don't overlap.) We should setup one |
7786 | * sched domain for each mask. CPUs not in any of the cpumasks will | 7907 | * sched domain for each mask. CPUs not in any of the cpumasks will |
7787 | * not be load balanced. If the same cpumask appears both in the | 7908 | * not be load balanced. If the same cpumask appears both in the |
@@ -7795,28 +7916,33 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, | |||
7795 | * the single partition 'fallback_doms', it also forces the domains | 7916 | * the single partition 'fallback_doms', it also forces the domains |
7796 | * to be rebuilt. | 7917 | * to be rebuilt. |
7797 | * | 7918 | * |
7798 | * If doms_new == NULL it will be replaced with cpu_online_map. | 7919 | * If doms_new == NULL it will be replaced with cpu_online_mask. |
7799 | * ndoms_new == 0 is a special case for destroying existing domains, | 7920 | * ndoms_new == 0 is a special case for destroying existing domains, |
7800 | * and it will not create the default domain. | 7921 | * and it will not create the default domain. |
7801 | * | 7922 | * |
7802 | * Call with hotplug lock held | 7923 | * Call with hotplug lock held |
7803 | */ | 7924 | */ |
7804 | void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, | 7925 | /* FIXME: Change to struct cpumask *doms_new[] */ |
7926 | void partition_sched_domains(int ndoms_new, struct cpumask *doms_new, | ||
7805 | struct sched_domain_attr *dattr_new) | 7927 | struct sched_domain_attr *dattr_new) |
7806 | { | 7928 | { |
7807 | int i, j, n; | 7929 | int i, j, n; |
7930 | int new_topology; | ||
7808 | 7931 | ||
7809 | mutex_lock(&sched_domains_mutex); | 7932 | mutex_lock(&sched_domains_mutex); |
7810 | 7933 | ||
7811 | /* always unregister in case we don't destroy any domains */ | 7934 | /* always unregister in case we don't destroy any domains */ |
7812 | unregister_sched_domain_sysctl(); | 7935 | unregister_sched_domain_sysctl(); |
7813 | 7936 | ||
7937 | /* Let architecture update cpu core mappings. */ | ||
7938 | new_topology = arch_update_cpu_topology(); | ||
7939 | |||
7814 | n = doms_new ? ndoms_new : 0; | 7940 | n = doms_new ? ndoms_new : 0; |
7815 | 7941 | ||
7816 | /* Destroy deleted domains */ | 7942 | /* Destroy deleted domains */ |
7817 | for (i = 0; i < ndoms_cur; i++) { | 7943 | for (i = 0; i < ndoms_cur; i++) { |
7818 | for (j = 0; j < n; j++) { | 7944 | for (j = 0; j < n && !new_topology; j++) { |
7819 | if (cpus_equal(doms_cur[i], doms_new[j]) | 7945 | if (cpumask_equal(&doms_cur[i], &doms_new[j]) |
7820 | && dattrs_equal(dattr_cur, i, dattr_new, j)) | 7946 | && dattrs_equal(dattr_cur, i, dattr_new, j)) |
7821 | goto match1; | 7947 | goto match1; |
7822 | } | 7948 | } |
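partition_sched_domains() diffs the old and new partitionings so that unchanged domains are neither destroyed nor rebuilt, and the new new_topology flag skips both matching loops so that everything is rebuilt after a topology change. A sketch of that destroy-deleted / build-new matching over integer masks; the domain-attribute comparison is omitted.

```c
#include <stdio.h>

static void destroy_domain(unsigned int m) { printf("destroy 0x%02x\n", m); }
static void build_domain(unsigned int m)   { printf("build   0x%02x\n", m); }

static void repartition(const unsigned int *cur, int ncur,
			const unsigned int *new, int nnew, int new_topology)
{
	int i, j;

	/* Destroy domains that are not in the new partitioning. */
	for (i = 0; i < ncur; i++) {
		for (j = 0; j < nnew && !new_topology; j++)
			if (cur[i] == new[j])
				goto match1;
		destroy_domain(cur[i]);
match1:
		;
	}

	/* Build domains that were not already present. */
	for (i = 0; i < nnew; i++) {
		for (j = 0; j < ncur && !new_topology; j++)
			if (new[i] == cur[j])
				goto match2;
		build_domain(new[i]);
match2:
		;
	}
}

int main(void)
{
	unsigned int cur[] = { 0x0f, 0xf0 };
	unsigned int new[] = { 0x0f, 0xff };

	repartition(cur, 2, new, 2, 0);	/* 0x0f kept, 0xf0 destroyed, 0xff built */
	repartition(cur, 2, new, 2, 1);	/* topology changed: rebuild everything */
	return 0;
}
```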
@@ -7828,15 +7954,15 @@ match1: | |||
7828 | 7954 | ||
7829 | if (doms_new == NULL) { | 7955 | if (doms_new == NULL) { |
7830 | ndoms_cur = 0; | 7956 | ndoms_cur = 0; |
7831 | doms_new = &fallback_doms; | 7957 | doms_new = fallback_doms; |
7832 | cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); | 7958 | cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map); |
7833 | dattr_new = NULL; | 7959 | WARN_ON_ONCE(dattr_new); |
7834 | } | 7960 | } |
7835 | 7961 | ||
7836 | /* Build new domains */ | 7962 | /* Build new domains */ |
7837 | for (i = 0; i < ndoms_new; i++) { | 7963 | for (i = 0; i < ndoms_new; i++) { |
7838 | for (j = 0; j < ndoms_cur; j++) { | 7964 | for (j = 0; j < ndoms_cur && !new_topology; j++) { |
7839 | if (cpus_equal(doms_new[i], doms_cur[j]) | 7965 | if (cpumask_equal(&doms_new[i], &doms_cur[j]) |
7840 | && dattrs_equal(dattr_new, i, dattr_cur, j)) | 7966 | && dattrs_equal(dattr_new, i, dattr_cur, j)) |
7841 | goto match2; | 7967 | goto match2; |
7842 | } | 7968 | } |
@@ -7848,7 +7974,7 @@ match2: | |||
7848 | } | 7974 | } |
7849 | 7975 | ||
7850 | /* Remember the new sched domains */ | 7976 | /* Remember the new sched domains */ |
7851 | if (doms_cur != &fallback_doms) | 7977 | if (doms_cur != fallback_doms) |
7852 | kfree(doms_cur); | 7978 | kfree(doms_cur); |
7853 | kfree(dattr_cur); /* kfree(NULL) is safe */ | 7979 | kfree(dattr_cur); /* kfree(NULL) is safe */ |
7854 | doms_cur = doms_new; | 7980 | doms_cur = doms_new; |
@@ -7877,14 +8003,25 @@ int arch_reinit_sched_domains(void) | |||
7877 | static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) | 8003 | static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) |
7878 | { | 8004 | { |
7879 | int ret; | 8005 | int ret; |
8006 | unsigned int level = 0; | ||
8007 | |||
8008 | if (sscanf(buf, "%u", &level) != 1) | ||
8009 | return -EINVAL; | ||
8010 | |||
8011 | /* | ||
8012 | * level is always positive, so there is no need to check for | ||
8013 | * level < POWERSAVINGS_BALANCE_NONE, which is 0. | ||
8014 | * What happens on a 0 or 1 byte write? Do we | ||
8015 | * need to check count as well? | ||
8016 | */ | ||
7880 | 8017 | ||
7881 | if (buf[0] != '0' && buf[0] != '1') | 8018 | if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS) |
7882 | return -EINVAL; | 8019 | return -EINVAL; |
7883 | 8020 | ||
7884 | if (smt) | 8021 | if (smt) |
7885 | sched_smt_power_savings = (buf[0] == '1'); | 8022 | sched_smt_power_savings = level; |
7886 | else | 8023 | else |
7887 | sched_mc_power_savings = (buf[0] == '1'); | 8024 | sched_mc_power_savings = level; |
7888 | 8025 | ||
7889 | ret = arch_reinit_sched_domains(); | 8026 | ret = arch_reinit_sched_domains(); |
7890 | 8027 | ||
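The store handler above now accepts multi-level power-savings settings: it parses the buffer with sscanf("%u") and rejects values at or above MAX_POWERSAVINGS_BALANCE_LEVELS, instead of accepting only '0' or '1'. A userspace sketch of that parse-and-bound-check step; the constant's value of 3 is assumed for the example.

```c
#include <stdio.h>

#define MAX_POWERSAVINGS_BALANCE_LEVELS 3	/* assumed: NONE, BASIC, WAKEUP */

static int sched_mc_power_savings;

/* Returns 0 on success, -1 on invalid input (stands in for -EINVAL). */
static int power_savings_store(const char *buf)
{
	unsigned int level = 0;

	if (sscanf(buf, "%u", &level) != 1)
		return -1;

	/* level is unsigned, so only the upper bound needs checking. */
	if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
		return -1;

	sched_mc_power_savings = level;
	return 0;
}

int main(void)
{
	int ret = power_savings_store("2\n");

	printf("\"2\"  -> %d, sched_mc_power_savings=%d\n",
	       ret, sched_mc_power_savings);
	printf("\"9\"  -> %d\n", power_savings_store("9\n"));
	printf("\"xy\" -> %d\n", power_savings_store("xy\n"));
	return 0;
}
```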
@@ -7988,7 +8125,9 @@ static int update_runtime(struct notifier_block *nfb, | |||
7988 | 8125 | ||
7989 | void __init sched_init_smp(void) | 8126 | void __init sched_init_smp(void) |
7990 | { | 8127 | { |
7991 | cpumask_t non_isolated_cpus; | 8128 | cpumask_var_t non_isolated_cpus; |
8129 | |||
8130 | alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); | ||
7992 | 8131 | ||
7993 | #if defined(CONFIG_NUMA) | 8132 | #if defined(CONFIG_NUMA) |
7994 | sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), | 8133 | sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), |
@@ -7997,10 +8136,10 @@ void __init sched_init_smp(void) | |||
7997 | #endif | 8136 | #endif |
7998 | get_online_cpus(); | 8137 | get_online_cpus(); |
7999 | mutex_lock(&sched_domains_mutex); | 8138 | mutex_lock(&sched_domains_mutex); |
8000 | arch_init_sched_domains(&cpu_online_map); | 8139 | arch_init_sched_domains(cpu_online_mask); |
8001 | cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map); | 8140 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); |
8002 | if (cpus_empty(non_isolated_cpus)) | 8141 | if (cpumask_empty(non_isolated_cpus)) |
8003 | cpu_set(smp_processor_id(), non_isolated_cpus); | 8142 | cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); |
8004 | mutex_unlock(&sched_domains_mutex); | 8143 | mutex_unlock(&sched_domains_mutex); |
8005 | put_online_cpus(); | 8144 | put_online_cpus(); |
8006 | 8145 | ||
@@ -8015,9 +8154,13 @@ void __init sched_init_smp(void) | |||
8015 | init_hrtick(); | 8154 | init_hrtick(); |
8016 | 8155 | ||
8017 | /* Move init over to a non-isolated CPU */ | 8156 | /* Move init over to a non-isolated CPU */ |
8018 | if (set_cpus_allowed_ptr(current, &non_isolated_cpus) < 0) | 8157 | if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) |
8019 | BUG(); | 8158 | BUG(); |
8020 | sched_init_granularity(); | 8159 | sched_init_granularity(); |
8160 | free_cpumask_var(non_isolated_cpus); | ||
8161 | |||
8162 | alloc_cpumask_var(&fallback_doms, GFP_KERNEL); | ||
8163 | init_sched_rt_class(); | ||
8021 | } | 8164 | } |
8022 | #else | 8165 | #else |
8023 | void __init sched_init_smp(void) | 8166 | void __init sched_init_smp(void) |
@@ -8332,6 +8475,15 @@ void __init sched_init(void) | |||
8332 | */ | 8475 | */ |
8333 | current->sched_class = &fair_sched_class; | 8476 | current->sched_class = &fair_sched_class; |
8334 | 8477 | ||
8478 | /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ | ||
8479 | alloc_bootmem_cpumask_var(&nohz_cpu_mask); | ||
8480 | #ifdef CONFIG_SMP | ||
8481 | #ifdef CONFIG_NO_HZ | ||
8482 | alloc_bootmem_cpumask_var(&nohz.cpu_mask); | ||
8483 | #endif | ||
8484 | alloc_bootmem_cpumask_var(&cpu_isolated_map); | ||
8485 | #endif /* SMP */ | ||
8486 | |||
8335 | scheduler_running = 1; | 8487 | scheduler_running = 1; |
8336 | } | 8488 | } |
8337 | 8489 | ||
@@ -8490,7 +8642,7 @@ static | |||
8490 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) | 8642 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) |
8491 | { | 8643 | { |
8492 | struct cfs_rq *cfs_rq; | 8644 | struct cfs_rq *cfs_rq; |
8493 | struct sched_entity *se, *parent_se; | 8645 | struct sched_entity *se; |
8494 | struct rq *rq; | 8646 | struct rq *rq; |
8495 | int i; | 8647 | int i; |
8496 | 8648 | ||
@@ -8506,18 +8658,17 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) | |||
8506 | for_each_possible_cpu(i) { | 8658 | for_each_possible_cpu(i) { |
8507 | rq = cpu_rq(i); | 8659 | rq = cpu_rq(i); |
8508 | 8660 | ||
8509 | cfs_rq = kmalloc_node(sizeof(struct cfs_rq), | 8661 | cfs_rq = kzalloc_node(sizeof(struct cfs_rq), |
8510 | GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); | 8662 | GFP_KERNEL, cpu_to_node(i)); |
8511 | if (!cfs_rq) | 8663 | if (!cfs_rq) |
8512 | goto err; | 8664 | goto err; |
8513 | 8665 | ||
8514 | se = kmalloc_node(sizeof(struct sched_entity), | 8666 | se = kzalloc_node(sizeof(struct sched_entity), |
8515 | GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); | 8667 | GFP_KERNEL, cpu_to_node(i)); |
8516 | if (!se) | 8668 | if (!se) |
8517 | goto err; | 8669 | goto err; |
8518 | 8670 | ||
8519 | parent_se = parent ? parent->se[i] : NULL; | 8671 | init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]); |
8520 | init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent_se); | ||
8521 | } | 8672 | } |
8522 | 8673 | ||
8523 | return 1; | 8674 | return 1; |
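The allocation hunks here and in alloc_rt_sched_group() below replace kmalloc_node(..., GFP_KERNEL|__GFP_ZERO, ...) with kzalloc_node(..., GFP_KERNEL, ...), the zeroing variant, so behavior is unchanged. A userspace analogy using calloc() in place of malloc() plus memset():

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cfs_rq_like { long weight; long runtime; };

/* Old style: allocate, then zero explicitly. */
static void *alloc_zeroed_old(size_t size)
{
	void *p = malloc(size);

	if (p)
		memset(p, 0, size);
	return p;
}

/* New style: calloc already returns zeroed memory, like kzalloc_node. */
static void *alloc_zeroed_new(size_t size)
{
	return calloc(1, size);
}

int main(void)
{
	struct cfs_rq_like *a = alloc_zeroed_old(sizeof(*a));
	struct cfs_rq_like *b = alloc_zeroed_new(sizeof(*b));

	if (!a || !b)
		return 1;
	printf("%ld %ld %ld %ld\n", a->weight, a->runtime, b->weight, b->runtime);
	free(a);
	free(b);
	return 0;
}
```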
@@ -8578,7 +8729,7 @@ static | |||
8578 | int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) | 8729 | int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) |
8579 | { | 8730 | { |
8580 | struct rt_rq *rt_rq; | 8731 | struct rt_rq *rt_rq; |
8581 | struct sched_rt_entity *rt_se, *parent_se; | 8732 | struct sched_rt_entity *rt_se; |
8582 | struct rq *rq; | 8733 | struct rq *rq; |
8583 | int i; | 8734 | int i; |
8584 | 8735 | ||
@@ -8595,18 +8746,17 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) | |||
8595 | for_each_possible_cpu(i) { | 8746 | for_each_possible_cpu(i) { |
8596 | rq = cpu_rq(i); | 8747 | rq = cpu_rq(i); |
8597 | 8748 | ||
8598 | rt_rq = kmalloc_node(sizeof(struct rt_rq), | 8749 | rt_rq = kzalloc_node(sizeof(struct rt_rq), |
8599 | GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); | 8750 | GFP_KERNEL, cpu_to_node(i)); |
8600 | if (!rt_rq) | 8751 | if (!rt_rq) |
8601 | goto err; | 8752 | goto err; |
8602 | 8753 | ||
8603 | rt_se = kmalloc_node(sizeof(struct sched_rt_entity), | 8754 | rt_se = kzalloc_node(sizeof(struct sched_rt_entity), |
8604 | GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); | 8755 | GFP_KERNEL, cpu_to_node(i)); |
8605 | if (!rt_se) | 8756 | if (!rt_se) |
8606 | goto err; | 8757 | goto err; |
8607 | 8758 | ||
8608 | parent_se = parent ? parent->rt_se[i] : NULL; | 8759 | init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]); |
8609 | init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent_se); | ||
8610 | } | 8760 | } |
8611 | 8761 | ||
8612 | return 1; | 8762 | return 1; |
@@ -9249,11 +9399,12 @@ struct cgroup_subsys cpu_cgroup_subsys = { | |||
9249 | * (balbir@in.ibm.com). | 9399 | * (balbir@in.ibm.com). |
9250 | */ | 9400 | */ |
9251 | 9401 | ||
9252 | /* track cpu usage of a group of tasks */ | 9402 | /* track cpu usage of a group of tasks and its child groups */ |
9253 | struct cpuacct { | 9403 | struct cpuacct { |
9254 | struct cgroup_subsys_state css; | 9404 | struct cgroup_subsys_state css; |
9255 | /* cpuusage holds pointer to a u64-type object on every cpu */ | 9405 | /* cpuusage holds pointer to a u64-type object on every cpu */ |
9256 | u64 *cpuusage; | 9406 | u64 *cpuusage; |
9407 | struct cpuacct *parent; | ||
9257 | }; | 9408 | }; |
9258 | 9409 | ||
9259 | struct cgroup_subsys cpuacct_subsys; | 9410 | struct cgroup_subsys cpuacct_subsys; |
@@ -9287,6 +9438,9 @@ static struct cgroup_subsys_state *cpuacct_create( | |||
9287 | return ERR_PTR(-ENOMEM); | 9438 | return ERR_PTR(-ENOMEM); |
9288 | } | 9439 | } |
9289 | 9440 | ||
9441 | if (cgrp->parent) | ||
9442 | ca->parent = cgroup_ca(cgrp->parent); | ||
9443 | |||
9290 | return &ca->css; | 9444 | return &ca->css; |
9291 | } | 9445 | } |
9292 | 9446 | ||
@@ -9300,6 +9454,41 @@ cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) | |||
9300 | kfree(ca); | 9454 | kfree(ca); |
9301 | } | 9455 | } |
9302 | 9456 | ||
9457 | static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu) | ||
9458 | { | ||
9459 | u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); | ||
9460 | u64 data; | ||
9461 | |||
9462 | #ifndef CONFIG_64BIT | ||
9463 | /* | ||
9464 | * Take rq->lock to make 64-bit read safe on 32-bit platforms. | ||
9465 | */ | ||
9466 | spin_lock_irq(&cpu_rq(cpu)->lock); | ||
9467 | data = *cpuusage; | ||
9468 | spin_unlock_irq(&cpu_rq(cpu)->lock); | ||
9469 | #else | ||
9470 | data = *cpuusage; | ||
9471 | #endif | ||
9472 | |||
9473 | return data; | ||
9474 | } | ||
9475 | |||
9476 | static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val) | ||
9477 | { | ||
9478 | u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); | ||
9479 | |||
9480 | #ifndef CONFIG_64BIT | ||
9481 | /* | ||
9482 | * Take rq->lock to make 64-bit write safe on 32-bit platforms. | ||
9483 | */ | ||
9484 | spin_lock_irq(&cpu_rq(cpu)->lock); | ||
9485 | *cpuusage = val; | ||
9486 | spin_unlock_irq(&cpu_rq(cpu)->lock); | ||
9487 | #else | ||
9488 | *cpuusage = val; | ||
9489 | #endif | ||
9490 | } | ||
9491 | |||
9303 | /* return total cpu usage (in nanoseconds) of a group */ | 9492 | /* return total cpu usage (in nanoseconds) of a group */ |
9304 | static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft) | 9493 | static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft) |
9305 | { | 9494 | { |
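The new cpuacct_cpuusage_read() and cpuacct_cpuusage_write() helpers take the runqueue lock when !CONFIG_64BIT because a 64-bit load or store is not atomic on 32-bit machines, so an unlocked reader could observe a torn value. A userspace sketch of the same guard, with a mutex playing the role of rq->lock and a SIMULATE_32BIT switch standing in for the config option:

```c
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define SIMULATE_32BIT 1	/* pretend 64-bit accesses can tear */

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t cpuusage;	/* a per-cpu counter in the real code */

static uint64_t cpuusage_read(void)
{
	uint64_t data;

#if SIMULATE_32BIT
	/* Lock so the two 32-bit halves are read consistently. */
	pthread_mutex_lock(&rq_lock);
	data = cpuusage;
	pthread_mutex_unlock(&rq_lock);
#else
	data = cpuusage;	/* a 64-bit load is atomic here */
#endif
	return data;
}

static void cpuusage_write(uint64_t val)
{
#if SIMULATE_32BIT
	pthread_mutex_lock(&rq_lock);
	cpuusage = val;
	pthread_mutex_unlock(&rq_lock);
#else
	cpuusage = val;
#endif
}

int main(void)
{
	cpuusage_write(123456789ULL);
	printf("usage = %llu\n", (unsigned long long)cpuusage_read());
	return 0;
}
```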
@@ -9307,17 +9496,8 @@ static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft) | |||
9307 | u64 totalcpuusage = 0; | 9496 | u64 totalcpuusage = 0; |
9308 | int i; | 9497 | int i; |
9309 | 9498 | ||
9310 | for_each_possible_cpu(i) { | 9499 | for_each_present_cpu(i) |
9311 | u64 *cpuusage = percpu_ptr(ca->cpuusage, i); | 9500 | totalcpuusage += cpuacct_cpuusage_read(ca, i); |
9312 | |||
9313 | /* | ||
9314 | * Take rq->lock to make 64-bit addition safe on 32-bit | ||
9315 | * platforms. | ||
9316 | */ | ||
9317 | spin_lock_irq(&cpu_rq(i)->lock); | ||
9318 | totalcpuusage += *cpuusage; | ||
9319 | spin_unlock_irq(&cpu_rq(i)->lock); | ||
9320 | } | ||
9321 | 9501 | ||
9322 | return totalcpuusage; | 9502 | return totalcpuusage; |
9323 | } | 9503 | } |
@@ -9334,23 +9514,39 @@ static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype, | |||
9334 | goto out; | 9514 | goto out; |
9335 | } | 9515 | } |
9336 | 9516 | ||
9337 | for_each_possible_cpu(i) { | 9517 | for_each_present_cpu(i) |
9338 | u64 *cpuusage = percpu_ptr(ca->cpuusage, i); | 9518 | cpuacct_cpuusage_write(ca, i, 0); |
9339 | 9519 | ||
9340 | spin_lock_irq(&cpu_rq(i)->lock); | ||
9341 | *cpuusage = 0; | ||
9342 | spin_unlock_irq(&cpu_rq(i)->lock); | ||
9343 | } | ||
9344 | out: | 9520 | out: |
9345 | return err; | 9521 | return err; |
9346 | } | 9522 | } |
9347 | 9523 | ||
9524 | static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft, | ||
9525 | struct seq_file *m) | ||
9526 | { | ||
9527 | struct cpuacct *ca = cgroup_ca(cgroup); | ||
9528 | u64 percpu; | ||
9529 | int i; | ||
9530 | |||
9531 | for_each_present_cpu(i) { | ||
9532 | percpu = cpuacct_cpuusage_read(ca, i); | ||
9533 | seq_printf(m, "%llu ", (unsigned long long) percpu); | ||
9534 | } | ||
9535 | seq_printf(m, "\n"); | ||
9536 | return 0; | ||
9537 | } | ||
9538 | |||
9348 | static struct cftype files[] = { | 9539 | static struct cftype files[] = { |
9349 | { | 9540 | { |
9350 | .name = "usage", | 9541 | .name = "usage", |
9351 | .read_u64 = cpuusage_read, | 9542 | .read_u64 = cpuusage_read, |
9352 | .write_u64 = cpuusage_write, | 9543 | .write_u64 = cpuusage_write, |
9353 | }, | 9544 | }, |
9545 | { | ||
9546 | .name = "usage_percpu", | ||
9547 | .read_seq_string = cpuacct_percpu_seq_read, | ||
9548 | }, | ||
9549 | |||
9354 | }; | 9550 | }; |
9355 | 9551 | ||
9356 | static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) | 9552 | static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) |
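The new cpuacct.usage_percpu file emits one space-separated usage value per present CPU followed by a newline, which is exactly what cpuacct_percpu_seq_read() above produces; reading the file from the cgroup directory (e.g. cat cpuacct.usage_percpu) shows that single line. A sketch of the output format over a plain array; the values are invented.

```c
#include <stdio.h>

int main(void)
{
	/* Pretend per-cpu usage, in nanoseconds. */
	unsigned long long cpuusage[] = { 1200, 0, 987654, 42 };
	int ncpus = sizeof(cpuusage) / sizeof(cpuusage[0]);

	/* Mirrors cpuacct_percpu_seq_read(): "%llu " per cpu, then "\n". */
	for (int i = 0; i < ncpus; i++)
		printf("%llu ", cpuusage[i]);
	printf("\n");
	return 0;
}
```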
@@ -9366,14 +9562,16 @@ static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) | |||
9366 | static void cpuacct_charge(struct task_struct *tsk, u64 cputime) | 9562 | static void cpuacct_charge(struct task_struct *tsk, u64 cputime) |
9367 | { | 9563 | { |
9368 | struct cpuacct *ca; | 9564 | struct cpuacct *ca; |
9565 | int cpu; | ||
9369 | 9566 | ||
9370 | if (!cpuacct_subsys.active) | 9567 | if (!cpuacct_subsys.active) |
9371 | return; | 9568 | return; |
9372 | 9569 | ||
9570 | cpu = task_cpu(tsk); | ||
9373 | ca = task_ca(tsk); | 9571 | ca = task_ca(tsk); |
9374 | if (ca) { | ||
9375 | u64 *cpuusage = percpu_ptr(ca->cpuusage, task_cpu(tsk)); | ||
9376 | 9572 | ||
9573 | for (; ca; ca = ca->parent) { | ||
9574 | u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); | ||
9377 | *cpuusage += cputime; | 9575 | *cpuusage += cputime; |
9378 | } | 9576 | } |
9379 | } | 9577 | } |
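cpuacct_charge() now walks from the task's accounting group up through ca->parent, so time charged to a child cgroup is also accumulated in every ancestor, matching the parent pointer set up in cpuacct_create(). A small sketch of that parent-chain walk with a single counter per group; the kernel uses per-cpu counters and the real struct has more fields.

```c
#include <stdio.h>

struct cpuacct {
	const char *name;
	unsigned long long cpuusage;	/* one counter; per-cpu in the kernel */
	struct cpuacct *parent;
};

static void cpuacct_charge(struct cpuacct *ca, unsigned long long cputime)
{
	/* Charge the group and every ancestor, as the new code does. */
	for (; ca; ca = ca->parent)
		ca->cpuusage += cputime;
}

int main(void)
{
	struct cpuacct root  = { "root",  0, NULL };
	struct cpuacct users = { "users", 0, &root };
	struct cpuacct alice = { "alice", 0, &users };

	cpuacct_charge(&alice, 1000);	/* a task in "alice" ran for 1000ns */
	cpuacct_charge(&users, 500);	/* a task directly in "users" */

	printf("%s=%llu %s=%llu %s=%llu\n",
	       alice.name, alice.cpuusage,
	       users.name, users.cpuusage,
	       root.name,  root.cpuusage);
	return 0;
}
```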