Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 1582
1 file changed, 898 insertions, 684 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 50a21f964679..61245b8d0f16 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -118,7 +118,16 @@ | |||
118 | */ | 118 | */ |
119 | #define RUNTIME_INF ((u64)~0ULL) | 119 | #define RUNTIME_INF ((u64)~0ULL) |
120 | 120 | ||
121 | DEFINE_TRACE(sched_wait_task); | ||
122 | DEFINE_TRACE(sched_wakeup); | ||
123 | DEFINE_TRACE(sched_wakeup_new); | ||
124 | DEFINE_TRACE(sched_switch); | ||
125 | DEFINE_TRACE(sched_migrate_task); | ||
126 | |||
121 | #ifdef CONFIG_SMP | 127 | #ifdef CONFIG_SMP |
128 | |||
129 | static void double_rq_lock(struct rq *rq1, struct rq *rq2); | ||
130 | |||
122 | /* | 131 | /* |
123 | * Divide a load by a sched group cpu_power : (load / sg->__cpu_power) | 132 | * Divide a load by a sched group cpu_power : (load / sg->__cpu_power) |
124 | * Since cpu_power is a 'constant', we can use a reciprocal divide. | 133 | * Since cpu_power is a 'constant', we can use a reciprocal divide. |
@@ -203,7 +212,6 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) | |||
203 | hrtimer_init(&rt_b->rt_period_timer, | 212 | hrtimer_init(&rt_b->rt_period_timer, |
204 | CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 213 | CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
205 | rt_b->rt_period_timer.function = sched_rt_period_timer; | 214 | rt_b->rt_period_timer.function = sched_rt_period_timer; |
206 | rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED; | ||
207 | } | 215 | } |
208 | 216 | ||
209 | static inline int rt_bandwidth_enabled(void) | 217 | static inline int rt_bandwidth_enabled(void) |
@@ -261,6 +269,10 @@ struct task_group { | |||
261 | struct cgroup_subsys_state css; | 269 | struct cgroup_subsys_state css; |
262 | #endif | 270 | #endif |
263 | 271 | ||
272 | #ifdef CONFIG_USER_SCHED | ||
273 | uid_t uid; | ||
274 | #endif | ||
275 | |||
264 | #ifdef CONFIG_FAIR_GROUP_SCHED | 276 | #ifdef CONFIG_FAIR_GROUP_SCHED |
265 | /* schedulable entities of this group on each cpu */ | 277 | /* schedulable entities of this group on each cpu */ |
266 | struct sched_entity **se; | 278 | struct sched_entity **se; |
@@ -286,6 +298,12 @@ struct task_group { | |||
286 | 298 | ||
287 | #ifdef CONFIG_USER_SCHED | 299 | #ifdef CONFIG_USER_SCHED |
288 | 300 | ||
301 | /* Helper function to pass uid information to create_sched_user() */ | ||
302 | void set_tg_uid(struct user_struct *user) | ||
303 | { | ||
304 | user->tg->uid = user->uid; | ||
305 | } | ||
306 | |||
289 | /* | 307 | /* |
290 | * Root task group. | 308 | * Root task group. |
291 | * Every UID task group (including init_task_group aka UID-0) will | 309 | * Every UID task group (including init_task_group aka UID-0) will |
@@ -345,7 +363,9 @@ static inline struct task_group *task_group(struct task_struct *p) | |||
345 | struct task_group *tg; | 363 | struct task_group *tg; |
346 | 364 | ||
347 | #ifdef CONFIG_USER_SCHED | 365 | #ifdef CONFIG_USER_SCHED |
348 | tg = p->user->tg; | 366 | rcu_read_lock(); |
367 | tg = __task_cred(p)->user->tg; | ||
368 | rcu_read_unlock(); | ||
349 | #elif defined(CONFIG_CGROUP_SCHED) | 369 | #elif defined(CONFIG_CGROUP_SCHED) |
350 | tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id), | 370 | tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id), |
351 | struct task_group, css); | 371 | struct task_group, css); |
@@ -481,18 +501,26 @@ struct rt_rq { | |||
481 | */ | 501 | */ |
482 | struct root_domain { | 502 | struct root_domain { |
483 | atomic_t refcount; | 503 | atomic_t refcount; |
484 | cpumask_t span; | 504 | cpumask_var_t span; |
485 | cpumask_t online; | 505 | cpumask_var_t online; |
486 | 506 | ||
487 | /* | 507 | /* |
488 | * The "RT overload" flag: it gets set if a CPU has more than | 508 | * The "RT overload" flag: it gets set if a CPU has more than |
489 | * one runnable RT task. | 509 | * one runnable RT task. |
490 | */ | 510 | */ |
491 | cpumask_t rto_mask; | 511 | cpumask_var_t rto_mask; |
492 | atomic_t rto_count; | 512 | atomic_t rto_count; |
493 | #ifdef CONFIG_SMP | 513 | #ifdef CONFIG_SMP |
494 | struct cpupri cpupri; | 514 | struct cpupri cpupri; |
495 | #endif | 515 | #endif |
516 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | ||
517 | /* | ||
518 | * Preferred wake up cpu nominated by sched_mc balance that will be | ||
519 | * used when most cpus are idle in the system indicating overall very | ||
520 | * low system utilisation. Triggered at POWERSAVINGS_BALANCE_WAKEUP(2) | ||
521 | */ | ||
522 | unsigned int sched_mc_preferred_wakeup_cpu; | ||
523 | #endif | ||
496 | }; | 524 | }; |
497 | 525 | ||
498 | /* | 526 | /* |
@@ -586,6 +614,8 @@ struct rq { | |||
586 | #ifdef CONFIG_SCHEDSTATS | 614 | #ifdef CONFIG_SCHEDSTATS |
587 | /* latency stats */ | 615 | /* latency stats */ |
588 | struct sched_info rq_sched_info; | 616 | struct sched_info rq_sched_info; |
617 | unsigned long long rq_cpu_time; | ||
618 | /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */ | ||
589 | 619 | ||
590 | /* sys_sched_yield() stats */ | 620 | /* sys_sched_yield() stats */ |
591 | unsigned int yld_exp_empty; | 621 | unsigned int yld_exp_empty; |
@@ -703,45 +733,18 @@ static __read_mostly char *sched_feat_names[] = { | |||
703 | 733 | ||
704 | #undef SCHED_FEAT | 734 | #undef SCHED_FEAT |
705 | 735 | ||
706 | static int sched_feat_open(struct inode *inode, struct file *filp) | 736 | static int sched_feat_show(struct seq_file *m, void *v) |
707 | { | ||
708 | filp->private_data = inode->i_private; | ||
709 | return 0; | ||
710 | } | ||
711 | |||
712 | static ssize_t | ||
713 | sched_feat_read(struct file *filp, char __user *ubuf, | ||
714 | size_t cnt, loff_t *ppos) | ||
715 | { | 737 | { |
716 | char *buf; | ||
717 | int r = 0; | ||
718 | int len = 0; | ||
719 | int i; | 738 | int i; |
720 | 739 | ||
721 | for (i = 0; sched_feat_names[i]; i++) { | 740 | for (i = 0; sched_feat_names[i]; i++) { |
722 | len += strlen(sched_feat_names[i]); | 741 | if (!(sysctl_sched_features & (1UL << i))) |
723 | len += 4; | 742 | seq_puts(m, "NO_"); |
743 | seq_printf(m, "%s ", sched_feat_names[i]); | ||
724 | } | 744 | } |
745 | seq_puts(m, "\n"); | ||
725 | 746 | ||
726 | buf = kmalloc(len + 2, GFP_KERNEL); | 747 | return 0; |
727 | if (!buf) | ||
728 | return -ENOMEM; | ||
729 | |||
730 | for (i = 0; sched_feat_names[i]; i++) { | ||
731 | if (sysctl_sched_features & (1UL << i)) | ||
732 | r += sprintf(buf + r, "%s ", sched_feat_names[i]); | ||
733 | else | ||
734 | r += sprintf(buf + r, "NO_%s ", sched_feat_names[i]); | ||
735 | } | ||
736 | |||
737 | r += sprintf(buf + r, "\n"); | ||
738 | WARN_ON(r >= len + 2); | ||
739 | |||
740 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
741 | |||
742 | kfree(buf); | ||
743 | |||
744 | return r; | ||
745 | } | 748 | } |
746 | 749 | ||
747 | static ssize_t | 750 | static ssize_t |
@@ -786,10 +789,17 @@ sched_feat_write(struct file *filp, const char __user *ubuf, | |||
786 | return cnt; | 789 | return cnt; |
787 | } | 790 | } |
788 | 791 | ||
792 | static int sched_feat_open(struct inode *inode, struct file *filp) | ||
793 | { | ||
794 | return single_open(filp, sched_feat_show, NULL); | ||
795 | } | ||
796 | |||
789 | static struct file_operations sched_feat_fops = { | 797 | static struct file_operations sched_feat_fops = { |
790 | .open = sched_feat_open, | 798 | .open = sched_feat_open, |
791 | .read = sched_feat_read, | 799 | .write = sched_feat_write, |
792 | .write = sched_feat_write, | 800 | .read = seq_read, |
801 | .llseek = seq_lseek, | ||
802 | .release = single_release, | ||
793 | }; | 803 | }; |
794 | 804 | ||
795 | static __init int sched_init_debug(void) | 805 | static __init int sched_init_debug(void) |
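The hunk above replaces the hand-rolled kmalloc()/sprintf() read path with the seq_file single_open() helpers, so the sched_features debugfs file no longer needs its own buffer management. For reference, a minimal self-contained user of that pattern looks roughly like the sketch below; the sched_demo_* names and the debugfs file it creates are illustrative only and are not part of this patch.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static struct dentry *sched_demo_dir;	/* illustrative debugfs directory */

/* single_open() arranges for this to run on read; seq_file does the
 * buffering that sched_feat_read() used to do by hand. */
static int sched_demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "jiffies=%lu\n", jiffies);
	return 0;
}

static int sched_demo_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_demo_show, NULL);
}

static const struct file_operations sched_demo_fops = {
	.owner		= THIS_MODULE,
	.open		= sched_demo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init sched_demo_init(void)
{
	sched_demo_dir = debugfs_create_dir("sched_demo", NULL);
	debugfs_create_file("state", 0444, sched_demo_dir, NULL,
			    &sched_demo_fops);
	return 0;
}

static void __exit sched_demo_exit(void)
{
	debugfs_remove_recursive(sched_demo_dir);
}

module_init(sched_demo_init);
module_exit(sched_demo_exit);
MODULE_LICENSE("GPL");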
@@ -1139,7 +1149,6 @@ static void init_rq_hrtick(struct rq *rq) | |||
1139 | 1149 | ||
1140 | hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 1150 | hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
1141 | rq->hrtick_timer.function = hrtick; | 1151 | rq->hrtick_timer.function = hrtick; |
1142 | rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU; | ||
1143 | } | 1152 | } |
1144 | #else /* CONFIG_SCHED_HRTICK */ | 1153 | #else /* CONFIG_SCHED_HRTICK */ |
1145 | static inline void hrtick_clear(struct rq *rq) | 1154 | static inline void hrtick_clear(struct rq *rq) |
@@ -1314,8 +1323,8 @@ static inline void update_load_sub(struct load_weight *lw, unsigned long dec) | |||
1314 | * slice expiry etc. | 1323 | * slice expiry etc. |
1315 | */ | 1324 | */ |
1316 | 1325 | ||
1317 | #define WEIGHT_IDLEPRIO 2 | 1326 | #define WEIGHT_IDLEPRIO 3 |
1318 | #define WMULT_IDLEPRIO (1 << 31) | 1327 | #define WMULT_IDLEPRIO 1431655765 |
1319 | 1328 | ||
1320 | /* | 1329 | /* |
1321 | * Nice levels are multiplicative, with a gentle 10% change for every | 1330 | * Nice levels are multiplicative, with a gentle 10% change for every |
@@ -1453,9 +1462,12 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); | |||
1453 | static unsigned long cpu_avg_load_per_task(int cpu) | 1462 | static unsigned long cpu_avg_load_per_task(int cpu) |
1454 | { | 1463 | { |
1455 | struct rq *rq = cpu_rq(cpu); | 1464 | struct rq *rq = cpu_rq(cpu); |
1465 | unsigned long nr_running = ACCESS_ONCE(rq->nr_running); | ||
1456 | 1466 | ||
1457 | if (rq->nr_running) | 1467 | if (nr_running) |
1458 | rq->avg_load_per_task = rq->load.weight / rq->nr_running; | 1468 | rq->avg_load_per_task = rq->load.weight / nr_running; |
1469 | else | ||
1470 | rq->avg_load_per_task = 0; | ||
1459 | 1471 | ||
1460 | return rq->avg_load_per_task; | 1472 | return rq->avg_load_per_task; |
1461 | } | 1473 | } |
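The cpu_avg_load_per_task() change above reads rq->nr_running exactly once, so the zero check and the division cannot see two different values if another CPU updates the counter in between (the old code could divide by zero that way). A small user-space sketch of the same snapshot pattern, with ACCESS_ONCE() spelled out the way the kernel of this era defines it:

#include <stdio.h>

/* same definition as the kernel's ACCESS_ONCE() in linux/compiler.h */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

struct fake_rq {
	unsigned long nr_running;	/* may change under us on SMP */
	unsigned long load_weight;
	unsigned long avg_load_per_task;
};

static unsigned long update_avg_load(struct fake_rq *rq)
{
	/* One read: either we see 0 and skip the divide, or we divide by
	 * the very value we tested. */
	unsigned long nr_running = ACCESS_ONCE(rq->nr_running);

	if (nr_running)
		rq->avg_load_per_task = rq->load_weight / nr_running;
	else
		rq->avg_load_per_task = 0;

	return rq->avg_load_per_task;
}

int main(void)
{
	struct fake_rq rq = { .nr_running = 3, .load_weight = 3072 };

	printf("avg load per task: %lu\n", update_avg_load(&rq));	/* 1024 */
	return 0;
}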
@@ -1471,27 +1483,13 @@ static void | |||
1471 | update_group_shares_cpu(struct task_group *tg, int cpu, | 1483 | update_group_shares_cpu(struct task_group *tg, int cpu, |
1472 | unsigned long sd_shares, unsigned long sd_rq_weight) | 1484 | unsigned long sd_shares, unsigned long sd_rq_weight) |
1473 | { | 1485 | { |
1474 | int boost = 0; | ||
1475 | unsigned long shares; | 1486 | unsigned long shares; |
1476 | unsigned long rq_weight; | 1487 | unsigned long rq_weight; |
1477 | 1488 | ||
1478 | if (!tg->se[cpu]) | 1489 | if (!tg->se[cpu]) |
1479 | return; | 1490 | return; |
1480 | 1491 | ||
1481 | rq_weight = tg->cfs_rq[cpu]->load.weight; | 1492 | rq_weight = tg->cfs_rq[cpu]->rq_weight; |
1482 | |||
1483 | /* | ||
1484 | * If there are currently no tasks on the cpu pretend there is one of | ||
1485 | * average load so that when a new task gets to run here it will not | ||
1486 | * get delayed by group starvation. | ||
1487 | */ | ||
1488 | if (!rq_weight) { | ||
1489 | boost = 1; | ||
1490 | rq_weight = NICE_0_LOAD; | ||
1491 | } | ||
1492 | |||
1493 | if (unlikely(rq_weight > sd_rq_weight)) | ||
1494 | rq_weight = sd_rq_weight; | ||
1495 | 1493 | ||
1496 | /* | 1494 | /* |
1497 | * \Sum shares * rq_weight | 1495 | * \Sum shares * rq_weight |
@@ -1499,7 +1497,7 @@ update_group_shares_cpu(struct task_group *tg, int cpu, | |||
1499 | * \Sum rq_weight | 1497 | * \Sum rq_weight |
1500 | * | 1498 | * |
1501 | */ | 1499 | */ |
1502 | shares = (sd_shares * rq_weight) / (sd_rq_weight + 1); | 1500 | shares = (sd_shares * rq_weight) / sd_rq_weight; |
1503 | shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES); | 1501 | shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES); |
1504 | 1502 | ||
1505 | if (abs(shares - tg->se[cpu]->load.weight) > | 1503 | if (abs(shares - tg->se[cpu]->load.weight) > |
@@ -1508,11 +1506,7 @@ update_group_shares_cpu(struct task_group *tg, int cpu, | |||
1508 | unsigned long flags; | 1506 | unsigned long flags; |
1509 | 1507 | ||
1510 | spin_lock_irqsave(&rq->lock, flags); | 1508 | spin_lock_irqsave(&rq->lock, flags); |
1511 | /* | 1509 | tg->cfs_rq[cpu]->shares = shares; |
1512 | * record the actual number of shares, not the boosted amount. | ||
1513 | */ | ||
1514 | tg->cfs_rq[cpu]->shares = boost ? 0 : shares; | ||
1515 | tg->cfs_rq[cpu]->rq_weight = rq_weight; | ||
1516 | 1510 | ||
1517 | __set_se_shares(tg->se[cpu], shares); | 1511 | __set_se_shares(tg->se[cpu], shares); |
1518 | spin_unlock_irqrestore(&rq->lock, flags); | 1512 | spin_unlock_irqrestore(&rq->lock, flags); |
@@ -1526,13 +1520,23 @@ update_group_shares_cpu(struct task_group *tg, int cpu, | |||
1526 | */ | 1520 | */ |
1527 | static int tg_shares_up(struct task_group *tg, void *data) | 1521 | static int tg_shares_up(struct task_group *tg, void *data) |
1528 | { | 1522 | { |
1529 | unsigned long rq_weight = 0; | 1523 | unsigned long weight, rq_weight = 0; |
1530 | unsigned long shares = 0; | 1524 | unsigned long shares = 0; |
1531 | struct sched_domain *sd = data; | 1525 | struct sched_domain *sd = data; |
1532 | int i; | 1526 | int i; |
1533 | 1527 | ||
1534 | for_each_cpu_mask(i, sd->span) { | 1528 | for_each_cpu(i, sched_domain_span(sd)) { |
1535 | rq_weight += tg->cfs_rq[i]->load.weight; | 1529 | /* |
1530 | * If there are currently no tasks on the cpu pretend there | ||
1531 | * is one of average load so that when a new task gets to | ||
1532 | * run here it will not get delayed by group starvation. | ||
1533 | */ | ||
1534 | weight = tg->cfs_rq[i]->load.weight; | ||
1535 | if (!weight) | ||
1536 | weight = NICE_0_LOAD; | ||
1537 | |||
1538 | tg->cfs_rq[i]->rq_weight = weight; | ||
1539 | rq_weight += weight; | ||
1536 | shares += tg->cfs_rq[i]->shares; | 1540 | shares += tg->cfs_rq[i]->shares; |
1537 | } | 1541 | } |
1538 | 1542 | ||
@@ -1542,10 +1546,7 @@ static int tg_shares_up(struct task_group *tg, void *data) | |||
1542 | if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE)) | 1546 | if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE)) |
1543 | shares = tg->shares; | 1547 | shares = tg->shares; |
1544 | 1548 | ||
1545 | if (!rq_weight) | 1549 | for_each_cpu(i, sched_domain_span(sd)) |
1546 | rq_weight = cpus_weight(sd->span) * NICE_0_LOAD; | ||
1547 | |||
1548 | for_each_cpu_mask(i, sd->span) | ||
1549 | update_group_shares_cpu(tg, i, shares, rq_weight); | 1550 | update_group_shares_cpu(tg, i, shares, rq_weight); |
1550 | 1551 | ||
1551 | return 0; | 1552 | return 0; |
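tg_shares_up() above now folds the "pretend an idle cpu carries one average task" fixup into the weight it sums, and update_group_shares_cpu() then hands each cpu the group's shares scaled by that cpu's fraction of the total runqueue weight. A user-space sketch with made-up numbers (the MIN_SHARES/MAX_SHARES clamp values approximate those used by sched.c of this era) shows the arithmetic:

#include <stdio.h>

#define MIN_SHARES	2UL
#define MAX_SHARES	(1UL << 18)
#define NICE_0_LOAD	1024UL

static unsigned long clamp_shares(unsigned long s)
{
	if (s < MIN_SHARES)
		return MIN_SHARES;
	if (s > MAX_SHARES)
		return MAX_SHARES;
	return s;
}

int main(void)
{
	unsigned long tg_shares = 1024;			/* group's total weight */
	unsigned long rq_weight[2] = { 2048, 0 };	/* cpu1 is idle         */
	unsigned long sum = 0, shares, i;

	for (i = 0; i < 2; i++) {
		if (!rq_weight[i])		/* idle cpu: pretend NICE_0_LOAD */
			rq_weight[i] = NICE_0_LOAD;
		sum += rq_weight[i];
	}

	for (i = 0; i < 2; i++) {
		/* shares_i = tg_shares * rq_weight_i / sum(rq_weight) */
		shares = clamp_shares(tg_shares * rq_weight[i] / sum);
		printf("cpu%lu gets %lu shares\n", i, shares);	/* 682, 341 */
	}
	return 0;
}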
@@ -1609,6 +1610,39 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd) | |||
1609 | 1610 | ||
1610 | #endif | 1611 | #endif |
1611 | 1612 | ||
1613 | /* | ||
1614 | * double_lock_balance - lock the busiest runqueue, this_rq is locked already. | ||
1615 | */ | ||
1616 | static int double_lock_balance(struct rq *this_rq, struct rq *busiest) | ||
1617 | __releases(this_rq->lock) | ||
1618 | __acquires(busiest->lock) | ||
1619 | __acquires(this_rq->lock) | ||
1620 | { | ||
1621 | int ret = 0; | ||
1622 | |||
1623 | if (unlikely(!irqs_disabled())) { | ||
1624 | /* printk() doesn't work good under rq->lock */ | ||
1625 | spin_unlock(&this_rq->lock); | ||
1626 | BUG_ON(1); | ||
1627 | } | ||
1628 | if (unlikely(!spin_trylock(&busiest->lock))) { | ||
1629 | if (busiest < this_rq) { | ||
1630 | spin_unlock(&this_rq->lock); | ||
1631 | spin_lock(&busiest->lock); | ||
1632 | spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING); | ||
1633 | ret = 1; | ||
1634 | } else | ||
1635 | spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING); | ||
1636 | } | ||
1637 | return ret; | ||
1638 | } | ||
1639 | |||
1640 | static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) | ||
1641 | __releases(busiest->lock) | ||
1642 | { | ||
1643 | spin_unlock(&busiest->lock); | ||
1644 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); | ||
1645 | } | ||
1612 | #endif | 1646 | #endif |
1613 | 1647 | ||
1614 | #ifdef CONFIG_FAIR_GROUP_SCHED | 1648 | #ifdef CONFIG_FAIR_GROUP_SCHED |
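double_lock_balance(), moved above the group-scheduling code so that load_balance_newidle() can call it, avoids ABBA deadlock by falling back to a fixed acquisition order (lower runqueue address first) whenever the opportunistic trylock fails; its return value tells the caller that this_rq was dropped and must be revalidated. The same idea in a stand-alone pthread sketch (names are illustrative, not kernel API):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Take 'want' while already holding 'held'.  Returns 1 if 'held' had to be
 * dropped and re-acquired (the caller must then recheck its state), else 0.
 */
static int lock_second(pthread_mutex_t *held, pthread_mutex_t *want)
{
	int ret = 0;

	if (pthread_mutex_trylock(want) == 0)
		return 0;			/* fast path: no ordering needed */

	if ((uintptr_t)want < (uintptr_t)held) {
		/* Global order is "lower address first": back off and redo. */
		pthread_mutex_unlock(held);
		pthread_mutex_lock(want);
		pthread_mutex_lock(held);
		ret = 1;
	} else {
		pthread_mutex_lock(want);	/* already in order: just wait */
	}
	return ret;
}

int main(void)
{
	pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;

	pthread_mutex_lock(&a);
	printf("held dropped and retaken: %d\n", lock_second(&a, &b));
	pthread_mutex_unlock(&b);
	pthread_mutex_unlock(&a);
	return 0;
}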
@@ -1842,6 +1876,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) | |||
1842 | 1876 | ||
1843 | clock_offset = old_rq->clock - new_rq->clock; | 1877 | clock_offset = old_rq->clock - new_rq->clock; |
1844 | 1878 | ||
1879 | trace_sched_migrate_task(p, task_cpu(p), new_cpu); | ||
1880 | |||
1845 | #ifdef CONFIG_SCHEDSTATS | 1881 | #ifdef CONFIG_SCHEDSTATS |
1846 | if (p->se.wait_start) | 1882 | if (p->se.wait_start) |
1847 | p->se.wait_start -= clock_offset; | 1883 | p->se.wait_start -= clock_offset; |
@@ -2076,15 +2112,17 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) | |||
2076 | int i; | 2112 | int i; |
2077 | 2113 | ||
2078 | /* Skip over this group if it has no CPUs allowed */ | 2114 | /* Skip over this group if it has no CPUs allowed */ |
2079 | if (!cpus_intersects(group->cpumask, p->cpus_allowed)) | 2115 | if (!cpumask_intersects(sched_group_cpus(group), |
2116 | &p->cpus_allowed)) | ||
2080 | continue; | 2117 | continue; |
2081 | 2118 | ||
2082 | local_group = cpu_isset(this_cpu, group->cpumask); | 2119 | local_group = cpumask_test_cpu(this_cpu, |
2120 | sched_group_cpus(group)); | ||
2083 | 2121 | ||
2084 | /* Tally up the load of all CPUs in the group */ | 2122 | /* Tally up the load of all CPUs in the group */ |
2085 | avg_load = 0; | 2123 | avg_load = 0; |
2086 | 2124 | ||
2087 | for_each_cpu_mask_nr(i, group->cpumask) { | 2125 | for_each_cpu(i, sched_group_cpus(group)) { |
2088 | /* Bias balancing toward cpus of our domain */ | 2126 | /* Bias balancing toward cpus of our domain */ |
2089 | if (local_group) | 2127 | if (local_group) |
2090 | load = source_load(i, load_idx); | 2128 | load = source_load(i, load_idx); |
@@ -2116,17 +2154,14 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) | |||
2116 | * find_idlest_cpu - find the idlest cpu among the cpus in group. | 2154 | * find_idlest_cpu - find the idlest cpu among the cpus in group. |
2117 | */ | 2155 | */ |
2118 | static int | 2156 | static int |
2119 | find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu, | 2157 | find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) |
2120 | cpumask_t *tmp) | ||
2121 | { | 2158 | { |
2122 | unsigned long load, min_load = ULONG_MAX; | 2159 | unsigned long load, min_load = ULONG_MAX; |
2123 | int idlest = -1; | 2160 | int idlest = -1; |
2124 | int i; | 2161 | int i; |
2125 | 2162 | ||
2126 | /* Traverse only the allowed CPUs */ | 2163 | /* Traverse only the allowed CPUs */ |
2127 | cpus_and(*tmp, group->cpumask, p->cpus_allowed); | 2164 | for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) { |
2128 | |||
2129 | for_each_cpu_mask_nr(i, *tmp) { | ||
2130 | load = weighted_cpuload(i); | 2165 | load = weighted_cpuload(i); |
2131 | 2166 | ||
2132 | if (load < min_load || (load == min_load && i == this_cpu)) { | 2167 | if (load < min_load || (load == min_load && i == this_cpu)) { |
@@ -2168,7 +2203,6 @@ static int sched_balance_self(int cpu, int flag) | |||
2168 | update_shares(sd); | 2203 | update_shares(sd); |
2169 | 2204 | ||
2170 | while (sd) { | 2205 | while (sd) { |
2171 | cpumask_t span, tmpmask; | ||
2172 | struct sched_group *group; | 2206 | struct sched_group *group; |
2173 | int new_cpu, weight; | 2207 | int new_cpu, weight; |
2174 | 2208 | ||
@@ -2177,14 +2211,13 @@ static int sched_balance_self(int cpu, int flag) | |||
2177 | continue; | 2211 | continue; |
2178 | } | 2212 | } |
2179 | 2213 | ||
2180 | span = sd->span; | ||
2181 | group = find_idlest_group(sd, t, cpu); | 2214 | group = find_idlest_group(sd, t, cpu); |
2182 | if (!group) { | 2215 | if (!group) { |
2183 | sd = sd->child; | 2216 | sd = sd->child; |
2184 | continue; | 2217 | continue; |
2185 | } | 2218 | } |
2186 | 2219 | ||
2187 | new_cpu = find_idlest_cpu(group, t, cpu, &tmpmask); | 2220 | new_cpu = find_idlest_cpu(group, t, cpu); |
2188 | if (new_cpu == -1 || new_cpu == cpu) { | 2221 | if (new_cpu == -1 || new_cpu == cpu) { |
2189 | /* Now try balancing at a lower domain level of cpu */ | 2222 | /* Now try balancing at a lower domain level of cpu */ |
2190 | sd = sd->child; | 2223 | sd = sd->child; |
@@ -2193,10 +2226,10 @@ static int sched_balance_self(int cpu, int flag) | |||
2193 | 2226 | ||
2194 | /* Now try balancing at a lower domain level of new_cpu */ | 2227 | /* Now try balancing at a lower domain level of new_cpu */ |
2195 | cpu = new_cpu; | 2228 | cpu = new_cpu; |
2229 | weight = cpumask_weight(sched_domain_span(sd)); | ||
2196 | sd = NULL; | 2230 | sd = NULL; |
2197 | weight = cpus_weight(span); | ||
2198 | for_each_domain(cpu, tmp) { | 2231 | for_each_domain(cpu, tmp) { |
2199 | if (weight <= cpus_weight(tmp->span)) | 2232 | if (weight <= cpumask_weight(sched_domain_span(tmp))) |
2200 | break; | 2233 | break; |
2201 | if (tmp->flags & flag) | 2234 | if (tmp->flags & flag) |
2202 | sd = tmp; | 2235 | sd = tmp; |
@@ -2241,7 +2274,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) | |||
2241 | cpu = task_cpu(p); | 2274 | cpu = task_cpu(p); |
2242 | 2275 | ||
2243 | for_each_domain(this_cpu, sd) { | 2276 | for_each_domain(this_cpu, sd) { |
2244 | if (cpu_isset(cpu, sd->span)) { | 2277 | if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
2245 | update_shares(sd); | 2278 | update_shares(sd); |
2246 | break; | 2279 | break; |
2247 | } | 2280 | } |
@@ -2251,6 +2284,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) | |||
2251 | 2284 | ||
2252 | smp_wmb(); | 2285 | smp_wmb(); |
2253 | rq = task_rq_lock(p, &flags); | 2286 | rq = task_rq_lock(p, &flags); |
2287 | update_rq_clock(rq); | ||
2254 | old_state = p->state; | 2288 | old_state = p->state; |
2255 | if (!(old_state & state)) | 2289 | if (!(old_state & state)) |
2256 | goto out; | 2290 | goto out; |
@@ -2289,7 +2323,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) | |||
2289 | else { | 2323 | else { |
2290 | struct sched_domain *sd; | 2324 | struct sched_domain *sd; |
2291 | for_each_domain(this_cpu, sd) { | 2325 | for_each_domain(this_cpu, sd) { |
2292 | if (cpu_isset(cpu, sd->span)) { | 2326 | if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
2293 | schedstat_inc(sd, ttwu_wake_remote); | 2327 | schedstat_inc(sd, ttwu_wake_remote); |
2294 | break; | 2328 | break; |
2295 | } | 2329 | } |
@@ -2308,12 +2342,11 @@ out_activate: | |||
2308 | schedstat_inc(p, se.nr_wakeups_local); | 2342 | schedstat_inc(p, se.nr_wakeups_local); |
2309 | else | 2343 | else |
2310 | schedstat_inc(p, se.nr_wakeups_remote); | 2344 | schedstat_inc(p, se.nr_wakeups_remote); |
2311 | update_rq_clock(rq); | ||
2312 | activate_task(rq, p, 1); | 2345 | activate_task(rq, p, 1); |
2313 | success = 1; | 2346 | success = 1; |
2314 | 2347 | ||
2315 | out_running: | 2348 | out_running: |
2316 | trace_sched_wakeup(rq, p); | 2349 | trace_sched_wakeup(rq, p, success); |
2317 | check_preempt_curr(rq, p, sync); | 2350 | check_preempt_curr(rq, p, sync); |
2318 | 2351 | ||
2319 | p->state = TASK_RUNNING; | 2352 | p->state = TASK_RUNNING; |
@@ -2446,7 +2479,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags) | |||
2446 | p->sched_class->task_new(rq, p); | 2479 | p->sched_class->task_new(rq, p); |
2447 | inc_nr_running(rq); | 2480 | inc_nr_running(rq); |
2448 | } | 2481 | } |
2449 | trace_sched_wakeup_new(rq, p); | 2482 | trace_sched_wakeup_new(rq, p, 1); |
2450 | check_preempt_curr(rq, p, 0); | 2483 | check_preempt_curr(rq, p, 0); |
2451 | #ifdef CONFIG_SMP | 2484 | #ifdef CONFIG_SMP |
2452 | if (p->sched_class->task_wake_up) | 2485 | if (p->sched_class->task_wake_up) |
@@ -2809,40 +2842,6 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2) | |||
2809 | } | 2842 | } |
2810 | 2843 | ||
2811 | /* | 2844 | /* |
2812 | * double_lock_balance - lock the busiest runqueue, this_rq is locked already. | ||
2813 | */ | ||
2814 | static int double_lock_balance(struct rq *this_rq, struct rq *busiest) | ||
2815 | __releases(this_rq->lock) | ||
2816 | __acquires(busiest->lock) | ||
2817 | __acquires(this_rq->lock) | ||
2818 | { | ||
2819 | int ret = 0; | ||
2820 | |||
2821 | if (unlikely(!irqs_disabled())) { | ||
2822 | /* printk() doesn't work good under rq->lock */ | ||
2823 | spin_unlock(&this_rq->lock); | ||
2824 | BUG_ON(1); | ||
2825 | } | ||
2826 | if (unlikely(!spin_trylock(&busiest->lock))) { | ||
2827 | if (busiest < this_rq) { | ||
2828 | spin_unlock(&this_rq->lock); | ||
2829 | spin_lock(&busiest->lock); | ||
2830 | spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING); | ||
2831 | ret = 1; | ||
2832 | } else | ||
2833 | spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING); | ||
2834 | } | ||
2835 | return ret; | ||
2836 | } | ||
2837 | |||
2838 | static void double_unlock_balance(struct rq *this_rq, struct rq *busiest) | ||
2839 | __releases(busiest->lock) | ||
2840 | { | ||
2841 | spin_unlock(&busiest->lock); | ||
2842 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); | ||
2843 | } | ||
2844 | |||
2845 | /* | ||
2846 | * If dest_cpu is allowed for this process, migrate the task to it. | 2845 | * If dest_cpu is allowed for this process, migrate the task to it. |
2847 | * This is accomplished by forcing the cpu_allowed mask to only | 2846 | * This is accomplished by forcing the cpu_allowed mask to only |
2848 | * allow dest_cpu, which will force the cpu onto dest_cpu. Then | 2847 | * allow dest_cpu, which will force the cpu onto dest_cpu. Then |
@@ -2855,11 +2854,10 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu) | |||
2855 | struct rq *rq; | 2854 | struct rq *rq; |
2856 | 2855 | ||
2857 | rq = task_rq_lock(p, &flags); | 2856 | rq = task_rq_lock(p, &flags); |
2858 | if (!cpu_isset(dest_cpu, p->cpus_allowed) | 2857 | if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed) |
2859 | || unlikely(!cpu_active(dest_cpu))) | 2858 | || unlikely(!cpu_active(dest_cpu))) |
2860 | goto out; | 2859 | goto out; |
2861 | 2860 | ||
2862 | trace_sched_migrate_task(rq, p, dest_cpu); | ||
2863 | /* force the process onto the specified CPU */ | 2861 | /* force the process onto the specified CPU */ |
2864 | if (migrate_task(p, dest_cpu, &req)) { | 2862 | if (migrate_task(p, dest_cpu, &req)) { |
2865 | /* Need to wait for migration thread (might exit: take ref). */ | 2863 | /* Need to wait for migration thread (might exit: take ref). */ |
@@ -2921,7 +2919,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, | |||
2921 | * 2) cannot be migrated to this CPU due to cpus_allowed, or | 2919 | * 2) cannot be migrated to this CPU due to cpus_allowed, or |
2922 | * 3) are cache-hot on their current CPU. | 2920 | * 3) are cache-hot on their current CPU. |
2923 | */ | 2921 | */ |
2924 | if (!cpu_isset(this_cpu, p->cpus_allowed)) { | 2922 | if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) { |
2925 | schedstat_inc(p, se.nr_failed_migrations_affine); | 2923 | schedstat_inc(p, se.nr_failed_migrations_affine); |
2926 | return 0; | 2924 | return 0; |
2927 | } | 2925 | } |
@@ -3096,7 +3094,7 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
3096 | static struct sched_group * | 3094 | static struct sched_group * |
3097 | find_busiest_group(struct sched_domain *sd, int this_cpu, | 3095 | find_busiest_group(struct sched_domain *sd, int this_cpu, |
3098 | unsigned long *imbalance, enum cpu_idle_type idle, | 3096 | unsigned long *imbalance, enum cpu_idle_type idle, |
3099 | int *sd_idle, const cpumask_t *cpus, int *balance) | 3097 | int *sd_idle, const struct cpumask *cpus, int *balance) |
3100 | { | 3098 | { |
3101 | struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; | 3099 | struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; |
3102 | unsigned long max_load, avg_load, total_load, this_load, total_pwr; | 3100 | unsigned long max_load, avg_load, total_load, this_load, total_pwr; |
@@ -3132,10 +3130,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
3132 | unsigned long sum_avg_load_per_task; | 3130 | unsigned long sum_avg_load_per_task; |
3133 | unsigned long avg_load_per_task; | 3131 | unsigned long avg_load_per_task; |
3134 | 3132 | ||
3135 | local_group = cpu_isset(this_cpu, group->cpumask); | 3133 | local_group = cpumask_test_cpu(this_cpu, |
3134 | sched_group_cpus(group)); | ||
3136 | 3135 | ||
3137 | if (local_group) | 3136 | if (local_group) |
3138 | balance_cpu = first_cpu(group->cpumask); | 3137 | balance_cpu = cpumask_first(sched_group_cpus(group)); |
3139 | 3138 | ||
3140 | /* Tally up the load of all CPUs in the group */ | 3139 | /* Tally up the load of all CPUs in the group */ |
3141 | sum_weighted_load = sum_nr_running = avg_load = 0; | 3140 | sum_weighted_load = sum_nr_running = avg_load = 0; |
@@ -3144,13 +3143,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
3144 | max_cpu_load = 0; | 3143 | max_cpu_load = 0; |
3145 | min_cpu_load = ~0UL; | 3144 | min_cpu_load = ~0UL; |
3146 | 3145 | ||
3147 | for_each_cpu_mask_nr(i, group->cpumask) { | 3146 | for_each_cpu_and(i, sched_group_cpus(group), cpus) { |
3148 | struct rq *rq; | 3147 | struct rq *rq = cpu_rq(i); |
3149 | |||
3150 | if (!cpu_isset(i, *cpus)) | ||
3151 | continue; | ||
3152 | |||
3153 | rq = cpu_rq(i); | ||
3154 | 3148 | ||
3155 | if (*sd_idle && rq->nr_running) | 3149 | if (*sd_idle && rq->nr_running) |
3156 | *sd_idle = 0; | 3150 | *sd_idle = 0; |
@@ -3261,8 +3255,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
3261 | */ | 3255 | */ |
3262 | if ((sum_nr_running < min_nr_running) || | 3256 | if ((sum_nr_running < min_nr_running) || |
3263 | (sum_nr_running == min_nr_running && | 3257 | (sum_nr_running == min_nr_running && |
3264 | first_cpu(group->cpumask) < | 3258 | cpumask_first(sched_group_cpus(group)) > |
3265 | first_cpu(group_min->cpumask))) { | 3259 | cpumask_first(sched_group_cpus(group_min)))) { |
3266 | group_min = group; | 3260 | group_min = group; |
3267 | min_nr_running = sum_nr_running; | 3261 | min_nr_running = sum_nr_running; |
3268 | min_load_per_task = sum_weighted_load / | 3262 | min_load_per_task = sum_weighted_load / |
@@ -3277,8 +3271,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
3277 | if (sum_nr_running <= group_capacity - 1) { | 3271 | if (sum_nr_running <= group_capacity - 1) { |
3278 | if (sum_nr_running > leader_nr_running || | 3272 | if (sum_nr_running > leader_nr_running || |
3279 | (sum_nr_running == leader_nr_running && | 3273 | (sum_nr_running == leader_nr_running && |
3280 | first_cpu(group->cpumask) > | 3274 | cpumask_first(sched_group_cpus(group)) < |
3281 | first_cpu(group_leader->cpumask))) { | 3275 | cpumask_first(sched_group_cpus(group_leader)))) { |
3282 | group_leader = group; | 3276 | group_leader = group; |
3283 | leader_nr_running = sum_nr_running; | 3277 | leader_nr_running = sum_nr_running; |
3284 | } | 3278 | } |
@@ -3404,6 +3398,10 @@ out_balanced: | |||
3404 | 3398 | ||
3405 | if (this == group_leader && group_leader != group_min) { | 3399 | if (this == group_leader && group_leader != group_min) { |
3406 | *imbalance = min_load_per_task; | 3400 | *imbalance = min_load_per_task; |
3401 | if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) { | ||
3402 | cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu = | ||
3403 | cpumask_first(sched_group_cpus(group_leader)); | ||
3404 | } | ||
3407 | return group_min; | 3405 | return group_min; |
3408 | } | 3406 | } |
3409 | #endif | 3407 | #endif |
@@ -3417,16 +3415,16 @@ ret: | |||
3417 | */ | 3415 | */ |
3418 | static struct rq * | 3416 | static struct rq * |
3419 | find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, | 3417 | find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, |
3420 | unsigned long imbalance, const cpumask_t *cpus) | 3418 | unsigned long imbalance, const struct cpumask *cpus) |
3421 | { | 3419 | { |
3422 | struct rq *busiest = NULL, *rq; | 3420 | struct rq *busiest = NULL, *rq; |
3423 | unsigned long max_load = 0; | 3421 | unsigned long max_load = 0; |
3424 | int i; | 3422 | int i; |
3425 | 3423 | ||
3426 | for_each_cpu_mask_nr(i, group->cpumask) { | 3424 | for_each_cpu(i, sched_group_cpus(group)) { |
3427 | unsigned long wl; | 3425 | unsigned long wl; |
3428 | 3426 | ||
3429 | if (!cpu_isset(i, *cpus)) | 3427 | if (!cpumask_test_cpu(i, cpus)) |
3430 | continue; | 3428 | continue; |
3431 | 3429 | ||
3432 | rq = cpu_rq(i); | 3430 | rq = cpu_rq(i); |
@@ -3456,7 +3454,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, | |||
3456 | */ | 3454 | */ |
3457 | static int load_balance(int this_cpu, struct rq *this_rq, | 3455 | static int load_balance(int this_cpu, struct rq *this_rq, |
3458 | struct sched_domain *sd, enum cpu_idle_type idle, | 3456 | struct sched_domain *sd, enum cpu_idle_type idle, |
3459 | int *balance, cpumask_t *cpus) | 3457 | int *balance, struct cpumask *cpus) |
3460 | { | 3458 | { |
3461 | int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; | 3459 | int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; |
3462 | struct sched_group *group; | 3460 | struct sched_group *group; |
@@ -3464,7 +3462,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, | |||
3464 | struct rq *busiest; | 3462 | struct rq *busiest; |
3465 | unsigned long flags; | 3463 | unsigned long flags; |
3466 | 3464 | ||
3467 | cpus_setall(*cpus); | 3465 | cpumask_setall(cpus); |
3468 | 3466 | ||
3469 | /* | 3467 | /* |
3470 | * When power savings policy is enabled for the parent domain, idle | 3468 | * When power savings policy is enabled for the parent domain, idle |
@@ -3524,8 +3522,8 @@ redo: | |||
3524 | 3522 | ||
3525 | /* All tasks on this runqueue were pinned by CPU affinity */ | 3523 | /* All tasks on this runqueue were pinned by CPU affinity */ |
3526 | if (unlikely(all_pinned)) { | 3524 | if (unlikely(all_pinned)) { |
3527 | cpu_clear(cpu_of(busiest), *cpus); | 3525 | cpumask_clear_cpu(cpu_of(busiest), cpus); |
3528 | if (!cpus_empty(*cpus)) | 3526 | if (!cpumask_empty(cpus)) |
3529 | goto redo; | 3527 | goto redo; |
3530 | goto out_balanced; | 3528 | goto out_balanced; |
3531 | } | 3529 | } |
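Throughout this patch the fixed-size on-stack cpumask_t temporaries become cpumask_var_t, which has to be allocated and freed explicitly once CONFIG_CPUMASK_OFFSTACK moves large masks off the stack. A kernel-style sketch of the lifecycle load_balance() and idle_balance() now follow (prune_cpus() is an invented name, not a function from this patch):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Count the cpus left after dropping skip_cpu -- purely illustrative. */
static int prune_cpus(int skip_cpu)
{
	cpumask_var_t cpus;
	int cpu, remaining = 0;

	/* With CONFIG_CPUMASK_OFFSTACK this kmalloc()s the mask;
	 * otherwise it is a no-op that always succeeds. */
	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
		return -ENOMEM;

	cpumask_setall(cpus);			/* start with every cpu set */
	cpumask_clear_cpu(skip_cpu, cpus);	/* drop the one to skip     */

	if (!cpumask_empty(cpus)) {
		for_each_cpu(cpu, cpus)		/* walk the remaining bits  */
			remaining++;
	}

	free_cpumask_var(cpus);
	return remaining;
}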
@@ -3542,7 +3540,8 @@ redo: | |||
3542 | /* don't kick the migration_thread, if the curr | 3540 | /* don't kick the migration_thread, if the curr |
3543 | * task on busiest cpu can't be moved to this_cpu | 3541 | * task on busiest cpu can't be moved to this_cpu |
3544 | */ | 3542 | */ |
3545 | if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) { | 3543 | if (!cpumask_test_cpu(this_cpu, |
3544 | &busiest->curr->cpus_allowed)) { | ||
3546 | spin_unlock_irqrestore(&busiest->lock, flags); | 3545 | spin_unlock_irqrestore(&busiest->lock, flags); |
3547 | all_pinned = 1; | 3546 | all_pinned = 1; |
3548 | goto out_one_pinned; | 3547 | goto out_one_pinned; |
@@ -3617,7 +3616,7 @@ out: | |||
3617 | */ | 3616 | */ |
3618 | static int | 3617 | static int |
3619 | load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd, | 3618 | load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd, |
3620 | cpumask_t *cpus) | 3619 | struct cpumask *cpus) |
3621 | { | 3620 | { |
3622 | struct sched_group *group; | 3621 | struct sched_group *group; |
3623 | struct rq *busiest = NULL; | 3622 | struct rq *busiest = NULL; |
@@ -3626,7 +3625,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd, | |||
3626 | int sd_idle = 0; | 3625 | int sd_idle = 0; |
3627 | int all_pinned = 0; | 3626 | int all_pinned = 0; |
3628 | 3627 | ||
3629 | cpus_setall(*cpus); | 3628 | cpumask_setall(cpus); |
3630 | 3629 | ||
3631 | /* | 3630 | /* |
3632 | * When power savings policy is enabled for the parent domain, idle | 3631 | * When power savings policy is enabled for the parent domain, idle |
@@ -3670,17 +3669,76 @@ redo: | |||
3670 | double_unlock_balance(this_rq, busiest); | 3669 | double_unlock_balance(this_rq, busiest); |
3671 | 3670 | ||
3672 | if (unlikely(all_pinned)) { | 3671 | if (unlikely(all_pinned)) { |
3673 | cpu_clear(cpu_of(busiest), *cpus); | 3672 | cpumask_clear_cpu(cpu_of(busiest), cpus); |
3674 | if (!cpus_empty(*cpus)) | 3673 | if (!cpumask_empty(cpus)) |
3675 | goto redo; | 3674 | goto redo; |
3676 | } | 3675 | } |
3677 | } | 3676 | } |
3678 | 3677 | ||
3679 | if (!ld_moved) { | 3678 | if (!ld_moved) { |
3679 | int active_balance = 0; | ||
3680 | |||
3680 | schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]); | 3681 | schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]); |
3681 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && | 3682 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && |
3682 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) | 3683 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) |
3683 | return -1; | 3684 | return -1; |
3685 | |||
3686 | if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP) | ||
3687 | return -1; | ||
3688 | |||
3689 | if (sd->nr_balance_failed++ < 2) | ||
3690 | return -1; | ||
3691 | |||
3692 | /* | ||
3693 | * The only task running in a non-idle cpu can be moved to this | ||
3694 | * cpu in an attempt to completely freeup the other CPU | ||
3695 | * package. The same method used to move task in load_balance() | ||
3696 | * have been extended for load_balance_newidle() to speedup | ||
3697 | * consolidation at sched_mc=POWERSAVINGS_BALANCE_WAKEUP (2) | ||
3698 | * | ||
3699 | * The package power saving logic comes from | ||
3700 | * find_busiest_group(). If there are no imbalance, then | ||
3701 | * f_b_g() will return NULL. However when sched_mc={1,2} then | ||
3702 | * f_b_g() will select a group from which a running task may be | ||
3703 | * pulled to this cpu in order to make the other package idle. | ||
3704 | * If there is no opportunity to make a package idle and if | ||
3705 | * there are no imbalance, then f_b_g() will return NULL and no | ||
3706 | * action will be taken in load_balance_newidle(). | ||
3707 | * | ||
3708 | * Under normal task pull operation due to imbalance, there | ||
3709 | * will be more than one task in the source run queue and | ||
3710 | * move_tasks() will succeed. ld_moved will be true and this | ||
3711 | * active balance code will not be triggered. | ||
3712 | */ | ||
3713 | |||
3714 | /* Lock busiest in correct order while this_rq is held */ | ||
3715 | double_lock_balance(this_rq, busiest); | ||
3716 | |||
3717 | /* | ||
3718 | * don't kick the migration_thread, if the curr | ||
3719 | * task on busiest cpu can't be moved to this_cpu | ||
3720 | */ | ||
3721 | if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) { | ||
3722 | double_unlock_balance(this_rq, busiest); | ||
3723 | all_pinned = 1; | ||
3724 | return ld_moved; | ||
3725 | } | ||
3726 | |||
3727 | if (!busiest->active_balance) { | ||
3728 | busiest->active_balance = 1; | ||
3729 | busiest->push_cpu = this_cpu; | ||
3730 | active_balance = 1; | ||
3731 | } | ||
3732 | |||
3733 | double_unlock_balance(this_rq, busiest); | ||
3734 | /* | ||
3735 | * Should not call ttwu while holding a rq->lock | ||
3736 | */ | ||
3737 | spin_unlock(&this_rq->lock); | ||
3738 | if (active_balance) | ||
3739 | wake_up_process(busiest->migration_thread); | ||
3740 | spin_lock(&this_rq->lock); | ||
3741 | |||
3684 | } else | 3742 | } else |
3685 | sd->nr_balance_failed = 0; | 3743 | sd->nr_balance_failed = 0; |
3686 | 3744 | ||
@@ -3704,9 +3762,12 @@ out_balanced: | |||
3704 | static void idle_balance(int this_cpu, struct rq *this_rq) | 3762 | static void idle_balance(int this_cpu, struct rq *this_rq) |
3705 | { | 3763 | { |
3706 | struct sched_domain *sd; | 3764 | struct sched_domain *sd; |
3707 | int pulled_task = -1; | 3765 | int pulled_task = 0; |
3708 | unsigned long next_balance = jiffies + HZ; | 3766 | unsigned long next_balance = jiffies + HZ; |
3709 | cpumask_t tmpmask; | 3767 | cpumask_var_t tmpmask; |
3768 | |||
3769 | if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC)) | ||
3770 | return; | ||
3710 | 3771 | ||
3711 | for_each_domain(this_cpu, sd) { | 3772 | for_each_domain(this_cpu, sd) { |
3712 | unsigned long interval; | 3773 | unsigned long interval; |
@@ -3717,7 +3778,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq) | |||
3717 | if (sd->flags & SD_BALANCE_NEWIDLE) | 3778 | if (sd->flags & SD_BALANCE_NEWIDLE) |
3718 | /* If we've pulled tasks over stop searching: */ | 3779 | /* If we've pulled tasks over stop searching: */ |
3719 | pulled_task = load_balance_newidle(this_cpu, this_rq, | 3780 | pulled_task = load_balance_newidle(this_cpu, this_rq, |
3720 | sd, &tmpmask); | 3781 | sd, tmpmask); |
3721 | 3782 | ||
3722 | interval = msecs_to_jiffies(sd->balance_interval); | 3783 | interval = msecs_to_jiffies(sd->balance_interval); |
3723 | if (time_after(next_balance, sd->last_balance + interval)) | 3784 | if (time_after(next_balance, sd->last_balance + interval)) |
@@ -3732,6 +3793,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq) | |||
3732 | */ | 3793 | */ |
3733 | this_rq->next_balance = next_balance; | 3794 | this_rq->next_balance = next_balance; |
3734 | } | 3795 | } |
3796 | free_cpumask_var(tmpmask); | ||
3735 | } | 3797 | } |
3736 | 3798 | ||
3737 | /* | 3799 | /* |
@@ -3769,7 +3831,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) | |||
3769 | /* Search for an sd spanning us and the target CPU. */ | 3831 | /* Search for an sd spanning us and the target CPU. */ |
3770 | for_each_domain(target_cpu, sd) { | 3832 | for_each_domain(target_cpu, sd) { |
3771 | if ((sd->flags & SD_LOAD_BALANCE) && | 3833 | if ((sd->flags & SD_LOAD_BALANCE) && |
3772 | cpu_isset(busiest_cpu, sd->span)) | 3834 | cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) |
3773 | break; | 3835 | break; |
3774 | } | 3836 | } |
3775 | 3837 | ||
@@ -3788,10 +3850,9 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) | |||
3788 | #ifdef CONFIG_NO_HZ | 3850 | #ifdef CONFIG_NO_HZ |
3789 | static struct { | 3851 | static struct { |
3790 | atomic_t load_balancer; | 3852 | atomic_t load_balancer; |
3791 | cpumask_t cpu_mask; | 3853 | cpumask_var_t cpu_mask; |
3792 | } nohz ____cacheline_aligned = { | 3854 | } nohz ____cacheline_aligned = { |
3793 | .load_balancer = ATOMIC_INIT(-1), | 3855 | .load_balancer = ATOMIC_INIT(-1), |
3794 | .cpu_mask = CPU_MASK_NONE, | ||
3795 | }; | 3856 | }; |
3796 | 3857 | ||
3797 | /* | 3858 | /* |
@@ -3819,21 +3880,26 @@ int select_nohz_load_balancer(int stop_tick) | |||
3819 | int cpu = smp_processor_id(); | 3880 | int cpu = smp_processor_id(); |
3820 | 3881 | ||
3821 | if (stop_tick) { | 3882 | if (stop_tick) { |
3822 | cpu_set(cpu, nohz.cpu_mask); | ||
3823 | cpu_rq(cpu)->in_nohz_recently = 1; | 3883 | cpu_rq(cpu)->in_nohz_recently = 1; |
3824 | 3884 | ||
3825 | /* | 3885 | if (!cpu_active(cpu)) { |
3826 | * If we are going offline and still the leader, give up! | 3886 | if (atomic_read(&nohz.load_balancer) != cpu) |
3827 | */ | 3887 | return 0; |
3828 | if (!cpu_active(cpu) && | 3888 | |
3829 | atomic_read(&nohz.load_balancer) == cpu) { | 3889 | /* |
3890 | * If we are going offline and still the leader, | ||
3891 | * give up! | ||
3892 | */ | ||
3830 | if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) | 3893 | if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) |
3831 | BUG(); | 3894 | BUG(); |
3895 | |||
3832 | return 0; | 3896 | return 0; |
3833 | } | 3897 | } |
3834 | 3898 | ||
3899 | cpumask_set_cpu(cpu, nohz.cpu_mask); | ||
3900 | |||
3835 | /* time for ilb owner also to sleep */ | 3901 | /* time for ilb owner also to sleep */ |
3836 | if (cpus_weight(nohz.cpu_mask) == num_online_cpus()) { | 3902 | if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) { |
3837 | if (atomic_read(&nohz.load_balancer) == cpu) | 3903 | if (atomic_read(&nohz.load_balancer) == cpu) |
3838 | atomic_set(&nohz.load_balancer, -1); | 3904 | atomic_set(&nohz.load_balancer, -1); |
3839 | return 0; | 3905 | return 0; |
@@ -3846,10 +3912,10 @@ int select_nohz_load_balancer(int stop_tick) | |||
3846 | } else if (atomic_read(&nohz.load_balancer) == cpu) | 3912 | } else if (atomic_read(&nohz.load_balancer) == cpu) |
3847 | return 1; | 3913 | return 1; |
3848 | } else { | 3914 | } else { |
3849 | if (!cpu_isset(cpu, nohz.cpu_mask)) | 3915 | if (!cpumask_test_cpu(cpu, nohz.cpu_mask)) |
3850 | return 0; | 3916 | return 0; |
3851 | 3917 | ||
3852 | cpu_clear(cpu, nohz.cpu_mask); | 3918 | cpumask_clear_cpu(cpu, nohz.cpu_mask); |
3853 | 3919 | ||
3854 | if (atomic_read(&nohz.load_balancer) == cpu) | 3920 | if (atomic_read(&nohz.load_balancer) == cpu) |
3855 | if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) | 3921 | if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) |
@@ -3877,7 +3943,11 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle) | |||
3877 | unsigned long next_balance = jiffies + 60*HZ; | 3943 | unsigned long next_balance = jiffies + 60*HZ; |
3878 | int update_next_balance = 0; | 3944 | int update_next_balance = 0; |
3879 | int need_serialize; | 3945 | int need_serialize; |
3880 | cpumask_t tmp; | 3946 | cpumask_var_t tmp; |
3947 | |||
3948 | /* Fails alloc? Rebalancing probably not a priority right now. */ | ||
3949 | if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) | ||
3950 | return; | ||
3881 | 3951 | ||
3882 | for_each_domain(cpu, sd) { | 3952 | for_each_domain(cpu, sd) { |
3883 | if (!(sd->flags & SD_LOAD_BALANCE)) | 3953 | if (!(sd->flags & SD_LOAD_BALANCE)) |
@@ -3902,7 +3972,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle) | |||
3902 | } | 3972 | } |
3903 | 3973 | ||
3904 | if (time_after_eq(jiffies, sd->last_balance + interval)) { | 3974 | if (time_after_eq(jiffies, sd->last_balance + interval)) { |
3905 | if (load_balance(cpu, rq, sd, idle, &balance, &tmp)) { | 3975 | if (load_balance(cpu, rq, sd, idle, &balance, tmp)) { |
3906 | /* | 3976 | /* |
3907 | * We've pulled tasks over so either we're no | 3977 | * We've pulled tasks over so either we're no |
3908 | * longer idle, or one of our SMT siblings is | 3978 | * longer idle, or one of our SMT siblings is |
@@ -3936,6 +4006,8 @@ out: | |||
3936 | */ | 4006 | */ |
3937 | if (likely(update_next_balance)) | 4007 | if (likely(update_next_balance)) |
3938 | rq->next_balance = next_balance; | 4008 | rq->next_balance = next_balance; |
4009 | |||
4010 | free_cpumask_var(tmp); | ||
3939 | } | 4011 | } |
3940 | 4012 | ||
3941 | /* | 4013 | /* |
@@ -3960,12 +4032,13 @@ static void run_rebalance_domains(struct softirq_action *h) | |||
3960 | */ | 4032 | */ |
3961 | if (this_rq->idle_at_tick && | 4033 | if (this_rq->idle_at_tick && |
3962 | atomic_read(&nohz.load_balancer) == this_cpu) { | 4034 | atomic_read(&nohz.load_balancer) == this_cpu) { |
3963 | cpumask_t cpus = nohz.cpu_mask; | ||
3964 | struct rq *rq; | 4035 | struct rq *rq; |
3965 | int balance_cpu; | 4036 | int balance_cpu; |
3966 | 4037 | ||
3967 | cpu_clear(this_cpu, cpus); | 4038 | for_each_cpu(balance_cpu, nohz.cpu_mask) { |
3968 | for_each_cpu_mask_nr(balance_cpu, cpus) { | 4039 | if (balance_cpu == this_cpu) |
4040 | continue; | ||
4041 | |||
3969 | /* | 4042 | /* |
3970 | * If this cpu gets work to do, stop the load balancing | 4043 | * If this cpu gets work to do, stop the load balancing |
3971 | * work being done for other cpus. Next load | 4044 | * work being done for other cpus. Next load |
@@ -4003,7 +4076,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu) | |||
4003 | rq->in_nohz_recently = 0; | 4076 | rq->in_nohz_recently = 0; |
4004 | 4077 | ||
4005 | if (atomic_read(&nohz.load_balancer) == cpu) { | 4078 | if (atomic_read(&nohz.load_balancer) == cpu) { |
4006 | cpu_clear(cpu, nohz.cpu_mask); | 4079 | cpumask_clear_cpu(cpu, nohz.cpu_mask); |
4007 | atomic_set(&nohz.load_balancer, -1); | 4080 | atomic_set(&nohz.load_balancer, -1); |
4008 | } | 4081 | } |
4009 | 4082 | ||
@@ -4016,7 +4089,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu) | |||
4016 | * TBD: Traverse the sched domains and nominate | 4089 | * TBD: Traverse the sched domains and nominate |
4017 | * the nearest cpu in the nohz.cpu_mask. | 4090 | * the nearest cpu in the nohz.cpu_mask. |
4018 | */ | 4091 | */ |
4019 | int ilb = first_cpu(nohz.cpu_mask); | 4092 | int ilb = cpumask_first(nohz.cpu_mask); |
4020 | 4093 | ||
4021 | if (ilb < nr_cpu_ids) | 4094 | if (ilb < nr_cpu_ids) |
4022 | resched_cpu(ilb); | 4095 | resched_cpu(ilb); |
@@ -4028,7 +4101,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu) | |||
4028 | * cpus with ticks stopped, is it time for that to stop? | 4101 | * cpus with ticks stopped, is it time for that to stop? |
4029 | */ | 4102 | */ |
4030 | if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu && | 4103 | if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu && |
4031 | cpus_weight(nohz.cpu_mask) == num_online_cpus()) { | 4104 | cpumask_weight(nohz.cpu_mask) == num_online_cpus()) { |
4032 | resched_cpu(cpu); | 4105 | resched_cpu(cpu); |
4033 | return; | 4106 | return; |
4034 | } | 4107 | } |
@@ -4038,7 +4111,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu) | |||
4038 | * someone else, then no need raise the SCHED_SOFTIRQ | 4111 | * someone else, then no need raise the SCHED_SOFTIRQ |
4039 | */ | 4112 | */ |
4040 | if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu && | 4113 | if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu && |
4041 | cpu_isset(cpu, nohz.cpu_mask)) | 4114 | cpumask_test_cpu(cpu, nohz.cpu_mask)) |
4042 | return; | 4115 | return; |
4043 | #endif | 4116 | #endif |
4044 | if (time_after_eq(jiffies, rq->next_balance)) | 4117 | if (time_after_eq(jiffies, rq->next_balance)) |
@@ -4090,13 +4163,17 @@ unsigned long long task_delta_exec(struct task_struct *p) | |||
4090 | * Account user cpu time to a process. | 4163 | * Account user cpu time to a process. |
4091 | * @p: the process that the cpu time gets accounted to | 4164 | * @p: the process that the cpu time gets accounted to |
4092 | * @cputime: the cpu time spent in user space since the last update | 4165 | * @cputime: the cpu time spent in user space since the last update |
4166 | * @cputime_scaled: cputime scaled by cpu frequency | ||
4093 | */ | 4167 | */ |
4094 | void account_user_time(struct task_struct *p, cputime_t cputime) | 4168 | void account_user_time(struct task_struct *p, cputime_t cputime, |
4169 | cputime_t cputime_scaled) | ||
4095 | { | 4170 | { |
4096 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 4171 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
4097 | cputime64_t tmp; | 4172 | cputime64_t tmp; |
4098 | 4173 | ||
4174 | /* Add user time to process. */ | ||
4099 | p->utime = cputime_add(p->utime, cputime); | 4175 | p->utime = cputime_add(p->utime, cputime); |
4176 | p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); | ||
4100 | account_group_user_time(p, cputime); | 4177 | account_group_user_time(p, cputime); |
4101 | 4178 | ||
4102 | /* Add user time to cpustat. */ | 4179 | /* Add user time to cpustat. */ |
@@ -4113,51 +4190,48 @@ void account_user_time(struct task_struct *p, cputime_t cputime) | |||
4113 | * Account guest cpu time to a process. | 4190 | * Account guest cpu time to a process. |
4114 | * @p: the process that the cpu time gets accounted to | 4191 | * @p: the process that the cpu time gets accounted to |
4115 | * @cputime: the cpu time spent in virtual machine since the last update | 4192 | * @cputime: the cpu time spent in virtual machine since the last update |
4193 | * @cputime_scaled: cputime scaled by cpu frequency | ||
4116 | */ | 4194 | */ |
4117 | static void account_guest_time(struct task_struct *p, cputime_t cputime) | 4195 | static void account_guest_time(struct task_struct *p, cputime_t cputime, |
4196 | cputime_t cputime_scaled) | ||
4118 | { | 4197 | { |
4119 | cputime64_t tmp; | 4198 | cputime64_t tmp; |
4120 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 4199 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
4121 | 4200 | ||
4122 | tmp = cputime_to_cputime64(cputime); | 4201 | tmp = cputime_to_cputime64(cputime); |
4123 | 4202 | ||
4203 | /* Add guest time to process. */ | ||
4124 | p->utime = cputime_add(p->utime, cputime); | 4204 | p->utime = cputime_add(p->utime, cputime); |
4205 | p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); | ||
4125 | account_group_user_time(p, cputime); | 4206 | account_group_user_time(p, cputime); |
4126 | p->gtime = cputime_add(p->gtime, cputime); | 4207 | p->gtime = cputime_add(p->gtime, cputime); |
4127 | 4208 | ||
4209 | /* Add guest time to cpustat. */ | ||
4128 | cpustat->user = cputime64_add(cpustat->user, tmp); | 4210 | cpustat->user = cputime64_add(cpustat->user, tmp); |
4129 | cpustat->guest = cputime64_add(cpustat->guest, tmp); | 4211 | cpustat->guest = cputime64_add(cpustat->guest, tmp); |
4130 | } | 4212 | } |
4131 | 4213 | ||
4132 | /* | 4214 | /* |
4133 | * Account scaled user cpu time to a process. | ||
4134 | * @p: the process that the cpu time gets accounted to | ||
4135 | * @cputime: the cpu time spent in user space since the last update | ||
4136 | */ | ||
4137 | void account_user_time_scaled(struct task_struct *p, cputime_t cputime) | ||
4138 | { | ||
4139 | p->utimescaled = cputime_add(p->utimescaled, cputime); | ||
4140 | } | ||
4141 | |||
4142 | /* | ||
4143 | * Account system cpu time to a process. | 4215 | * Account system cpu time to a process. |
4144 | * @p: the process that the cpu time gets accounted to | 4216 | * @p: the process that the cpu time gets accounted to |
4145 | * @hardirq_offset: the offset to subtract from hardirq_count() | 4217 | * @hardirq_offset: the offset to subtract from hardirq_count() |
4146 | * @cputime: the cpu time spent in kernel space since the last update | 4218 | * @cputime: the cpu time spent in kernel space since the last update |
4219 | * @cputime_scaled: cputime scaled by cpu frequency | ||
4147 | */ | 4220 | */ |
4148 | void account_system_time(struct task_struct *p, int hardirq_offset, | 4221 | void account_system_time(struct task_struct *p, int hardirq_offset, |
4149 | cputime_t cputime) | 4222 | cputime_t cputime, cputime_t cputime_scaled) |
4150 | { | 4223 | { |
4151 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 4224 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
4152 | struct rq *rq = this_rq(); | ||
4153 | cputime64_t tmp; | 4225 | cputime64_t tmp; |
4154 | 4226 | ||
4155 | if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { | 4227 | if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { |
4156 | account_guest_time(p, cputime); | 4228 | account_guest_time(p, cputime, cputime_scaled); |
4157 | return; | 4229 | return; |
4158 | } | 4230 | } |
4159 | 4231 | ||
4232 | /* Add system time to process. */ | ||
4160 | p->stime = cputime_add(p->stime, cputime); | 4233 | p->stime = cputime_add(p->stime, cputime); |
4234 | p->stimescaled = cputime_add(p->stimescaled, cputime_scaled); | ||
4161 | account_group_system_time(p, cputime); | 4235 | account_group_system_time(p, cputime); |
4162 | 4236 | ||
4163 | /* Add system time to cpustat. */ | 4237 | /* Add system time to cpustat. */ |
@@ -4166,49 +4240,84 @@ void account_system_time(struct task_struct *p, int hardirq_offset, | |||
4166 | cpustat->irq = cputime64_add(cpustat->irq, tmp); | 4240 | cpustat->irq = cputime64_add(cpustat->irq, tmp); |
4167 | else if (softirq_count()) | 4241 | else if (softirq_count()) |
4168 | cpustat->softirq = cputime64_add(cpustat->softirq, tmp); | 4242 | cpustat->softirq = cputime64_add(cpustat->softirq, tmp); |
4169 | else if (p != rq->idle) | ||
4170 | cpustat->system = cputime64_add(cpustat->system, tmp); | ||
4171 | else if (atomic_read(&rq->nr_iowait) > 0) | ||
4172 | cpustat->iowait = cputime64_add(cpustat->iowait, tmp); | ||
4173 | else | 4243 | else |
4174 | cpustat->idle = cputime64_add(cpustat->idle, tmp); | 4244 | cpustat->system = cputime64_add(cpustat->system, tmp); |
4245 | |||
4175 | /* Account for system time used */ | 4246 | /* Account for system time used */ |
4176 | acct_update_integrals(p); | 4247 | acct_update_integrals(p); |
4177 | } | 4248 | } |
4178 | 4249 | ||
4179 | /* | 4250 | /* |
4180 | * Account scaled system cpu time to a process. | 4251 | * Account for involuntary wait time. |
4181 | * @p: the process that the cpu time gets accounted to | 4252 | * @steal: the cpu time spent in involuntary wait |
4182 | * @hardirq_offset: the offset to subtract from hardirq_count() | ||
4183 | * @cputime: the cpu time spent in kernel space since the last update | ||
4184 | */ | 4253 | */ |
4185 | void account_system_time_scaled(struct task_struct *p, cputime_t cputime) | 4254 | void account_steal_time(cputime_t cputime) |
4186 | { | 4255 | { |
4187 | p->stimescaled = cputime_add(p->stimescaled, cputime); | 4256 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
4257 | cputime64_t cputime64 = cputime_to_cputime64(cputime); | ||
4258 | |||
4259 | cpustat->steal = cputime64_add(cpustat->steal, cputime64); | ||
4188 | } | 4260 | } |
4189 | 4261 | ||
4190 | /* | 4262 | /* |
4191 | * Account for involuntary wait time. | 4263 | * Account for idle time. |
4192 | * @p: the process from which the cpu time has been stolen | 4264 | * @cputime: the cpu time spent in idle wait |
4193 | * @steal: the cpu time spent in involuntary wait | ||
4194 | */ | 4265 | */ |
4195 | void account_steal_time(struct task_struct *p, cputime_t steal) | 4266 | void account_idle_time(cputime_t cputime) |
4196 | { | 4267 | { |
4197 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 4268 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
4198 | cputime64_t tmp = cputime_to_cputime64(steal); | 4269 | cputime64_t cputime64 = cputime_to_cputime64(cputime); |
4199 | struct rq *rq = this_rq(); | 4270 | struct rq *rq = this_rq(); |
4200 | 4271 | ||
4201 | if (p == rq->idle) { | 4272 | if (atomic_read(&rq->nr_iowait) > 0) |
4202 | p->stime = cputime_add(p->stime, steal); | 4273 | cpustat->iowait = cputime64_add(cpustat->iowait, cputime64); |
4203 | account_group_system_time(p, steal); | 4274 | else |
4204 | if (atomic_read(&rq->nr_iowait) > 0) | 4275 | cpustat->idle = cputime64_add(cpustat->idle, cputime64); |
4205 | cpustat->iowait = cputime64_add(cpustat->iowait, tmp); | ||
4206 | else | ||
4207 | cpustat->idle = cputime64_add(cpustat->idle, tmp); | ||
4208 | } else | ||
4209 | cpustat->steal = cputime64_add(cpustat->steal, tmp); | ||
4210 | } | 4276 | } |
4211 | 4277 | ||
4278 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | ||
4279 | |||
4280 | /* | ||
4281 | * Account a single tick of cpu time. | ||
4282 | * @p: the process that the cpu time gets accounted to | ||
4283 | * @user_tick: indicates if the tick is a user or a system tick | ||
4284 | */ | ||
4285 | void account_process_tick(struct task_struct *p, int user_tick) | ||
4286 | { | ||
4287 | cputime_t one_jiffy = jiffies_to_cputime(1); | ||
4288 | cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy); | ||
4289 | struct rq *rq = this_rq(); | ||
4290 | |||
4291 | if (user_tick) | ||
4292 | account_user_time(p, one_jiffy, one_jiffy_scaled); | ||
4293 | else if (p != rq->idle) | ||
4294 | account_system_time(p, HARDIRQ_OFFSET, one_jiffy, | ||
4295 | one_jiffy_scaled); | ||
4296 | else | ||
4297 | account_idle_time(one_jiffy); | ||
4298 | } | ||
4299 | |||
4300 | /* | ||
4301 | * Account multiple ticks of steal time. | ||
4303 | * @ticks: number of stolen ticks | ||
4304 | */ | ||
4305 | void account_steal_ticks(unsigned long ticks) | ||
4306 | { | ||
4307 | account_steal_time(jiffies_to_cputime(ticks)); | ||
4308 | } | ||
4309 | |||
4310 | /* | ||
4311 | * Account multiple ticks of idle time. | ||
4312 | * @ticks: number of idle ticks |||
4313 | */ | ||
4314 | void account_idle_ticks(unsigned long ticks) | ||
4315 | { | ||
4316 | account_idle_time(jiffies_to_cputime(ticks)); | ||
4317 | } | ||
4318 | |||
4319 | #endif | ||
4320 | |||
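The helpers added above split per-tick accounting into dedicated routines, and account_process_tick() now decides where a jiffy goes: user ticks are charged as user time, kernel ticks in an ordinary task as system time, and ticks taken while the idle task runs as idle time (account_idle_time() further splits that into iowait vs. idle). A minimal userspace sketch of that routing decision, with illustrative names only:

#include <stdio.h>

/* Toy model of the decision in account_process_tick(); the kernel charges
 * a whole jiffy (plus its scaled variant) to exactly one of these buckets. */
enum tick_bucket { TICK_USER, TICK_SYSTEM, TICK_IDLE };

static enum tick_bucket classify_tick(int user_tick, int is_idle_task)
{
        if (user_tick)
                return TICK_USER;
        if (!is_idle_task)
                return TICK_SYSTEM;
        return TICK_IDLE;
}

int main(void)
{
        printf("%d %d %d\n",
               classify_tick(1, 0),    /* user tick              -> TICK_USER   */
               classify_tick(0, 0),    /* kernel tick, busy task -> TICK_SYSTEM */
               classify_tick(0, 1));   /* kernel tick, idle task -> TICK_IDLE   */
        return 0;
}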
4212 | /* | 4321 | /* |
4213 | * Use precise platform statistics if available: | 4322 | * Use precise platform statistics if available: |
4214 | */ | 4323 | */ |
@@ -4583,8 +4692,8 @@ EXPORT_SYMBOL(default_wake_function); | |||
4583 | * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns | 4692 | * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns |
4584 | * zero in this (rare) case, and we handle it by continuing to scan the queue. | 4693 | * zero in this (rare) case, and we handle it by continuing to scan the queue. |
4585 | */ | 4694 | */ |
4586 | static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, | 4695 | void __wake_up_common(wait_queue_head_t *q, unsigned int mode, |
4587 | int nr_exclusive, int sync, void *key) | 4696 | int nr_exclusive, int sync, void *key) |
4588 | { | 4697 | { |
4589 | wait_queue_t *curr, *next; | 4698 | wait_queue_t *curr, *next; |
4590 | 4699 | ||
@@ -5022,7 +5131,7 @@ int can_nice(const struct task_struct *p, const int nice) | |||
5022 | * sys_setpriority is a more generic, but much slower function that | 5131 | * sys_setpriority is a more generic, but much slower function that |
5023 | * does similar things. | 5132 | * does similar things. |
5024 | */ | 5133 | */ |
5025 | asmlinkage long sys_nice(int increment) | 5134 | SYSCALL_DEFINE1(nice, int, increment) |
5026 | { | 5135 | { |
5027 | long nice, retval; | 5136 | long nice, retval; |
5028 | 5137 | ||
@@ -5131,6 +5240,22 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) | |||
5131 | set_load_weight(p); | 5240 | set_load_weight(p); |
5132 | } | 5241 | } |
5133 | 5242 | ||
5243 | /* | ||
5244 | * check whether the target process has a UID that matches the current process's |||
5245 | */ | ||
5246 | static bool check_same_owner(struct task_struct *p) | ||
5247 | { | ||
5248 | const struct cred *cred = current_cred(), *pcred; | ||
5249 | bool match; | ||
5250 | |||
5251 | rcu_read_lock(); | ||
5252 | pcred = __task_cred(p); | ||
5253 | match = (cred->euid == pcred->euid || | ||
5254 | cred->euid == pcred->uid); | ||
5255 | rcu_read_unlock(); | ||
5256 | return match; | ||
5257 | } | ||
5258 | |||
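check_same_owner() centralizes the euid/uid comparison that used to be open-coded at each permission check, and takes rcu_read_lock() because task credentials are reached through __task_cred() and are RCU-protected. The predicate itself is simple; as a userspace-style sketch (hypothetical names, no RCU needed outside the kernel):

#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>

/* Same test as check_same_owner(): the caller may act on the target if the
 * caller's effective UID equals the target's real or effective UID. */
static bool same_owner(uid_t caller_euid, uid_t target_uid, uid_t target_euid)
{
        return caller_euid == target_euid || caller_euid == target_uid;
}

int main(void)
{
        printf("%d\n", same_owner(1000, 1000, 0));      /* 1: owns the task  */
        printf("%d\n", same_owner(1000, 0, 0));         /* 0: someone else's */
        return 0;
}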
5134 | static int __sched_setscheduler(struct task_struct *p, int policy, | 5259 | static int __sched_setscheduler(struct task_struct *p, int policy, |
5135 | struct sched_param *param, bool user) | 5260 | struct sched_param *param, bool user) |
5136 | { | 5261 | { |
@@ -5190,8 +5315,7 @@ recheck: | |||
5190 | return -EPERM; | 5315 | return -EPERM; |
5191 | 5316 | ||
5192 | /* can't change other user's priorities */ | 5317 | /* can't change other user's priorities */ |
5193 | if ((current->euid != p->euid) && | 5318 | if (!check_same_owner(p)) |
5194 | (current->euid != p->uid)) | ||
5195 | return -EPERM; | 5319 | return -EPERM; |
5196 | } | 5320 | } |
5197 | 5321 | ||
@@ -5314,8 +5438,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) | |||
5314 | * @policy: new policy. | 5438 | * @policy: new policy. |
5315 | * @param: structure containing the new RT priority. | 5439 | * @param: structure containing the new RT priority. |
5316 | */ | 5440 | */ |
5317 | asmlinkage long | 5441 | SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, |
5318 | sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) | 5442 | struct sched_param __user *, param) |
5319 | { | 5443 | { |
5320 | /* negative values for policy are not valid */ | 5444 | /* negative values for policy are not valid */ |
5321 | if (policy < 0) | 5445 | if (policy < 0) |
@@ -5329,7 +5453,7 @@ sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) | |||
5329 | * @pid: the pid in question. | 5453 | * @pid: the pid in question. |
5330 | * @param: structure containing the new RT priority. | 5454 | * @param: structure containing the new RT priority. |
5331 | */ | 5455 | */ |
5332 | asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param) | 5456 | SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) |
5333 | { | 5457 | { |
5334 | return do_sched_setscheduler(pid, -1, param); | 5458 | return do_sched_setscheduler(pid, -1, param); |
5335 | } | 5459 | } |
@@ -5338,7 +5462,7 @@ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param) | |||
5338 | * sys_sched_getscheduler - get the policy (scheduling class) of a thread | 5462 | * sys_sched_getscheduler - get the policy (scheduling class) of a thread |
5339 | * @pid: the pid in question. | 5463 | * @pid: the pid in question. |
5340 | */ | 5464 | */ |
5341 | asmlinkage long sys_sched_getscheduler(pid_t pid) | 5465 | SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) |
5342 | { | 5466 | { |
5343 | struct task_struct *p; | 5467 | struct task_struct *p; |
5344 | int retval; | 5468 | int retval; |
@@ -5363,7 +5487,7 @@ asmlinkage long sys_sched_getscheduler(pid_t pid) | |||
5363 | * @pid: the pid in question. | 5487 | * @pid: the pid in question. |
5364 | * @param: structure containing the RT priority. | 5488 | * @param: structure containing the RT priority. |
5365 | */ | 5489 | */ |
5366 | asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param) | 5490 | SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) |
5367 | { | 5491 | { |
5368 | struct sched_param lp; | 5492 | struct sched_param lp; |
5369 | struct task_struct *p; | 5493 | struct task_struct *p; |
@@ -5397,10 +5521,9 @@ out_unlock: | |||
5397 | return retval; | 5521 | return retval; |
5398 | } | 5522 | } |
5399 | 5523 | ||
5400 | long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) | 5524 | long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) |
5401 | { | 5525 | { |
5402 | cpumask_t cpus_allowed; | 5526 | cpumask_var_t cpus_allowed, new_mask; |
5403 | cpumask_t new_mask = *in_mask; | ||
5404 | struct task_struct *p; | 5527 | struct task_struct *p; |
5405 | int retval; | 5528 | int retval; |
5406 | 5529 | ||
@@ -5422,46 +5545,57 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) | |||
5422 | get_task_struct(p); | 5545 | get_task_struct(p); |
5423 | read_unlock(&tasklist_lock); | 5546 | read_unlock(&tasklist_lock); |
5424 | 5547 | ||
5548 | if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { | ||
5549 | retval = -ENOMEM; | ||
5550 | goto out_put_task; | ||
5551 | } | ||
5552 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { | ||
5553 | retval = -ENOMEM; | ||
5554 | goto out_free_cpus_allowed; | ||
5555 | } | ||
5425 | retval = -EPERM; | 5556 | retval = -EPERM; |
5426 | if ((current->euid != p->euid) && (current->euid != p->uid) && | 5557 | if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) |
5427 | !capable(CAP_SYS_NICE)) | ||
5428 | goto out_unlock; | 5558 | goto out_unlock; |
5429 | 5559 | ||
5430 | retval = security_task_setscheduler(p, 0, NULL); | 5560 | retval = security_task_setscheduler(p, 0, NULL); |
5431 | if (retval) | 5561 | if (retval) |
5432 | goto out_unlock; | 5562 | goto out_unlock; |
5433 | 5563 | ||
5434 | cpuset_cpus_allowed(p, &cpus_allowed); | 5564 | cpuset_cpus_allowed(p, cpus_allowed); |
5435 | cpus_and(new_mask, new_mask, cpus_allowed); | 5565 | cpumask_and(new_mask, in_mask, cpus_allowed); |
5436 | again: | 5566 | again: |
5437 | retval = set_cpus_allowed_ptr(p, &new_mask); | 5567 | retval = set_cpus_allowed_ptr(p, new_mask); |
5438 | 5568 | ||
5439 | if (!retval) { | 5569 | if (!retval) { |
5440 | cpuset_cpus_allowed(p, &cpus_allowed); | 5570 | cpuset_cpus_allowed(p, cpus_allowed); |
5441 | if (!cpus_subset(new_mask, cpus_allowed)) { | 5571 | if (!cpumask_subset(new_mask, cpus_allowed)) { |
5442 | /* | 5572 | /* |
5443 | * We must have raced with a concurrent cpuset | 5573 | * We must have raced with a concurrent cpuset |
5444 | * update. Just reset the cpus_allowed to the | 5574 | * update. Just reset the cpus_allowed to the |
5445 | * cpuset's cpus_allowed | 5575 | * cpuset's cpus_allowed |
5446 | */ | 5576 | */ |
5447 | new_mask = cpus_allowed; | 5577 | cpumask_copy(new_mask, cpus_allowed); |
5448 | goto again; | 5578 | goto again; |
5449 | } | 5579 | } |
5450 | } | 5580 | } |
5451 | out_unlock: | 5581 | out_unlock: |
5582 | free_cpumask_var(new_mask); | ||
5583 | out_free_cpus_allowed: | ||
5584 | free_cpumask_var(cpus_allowed); | ||
5585 | out_put_task: | ||
5452 | put_task_struct(p); | 5586 | put_task_struct(p); |
5453 | put_online_cpus(); | 5587 | put_online_cpus(); |
5454 | return retval; | 5588 | return retval; |
5455 | } | 5589 | } |
5456 | 5590 | ||
5457 | static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, | 5591 | static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, |
5458 | cpumask_t *new_mask) | 5592 | struct cpumask *new_mask) |
5459 | { | 5593 | { |
5460 | if (len < sizeof(cpumask_t)) { | 5594 | if (len < cpumask_size()) |
5461 | memset(new_mask, 0, sizeof(cpumask_t)); | 5595 | cpumask_clear(new_mask); |
5462 | } else if (len > sizeof(cpumask_t)) { | 5596 | else if (len > cpumask_size()) |
5463 | len = sizeof(cpumask_t); | 5597 | len = cpumask_size(); |
5464 | } | 5598 | |
5465 | return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; | 5599 | return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; |
5466 | } | 5600 | } |
5467 | 5601 | ||
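With cpumask_t no longer kept on the stack, sched_setaffinity() allocates its two temporary masks with alloc_cpumask_var(), so every early exit has to unwind exactly what was already allocated. The same allocate-then-unwind idiom, sketched in plain C with malloc()/free() standing in for alloc_cpumask_var()/free_cpumask_var():

#include <stdio.h>
#include <stdlib.h>

static int do_work(void *a, void *b)
{
        (void)a; (void)b;
        return 0;               /* stand-in for the real affinity update */
}

/* Mirrors the new error unwinding in sched_setaffinity(): each failure
 * jumps to a label that releases only what was allocated before it. */
static int with_two_masks(void)
{
        void *cpus_allowed, *new_mask;
        int retval = -1;        /* -ENOMEM in the kernel version */

        cpus_allowed = malloc(64);
        if (!cpus_allowed)
                goto out;
        new_mask = malloc(64);
        if (!new_mask)
                goto out_free_cpus_allowed;

        retval = do_work(cpus_allowed, new_mask);

        free(new_mask);
out_free_cpus_allowed:
        free(cpus_allowed);
out:
        return retval;
}

int main(void)
{
        printf("%d\n", with_two_masks());
        return 0;
}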
@@ -5471,20 +5605,23 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, | |||
5471 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr | 5605 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr |
5472 | * @user_mask_ptr: user-space pointer to the new cpu mask | 5606 | * @user_mask_ptr: user-space pointer to the new cpu mask |
5473 | */ | 5607 | */ |
5474 | asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, | 5608 | SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, |
5475 | unsigned long __user *user_mask_ptr) | 5609 | unsigned long __user *, user_mask_ptr) |
5476 | { | 5610 | { |
5477 | cpumask_t new_mask; | 5611 | cpumask_var_t new_mask; |
5478 | int retval; | 5612 | int retval; |
5479 | 5613 | ||
5480 | retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask); | 5614 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) |
5481 | if (retval) | 5615 | return -ENOMEM; |
5482 | return retval; | ||
5483 | 5616 | ||
5484 | return sched_setaffinity(pid, &new_mask); | 5617 | retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); |
5618 | if (retval == 0) | ||
5619 | retval = sched_setaffinity(pid, new_mask); | ||
5620 | free_cpumask_var(new_mask); | ||
5621 | return retval; | ||
5485 | } | 5622 | } |
5486 | 5623 | ||
5487 | long sched_getaffinity(pid_t pid, cpumask_t *mask) | 5624 | long sched_getaffinity(pid_t pid, struct cpumask *mask) |
5488 | { | 5625 | { |
5489 | struct task_struct *p; | 5626 | struct task_struct *p; |
5490 | int retval; | 5627 | int retval; |
@@ -5501,7 +5638,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask) | |||
5501 | if (retval) | 5638 | if (retval) |
5502 | goto out_unlock; | 5639 | goto out_unlock; |
5503 | 5640 | ||
5504 | cpus_and(*mask, p->cpus_allowed, cpu_online_map); | 5641 | cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); |
5505 | 5642 | ||
5506 | out_unlock: | 5643 | out_unlock: |
5507 | read_unlock(&tasklist_lock); | 5644 | read_unlock(&tasklist_lock); |
@@ -5516,23 +5653,28 @@ out_unlock: | |||
5516 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr | 5653 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr |
5517 | * @user_mask_ptr: user-space pointer to hold the current cpu mask | 5654 | * @user_mask_ptr: user-space pointer to hold the current cpu mask |
5518 | */ | 5655 | */ |
5519 | asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len, | 5656 | SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, |
5520 | unsigned long __user *user_mask_ptr) | 5657 | unsigned long __user *, user_mask_ptr) |
5521 | { | 5658 | { |
5522 | int ret; | 5659 | int ret; |
5523 | cpumask_t mask; | 5660 | cpumask_var_t mask; |
5524 | 5661 | ||
5525 | if (len < sizeof(cpumask_t)) | 5662 | if (len < cpumask_size()) |
5526 | return -EINVAL; | 5663 | return -EINVAL; |
5527 | 5664 | ||
5528 | ret = sched_getaffinity(pid, &mask); | 5665 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) |
5529 | if (ret < 0) | 5666 | return -ENOMEM; |
5530 | return ret; | ||
5531 | 5667 | ||
5532 | if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t))) | 5668 | ret = sched_getaffinity(pid, mask); |
5533 | return -EFAULT; | 5669 | if (ret == 0) { |
5670 | if (copy_to_user(user_mask_ptr, mask, cpumask_size())) | ||
5671 | ret = -EFAULT; | ||
5672 | else | ||
5673 | ret = cpumask_size(); | ||
5674 | } | ||
5675 | free_cpumask_var(mask); | ||
5534 | 5676 | ||
5535 | return sizeof(cpumask_t); | 5677 | return ret; |
5536 | } | 5678 | } |
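Note the changed success return: sys_sched_getaffinity() now reports cpumask_size(), the size in bytes of the kernel's cpumask, instead of sizeof(cpumask_t). The raw syscall (unlike the glibc wrapper, which converts success to 0) makes this visible from userspace; a hedged example, and the exact byte count depends on the kernel configuration:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        cpu_set_t set;

        /* On success the raw syscall returns the number of bytes the kernel
         * wrote into the mask; a too-small len gets -EINVAL instead. */
        long n = syscall(SYS_sched_getaffinity, 0, sizeof(set), &set);

        printf("kernel cpumask size: %ld bytes\n", n);
        return n < 0;
}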
5537 | 5679 | ||
5538 | /** | 5680 | /** |
@@ -5541,7 +5683,7 @@ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len, | |||
5541 | * This function yields the current CPU to other tasks. If there are no | 5683 | * This function yields the current CPU to other tasks. If there are no |
5542 | * other threads running on this CPU then this function will return. | 5684 | * other threads running on this CPU then this function will return. |
5543 | */ | 5685 | */ |
5544 | asmlinkage long sys_sched_yield(void) | 5686 | SYSCALL_DEFINE0(sched_yield) |
5545 | { | 5687 | { |
5546 | struct rq *rq = this_rq_lock(); | 5688 | struct rq *rq = this_rq_lock(); |
5547 | 5689 | ||
@@ -5682,7 +5824,7 @@ long __sched io_schedule_timeout(long timeout) | |||
5682 | * this syscall returns the maximum rt_priority that can be used | 5824 | * this syscall returns the maximum rt_priority that can be used |
5683 | * by a given scheduling class. | 5825 | * by a given scheduling class. |
5684 | */ | 5826 | */ |
5685 | asmlinkage long sys_sched_get_priority_max(int policy) | 5827 | SYSCALL_DEFINE1(sched_get_priority_max, int, policy) |
5686 | { | 5828 | { |
5687 | int ret = -EINVAL; | 5829 | int ret = -EINVAL; |
5688 | 5830 | ||
@@ -5707,7 +5849,7 @@ asmlinkage long sys_sched_get_priority_max(int policy) | |||
5707 | * this syscall returns the minimum rt_priority that can be used | 5849 | * this syscall returns the minimum rt_priority that can be used |
5708 | * by a given scheduling class. | 5850 | * by a given scheduling class. |
5709 | */ | 5851 | */ |
5710 | asmlinkage long sys_sched_get_priority_min(int policy) | 5852 | SYSCALL_DEFINE1(sched_get_priority_min, int, policy) |
5711 | { | 5853 | { |
5712 | int ret = -EINVAL; | 5854 | int ret = -EINVAL; |
5713 | 5855 | ||
@@ -5732,8 +5874,8 @@ asmlinkage long sys_sched_get_priority_min(int policy) | |||
5732 | * this syscall writes the default timeslice value of a given process | 5874 | * this syscall writes the default timeslice value of a given process |
5733 | * into the user-space timespec buffer. A value of '0' means infinity. | 5875 | * into the user-space timespec buffer. A value of '0' means infinity. |
5734 | */ | 5876 | */ |
5735 | asmlinkage | 5877 | SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, |
5736 | long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval) | 5878 | struct timespec __user *, interval) |
5737 | { | 5879 | { |
5738 | struct task_struct *p; | 5880 | struct task_struct *p; |
5739 | unsigned int time_slice; | 5881 | unsigned int time_slice; |
@@ -5802,12 +5944,7 @@ void sched_show_task(struct task_struct *p) | |||
5802 | printk(KERN_CONT " %016lx ", thread_saved_pc(p)); | 5944 | printk(KERN_CONT " %016lx ", thread_saved_pc(p)); |
5803 | #endif | 5945 | #endif |
5804 | #ifdef CONFIG_DEBUG_STACK_USAGE | 5946 | #ifdef CONFIG_DEBUG_STACK_USAGE |
5805 | { | 5947 | free = stack_not_used(p); |
5806 | unsigned long *n = end_of_stack(p); | ||
5807 | while (!*n) | ||
5808 | n++; | ||
5809 | free = (unsigned long)n - (unsigned long)end_of_stack(p); | ||
5810 | } | ||
5811 | #endif | 5948 | #endif |
5812 | printk(KERN_CONT "%5lu %5d %6d\n", free, | 5949 | printk(KERN_CONT "%5lu %5d %6d\n", free, |
5813 | task_pid_nr(p), task_pid_nr(p->real_parent)); | 5950 | task_pid_nr(p), task_pid_nr(p->real_parent)); |
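stack_not_used() replaces the open-coded scan deleted above. For reference, this is what the removed block computed, reproduced with comments; the new helper is presumably expected to return the same quantity:

/* Starting from the low end of the task's stack, skip the words that are
 * still zero (never written to) and report that distance, in bytes, as
 * never-touched stack space. */
unsigned long *n = end_of_stack(p);

while (!*n)
        n++;
free = (unsigned long)n - (unsigned long)end_of_stack(p);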
@@ -5868,14 +6005,15 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
5868 | struct rq *rq = cpu_rq(cpu); | 6005 | struct rq *rq = cpu_rq(cpu); |
5869 | unsigned long flags; | 6006 | unsigned long flags; |
5870 | 6007 | ||
6008 | spin_lock_irqsave(&rq->lock, flags); | ||
6009 | |||
5871 | __sched_fork(idle); | 6010 | __sched_fork(idle); |
5872 | idle->se.exec_start = sched_clock(); | 6011 | idle->se.exec_start = sched_clock(); |
5873 | 6012 | ||
5874 | idle->prio = idle->normal_prio = MAX_PRIO; | 6013 | idle->prio = idle->normal_prio = MAX_PRIO; |
5875 | idle->cpus_allowed = cpumask_of_cpu(cpu); | 6014 | cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); |
5876 | __set_task_cpu(idle, cpu); | 6015 | __set_task_cpu(idle, cpu); |
5877 | 6016 | ||
5878 | spin_lock_irqsave(&rq->lock, flags); | ||
5879 | rq->curr = rq->idle = idle; | 6017 | rq->curr = rq->idle = idle; |
5880 | #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) | 6018 | #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) |
5881 | idle->oncpu = 1; | 6019 | idle->oncpu = 1; |
@@ -5892,6 +6030,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
5892 | * The idle tasks have their own, simple scheduling class: | 6030 | * The idle tasks have their own, simple scheduling class: |
5893 | */ | 6031 | */ |
5894 | idle->sched_class = &idle_sched_class; | 6032 | idle->sched_class = &idle_sched_class; |
6033 | ftrace_graph_init_task(idle); | ||
5895 | } | 6034 | } |
5896 | 6035 | ||
5897 | /* | 6036 | /* |
@@ -5899,9 +6038,9 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
5899 | * indicates which cpus entered this state. This is used | 6038 | * indicates which cpus entered this state. This is used |
5900 | * in the rcu update to wait only for active cpus. For systems | 6039 | * in the rcu update to wait only for active cpus. For systems |
5901 | * which do not switch off the HZ timer, nohz_cpu_mask should | 6040 | * which do not switch off the HZ timer, nohz_cpu_mask should |
5902 | * always be CPU_MASK_NONE. | 6041 | * always be CPU_BITS_NONE. |
5903 | */ | 6042 | */ |
5904 | cpumask_t nohz_cpu_mask = CPU_MASK_NONE; | 6043 | cpumask_var_t nohz_cpu_mask; |
5905 | 6044 | ||
5906 | /* | 6045 | /* |
5907 | * Increase the granularity value when there are more CPUs, | 6046 | * Increase the granularity value when there are more CPUs, |
@@ -5956,7 +6095,7 @@ static inline void sched_init_granularity(void) | |||
5956 | * task must not exit() & deallocate itself prematurely. The | 6095 | * task must not exit() & deallocate itself prematurely. The |
5957 | * call is not atomic; no spinlocks may be held. | 6096 | * call is not atomic; no spinlocks may be held. |
5958 | */ | 6097 | */ |
5959 | int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) | 6098 | int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) |
5960 | { | 6099 | { |
5961 | struct migration_req req; | 6100 | struct migration_req req; |
5962 | unsigned long flags; | 6101 | unsigned long flags; |
@@ -5964,13 +6103,13 @@ int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) | |||
5964 | int ret = 0; | 6103 | int ret = 0; |
5965 | 6104 | ||
5966 | rq = task_rq_lock(p, &flags); | 6105 | rq = task_rq_lock(p, &flags); |
5967 | if (!cpus_intersects(*new_mask, cpu_online_map)) { | 6106 | if (!cpumask_intersects(new_mask, cpu_online_mask)) { |
5968 | ret = -EINVAL; | 6107 | ret = -EINVAL; |
5969 | goto out; | 6108 | goto out; |
5970 | } | 6109 | } |
5971 | 6110 | ||
5972 | if (unlikely((p->flags & PF_THREAD_BOUND) && p != current && | 6111 | if (unlikely((p->flags & PF_THREAD_BOUND) && p != current && |
5973 | !cpus_equal(p->cpus_allowed, *new_mask))) { | 6112 | !cpumask_equal(&p->cpus_allowed, new_mask))) { |
5974 | ret = -EINVAL; | 6113 | ret = -EINVAL; |
5975 | goto out; | 6114 | goto out; |
5976 | } | 6115 | } |
@@ -5978,15 +6117,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) | |||
5978 | if (p->sched_class->set_cpus_allowed) | 6117 | if (p->sched_class->set_cpus_allowed) |
5979 | p->sched_class->set_cpus_allowed(p, new_mask); | 6118 | p->sched_class->set_cpus_allowed(p, new_mask); |
5980 | else { | 6119 | else { |
5981 | p->cpus_allowed = *new_mask; | 6120 | cpumask_copy(&p->cpus_allowed, new_mask); |
5982 | p->rt.nr_cpus_allowed = cpus_weight(*new_mask); | 6121 | p->rt.nr_cpus_allowed = cpumask_weight(new_mask); |
5983 | } | 6122 | } |
5984 | 6123 | ||
5985 | /* Can the task run on the task's current CPU? If so, we're done */ | 6124 | /* Can the task run on the task's current CPU? If so, we're done */ |
5986 | if (cpu_isset(task_cpu(p), *new_mask)) | 6125 | if (cpumask_test_cpu(task_cpu(p), new_mask)) |
5987 | goto out; | 6126 | goto out; |
5988 | 6127 | ||
5989 | if (migrate_task(p, any_online_cpu(*new_mask), &req)) { | 6128 | if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) { |
5990 | /* Need help from migration thread: drop lock and wait. */ | 6129 | /* Need help from migration thread: drop lock and wait. */ |
5991 | task_rq_unlock(rq, &flags); | 6130 | task_rq_unlock(rq, &flags); |
5992 | wake_up_process(rq->migration_thread); | 6131 | wake_up_process(rq->migration_thread); |
@@ -6028,7 +6167,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) | |||
6028 | if (task_cpu(p) != src_cpu) | 6167 | if (task_cpu(p) != src_cpu) |
6029 | goto done; | 6168 | goto done; |
6030 | /* Affinity changed (again). */ | 6169 | /* Affinity changed (again). */ |
6031 | if (!cpu_isset(dest_cpu, p->cpus_allowed)) | 6170 | if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) |
6032 | goto fail; | 6171 | goto fail; |
6033 | 6172 | ||
6034 | on_rq = p->se.on_rq; | 6173 | on_rq = p->se.on_rq; |
@@ -6122,54 +6261,44 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu) | |||
6122 | 6261 | ||
6123 | /* | 6262 | /* |
6124 | * Figure out where task on dead CPU should go, use force if necessary. | 6263 | * Figure out where task on dead CPU should go, use force if necessary. |
6125 | * NOTE: interrupts should be disabled by the caller | ||
6126 | */ | 6264 | */ |
6127 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) | 6265 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) |
6128 | { | 6266 | { |
6129 | unsigned long flags; | ||
6130 | cpumask_t mask; | ||
6131 | struct rq *rq; | ||
6132 | int dest_cpu; | 6267 | int dest_cpu; |
6268 | const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu)); | ||
6133 | 6269 | ||
6134 | do { | 6270 | again: |
6135 | /* On same node? */ | 6271 | /* Look for allowed, online CPU in same node. */ |
6136 | mask = node_to_cpumask(cpu_to_node(dead_cpu)); | 6272 | for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask) |
6137 | cpus_and(mask, mask, p->cpus_allowed); | 6273 | if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) |
6138 | dest_cpu = any_online_cpu(mask); | 6274 | goto move; |
6139 | |||
6140 | /* On any allowed CPU? */ | ||
6141 | if (dest_cpu >= nr_cpu_ids) | ||
6142 | dest_cpu = any_online_cpu(p->cpus_allowed); | ||
6143 | 6275 | ||
6144 | /* No more Mr. Nice Guy. */ | 6276 | /* Any allowed, online CPU? */ |
6145 | if (dest_cpu >= nr_cpu_ids) { | 6277 | dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask); |
6146 | cpumask_t cpus_allowed; | 6278 | if (dest_cpu < nr_cpu_ids) |
6279 | goto move; | ||
6147 | 6280 | ||
6148 | cpuset_cpus_allowed_locked(p, &cpus_allowed); | 6281 | /* No more Mr. Nice Guy. */ |
6149 | /* | 6282 | if (dest_cpu >= nr_cpu_ids) { |
6150 | * Try to stay on the same cpuset, where the | 6283 | cpuset_cpus_allowed_locked(p, &p->cpus_allowed); |
6151 | * current cpuset may be a subset of all cpus. | 6284 | dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed); |
6152 | * The cpuset_cpus_allowed_locked() variant of | ||
6153 | * cpuset_cpus_allowed() will not block. It must be | ||
6154 | * called within calls to cpuset_lock/cpuset_unlock. | ||
6155 | */ | ||
6156 | rq = task_rq_lock(p, &flags); | ||
6157 | p->cpus_allowed = cpus_allowed; | ||
6158 | dest_cpu = any_online_cpu(p->cpus_allowed); | ||
6159 | task_rq_unlock(rq, &flags); | ||
6160 | 6285 | ||
6161 | /* | 6286 | /* |
6162 | * Don't tell them about moving exiting tasks or | 6287 | * Don't tell them about moving exiting tasks or |
6163 | * kernel threads (both mm NULL), since they never | 6288 | * kernel threads (both mm NULL), since they never |
6164 | * leave kernel. | 6289 | * leave kernel. |
6165 | */ | 6290 | */ |
6166 | if (p->mm && printk_ratelimit()) { | 6291 | if (p->mm && printk_ratelimit()) { |
6167 | printk(KERN_INFO "process %d (%s) no " | 6292 | printk(KERN_INFO "process %d (%s) no " |
6168 | "longer affine to cpu%d\n", | 6293 | "longer affine to cpu%d\n", |
6169 | task_pid_nr(p), p->comm, dead_cpu); | 6294 | task_pid_nr(p), p->comm, dead_cpu); |
6170 | } | ||
6171 | } | 6295 | } |
6172 | } while (!__migrate_task_irq(p, dead_cpu, dest_cpu)); | 6296 | } |
6297 | |||
6298 | move: | ||
6299 | /* It can have affinity changed while we were choosing. */ | ||
6300 | if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu))) | ||
6301 | goto again; | ||
6173 | } | 6302 | } |
6174 | 6303 | ||
6175 | /* | 6304 | /* |
@@ -6181,7 +6310,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) | |||
6181 | */ | 6310 | */ |
6182 | static void migrate_nr_uninterruptible(struct rq *rq_src) | 6311 | static void migrate_nr_uninterruptible(struct rq *rq_src) |
6183 | { | 6312 | { |
6184 | struct rq *rq_dest = cpu_rq(any_online_cpu(*CPU_MASK_ALL_PTR)); | 6313 | struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask)); |
6185 | unsigned long flags; | 6314 | unsigned long flags; |
6186 | 6315 | ||
6187 | local_irq_save(flags); | 6316 | local_irq_save(flags); |
@@ -6471,7 +6600,7 @@ static void set_rq_online(struct rq *rq) | |||
6471 | if (!rq->online) { | 6600 | if (!rq->online) { |
6472 | const struct sched_class *class; | 6601 | const struct sched_class *class; |
6473 | 6602 | ||
6474 | cpu_set(rq->cpu, rq->rd->online); | 6603 | cpumask_set_cpu(rq->cpu, rq->rd->online); |
6475 | rq->online = 1; | 6604 | rq->online = 1; |
6476 | 6605 | ||
6477 | for_each_class(class) { | 6606 | for_each_class(class) { |
@@ -6491,7 +6620,7 @@ static void set_rq_offline(struct rq *rq) | |||
6491 | class->rq_offline(rq); | 6620 | class->rq_offline(rq); |
6492 | } | 6621 | } |
6493 | 6622 | ||
6494 | cpu_clear(rq->cpu, rq->rd->online); | 6623 | cpumask_clear_cpu(rq->cpu, rq->rd->online); |
6495 | rq->online = 0; | 6624 | rq->online = 0; |
6496 | } | 6625 | } |
6497 | } | 6626 | } |
@@ -6532,7 +6661,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
6532 | rq = cpu_rq(cpu); | 6661 | rq = cpu_rq(cpu); |
6533 | spin_lock_irqsave(&rq->lock, flags); | 6662 | spin_lock_irqsave(&rq->lock, flags); |
6534 | if (rq->rd) { | 6663 | if (rq->rd) { |
6535 | BUG_ON(!cpu_isset(cpu, rq->rd->span)); | 6664 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
6536 | 6665 | ||
6537 | set_rq_online(rq); | 6666 | set_rq_online(rq); |
6538 | } | 6667 | } |
@@ -6546,7 +6675,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
6546 | break; | 6675 | break; |
6547 | /* Unbind it from offline cpu so it can run. Fall thru. */ | 6676 | /* Unbind it from offline cpu so it can run. Fall thru. */ |
6548 | kthread_bind(cpu_rq(cpu)->migration_thread, | 6677 | kthread_bind(cpu_rq(cpu)->migration_thread, |
6549 | any_online_cpu(cpu_online_map)); | 6678 | cpumask_any(cpu_online_mask)); |
6550 | kthread_stop(cpu_rq(cpu)->migration_thread); | 6679 | kthread_stop(cpu_rq(cpu)->migration_thread); |
6551 | cpu_rq(cpu)->migration_thread = NULL; | 6680 | cpu_rq(cpu)->migration_thread = NULL; |
6552 | break; | 6681 | break; |
@@ -6583,7 +6712,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
6583 | req = list_entry(rq->migration_queue.next, | 6712 | req = list_entry(rq->migration_queue.next, |
6584 | struct migration_req, list); | 6713 | struct migration_req, list); |
6585 | list_del_init(&req->list); | 6714 | list_del_init(&req->list); |
6715 | spin_unlock_irq(&rq->lock); | ||
6586 | complete(&req->done); | 6716 | complete(&req->done); |
6717 | spin_lock_irq(&rq->lock); | ||
6587 | } | 6718 | } |
6588 | spin_unlock_irq(&rq->lock); | 6719 | spin_unlock_irq(&rq->lock); |
6589 | break; | 6720 | break; |
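The unlock/lock pair added around complete() is deliberate: completing the request wakes its waiter, and a wake-up may itself need to take a runqueue lock, possibly this very one, so finishing the request while still holding rq->lock risks deadlock (that is the rationale as read from the hunk; the change itself only shows the locking). The resulting pattern is simply:

/* Drop the queue lock before completing the request, because the wake-up
 * inside complete() may need a runqueue lock of its own, then re-take the
 * lock to keep draining the migration queue. */
spin_unlock_irq(&rq->lock);
complete(&req->done);
spin_lock_irq(&rq->lock);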
@@ -6594,7 +6725,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
6594 | rq = cpu_rq(cpu); | 6725 | rq = cpu_rq(cpu); |
6595 | spin_lock_irqsave(&rq->lock, flags); | 6726 | spin_lock_irqsave(&rq->lock, flags); |
6596 | if (rq->rd) { | 6727 | if (rq->rd) { |
6597 | BUG_ON(!cpu_isset(cpu, rq->rd->span)); | 6728 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
6598 | set_rq_offline(rq); | 6729 | set_rq_offline(rq); |
6599 | } | 6730 | } |
6600 | spin_unlock_irqrestore(&rq->lock, flags); | 6731 | spin_unlock_irqrestore(&rq->lock, flags); |
@@ -6632,36 +6763,14 @@ early_initcall(migration_init); | |||
6632 | 6763 | ||
6633 | #ifdef CONFIG_SCHED_DEBUG | 6764 | #ifdef CONFIG_SCHED_DEBUG |
6634 | 6765 | ||
6635 | static inline const char *sd_level_to_string(enum sched_domain_level lvl) | ||
6636 | { | ||
6637 | switch (lvl) { | ||
6638 | case SD_LV_NONE: | ||
6639 | return "NONE"; | ||
6640 | case SD_LV_SIBLING: | ||
6641 | return "SIBLING"; | ||
6642 | case SD_LV_MC: | ||
6643 | return "MC"; | ||
6644 | case SD_LV_CPU: | ||
6645 | return "CPU"; | ||
6646 | case SD_LV_NODE: | ||
6647 | return "NODE"; | ||
6648 | case SD_LV_ALLNODES: | ||
6649 | return "ALLNODES"; | ||
6650 | case SD_LV_MAX: | ||
6651 | return "MAX"; | ||
6652 | |||
6653 | } | ||
6654 | return "MAX"; | ||
6655 | } | ||
6656 | |||
6657 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | 6766 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, |
6658 | cpumask_t *groupmask) | 6767 | struct cpumask *groupmask) |
6659 | { | 6768 | { |
6660 | struct sched_group *group = sd->groups; | 6769 | struct sched_group *group = sd->groups; |
6661 | char str[256]; | 6770 | char str[256]; |
6662 | 6771 | ||
6663 | cpulist_scnprintf(str, sizeof(str), sd->span); | 6772 | cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd)); |
6664 | cpus_clear(*groupmask); | 6773 | cpumask_clear(groupmask); |
6665 | 6774 | ||
6666 | printk(KERN_DEBUG "%*s domain %d: ", level, "", level); | 6775 | printk(KERN_DEBUG "%*s domain %d: ", level, "", level); |
6667 | 6776 | ||
@@ -6673,14 +6782,13 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
6673 | return -1; | 6782 | return -1; |
6674 | } | 6783 | } |
6675 | 6784 | ||
6676 | printk(KERN_CONT "span %s level %s\n", | 6785 | printk(KERN_CONT "span %s level %s\n", str, sd->name); |
6677 | str, sd_level_to_string(sd->level)); | ||
6678 | 6786 | ||
6679 | if (!cpu_isset(cpu, sd->span)) { | 6787 | if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
6680 | printk(KERN_ERR "ERROR: domain->span does not contain " | 6788 | printk(KERN_ERR "ERROR: domain->span does not contain " |
6681 | "CPU%d\n", cpu); | 6789 | "CPU%d\n", cpu); |
6682 | } | 6790 | } |
6683 | if (!cpu_isset(cpu, group->cpumask)) { | 6791 | if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { |
6684 | printk(KERN_ERR "ERROR: domain->groups does not contain" | 6792 | printk(KERN_ERR "ERROR: domain->groups does not contain" |
6685 | " CPU%d\n", cpu); | 6793 | " CPU%d\n", cpu); |
6686 | } | 6794 | } |
@@ -6700,31 +6808,32 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
6700 | break; | 6808 | break; |
6701 | } | 6809 | } |
6702 | 6810 | ||
6703 | if (!cpus_weight(group->cpumask)) { | 6811 | if (!cpumask_weight(sched_group_cpus(group))) { |
6704 | printk(KERN_CONT "\n"); | 6812 | printk(KERN_CONT "\n"); |
6705 | printk(KERN_ERR "ERROR: empty group\n"); | 6813 | printk(KERN_ERR "ERROR: empty group\n"); |
6706 | break; | 6814 | break; |
6707 | } | 6815 | } |
6708 | 6816 | ||
6709 | if (cpus_intersects(*groupmask, group->cpumask)) { | 6817 | if (cpumask_intersects(groupmask, sched_group_cpus(group))) { |
6710 | printk(KERN_CONT "\n"); | 6818 | printk(KERN_CONT "\n"); |
6711 | printk(KERN_ERR "ERROR: repeated CPUs\n"); | 6819 | printk(KERN_ERR "ERROR: repeated CPUs\n"); |
6712 | break; | 6820 | break; |
6713 | } | 6821 | } |
6714 | 6822 | ||
6715 | cpus_or(*groupmask, *groupmask, group->cpumask); | 6823 | cpumask_or(groupmask, groupmask, sched_group_cpus(group)); |
6716 | 6824 | ||
6717 | cpulist_scnprintf(str, sizeof(str), group->cpumask); | 6825 | cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group)); |
6718 | printk(KERN_CONT " %s", str); | 6826 | printk(KERN_CONT " %s", str); |
6719 | 6827 | ||
6720 | group = group->next; | 6828 | group = group->next; |
6721 | } while (group != sd->groups); | 6829 | } while (group != sd->groups); |
6722 | printk(KERN_CONT "\n"); | 6830 | printk(KERN_CONT "\n"); |
6723 | 6831 | ||
6724 | if (!cpus_equal(sd->span, *groupmask)) | 6832 | if (!cpumask_equal(sched_domain_span(sd), groupmask)) |
6725 | printk(KERN_ERR "ERROR: groups don't span domain->span\n"); | 6833 | printk(KERN_ERR "ERROR: groups don't span domain->span\n"); |
6726 | 6834 | ||
6727 | if (sd->parent && !cpus_subset(*groupmask, sd->parent->span)) | 6835 | if (sd->parent && |
6836 | !cpumask_subset(groupmask, sched_domain_span(sd->parent))) | ||
6728 | printk(KERN_ERR "ERROR: parent span is not a superset " | 6837 | printk(KERN_ERR "ERROR: parent span is not a superset " |
6729 | "of domain->span\n"); | 6838 | "of domain->span\n"); |
6730 | return 0; | 6839 | return 0; |
@@ -6732,7 +6841,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
6732 | 6841 | ||
6733 | static void sched_domain_debug(struct sched_domain *sd, int cpu) | 6842 | static void sched_domain_debug(struct sched_domain *sd, int cpu) |
6734 | { | 6843 | { |
6735 | cpumask_t *groupmask; | 6844 | cpumask_var_t groupmask; |
6736 | int level = 0; | 6845 | int level = 0; |
6737 | 6846 | ||
6738 | if (!sd) { | 6847 | if (!sd) { |
@@ -6742,8 +6851,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
6742 | 6851 | ||
6743 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); | 6852 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); |
6744 | 6853 | ||
6745 | groupmask = kmalloc(sizeof(cpumask_t), GFP_KERNEL); | 6854 | if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) { |
6746 | if (!groupmask) { | ||
6747 | printk(KERN_DEBUG "Cannot load-balance (out of memory)\n"); | 6855 | printk(KERN_DEBUG "Cannot load-balance (out of memory)\n"); |
6748 | return; | 6856 | return; |
6749 | } | 6857 | } |
@@ -6756,7 +6864,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
6756 | if (!sd) | 6864 | if (!sd) |
6757 | break; | 6865 | break; |
6758 | } | 6866 | } |
6759 | kfree(groupmask); | 6867 | free_cpumask_var(groupmask); |
6760 | } | 6868 | } |
6761 | #else /* !CONFIG_SCHED_DEBUG */ | 6869 | #else /* !CONFIG_SCHED_DEBUG */ |
6762 | # define sched_domain_debug(sd, cpu) do { } while (0) | 6870 | # define sched_domain_debug(sd, cpu) do { } while (0) |
@@ -6764,7 +6872,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
6764 | 6872 | ||
6765 | static int sd_degenerate(struct sched_domain *sd) | 6873 | static int sd_degenerate(struct sched_domain *sd) |
6766 | { | 6874 | { |
6767 | if (cpus_weight(sd->span) == 1) | 6875 | if (cpumask_weight(sched_domain_span(sd)) == 1) |
6768 | return 1; | 6876 | return 1; |
6769 | 6877 | ||
6770 | /* Following flags need at least 2 groups */ | 6878 | /* Following flags need at least 2 groups */ |
@@ -6795,7 +6903,7 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) | |||
6795 | if (sd_degenerate(parent)) | 6903 | if (sd_degenerate(parent)) |
6796 | return 1; | 6904 | return 1; |
6797 | 6905 | ||
6798 | if (!cpus_equal(sd->span, parent->span)) | 6906 | if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) |
6799 | return 0; | 6907 | return 0; |
6800 | 6908 | ||
6801 | /* Does parent contain flags not in child? */ | 6909 | /* Does parent contain flags not in child? */ |
@@ -6810,6 +6918,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) | |||
6810 | SD_BALANCE_EXEC | | 6918 | SD_BALANCE_EXEC | |
6811 | SD_SHARE_CPUPOWER | | 6919 | SD_SHARE_CPUPOWER | |
6812 | SD_SHARE_PKG_RESOURCES); | 6920 | SD_SHARE_PKG_RESOURCES); |
6921 | if (nr_node_ids == 1) | ||
6922 | pflags &= ~SD_SERIALIZE; | ||
6813 | } | 6923 | } |
6814 | if (~cflags & pflags) | 6924 | if (~cflags & pflags) |
6815 | return 0; | 6925 | return 0; |
@@ -6817,6 +6927,16 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) | |||
6817 | return 1; | 6927 | return 1; |
6818 | } | 6928 | } |
6819 | 6929 | ||
6930 | static void free_rootdomain(struct root_domain *rd) | ||
6931 | { | ||
6932 | cpupri_cleanup(&rd->cpupri); | ||
6933 | |||
6934 | free_cpumask_var(rd->rto_mask); | ||
6935 | free_cpumask_var(rd->online); | ||
6936 | free_cpumask_var(rd->span); | ||
6937 | kfree(rd); | ||
6938 | } | ||
6939 | |||
6820 | static void rq_attach_root(struct rq *rq, struct root_domain *rd) | 6940 | static void rq_attach_root(struct rq *rq, struct root_domain *rd) |
6821 | { | 6941 | { |
6822 | unsigned long flags; | 6942 | unsigned long flags; |
@@ -6826,38 +6946,62 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) | |||
6826 | if (rq->rd) { | 6946 | if (rq->rd) { |
6827 | struct root_domain *old_rd = rq->rd; | 6947 | struct root_domain *old_rd = rq->rd; |
6828 | 6948 | ||
6829 | if (cpu_isset(rq->cpu, old_rd->online)) | 6949 | if (cpumask_test_cpu(rq->cpu, old_rd->online)) |
6830 | set_rq_offline(rq); | 6950 | set_rq_offline(rq); |
6831 | 6951 | ||
6832 | cpu_clear(rq->cpu, old_rd->span); | 6952 | cpumask_clear_cpu(rq->cpu, old_rd->span); |
6833 | 6953 | ||
6834 | if (atomic_dec_and_test(&old_rd->refcount)) | 6954 | if (atomic_dec_and_test(&old_rd->refcount)) |
6835 | kfree(old_rd); | 6955 | free_rootdomain(old_rd); |
6836 | } | 6956 | } |
6837 | 6957 | ||
6838 | atomic_inc(&rd->refcount); | 6958 | atomic_inc(&rd->refcount); |
6839 | rq->rd = rd; | 6959 | rq->rd = rd; |
6840 | 6960 | ||
6841 | cpu_set(rq->cpu, rd->span); | 6961 | cpumask_set_cpu(rq->cpu, rd->span); |
6842 | if (cpu_isset(rq->cpu, cpu_online_map)) | 6962 | if (cpumask_test_cpu(rq->cpu, cpu_online_mask)) |
6843 | set_rq_online(rq); | 6963 | set_rq_online(rq); |
6844 | 6964 | ||
6845 | spin_unlock_irqrestore(&rq->lock, flags); | 6965 | spin_unlock_irqrestore(&rq->lock, flags); |
6846 | } | 6966 | } |
6847 | 6967 | ||
6848 | static void init_rootdomain(struct root_domain *rd) | 6968 | static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem) |
6849 | { | 6969 | { |
6850 | memset(rd, 0, sizeof(*rd)); | 6970 | memset(rd, 0, sizeof(*rd)); |
6851 | 6971 | ||
6852 | cpus_clear(rd->span); | 6972 | if (bootmem) { |
6853 | cpus_clear(rd->online); | 6973 | alloc_bootmem_cpumask_var(&def_root_domain.span); |
6974 | alloc_bootmem_cpumask_var(&def_root_domain.online); | ||
6975 | alloc_bootmem_cpumask_var(&def_root_domain.rto_mask); | ||
6976 | cpupri_init(&rd->cpupri, true); | ||
6977 | return 0; | ||
6978 | } | ||
6979 | |||
6980 | if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) | ||
6981 | goto out; | ||
6982 | if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) | ||
6983 | goto free_span; | ||
6984 | if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) | ||
6985 | goto free_online; | ||
6986 | |||
6987 | if (cpupri_init(&rd->cpupri, false) != 0) | ||
6988 | goto free_rto_mask; | ||
6989 | return 0; | ||
6854 | 6990 | ||
6855 | cpupri_init(&rd->cpupri); | 6991 | free_rto_mask: |
6992 | free_cpumask_var(rd->rto_mask); | ||
6993 | free_online: | ||
6994 | free_cpumask_var(rd->online); | ||
6995 | free_span: | ||
6996 | free_cpumask_var(rd->span); | ||
6997 | out: | ||
6998 | return -ENOMEM; | ||
6856 | } | 6999 | } |
6857 | 7000 | ||
6858 | static void init_defrootdomain(void) | 7001 | static void init_defrootdomain(void) |
6859 | { | 7002 | { |
6860 | init_rootdomain(&def_root_domain); | 7003 | init_rootdomain(&def_root_domain, true); |
7004 | |||
6861 | atomic_set(&def_root_domain.refcount, 1); | 7005 | atomic_set(&def_root_domain.refcount, 1); |
6862 | } | 7006 | } |
6863 | 7007 | ||
@@ -6869,7 +7013,10 @@ static struct root_domain *alloc_rootdomain(void) | |||
6869 | if (!rd) | 7013 | if (!rd) |
6870 | return NULL; | 7014 | return NULL; |
6871 | 7015 | ||
6872 | init_rootdomain(rd); | 7016 | if (init_rootdomain(rd, false) != 0) { |
7017 | kfree(rd); | ||
7018 | return NULL; | ||
7019 | } | ||
6873 | 7020 | ||
6874 | return rd; | 7021 | return rd; |
6875 | } | 7022 | } |
@@ -6911,19 +7058,12 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) | |||
6911 | } | 7058 | } |
6912 | 7059 | ||
6913 | /* cpus with isolated domains */ | 7060 | /* cpus with isolated domains */ |
6914 | static cpumask_t cpu_isolated_map = CPU_MASK_NONE; | 7061 | static cpumask_var_t cpu_isolated_map; |
6915 | 7062 | ||
6916 | /* Setup the mask of cpus configured for isolated domains */ | 7063 | /* Setup the mask of cpus configured for isolated domains */ |
6917 | static int __init isolated_cpu_setup(char *str) | 7064 | static int __init isolated_cpu_setup(char *str) |
6918 | { | 7065 | { |
6919 | static int __initdata ints[NR_CPUS]; | 7066 | cpulist_parse(str, cpu_isolated_map); |
6920 | int i; | ||
6921 | |||
6922 | str = get_options(str, ARRAY_SIZE(ints), ints); | ||
6923 | cpus_clear(cpu_isolated_map); | ||
6924 | for (i = 1; i <= ints[0]; i++) | ||
6925 | if (ints[i] < NR_CPUS) | ||
6926 | cpu_set(ints[i], cpu_isolated_map); | ||
6927 | return 1; | 7067 | return 1; |
6928 | } | 7068 | } |
6929 | 7069 | ||
@@ -6932,42 +7072,43 @@ __setup("isolcpus=", isolated_cpu_setup); | |||
6932 | /* | 7072 | /* |
6933 | * init_sched_build_groups takes the cpumask we wish to span, and a pointer | 7073 | * init_sched_build_groups takes the cpumask we wish to span, and a pointer |
6934 | * to a function which identifies what group (along with sched group) a CPU | 7074 | * to a function which identifies what group (along with sched group) a CPU |
6935 | * belongs to. The return value of group_fn must be >= 0 and < NR_CPUS | 7075 | * belongs to. The return value of group_fn must be >= 0 and < nr_cpu_ids |
6936 | * (due to the fact that we keep track of groups covered with a cpumask_t). | 7076 | * (due to the fact that we keep track of groups covered with a struct cpumask). |
6937 | * | 7077 | * |
6938 | * init_sched_build_groups will build a circular linked list of the groups | 7078 | * init_sched_build_groups will build a circular linked list of the groups |
6939 | * covered by the given span, and will set each group's ->cpumask correctly, | 7079 | * covered by the given span, and will set each group's ->cpumask correctly, |
6940 | * and ->cpu_power to 0. | 7080 | * and ->cpu_power to 0. |
6941 | */ | 7081 | */ |
6942 | static void | 7082 | static void |
6943 | init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map, | 7083 | init_sched_build_groups(const struct cpumask *span, |
6944 | int (*group_fn)(int cpu, const cpumask_t *cpu_map, | 7084 | const struct cpumask *cpu_map, |
7085 | int (*group_fn)(int cpu, const struct cpumask *cpu_map, | ||
6945 | struct sched_group **sg, | 7086 | struct sched_group **sg, |
6946 | cpumask_t *tmpmask), | 7087 | struct cpumask *tmpmask), |
6947 | cpumask_t *covered, cpumask_t *tmpmask) | 7088 | struct cpumask *covered, struct cpumask *tmpmask) |
6948 | { | 7089 | { |
6949 | struct sched_group *first = NULL, *last = NULL; | 7090 | struct sched_group *first = NULL, *last = NULL; |
6950 | int i; | 7091 | int i; |
6951 | 7092 | ||
6952 | cpus_clear(*covered); | 7093 | cpumask_clear(covered); |
6953 | 7094 | ||
6954 | for_each_cpu_mask_nr(i, *span) { | 7095 | for_each_cpu(i, span) { |
6955 | struct sched_group *sg; | 7096 | struct sched_group *sg; |
6956 | int group = group_fn(i, cpu_map, &sg, tmpmask); | 7097 | int group = group_fn(i, cpu_map, &sg, tmpmask); |
6957 | int j; | 7098 | int j; |
6958 | 7099 | ||
6959 | if (cpu_isset(i, *covered)) | 7100 | if (cpumask_test_cpu(i, covered)) |
6960 | continue; | 7101 | continue; |
6961 | 7102 | ||
6962 | cpus_clear(sg->cpumask); | 7103 | cpumask_clear(sched_group_cpus(sg)); |
6963 | sg->__cpu_power = 0; | 7104 | sg->__cpu_power = 0; |
6964 | 7105 | ||
6965 | for_each_cpu_mask_nr(j, *span) { | 7106 | for_each_cpu(j, span) { |
6966 | if (group_fn(j, cpu_map, NULL, tmpmask) != group) | 7107 | if (group_fn(j, cpu_map, NULL, tmpmask) != group) |
6967 | continue; | 7108 | continue; |
6968 | 7109 | ||
6969 | cpu_set(j, *covered); | 7110 | cpumask_set_cpu(j, covered); |
6970 | cpu_set(j, sg->cpumask); | 7111 | cpumask_set_cpu(j, sched_group_cpus(sg)); |
6971 | } | 7112 | } |
6972 | if (!first) | 7113 | if (!first) |
6973 | first = sg; | 7114 | first = sg; |
@@ -7031,23 +7172,21 @@ static int find_next_best_node(int node, nodemask_t *used_nodes) | |||
7031 | * should be one that prevents unnecessary balancing, but also spreads tasks | 7172 | * should be one that prevents unnecessary balancing, but also spreads tasks |
7032 | * out optimally. | 7173 | * out optimally. |
7033 | */ | 7174 | */ |
7034 | static void sched_domain_node_span(int node, cpumask_t *span) | 7175 | static void sched_domain_node_span(int node, struct cpumask *span) |
7035 | { | 7176 | { |
7036 | nodemask_t used_nodes; | 7177 | nodemask_t used_nodes; |
7037 | node_to_cpumask_ptr(nodemask, node); | ||
7038 | int i; | 7178 | int i; |
7039 | 7179 | ||
7040 | cpus_clear(*span); | 7180 | cpumask_clear(span); |
7041 | nodes_clear(used_nodes); | 7181 | nodes_clear(used_nodes); |
7042 | 7182 | ||
7043 | cpus_or(*span, *span, *nodemask); | 7183 | cpumask_or(span, span, cpumask_of_node(node)); |
7044 | node_set(node, used_nodes); | 7184 | node_set(node, used_nodes); |
7045 | 7185 | ||
7046 | for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { | 7186 | for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { |
7047 | int next_node = find_next_best_node(node, &used_nodes); | 7187 | int next_node = find_next_best_node(node, &used_nodes); |
7048 | 7188 | ||
7049 | node_to_cpumask_ptr_next(nodemask, next_node); | 7189 | cpumask_or(span, span, cpumask_of_node(next_node)); |
7050 | cpus_or(*span, *span, *nodemask); | ||
7051 | } | 7190 | } |
7052 | } | 7191 | } |
7053 | #endif /* CONFIG_NUMA */ | 7192 | #endif /* CONFIG_NUMA */ |
@@ -7055,18 +7194,33 @@ static void sched_domain_node_span(int node, cpumask_t *span) | |||
7055 | int sched_smt_power_savings = 0, sched_mc_power_savings = 0; | 7194 | int sched_smt_power_savings = 0, sched_mc_power_savings = 0; |
7056 | 7195 | ||
7057 | /* | 7196 | /* |
7197 | * The cpus mask in sched_group and sched_domain hangs off the end. | ||
7198 | * FIXME: use cpumask_var_t or dynamic percpu alloc to avoid wasting space | ||
7199 | * for nr_cpu_ids < CONFIG_NR_CPUS. | ||
7200 | */ | ||
7201 | struct static_sched_group { | ||
7202 | struct sched_group sg; | ||
7203 | DECLARE_BITMAP(cpus, CONFIG_NR_CPUS); | ||
7204 | }; | ||
7205 | |||
7206 | struct static_sched_domain { | ||
7207 | struct sched_domain sd; | ||
7208 | DECLARE_BITMAP(span, CONFIG_NR_CPUS); | ||
7209 | }; | ||
7210 | |||
7211 | /* | ||
7058 | * SMT sched-domains: | 7212 | * SMT sched-domains: |
7059 | */ | 7213 | */ |
7060 | #ifdef CONFIG_SCHED_SMT | 7214 | #ifdef CONFIG_SCHED_SMT |
7061 | static DEFINE_PER_CPU(struct sched_domain, cpu_domains); | 7215 | static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); |
7062 | static DEFINE_PER_CPU(struct sched_group, sched_group_cpus); | 7216 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus); |
7063 | 7217 | ||
7064 | static int | 7218 | static int |
7065 | cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7219 | cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, |
7066 | cpumask_t *unused) | 7220 | struct sched_group **sg, struct cpumask *unused) |
7067 | { | 7221 | { |
7068 | if (sg) | 7222 | if (sg) |
7069 | *sg = &per_cpu(sched_group_cpus, cpu); | 7223 | *sg = &per_cpu(sched_group_cpus, cpu).sg; |
7070 | return cpu; | 7224 | return cpu; |
7071 | } | 7225 | } |
7072 | #endif /* CONFIG_SCHED_SMT */ | 7226 | #endif /* CONFIG_SCHED_SMT */ |
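The static_sched_group/static_sched_domain wrappers exist because, per the FIXME above, the cpus mask of sched_group and sched_domain now hangs off the end of the structure; the per-cpu static objects therefore reserve a full CONFIG_NR_CPUS bitmap right behind it, and accessors such as sched_group_cpus() presumably just hand back a struct cpumask pointer into that trailing storage. A hedged, kernel-flavoured sketch of the pattern with illustrative names:

#include <linux/cpumask.h>

struct demo_group {
        unsigned int __cpu_power;
        unsigned long cpus[0];                  /* mask hangs off the end */
};

struct static_demo_group {
        struct demo_group dg;
        DECLARE_BITMAP(cpus, CONFIG_NR_CPUS);   /* backing storage for dg.cpus */
};

static inline struct cpumask *demo_group_cpus(struct demo_group *dg)
{
        return to_cpumask(dg->cpus);            /* view the bitmap as a cpumask */
}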
@@ -7075,56 +7229,53 @@ cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | |||
7075 | * multi-core sched-domains: | 7229 | * multi-core sched-domains: |
7076 | */ | 7230 | */ |
7077 | #ifdef CONFIG_SCHED_MC | 7231 | #ifdef CONFIG_SCHED_MC |
7078 | static DEFINE_PER_CPU(struct sched_domain, core_domains); | 7232 | static DEFINE_PER_CPU(struct static_sched_domain, core_domains); |
7079 | static DEFINE_PER_CPU(struct sched_group, sched_group_core); | 7233 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_core); |
7080 | #endif /* CONFIG_SCHED_MC */ | 7234 | #endif /* CONFIG_SCHED_MC */ |
7081 | 7235 | ||
7082 | #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) | 7236 | #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) |
7083 | static int | 7237 | static int |
7084 | cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7238 | cpu_to_core_group(int cpu, const struct cpumask *cpu_map, |
7085 | cpumask_t *mask) | 7239 | struct sched_group **sg, struct cpumask *mask) |
7086 | { | 7240 | { |
7087 | int group; | 7241 | int group; |
7088 | 7242 | ||
7089 | *mask = per_cpu(cpu_sibling_map, cpu); | 7243 | cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map); |
7090 | cpus_and(*mask, *mask, *cpu_map); | 7244 | group = cpumask_first(mask); |
7091 | group = first_cpu(*mask); | ||
7092 | if (sg) | 7245 | if (sg) |
7093 | *sg = &per_cpu(sched_group_core, group); | 7246 | *sg = &per_cpu(sched_group_core, group).sg; |
7094 | return group; | 7247 | return group; |
7095 | } | 7248 | } |
7096 | #elif defined(CONFIG_SCHED_MC) | 7249 | #elif defined(CONFIG_SCHED_MC) |
7097 | static int | 7250 | static int |
7098 | cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7251 | cpu_to_core_group(int cpu, const struct cpumask *cpu_map, |
7099 | cpumask_t *unused) | 7252 | struct sched_group **sg, struct cpumask *unused) |
7100 | { | 7253 | { |
7101 | if (sg) | 7254 | if (sg) |
7102 | *sg = &per_cpu(sched_group_core, cpu); | 7255 | *sg = &per_cpu(sched_group_core, cpu).sg; |
7103 | return cpu; | 7256 | return cpu; |
7104 | } | 7257 | } |
7105 | #endif | 7258 | #endif |
7106 | 7259 | ||
7107 | static DEFINE_PER_CPU(struct sched_domain, phys_domains); | 7260 | static DEFINE_PER_CPU(struct static_sched_domain, phys_domains); |
7108 | static DEFINE_PER_CPU(struct sched_group, sched_group_phys); | 7261 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys); |
7109 | 7262 | ||
7110 | static int | 7263 | static int |
7111 | cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7264 | cpu_to_phys_group(int cpu, const struct cpumask *cpu_map, |
7112 | cpumask_t *mask) | 7265 | struct sched_group **sg, struct cpumask *mask) |
7113 | { | 7266 | { |
7114 | int group; | 7267 | int group; |
7115 | #ifdef CONFIG_SCHED_MC | 7268 | #ifdef CONFIG_SCHED_MC |
7116 | *mask = cpu_coregroup_map(cpu); | 7269 | cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map); |
7117 | cpus_and(*mask, *mask, *cpu_map); | 7270 | group = cpumask_first(mask); |
7118 | group = first_cpu(*mask); | ||
7119 | #elif defined(CONFIG_SCHED_SMT) | 7271 | #elif defined(CONFIG_SCHED_SMT) |
7120 | *mask = per_cpu(cpu_sibling_map, cpu); | 7272 | cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map); |
7121 | cpus_and(*mask, *mask, *cpu_map); | 7273 | group = cpumask_first(mask); |
7122 | group = first_cpu(*mask); | ||
7123 | #else | 7274 | #else |
7124 | group = cpu; | 7275 | group = cpu; |
7125 | #endif | 7276 | #endif |
7126 | if (sg) | 7277 | if (sg) |
7127 | *sg = &per_cpu(sched_group_phys, group); | 7278 | *sg = &per_cpu(sched_group_phys, group).sg; |
7128 | return group; | 7279 | return group; |
7129 | } | 7280 | } |
7130 | 7281 | ||
@@ -7134,23 +7285,23 @@ cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | |||
7134 | * groups, so roll our own. Now each node has its own list of groups which | 7285 | * groups, so roll our own. Now each node has its own list of groups which |
7135 | * gets dynamically allocated. | 7286 | * gets dynamically allocated. |
7136 | */ | 7287 | */ |
7137 | static DEFINE_PER_CPU(struct sched_domain, node_domains); | 7288 | static DEFINE_PER_CPU(struct static_sched_domain, node_domains); |
7138 | static struct sched_group ***sched_group_nodes_bycpu; | 7289 | static struct sched_group ***sched_group_nodes_bycpu; |
7139 | 7290 | ||
7140 | static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); | 7291 | static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains); |
7141 | static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes); | 7292 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes); |
7142 | 7293 | ||
7143 | static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map, | 7294 | static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map, |
7144 | struct sched_group **sg, cpumask_t *nodemask) | 7295 | struct sched_group **sg, |
7296 | struct cpumask *nodemask) | ||
7145 | { | 7297 | { |
7146 | int group; | 7298 | int group; |
7147 | 7299 | ||
7148 | *nodemask = node_to_cpumask(cpu_to_node(cpu)); | 7300 | cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map); |
7149 | cpus_and(*nodemask, *nodemask, *cpu_map); | 7301 | group = cpumask_first(nodemask); |
7150 | group = first_cpu(*nodemask); | ||
7151 | 7302 | ||
7152 | if (sg) | 7303 | if (sg) |
7153 | *sg = &per_cpu(sched_group_allnodes, group); | 7304 | *sg = &per_cpu(sched_group_allnodes, group).sg; |
7154 | return group; | 7305 | return group; |
7155 | } | 7306 | } |
7156 | 7307 | ||
@@ -7162,11 +7313,11 @@ static void init_numa_sched_groups_power(struct sched_group *group_head) | |||
7162 | if (!sg) | 7313 | if (!sg) |
7163 | return; | 7314 | return; |
7164 | do { | 7315 | do { |
7165 | for_each_cpu_mask_nr(j, sg->cpumask) { | 7316 | for_each_cpu(j, sched_group_cpus(sg)) { |
7166 | struct sched_domain *sd; | 7317 | struct sched_domain *sd; |
7167 | 7318 | ||
7168 | sd = &per_cpu(phys_domains, j); | 7319 | sd = &per_cpu(phys_domains, j).sd; |
7169 | if (j != first_cpu(sd->groups->cpumask)) { | 7320 | if (j != cpumask_first(sched_group_cpus(sd->groups))) { |
7170 | /* | 7321 | /* |
7171 | * Only add "power" once for each | 7322 | * Only add "power" once for each |
7172 | * physical package. | 7323 | * physical package. |
@@ -7183,11 +7334,12 @@ static void init_numa_sched_groups_power(struct sched_group *group_head) | |||
7183 | 7334 | ||
7184 | #ifdef CONFIG_NUMA | 7335 | #ifdef CONFIG_NUMA |
7185 | /* Free memory allocated for various sched_group structures */ | 7336 | /* Free memory allocated for various sched_group structures */ |
7186 | static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) | 7337 | static void free_sched_groups(const struct cpumask *cpu_map, |
7338 | struct cpumask *nodemask) | ||
7187 | { | 7339 | { |
7188 | int cpu, i; | 7340 | int cpu, i; |
7189 | 7341 | ||
7190 | for_each_cpu_mask_nr(cpu, *cpu_map) { | 7342 | for_each_cpu(cpu, cpu_map) { |
7191 | struct sched_group **sched_group_nodes | 7343 | struct sched_group **sched_group_nodes |
7192 | = sched_group_nodes_bycpu[cpu]; | 7344 | = sched_group_nodes_bycpu[cpu]; |
7193 | 7345 | ||
@@ -7197,9 +7349,8 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) | |||
7197 | for (i = 0; i < nr_node_ids; i++) { | 7349 | for (i = 0; i < nr_node_ids; i++) { |
7198 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; | 7350 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; |
7199 | 7351 | ||
7200 | *nodemask = node_to_cpumask(i); | 7352 | cpumask_and(nodemask, cpumask_of_node(i), cpu_map); |
7201 | cpus_and(*nodemask, *nodemask, *cpu_map); | 7353 | if (cpumask_empty(nodemask)) |
7202 | if (cpus_empty(*nodemask)) | ||
7203 | continue; | 7354 | continue; |
7204 | 7355 | ||
7205 | if (sg == NULL) | 7356 | if (sg == NULL) |
@@ -7217,7 +7368,8 @@ next_sg: | |||
7217 | } | 7368 | } |
7218 | } | 7369 | } |
7219 | #else /* !CONFIG_NUMA */ | 7370 | #else /* !CONFIG_NUMA */ |
7220 | static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) | 7371 | static void free_sched_groups(const struct cpumask *cpu_map, |
7372 | struct cpumask *nodemask) | ||
7221 | { | 7373 | { |
7222 | } | 7374 | } |
7223 | #endif /* CONFIG_NUMA */ | 7375 | #endif /* CONFIG_NUMA */ |
@@ -7243,7 +7395,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) | |||
7243 | 7395 | ||
7244 | WARN_ON(!sd || !sd->groups); | 7396 | WARN_ON(!sd || !sd->groups); |
7245 | 7397 | ||
7246 | if (cpu != first_cpu(sd->groups->cpumask)) | 7398 | if (cpu != cpumask_first(sched_group_cpus(sd->groups))) |
7247 | return; | 7399 | return; |
7248 | 7400 | ||
7249 | child = sd->child; | 7401 | child = sd->child; |
@@ -7308,40 +7460,6 @@ SD_INIT_FUNC(CPU) | |||
7308 | SD_INIT_FUNC(MC) | 7460 | SD_INIT_FUNC(MC) |
7309 | #endif | 7461 | #endif |
7310 | 7462 | ||
7311 | /* | ||
7312 | * To minimize stack usage kmalloc room for cpumasks and share the | ||
7313 | * space as the usage in build_sched_domains() dictates. Used only | ||
7314 | * if the amount of space is significant. | ||
7315 | */ | ||
7316 | struct allmasks { | ||
7317 | cpumask_t tmpmask; /* make this one first */ | ||
7318 | union { | ||
7319 | cpumask_t nodemask; | ||
7320 | cpumask_t this_sibling_map; | ||
7321 | cpumask_t this_core_map; | ||
7322 | }; | ||
7323 | cpumask_t send_covered; | ||
7324 | |||
7325 | #ifdef CONFIG_NUMA | ||
7326 | cpumask_t domainspan; | ||
7327 | cpumask_t covered; | ||
7328 | cpumask_t notcovered; | ||
7329 | #endif | ||
7330 | }; | ||
7331 | |||
7332 | #if NR_CPUS > 128 | ||
7333 | #define SCHED_CPUMASK_ALLOC 1 | ||
7334 | #define SCHED_CPUMASK_FREE(v) kfree(v) | ||
7335 | #define SCHED_CPUMASK_DECLARE(v) struct allmasks *v | ||
7336 | #else | ||
7337 | #define SCHED_CPUMASK_ALLOC 0 | ||
7338 | #define SCHED_CPUMASK_FREE(v) | ||
7339 | #define SCHED_CPUMASK_DECLARE(v) struct allmasks _v, *v = &_v | ||
7340 | #endif | ||
7341 | |||
7342 | #define SCHED_CPUMASK_VAR(v, a) cpumask_t *v = (cpumask_t *) \ | ||
7343 | ((unsigned long)(a) + offsetof(struct allmasks, v)) | ||
7344 | |||
7345 | static int default_relax_domain_level = -1; | 7463 | static int default_relax_domain_level = -1; |
7346 | 7464 | ||
7347 | static int __init setup_relax_domain_level(char *str) | 7465 | static int __init setup_relax_domain_level(char *str) |
@@ -7381,17 +7499,38 @@ static void set_domain_attribute(struct sched_domain *sd, | |||
7381 | * Build sched domains for a given set of cpus and attach the sched domains | 7499 | * Build sched domains for a given set of cpus and attach the sched domains |
7382 | * to the individual cpus | 7500 | * to the individual cpus |
7383 | */ | 7501 | */ |
7384 | static int __build_sched_domains(const cpumask_t *cpu_map, | 7502 | static int __build_sched_domains(const struct cpumask *cpu_map, |
7385 | struct sched_domain_attr *attr) | 7503 | struct sched_domain_attr *attr) |
7386 | { | 7504 | { |
7387 | int i; | 7505 | int i, err = -ENOMEM; |
7388 | struct root_domain *rd; | 7506 | struct root_domain *rd; |
7389 | SCHED_CPUMASK_DECLARE(allmasks); | 7507 | cpumask_var_t nodemask, this_sibling_map, this_core_map, send_covered, |
7390 | cpumask_t *tmpmask; | 7508 | tmpmask; |
7391 | #ifdef CONFIG_NUMA | 7509 | #ifdef CONFIG_NUMA |
7510 | cpumask_var_t domainspan, covered, notcovered; | ||
7392 | struct sched_group **sched_group_nodes = NULL; | 7511 | struct sched_group **sched_group_nodes = NULL; |
7393 | int sd_allnodes = 0; | 7512 | int sd_allnodes = 0; |
7394 | 7513 | ||
7514 | if (!alloc_cpumask_var(&domainspan, GFP_KERNEL)) | ||
7515 | goto out; | ||
7516 | if (!alloc_cpumask_var(&covered, GFP_KERNEL)) | ||
7517 | goto free_domainspan; | ||
7518 | if (!alloc_cpumask_var(¬covered, GFP_KERNEL)) | ||
7519 | goto free_covered; | ||
7520 | #endif | ||
7521 | |||
7522 | if (!alloc_cpumask_var(&nodemask, GFP_KERNEL)) | ||
7523 | goto free_notcovered; | ||
7524 | if (!alloc_cpumask_var(&this_sibling_map, GFP_KERNEL)) | ||
7525 | goto free_nodemask; | ||
7526 | if (!alloc_cpumask_var(&this_core_map, GFP_KERNEL)) | ||
7527 | goto free_this_sibling_map; | ||
7528 | if (!alloc_cpumask_var(&send_covered, GFP_KERNEL)) | ||
7529 | goto free_this_core_map; | ||
7530 | if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) | ||
7531 | goto free_send_covered; | ||
7532 | |||
7533 | #ifdef CONFIG_NUMA | ||
7395 | /* | 7534 | /* |
7396 | * Allocate the per-node list of sched groups | 7535 | * Allocate the per-node list of sched groups |
7397 | */ | 7536 | */ |
@@ -7399,76 +7538,57 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7399 | GFP_KERNEL); | 7538 | GFP_KERNEL); |
7400 | if (!sched_group_nodes) { | 7539 | if (!sched_group_nodes) { |
7401 | printk(KERN_WARNING "Can not alloc sched group node list\n"); | 7540 | printk(KERN_WARNING "Can not alloc sched group node list\n"); |
7402 | return -ENOMEM; | 7541 | goto free_tmpmask; |
7403 | } | 7542 | } |
7404 | #endif | 7543 | #endif |
7405 | 7544 | ||
7406 | rd = alloc_rootdomain(); | 7545 | rd = alloc_rootdomain(); |
7407 | if (!rd) { | 7546 | if (!rd) { |
7408 | printk(KERN_WARNING "Cannot alloc root domain\n"); | 7547 | printk(KERN_WARNING "Cannot alloc root domain\n"); |
7409 | #ifdef CONFIG_NUMA | 7548 | goto free_sched_groups; |
7410 | kfree(sched_group_nodes); | ||
7411 | #endif | ||
7412 | return -ENOMEM; | ||
7413 | } | ||
7414 | |||
7415 | #if SCHED_CPUMASK_ALLOC | ||
7416 | /* get space for all scratch cpumask variables */ | ||
7417 | allmasks = kmalloc(sizeof(*allmasks), GFP_KERNEL); | ||
7418 | if (!allmasks) { | ||
7419 | printk(KERN_WARNING "Cannot alloc cpumask array\n"); | ||
7420 | kfree(rd); | ||
7421 | #ifdef CONFIG_NUMA | ||
7422 | kfree(sched_group_nodes); | ||
7423 | #endif | ||
7424 | return -ENOMEM; | ||
7425 | } | 7549 | } |
7426 | #endif | ||
7427 | tmpmask = (cpumask_t *)allmasks; | ||
7428 | |||
7429 | 7550 | ||
7430 | #ifdef CONFIG_NUMA | 7551 | #ifdef CONFIG_NUMA |
7431 | sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes; | 7552 | sched_group_nodes_bycpu[cpumask_first(cpu_map)] = sched_group_nodes; |
7432 | #endif | 7553 | #endif |
7433 | 7554 | ||
7434 | /* | 7555 | /* |
7435 | * Set up domains for cpus specified by the cpu_map. | 7556 | * Set up domains for cpus specified by the cpu_map. |
7436 | */ | 7557 | */ |
7437 | for_each_cpu_mask_nr(i, *cpu_map) { | 7558 | for_each_cpu(i, cpu_map) { |
7438 | struct sched_domain *sd = NULL, *p; | 7559 | struct sched_domain *sd = NULL, *p; |
7439 | SCHED_CPUMASK_VAR(nodemask, allmasks); | ||
7440 | 7560 | ||
7441 | *nodemask = node_to_cpumask(cpu_to_node(i)); | 7561 | cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map); |
7442 | cpus_and(*nodemask, *nodemask, *cpu_map); | ||
7443 | 7562 | ||
7444 | #ifdef CONFIG_NUMA | 7563 | #ifdef CONFIG_NUMA |
7445 | if (cpus_weight(*cpu_map) > | 7564 | if (cpumask_weight(cpu_map) > |
7446 | SD_NODES_PER_DOMAIN*cpus_weight(*nodemask)) { | 7565 | SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) { |
7447 | sd = &per_cpu(allnodes_domains, i); | 7566 | sd = &per_cpu(allnodes_domains, i).sd; |
7448 | SD_INIT(sd, ALLNODES); | 7567 | SD_INIT(sd, ALLNODES); |
7449 | set_domain_attribute(sd, attr); | 7568 | set_domain_attribute(sd, attr); |
7450 | sd->span = *cpu_map; | 7569 | cpumask_copy(sched_domain_span(sd), cpu_map); |
7451 | cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask); | 7570 | cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask); |
7452 | p = sd; | 7571 | p = sd; |
7453 | sd_allnodes = 1; | 7572 | sd_allnodes = 1; |
7454 | } else | 7573 | } else |
7455 | p = NULL; | 7574 | p = NULL; |
7456 | 7575 | ||
7457 | sd = &per_cpu(node_domains, i); | 7576 | sd = &per_cpu(node_domains, i).sd; |
7458 | SD_INIT(sd, NODE); | 7577 | SD_INIT(sd, NODE); |
7459 | set_domain_attribute(sd, attr); | 7578 | set_domain_attribute(sd, attr); |
7460 | sched_domain_node_span(cpu_to_node(i), &sd->span); | 7579 | sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd)); |
7461 | sd->parent = p; | 7580 | sd->parent = p; |
7462 | if (p) | 7581 | if (p) |
7463 | p->child = sd; | 7582 | p->child = sd; |
7464 | cpus_and(sd->span, sd->span, *cpu_map); | 7583 | cpumask_and(sched_domain_span(sd), |
7584 | sched_domain_span(sd), cpu_map); | ||
7465 | #endif | 7585 | #endif |
7466 | 7586 | ||
7467 | p = sd; | 7587 | p = sd; |
7468 | sd = &per_cpu(phys_domains, i); | 7588 | sd = &per_cpu(phys_domains, i).sd; |
7469 | SD_INIT(sd, CPU); | 7589 | SD_INIT(sd, CPU); |
7470 | set_domain_attribute(sd, attr); | 7590 | set_domain_attribute(sd, attr); |
7471 | sd->span = *nodemask; | 7591 | cpumask_copy(sched_domain_span(sd), nodemask); |
7472 | sd->parent = p; | 7592 | sd->parent = p; |
7473 | if (p) | 7593 | if (p) |
7474 | p->child = sd; | 7594 | p->child = sd; |
@@ -7476,11 +7596,11 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7476 | 7596 | ||
7477 | #ifdef CONFIG_SCHED_MC | 7597 | #ifdef CONFIG_SCHED_MC |
7478 | p = sd; | 7598 | p = sd; |
7479 | sd = &per_cpu(core_domains, i); | 7599 | sd = &per_cpu(core_domains, i).sd; |
7480 | SD_INIT(sd, MC); | 7600 | SD_INIT(sd, MC); |
7481 | set_domain_attribute(sd, attr); | 7601 | set_domain_attribute(sd, attr); |
7482 | sd->span = cpu_coregroup_map(i); | 7602 | cpumask_and(sched_domain_span(sd), cpu_map, |
7483 | cpus_and(sd->span, sd->span, *cpu_map); | 7603 | cpu_coregroup_mask(i)); |
7484 | sd->parent = p; | 7604 | sd->parent = p; |
7485 | p->child = sd; | 7605 | p->child = sd; |
7486 | cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask); | 7606 | cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask); |
@@ -7488,11 +7608,11 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7488 | 7608 | ||
7489 | #ifdef CONFIG_SCHED_SMT | 7609 | #ifdef CONFIG_SCHED_SMT |
7490 | p = sd; | 7610 | p = sd; |
7491 | sd = &per_cpu(cpu_domains, i); | 7611 | sd = &per_cpu(cpu_domains, i).sd; |
7492 | SD_INIT(sd, SIBLING); | 7612 | SD_INIT(sd, SIBLING); |
7493 | set_domain_attribute(sd, attr); | 7613 | set_domain_attribute(sd, attr); |
7494 | sd->span = per_cpu(cpu_sibling_map, i); | 7614 | cpumask_and(sched_domain_span(sd), |
7495 | cpus_and(sd->span, sd->span, *cpu_map); | 7615 | &per_cpu(cpu_sibling_map, i), cpu_map); |
7496 | sd->parent = p; | 7616 | sd->parent = p; |
7497 | p->child = sd; | 7617 | p->child = sd; |
7498 | cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask); | 7618 | cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask); |
@@ -7501,13 +7621,10 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7501 | 7621 | ||
7502 | #ifdef CONFIG_SCHED_SMT | 7622 | #ifdef CONFIG_SCHED_SMT |
7503 | /* Set up CPU (sibling) groups */ | 7623 | /* Set up CPU (sibling) groups */ |
7504 | for_each_cpu_mask_nr(i, *cpu_map) { | 7624 | for_each_cpu(i, cpu_map) { |
7505 | SCHED_CPUMASK_VAR(this_sibling_map, allmasks); | 7625 | cpumask_and(this_sibling_map, |
7506 | SCHED_CPUMASK_VAR(send_covered, allmasks); | 7626 | &per_cpu(cpu_sibling_map, i), cpu_map); |
7507 | 7627 | if (i != cpumask_first(this_sibling_map)) | |
7508 | *this_sibling_map = per_cpu(cpu_sibling_map, i); | ||
7509 | cpus_and(*this_sibling_map, *this_sibling_map, *cpu_map); | ||
7510 | if (i != first_cpu(*this_sibling_map)) | ||
7511 | continue; | 7628 | continue; |
7512 | 7629 | ||
7513 | init_sched_build_groups(this_sibling_map, cpu_map, | 7630 | init_sched_build_groups(this_sibling_map, cpu_map, |
@@ -7518,13 +7635,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7518 | 7635 | ||
7519 | #ifdef CONFIG_SCHED_MC | 7636 | #ifdef CONFIG_SCHED_MC |
7520 | /* Set up multi-core groups */ | 7637 | /* Set up multi-core groups */ |
7521 | for_each_cpu_mask_nr(i, *cpu_map) { | 7638 | for_each_cpu(i, cpu_map) { |
7522 | SCHED_CPUMASK_VAR(this_core_map, allmasks); | 7639 | cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map); |
7523 | SCHED_CPUMASK_VAR(send_covered, allmasks); | 7640 | if (i != cpumask_first(this_core_map)) |
7524 | |||
7525 | *this_core_map = cpu_coregroup_map(i); | ||
7526 | cpus_and(*this_core_map, *this_core_map, *cpu_map); | ||
7527 | if (i != first_cpu(*this_core_map)) | ||
7528 | continue; | 7641 | continue; |
7529 | 7642 | ||
7530 | init_sched_build_groups(this_core_map, cpu_map, | 7643 | init_sched_build_groups(this_core_map, cpu_map, |
@@ -7535,12 +7648,8 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7535 | 7648 | ||
7536 | /* Set up physical groups */ | 7649 | /* Set up physical groups */ |
7537 | for (i = 0; i < nr_node_ids; i++) { | 7650 | for (i = 0; i < nr_node_ids; i++) { |
7538 | SCHED_CPUMASK_VAR(nodemask, allmasks); | 7651 | cpumask_and(nodemask, cpumask_of_node(i), cpu_map); |
7539 | SCHED_CPUMASK_VAR(send_covered, allmasks); | 7652 | if (cpumask_empty(nodemask)) |
7540 | |||
7541 | *nodemask = node_to_cpumask(i); | ||
7542 | cpus_and(*nodemask, *nodemask, *cpu_map); | ||
7543 | if (cpus_empty(*nodemask)) | ||
7544 | continue; | 7653 | continue; |
7545 | 7654 | ||
7546 | init_sched_build_groups(nodemask, cpu_map, | 7655 | init_sched_build_groups(nodemask, cpu_map, |
@@ -7551,8 +7660,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7551 | #ifdef CONFIG_NUMA | 7660 | #ifdef CONFIG_NUMA |
7552 | /* Set up node groups */ | 7661 | /* Set up node groups */ |
7553 | if (sd_allnodes) { | 7662 | if (sd_allnodes) { |
7554 | SCHED_CPUMASK_VAR(send_covered, allmasks); | ||
7555 | |||
7556 | init_sched_build_groups(cpu_map, cpu_map, | 7663 | init_sched_build_groups(cpu_map, cpu_map, |
7557 | &cpu_to_allnodes_group, | 7664 | &cpu_to_allnodes_group, |
7558 | send_covered, tmpmask); | 7665 | send_covered, tmpmask); |
@@ -7561,58 +7668,53 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7561 | for (i = 0; i < nr_node_ids; i++) { | 7668 | for (i = 0; i < nr_node_ids; i++) { |
7562 | /* Set up node groups */ | 7669 | /* Set up node groups */ |
7563 | struct sched_group *sg, *prev; | 7670 | struct sched_group *sg, *prev; |
7564 | SCHED_CPUMASK_VAR(nodemask, allmasks); | ||
7565 | SCHED_CPUMASK_VAR(domainspan, allmasks); | ||
7566 | SCHED_CPUMASK_VAR(covered, allmasks); | ||
7567 | int j; | 7671 | int j; |
7568 | 7672 | ||
7569 | *nodemask = node_to_cpumask(i); | 7673 | cpumask_clear(covered); |
7570 | cpus_clear(*covered); | 7674 | cpumask_and(nodemask, cpumask_of_node(i), cpu_map); |
7571 | 7675 | if (cpumask_empty(nodemask)) { | |
7572 | cpus_and(*nodemask, *nodemask, *cpu_map); | ||
7573 | if (cpus_empty(*nodemask)) { | ||
7574 | sched_group_nodes[i] = NULL; | 7676 | sched_group_nodes[i] = NULL; |
7575 | continue; | 7677 | continue; |
7576 | } | 7678 | } |
7577 | 7679 | ||
7578 | sched_domain_node_span(i, domainspan); | 7680 | sched_domain_node_span(i, domainspan); |
7579 | cpus_and(*domainspan, *domainspan, *cpu_map); | 7681 | cpumask_and(domainspan, domainspan, cpu_map); |
7580 | 7682 | ||
7581 | sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i); | 7683 | sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), |
7684 | GFP_KERNEL, i); | ||
7582 | if (!sg) { | 7685 | if (!sg) { |
7583 | printk(KERN_WARNING "Can not alloc domain group for " | 7686 | printk(KERN_WARNING "Can not alloc domain group for " |
7584 | "node %d\n", i); | 7687 | "node %d\n", i); |
7585 | goto error; | 7688 | goto error; |
7586 | } | 7689 | } |
7587 | sched_group_nodes[i] = sg; | 7690 | sched_group_nodes[i] = sg; |
7588 | for_each_cpu_mask_nr(j, *nodemask) { | 7691 | for_each_cpu(j, nodemask) { |
7589 | struct sched_domain *sd; | 7692 | struct sched_domain *sd; |
7590 | 7693 | ||
7591 | sd = &per_cpu(node_domains, j); | 7694 | sd = &per_cpu(node_domains, j).sd; |
7592 | sd->groups = sg; | 7695 | sd->groups = sg; |
7593 | } | 7696 | } |
7594 | sg->__cpu_power = 0; | 7697 | sg->__cpu_power = 0; |
7595 | sg->cpumask = *nodemask; | 7698 | cpumask_copy(sched_group_cpus(sg), nodemask); |
7596 | sg->next = sg; | 7699 | sg->next = sg; |
7597 | cpus_or(*covered, *covered, *nodemask); | 7700 | cpumask_or(covered, covered, nodemask); |
7598 | prev = sg; | 7701 | prev = sg; |
7599 | 7702 | ||
7600 | for (j = 0; j < nr_node_ids; j++) { | 7703 | for (j = 0; j < nr_node_ids; j++) { |
7601 | SCHED_CPUMASK_VAR(notcovered, allmasks); | ||
7602 | int n = (i + j) % nr_node_ids; | 7704 | int n = (i + j) % nr_node_ids; |
7603 | node_to_cpumask_ptr(pnodemask, n); | ||
7604 | 7705 | ||
7605 | cpus_complement(*notcovered, *covered); | 7706 | cpumask_complement(notcovered, covered); |
7606 | cpus_and(*tmpmask, *notcovered, *cpu_map); | 7707 | cpumask_and(tmpmask, notcovered, cpu_map); |
7607 | cpus_and(*tmpmask, *tmpmask, *domainspan); | 7708 | cpumask_and(tmpmask, tmpmask, domainspan); |
7608 | if (cpus_empty(*tmpmask)) | 7709 | if (cpumask_empty(tmpmask)) |
7609 | break; | 7710 | break; |
7610 | 7711 | ||
7611 | cpus_and(*tmpmask, *tmpmask, *pnodemask); | 7712 | cpumask_and(tmpmask, tmpmask, cpumask_of_node(n)); |
7612 | if (cpus_empty(*tmpmask)) | 7713 | if (cpumask_empty(tmpmask)) |
7613 | continue; | 7714 | continue; |
7614 | 7715 | ||
7615 | sg = kmalloc_node(sizeof(struct sched_group), | 7716 | sg = kmalloc_node(sizeof(struct sched_group) + |
7717 | cpumask_size(), | ||
7616 | GFP_KERNEL, i); | 7718 | GFP_KERNEL, i); |
7617 | if (!sg) { | 7719 | if (!sg) { |
7618 | printk(KERN_WARNING | 7720 | printk(KERN_WARNING |
@@ -7620,9 +7722,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7620 | goto error; | 7722 | goto error; |
7621 | } | 7723 | } |
7622 | sg->__cpu_power = 0; | 7724 | sg->__cpu_power = 0; |
7623 | sg->cpumask = *tmpmask; | 7725 | cpumask_copy(sched_group_cpus(sg), tmpmask); |
7624 | sg->next = prev->next; | 7726 | sg->next = prev->next; |
7625 | cpus_or(*covered, *covered, *tmpmask); | 7727 | cpumask_or(covered, covered, tmpmask); |
7626 | prev->next = sg; | 7728 | prev->next = sg; |
7627 | prev = sg; | 7729 | prev = sg; |
7628 | } | 7730 | } |
@@ -7631,22 +7733,22 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7631 | 7733 | ||
7632 | /* Calculate CPU power for physical packages and nodes */ | 7734 | /* Calculate CPU power for physical packages and nodes */ |
7633 | #ifdef CONFIG_SCHED_SMT | 7735 | #ifdef CONFIG_SCHED_SMT |
7634 | for_each_cpu_mask_nr(i, *cpu_map) { | 7736 | for_each_cpu(i, cpu_map) { |
7635 | struct sched_domain *sd = &per_cpu(cpu_domains, i); | 7737 | struct sched_domain *sd = &per_cpu(cpu_domains, i).sd; |
7636 | 7738 | ||
7637 | init_sched_groups_power(i, sd); | 7739 | init_sched_groups_power(i, sd); |
7638 | } | 7740 | } |
7639 | #endif | 7741 | #endif |
7640 | #ifdef CONFIG_SCHED_MC | 7742 | #ifdef CONFIG_SCHED_MC |
7641 | for_each_cpu_mask_nr(i, *cpu_map) { | 7743 | for_each_cpu(i, cpu_map) { |
7642 | struct sched_domain *sd = &per_cpu(core_domains, i); | 7744 | struct sched_domain *sd = &per_cpu(core_domains, i).sd; |
7643 | 7745 | ||
7644 | init_sched_groups_power(i, sd); | 7746 | init_sched_groups_power(i, sd); |
7645 | } | 7747 | } |
7646 | #endif | 7748 | #endif |
7647 | 7749 | ||
7648 | for_each_cpu_mask_nr(i, *cpu_map) { | 7750 | for_each_cpu(i, cpu_map) { |
7649 | struct sched_domain *sd = &per_cpu(phys_domains, i); | 7751 | struct sched_domain *sd = &per_cpu(phys_domains, i).sd; |
7650 | 7752 | ||
7651 | init_sched_groups_power(i, sd); | 7753 | init_sched_groups_power(i, sd); |
7652 | } | 7754 | } |
@@ -7658,56 +7760,87 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7658 | if (sd_allnodes) { | 7760 | if (sd_allnodes) { |
7659 | struct sched_group *sg; | 7761 | struct sched_group *sg; |
7660 | 7762 | ||
7661 | cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg, | 7763 | cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg, |
7662 | tmpmask); | 7764 | tmpmask); |
7663 | init_numa_sched_groups_power(sg); | 7765 | init_numa_sched_groups_power(sg); |
7664 | } | 7766 | } |
7665 | #endif | 7767 | #endif |
7666 | 7768 | ||
7667 | /* Attach the domains */ | 7769 | /* Attach the domains */ |
7668 | for_each_cpu_mask_nr(i, *cpu_map) { | 7770 | for_each_cpu(i, cpu_map) { |
7669 | struct sched_domain *sd; | 7771 | struct sched_domain *sd; |
7670 | #ifdef CONFIG_SCHED_SMT | 7772 | #ifdef CONFIG_SCHED_SMT |
7671 | sd = &per_cpu(cpu_domains, i); | 7773 | sd = &per_cpu(cpu_domains, i).sd; |
7672 | #elif defined(CONFIG_SCHED_MC) | 7774 | #elif defined(CONFIG_SCHED_MC) |
7673 | sd = &per_cpu(core_domains, i); | 7775 | sd = &per_cpu(core_domains, i).sd; |
7674 | #else | 7776 | #else |
7675 | sd = &per_cpu(phys_domains, i); | 7777 | sd = &per_cpu(phys_domains, i).sd; |
7676 | #endif | 7778 | #endif |
7677 | cpu_attach_domain(sd, rd, i); | 7779 | cpu_attach_domain(sd, rd, i); |
7678 | } | 7780 | } |
7679 | 7781 | ||
7680 | SCHED_CPUMASK_FREE((void *)allmasks); | 7782 | err = 0; |
7681 | return 0; | 7783 | |
7784 | free_tmpmask: | ||
7785 | free_cpumask_var(tmpmask); | ||
7786 | free_send_covered: | ||
7787 | free_cpumask_var(send_covered); | ||
7788 | free_this_core_map: | ||
7789 | free_cpumask_var(this_core_map); | ||
7790 | free_this_sibling_map: | ||
7791 | free_cpumask_var(this_sibling_map); | ||
7792 | free_nodemask: | ||
7793 | free_cpumask_var(nodemask); | ||
7794 | free_notcovered: | ||
7795 | #ifdef CONFIG_NUMA | ||
7796 | free_cpumask_var(notcovered); | ||
7797 | free_covered: | ||
7798 | free_cpumask_var(covered); | ||
7799 | free_domainspan: | ||
7800 | free_cpumask_var(domainspan); | ||
7801 | out: | ||
7802 | #endif | ||
7803 | return err; | ||
7804 | |||
7805 | free_sched_groups: | ||
7806 | #ifdef CONFIG_NUMA | ||
7807 | kfree(sched_group_nodes); | ||
7808 | #endif | ||
7809 | goto free_tmpmask; | ||
7682 | 7810 | ||
7683 | #ifdef CONFIG_NUMA | 7811 | #ifdef CONFIG_NUMA |
7684 | error: | 7812 | error: |
7685 | free_sched_groups(cpu_map, tmpmask); | 7813 | free_sched_groups(cpu_map, tmpmask); |
7686 | SCHED_CPUMASK_FREE((void *)allmasks); | 7814 | free_rootdomain(rd); |
7687 | kfree(rd); | 7815 | goto free_tmpmask; |
7688 | return -ENOMEM; | ||
7689 | #endif | 7816 | #endif |
7690 | } | 7817 | } |
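
A sketch, in plain userspace C, of the allocate-then-unwind pattern the patch introduces for the cpumask_var_t temporaries: each allocation gets a matching cleanup label, a failure jumps to the label of the last successful allocation, and the success path falls through the same ladder. malloc() stands in for alloc_cpumask_var(); the variable names and do_domain_setup() are made up for illustration.

#include <stdlib.h>

static int do_domain_setup(void)
{
	return 0;			/* pretend the real work succeeded */
}

static int build_domains_sketch(void)
{
	int err = -1;			/* -ENOMEM in the kernel code */
	void *nodemask, *send_covered, *tmpmask;

	nodemask = malloc(64);
	if (!nodemask)
		goto out;
	send_covered = malloc(64);
	if (!send_covered)
		goto free_nodemask;
	tmpmask = malloc(64);
	if (!tmpmask)
		goto free_send_covered;

	if (do_domain_setup() < 0)
		goto free_tmpmask;	/* later failures unwind everything */
	err = 0;

free_tmpmask:
	free(tmpmask);
free_send_covered:
	free(send_covered);
free_nodemask:
	free(nodemask);
out:
	return err;
}

int main(void)
{
	return build_domains_sketch();
}
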
7691 | 7818 | ||
7692 | static int build_sched_domains(const cpumask_t *cpu_map) | 7819 | static int build_sched_domains(const struct cpumask *cpu_map) |
7693 | { | 7820 | { |
7694 | return __build_sched_domains(cpu_map, NULL); | 7821 | return __build_sched_domains(cpu_map, NULL); |
7695 | } | 7822 | } |
7696 | 7823 | ||
7697 | static cpumask_t *doms_cur; /* current sched domains */ | 7824 | static struct cpumask *doms_cur; /* current sched domains */ |
7698 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ | 7825 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ |
7699 | static struct sched_domain_attr *dattr_cur; | 7826 | static struct sched_domain_attr *dattr_cur; |
7700 | /* attributes of custom domains in 'doms_cur' */ | 7827 | /* attributes of custom domains in 'doms_cur' */ |
7701 | 7828 | ||
7702 | /* | 7829 | /* |
7703 | * Special case: If a kmalloc of a doms_cur partition (array of | 7830 | * Special case: If a kmalloc of a doms_cur partition (array of |
7704 | * cpumask_t) fails, then fall back to a single sched domain, | 7831 | * cpumask) fails, then fall back to a single sched domain, |
7705 | * as determined by the single cpumask_t fallback_doms. | 7832 | * as determined by the single cpumask fallback_doms. |
7706 | */ | 7833 | */ |
7707 | static cpumask_t fallback_doms; | 7834 | static cpumask_var_t fallback_doms; |
7708 | 7835 | ||
7709 | void __attribute__((weak)) arch_update_cpu_topology(void) | 7836 | /* |
7837 | * arch_update_cpu_topology lets virtualized architectures update the | ||
7838 | * cpu core maps. It is supposed to return 1 if the topology changed | ||
7839 | * or 0 if it stayed the same. | ||
7840 | */ | ||
7841 | int __attribute__((weak)) arch_update_cpu_topology(void) | ||
7710 | { | 7842 | { |
7843 | return 0; | ||
7711 | } | 7844 | } |
7712 | 7845 | ||
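
A small sketch of the weak-default idiom used above: generic code ships a weak arch_update_cpu_topology() that reports "nothing changed", and an architecture (or hypervisor layer) can link in a strong definition that returns 1 when the core maps really did change. This compiles as plain userspace C on GCC/Clang ELF toolchains; only the function name is taken from the patch.

#include <stdio.h>

int __attribute__((weak)) arch_update_cpu_topology(void)
{
	return 0;		/* default: topology unchanged */
}

int main(void)
{
	/* With no strong override linked in, the weak default is used. */
	printf("topology changed: %d\n", arch_update_cpu_topology());
	return 0;
}
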
7713 | /* | 7846 | /* |
@@ -7715,16 +7848,16 @@ void __attribute__((weak)) arch_update_cpu_topology(void) | |||
7715 | * For now this just excludes isolated cpus, but could be used to | 7848 | * For now this just excludes isolated cpus, but could be used to |
7716 | * exclude other special cases in the future. | 7849 | * exclude other special cases in the future. |
7717 | */ | 7850 | */ |
7718 | static int arch_init_sched_domains(const cpumask_t *cpu_map) | 7851 | static int arch_init_sched_domains(const struct cpumask *cpu_map) |
7719 | { | 7852 | { |
7720 | int err; | 7853 | int err; |
7721 | 7854 | ||
7722 | arch_update_cpu_topology(); | 7855 | arch_update_cpu_topology(); |
7723 | ndoms_cur = 1; | 7856 | ndoms_cur = 1; |
7724 | doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL); | 7857 | doms_cur = kmalloc(cpumask_size(), GFP_KERNEL); |
7725 | if (!doms_cur) | 7858 | if (!doms_cur) |
7726 | doms_cur = &fallback_doms; | 7859 | doms_cur = fallback_doms; |
7727 | cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map); | 7860 | cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map); |
7728 | dattr_cur = NULL; | 7861 | dattr_cur = NULL; |
7729 | err = build_sched_domains(doms_cur); | 7862 | err = build_sched_domains(doms_cur); |
7730 | register_sched_domain_sysctl(); | 7863 | register_sched_domain_sysctl(); |
@@ -7732,8 +7865,8 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map) | |||
7732 | return err; | 7865 | return err; |
7733 | } | 7866 | } |
7734 | 7867 | ||
7735 | static void arch_destroy_sched_domains(const cpumask_t *cpu_map, | 7868 | static void arch_destroy_sched_domains(const struct cpumask *cpu_map, |
7736 | cpumask_t *tmpmask) | 7869 | struct cpumask *tmpmask) |
7737 | { | 7870 | { |
7738 | free_sched_groups(cpu_map, tmpmask); | 7871 | free_sched_groups(cpu_map, tmpmask); |
7739 | } | 7872 | } |
@@ -7742,17 +7875,16 @@ static void arch_destroy_sched_domains(const cpumask_t *cpu_map, | |||
7742 | * Detach sched domains from a group of cpus specified in cpu_map | 7875 | * Detach sched domains from a group of cpus specified in cpu_map |
7743 | * These cpus will now be attached to the NULL domain | 7876 | * These cpus will now be attached to the NULL domain |
7744 | */ | 7877 | */ |
7745 | static void detach_destroy_domains(const cpumask_t *cpu_map) | 7878 | static void detach_destroy_domains(const struct cpumask *cpu_map) |
7746 | { | 7879 | { |
7747 | cpumask_t tmpmask; | 7880 | /* Save because hotplug lock held. */ |
7881 | static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS); | ||
7748 | int i; | 7882 | int i; |
7749 | 7883 | ||
7750 | unregister_sched_domain_sysctl(); | 7884 | for_each_cpu(i, cpu_map) |
7751 | |||
7752 | for_each_cpu_mask_nr(i, *cpu_map) | ||
7753 | cpu_attach_domain(NULL, &def_root_domain, i); | 7885 | cpu_attach_domain(NULL, &def_root_domain, i); |
7754 | synchronize_sched(); | 7886 | synchronize_sched(); |
7755 | arch_destroy_sched_domains(cpu_map, &tmpmask); | 7887 | arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask)); |
7756 | } | 7888 | } |
7757 | 7889 | ||
7758 | /* handle null as "default" */ | 7890 | /* handle null as "default" */ |
@@ -7777,7 +7909,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, | |||
7777 | * doms_new[] to the current sched domain partitioning, doms_cur[]. | 7909 | * doms_new[] to the current sched domain partitioning, doms_cur[]. |
7778 | * It destroys each deleted domain and builds each new domain. | 7910 | * It destroys each deleted domain and builds each new domain. |
7779 | * | 7911 | * |
7780 | * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'. | 7912 | * 'doms_new' is an array of cpumasks of length 'ndoms_new'. |
7781 | * The masks don't intersect (don't overlap.) We should setup one | 7913 | * The masks don't intersect (don't overlap.) We should setup one |
7782 | * sched domain for each mask. CPUs not in any of the cpumasks will | 7914 | * sched domain for each mask. CPUs not in any of the cpumasks will |
7783 | * not be load balanced. If the same cpumask appears both in the | 7915 | * not be load balanced. If the same cpumask appears both in the |
@@ -7786,32 +7918,38 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, | |||
7786 | * | 7918 | * |
7787 | * The passed in 'doms_new' should be kmalloc'd. This routine takes | 7919 | * The passed in 'doms_new' should be kmalloc'd. This routine takes |
7788 | * ownership of it and will kfree it when done with it. If the caller | 7920 | * ownership of it and will kfree it when done with it. If the caller |
7789 | * failed the kmalloc call, then it can pass in doms_new == NULL, | 7921 | * failed the kmalloc call, then it can pass in doms_new == NULL && |
7790 | * and partition_sched_domains() will fall back to the single partition | 7922 | * ndoms_new == 1, and partition_sched_domains() will fall back to |
7791 | * 'fallback_doms'; it also forces the domains to be rebuilt. | 7923 | * the single partition 'fallback_doms'; it also forces the domains |
7924 | * to be rebuilt. | ||
7792 | * | 7925 | * |
7793 | * If doms_new==NULL it will be replaced with cpu_online_map. | 7926 | * If doms_new == NULL it will be replaced with cpu_online_mask. |
7794 | * ndoms_new==0 is a special case for destroying existing domains. | 7927 | * ndoms_new == 0 is a special case for destroying existing domains, |
7795 | * It will not create the default domain. | 7928 | * and it will not create the default domain. |
7796 | * | 7929 | * |
7797 | * Call with hotplug lock held | 7930 | * Call with hotplug lock held |
7798 | */ | 7931 | */ |
7799 | void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, | 7932 | /* FIXME: Change to struct cpumask *doms_new[] */ |
7933 | void partition_sched_domains(int ndoms_new, struct cpumask *doms_new, | ||
7800 | struct sched_domain_attr *dattr_new) | 7934 | struct sched_domain_attr *dattr_new) |
7801 | { | 7935 | { |
7802 | int i, j, n; | 7936 | int i, j, n; |
7937 | int new_topology; | ||
7803 | 7938 | ||
7804 | mutex_lock(&sched_domains_mutex); | 7939 | mutex_lock(&sched_domains_mutex); |
7805 | 7940 | ||
7806 | /* always unregister in case we don't destroy any domains */ | 7941 | /* always unregister in case we don't destroy any domains */ |
7807 | unregister_sched_domain_sysctl(); | 7942 | unregister_sched_domain_sysctl(); |
7808 | 7943 | ||
7944 | /* Let architecture update cpu core mappings. */ | ||
7945 | new_topology = arch_update_cpu_topology(); | ||
7946 | |||
7809 | n = doms_new ? ndoms_new : 0; | 7947 | n = doms_new ? ndoms_new : 0; |
7810 | 7948 | ||
7811 | /* Destroy deleted domains */ | 7949 | /* Destroy deleted domains */ |
7812 | for (i = 0; i < ndoms_cur; i++) { | 7950 | for (i = 0; i < ndoms_cur; i++) { |
7813 | for (j = 0; j < n; j++) { | 7951 | for (j = 0; j < n && !new_topology; j++) { |
7814 | if (cpus_equal(doms_cur[i], doms_new[j]) | 7952 | if (cpumask_equal(&doms_cur[i], &doms_new[j]) |
7815 | && dattrs_equal(dattr_cur, i, dattr_new, j)) | 7953 | && dattrs_equal(dattr_cur, i, dattr_new, j)) |
7816 | goto match1; | 7954 | goto match1; |
7817 | } | 7955 | } |
@@ -7823,15 +7961,15 @@ match1: | |||
7823 | 7961 | ||
7824 | if (doms_new == NULL) { | 7962 | if (doms_new == NULL) { |
7825 | ndoms_cur = 0; | 7963 | ndoms_cur = 0; |
7826 | doms_new = &fallback_doms; | 7964 | doms_new = fallback_doms; |
7827 | cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); | 7965 | cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map); |
7828 | dattr_new = NULL; | 7966 | WARN_ON_ONCE(dattr_new); |
7829 | } | 7967 | } |
7830 | 7968 | ||
7831 | /* Build new domains */ | 7969 | /* Build new domains */ |
7832 | for (i = 0; i < ndoms_new; i++) { | 7970 | for (i = 0; i < ndoms_new; i++) { |
7833 | for (j = 0; j < ndoms_cur; j++) { | 7971 | for (j = 0; j < ndoms_cur && !new_topology; j++) { |
7834 | if (cpus_equal(doms_new[i], doms_cur[j]) | 7972 | if (cpumask_equal(&doms_new[i], &doms_cur[j]) |
7835 | && dattrs_equal(dattr_new, i, dattr_cur, j)) | 7973 | && dattrs_equal(dattr_new, i, dattr_cur, j)) |
7836 | goto match2; | 7974 | goto match2; |
7837 | } | 7975 | } |
@@ -7843,7 +7981,7 @@ match2: | |||
7843 | } | 7981 | } |
7844 | 7982 | ||
7845 | /* Remember the new sched domains */ | 7983 | /* Remember the new sched domains */ |
7846 | if (doms_cur != &fallback_doms) | 7984 | if (doms_cur != fallback_doms) |
7847 | kfree(doms_cur); | 7985 | kfree(doms_cur); |
7848 | kfree(dattr_cur); /* kfree(NULL) is safe */ | 7986 | kfree(dattr_cur); /* kfree(NULL) is safe */ |
7849 | doms_cur = doms_new; | 7987 | doms_cur = doms_new; |
@@ -7856,7 +7994,7 @@ match2: | |||
7856 | } | 7994 | } |
7857 | 7995 | ||
7858 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | 7996 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) |
7859 | int arch_reinit_sched_domains(void) | 7997 | static void arch_reinit_sched_domains(void) |
7860 | { | 7998 | { |
7861 | get_online_cpus(); | 7999 | get_online_cpus(); |
7862 | 8000 | ||
@@ -7865,25 +8003,33 @@ int arch_reinit_sched_domains(void) | |||
7865 | 8003 | ||
7866 | rebuild_sched_domains(); | 8004 | rebuild_sched_domains(); |
7867 | put_online_cpus(); | 8005 | put_online_cpus(); |
7868 | |||
7869 | return 0; | ||
7870 | } | 8006 | } |
7871 | 8007 | ||
7872 | static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) | 8008 | static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) |
7873 | { | 8009 | { |
7874 | int ret; | 8010 | unsigned int level = 0; |
8011 | |||
8012 | if (sscanf(buf, "%u", &level) != 1) | ||
8013 | return -EINVAL; | ||
8014 | |||
8015 | /* | ||
8016 | * level is always be positive so don't check for | ||
8017 | * level < POWERSAVINGS_BALANCE_NONE which is 0 | ||
8018 | * What happens on 0 or 1 byte write, | ||
8019 | * need to check for count as well? | ||
8020 | */ | ||
7875 | 8021 | ||
7876 | if (buf[0] != '0' && buf[0] != '1') | 8022 | if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS) |
7877 | return -EINVAL; | 8023 | return -EINVAL; |
7878 | 8024 | ||
7879 | if (smt) | 8025 | if (smt) |
7880 | sched_smt_power_savings = (buf[0] == '1'); | 8026 | sched_smt_power_savings = level; |
7881 | else | 8027 | else |
7882 | sched_mc_power_savings = (buf[0] == '1'); | 8028 | sched_mc_power_savings = level; |
7883 | 8029 | ||
7884 | ret = arch_reinit_sched_domains(); | 8030 | arch_reinit_sched_domains(); |
7885 | 8031 | ||
7886 | return ret ? ret : count; | 8032 | return count; |
7887 | } | 8033 | } |
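
A userspace analogue of the reworked store handler above: parse an unsigned level with sscanf() and reject anything at or above the number of balance levels, instead of only accepting the literal characters '0'/'1'. MAX_LEVELS stands in for MAX_POWERSAVINGS_BALANCE_LEVELS; everything else is illustrative.

#include <stdio.h>

#define MAX_LEVELS 3

static int parse_level(const char *buf, unsigned int *level)
{
	if (sscanf(buf, "%u", level) != 1)
		return -1;			/* -EINVAL in the kernel */
	if (*level >= MAX_LEVELS)
		return -1;
	return 0;
}

int main(void)
{
	unsigned int level;
	int ret = parse_level("2", &level);

	printf("\"2\"  -> %d (level %u)\n", ret, level);
	printf("\"9\"  -> %d\n", parse_level("9", &level));
	printf("\"xy\" -> %d\n", parse_level("xy", &level));
	return 0;
}
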
7888 | 8034 | ||
7889 | #ifdef CONFIG_SCHED_MC | 8035 | #ifdef CONFIG_SCHED_MC |
@@ -7918,7 +8064,7 @@ static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644, | |||
7918 | sched_smt_power_savings_store); | 8064 | sched_smt_power_savings_store); |
7919 | #endif | 8065 | #endif |
7920 | 8066 | ||
7921 | int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) | 8067 | int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) |
7922 | { | 8068 | { |
7923 | int err = 0; | 8069 | int err = 0; |
7924 | 8070 | ||
@@ -7983,7 +8129,9 @@ static int update_runtime(struct notifier_block *nfb, | |||
7983 | 8129 | ||
7984 | void __init sched_init_smp(void) | 8130 | void __init sched_init_smp(void) |
7985 | { | 8131 | { |
7986 | cpumask_t non_isolated_cpus; | 8132 | cpumask_var_t non_isolated_cpus; |
8133 | |||
8134 | alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); | ||
7987 | 8135 | ||
7988 | #if defined(CONFIG_NUMA) | 8136 | #if defined(CONFIG_NUMA) |
7989 | sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), | 8137 | sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), |
@@ -7992,10 +8140,10 @@ void __init sched_init_smp(void) | |||
7992 | #endif | 8140 | #endif |
7993 | get_online_cpus(); | 8141 | get_online_cpus(); |
7994 | mutex_lock(&sched_domains_mutex); | 8142 | mutex_lock(&sched_domains_mutex); |
7995 | arch_init_sched_domains(&cpu_online_map); | 8143 | arch_init_sched_domains(cpu_online_mask); |
7996 | cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map); | 8144 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); |
7997 | if (cpus_empty(non_isolated_cpus)) | 8145 | if (cpumask_empty(non_isolated_cpus)) |
7998 | cpu_set(smp_processor_id(), non_isolated_cpus); | 8146 | cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); |
7999 | mutex_unlock(&sched_domains_mutex); | 8147 | mutex_unlock(&sched_domains_mutex); |
8000 | put_online_cpus(); | 8148 | put_online_cpus(); |
8001 | 8149 | ||
@@ -8010,9 +8158,13 @@ void __init sched_init_smp(void) | |||
8010 | init_hrtick(); | 8158 | init_hrtick(); |
8011 | 8159 | ||
8012 | /* Move init over to a non-isolated CPU */ | 8160 | /* Move init over to a non-isolated CPU */ |
8013 | if (set_cpus_allowed_ptr(current, &non_isolated_cpus) < 0) | 8161 | if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) |
8014 | BUG(); | 8162 | BUG(); |
8015 | sched_init_granularity(); | 8163 | sched_init_granularity(); |
8164 | free_cpumask_var(non_isolated_cpus); | ||
8165 | |||
8166 | alloc_cpumask_var(&fallback_doms, GFP_KERNEL); | ||
8167 | init_sched_rt_class(); | ||
8016 | } | 8168 | } |
8017 | #else | 8169 | #else |
8018 | void __init sched_init_smp(void) | 8170 | void __init sched_init_smp(void) |
@@ -8327,6 +8479,15 @@ void __init sched_init(void) | |||
8327 | */ | 8479 | */ |
8328 | current->sched_class = &fair_sched_class; | 8480 | current->sched_class = &fair_sched_class; |
8329 | 8481 | ||
8482 | /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ | ||
8483 | alloc_bootmem_cpumask_var(&nohz_cpu_mask); | ||
8484 | #ifdef CONFIG_SMP | ||
8485 | #ifdef CONFIG_NO_HZ | ||
8486 | alloc_bootmem_cpumask_var(&nohz.cpu_mask); | ||
8487 | #endif | ||
8488 | alloc_bootmem_cpumask_var(&cpu_isolated_map); | ||
8489 | #endif /* SMP */ | ||
8490 | |||
8330 | scheduler_running = 1; | 8491 | scheduler_running = 1; |
8331 | } | 8492 | } |
8332 | 8493 | ||
@@ -8485,7 +8646,7 @@ static | |||
8485 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) | 8646 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) |
8486 | { | 8647 | { |
8487 | struct cfs_rq *cfs_rq; | 8648 | struct cfs_rq *cfs_rq; |
8488 | struct sched_entity *se, *parent_se; | 8649 | struct sched_entity *se; |
8489 | struct rq *rq; | 8650 | struct rq *rq; |
8490 | int i; | 8651 | int i; |
8491 | 8652 | ||
@@ -8501,18 +8662,17 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) | |||
8501 | for_each_possible_cpu(i) { | 8662 | for_each_possible_cpu(i) { |
8502 | rq = cpu_rq(i); | 8663 | rq = cpu_rq(i); |
8503 | 8664 | ||
8504 | cfs_rq = kmalloc_node(sizeof(struct cfs_rq), | 8665 | cfs_rq = kzalloc_node(sizeof(struct cfs_rq), |
8505 | GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); | 8666 | GFP_KERNEL, cpu_to_node(i)); |
8506 | if (!cfs_rq) | 8667 | if (!cfs_rq) |
8507 | goto err; | 8668 | goto err; |
8508 | 8669 | ||
8509 | se = kmalloc_node(sizeof(struct sched_entity), | 8670 | se = kzalloc_node(sizeof(struct sched_entity), |
8510 | GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); | 8671 | GFP_KERNEL, cpu_to_node(i)); |
8511 | if (!se) | 8672 | if (!se) |
8512 | goto err; | 8673 | goto err; |
8513 | 8674 | ||
8514 | parent_se = parent ? parent->se[i] : NULL; | 8675 | init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]); |
8515 | init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent_se); | ||
8516 | } | 8676 | } |
8517 | 8677 | ||
8518 | return 1; | 8678 | return 1; |
@@ -8573,7 +8733,7 @@ static | |||
8573 | int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) | 8733 | int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) |
8574 | { | 8734 | { |
8575 | struct rt_rq *rt_rq; | 8735 | struct rt_rq *rt_rq; |
8576 | struct sched_rt_entity *rt_se, *parent_se; | 8736 | struct sched_rt_entity *rt_se; |
8577 | struct rq *rq; | 8737 | struct rq *rq; |
8578 | int i; | 8738 | int i; |
8579 | 8739 | ||
@@ -8590,18 +8750,17 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) | |||
8590 | for_each_possible_cpu(i) { | 8750 | for_each_possible_cpu(i) { |
8591 | rq = cpu_rq(i); | 8751 | rq = cpu_rq(i); |
8592 | 8752 | ||
8593 | rt_rq = kmalloc_node(sizeof(struct rt_rq), | 8753 | rt_rq = kzalloc_node(sizeof(struct rt_rq), |
8594 | GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); | 8754 | GFP_KERNEL, cpu_to_node(i)); |
8595 | if (!rt_rq) | 8755 | if (!rt_rq) |
8596 | goto err; | 8756 | goto err; |
8597 | 8757 | ||
8598 | rt_se = kmalloc_node(sizeof(struct sched_rt_entity), | 8758 | rt_se = kzalloc_node(sizeof(struct sched_rt_entity), |
8599 | GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); | 8759 | GFP_KERNEL, cpu_to_node(i)); |
8600 | if (!rt_se) | 8760 | if (!rt_se) |
8601 | goto err; | 8761 | goto err; |
8602 | 8762 | ||
8603 | parent_se = parent ? parent->rt_se[i] : NULL; | 8763 | init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]); |
8604 | init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent_se); | ||
8605 | } | 8764 | } |
8606 | 8765 | ||
8607 | return 1; | 8766 | return 1; |
@@ -8891,6 +9050,13 @@ static int tg_schedulable(struct task_group *tg, void *data) | |||
8891 | runtime = d->rt_runtime; | 9050 | runtime = d->rt_runtime; |
8892 | } | 9051 | } |
8893 | 9052 | ||
9053 | #ifdef CONFIG_USER_SCHED | ||
9054 | if (tg == &root_task_group) { | ||
9055 | period = global_rt_period(); | ||
9056 | runtime = global_rt_runtime(); | ||
9057 | } | ||
9058 | #endif | ||
9059 | |||
8894 | /* | 9060 | /* |
8895 | * Cannot have more runtime than the period. | 9061 | * Cannot have more runtime than the period. |
8896 | */ | 9062 | */ |
@@ -9244,11 +9410,12 @@ struct cgroup_subsys cpu_cgroup_subsys = { | |||
9244 | * (balbir@in.ibm.com). | 9410 | * (balbir@in.ibm.com). |
9245 | */ | 9411 | */ |
9246 | 9412 | ||
9247 | /* track cpu usage of a group of tasks */ | 9413 | /* track cpu usage of a group of tasks and its child groups */ |
9248 | struct cpuacct { | 9414 | struct cpuacct { |
9249 | struct cgroup_subsys_state css; | 9415 | struct cgroup_subsys_state css; |
9250 | /* cpuusage holds pointer to a u64-type object on every cpu */ | 9416 | /* cpuusage holds pointer to a u64-type object on every cpu */ |
9251 | u64 *cpuusage; | 9417 | u64 *cpuusage; |
9418 | struct cpuacct *parent; | ||
9252 | }; | 9419 | }; |
9253 | 9420 | ||
9254 | struct cgroup_subsys cpuacct_subsys; | 9421 | struct cgroup_subsys cpuacct_subsys; |
@@ -9282,6 +9449,9 @@ static struct cgroup_subsys_state *cpuacct_create( | |||
9282 | return ERR_PTR(-ENOMEM); | 9449 | return ERR_PTR(-ENOMEM); |
9283 | } | 9450 | } |
9284 | 9451 | ||
9452 | if (cgrp->parent) | ||
9453 | ca->parent = cgroup_ca(cgrp->parent); | ||
9454 | |||
9285 | return &ca->css; | 9455 | return &ca->css; |
9286 | } | 9456 | } |
9287 | 9457 | ||
@@ -9295,6 +9465,41 @@ cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) | |||
9295 | kfree(ca); | 9465 | kfree(ca); |
9296 | } | 9466 | } |
9297 | 9467 | ||
9468 | static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu) | ||
9469 | { | ||
9470 | u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); | ||
9471 | u64 data; | ||
9472 | |||
9473 | #ifndef CONFIG_64BIT | ||
9474 | /* | ||
9475 | * Take rq->lock to make 64-bit read safe on 32-bit platforms. | ||
9476 | */ | ||
9477 | spin_lock_irq(&cpu_rq(cpu)->lock); | ||
9478 | data = *cpuusage; | ||
9479 | spin_unlock_irq(&cpu_rq(cpu)->lock); | ||
9480 | #else | ||
9481 | data = *cpuusage; | ||
9482 | #endif | ||
9483 | |||
9484 | return data; | ||
9485 | } | ||
9486 | |||
9487 | static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val) | ||
9488 | { | ||
9489 | u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); | ||
9490 | |||
9491 | #ifndef CONFIG_64BIT | ||
9492 | /* | ||
9493 | * Take rq->lock to make 64-bit write safe on 32-bit platforms. | ||
9494 | */ | ||
9495 | spin_lock_irq(&cpu_rq(cpu)->lock); | ||
9496 | *cpuusage = val; | ||
9497 | spin_unlock_irq(&cpu_rq(cpu)->lock); | ||
9498 | #else | ||
9499 | *cpuusage = val; | ||
9500 | #endif | ||
9501 | } | ||
9502 | |||
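
A userspace analogue of the cpuacct_cpuusage_read()/_write() helpers above: on 32-bit builds a 64-bit counter cannot be read or written in one atomic access, so the accesses are wrapped in a lock (the kernel uses the runqueue lock; a pthread mutex stands in here). Names are illustrative.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t cpuusage;

static uint64_t usage_read(void)
{
	uint64_t data;

	pthread_mutex_lock(&lock);	/* make the 64-bit read atomic */
	data = cpuusage;
	pthread_mutex_unlock(&lock);
	return data;
}

static void usage_write(uint64_t val)
{
	pthread_mutex_lock(&lock);	/* make the 64-bit write atomic */
	cpuusage = val;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	usage_write(1ULL << 40);
	printf("usage = %llu\n", (unsigned long long)usage_read());
	return 0;
}
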
9298 | /* return total cpu usage (in nanoseconds) of a group */ | 9503 | /* return total cpu usage (in nanoseconds) of a group */ |
9299 | static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft) | 9504 | static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft) |
9300 | { | 9505 | { |
@@ -9302,17 +9507,8 @@ static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft) | |||
9302 | u64 totalcpuusage = 0; | 9507 | u64 totalcpuusage = 0; |
9303 | int i; | 9508 | int i; |
9304 | 9509 | ||
9305 | for_each_possible_cpu(i) { | 9510 | for_each_present_cpu(i) |
9306 | u64 *cpuusage = percpu_ptr(ca->cpuusage, i); | 9511 | totalcpuusage += cpuacct_cpuusage_read(ca, i); |
9307 | |||
9308 | /* | ||
9309 | * Take rq->lock to make 64-bit addition safe on 32-bit | ||
9310 | * platforms. | ||
9311 | */ | ||
9312 | spin_lock_irq(&cpu_rq(i)->lock); | ||
9313 | totalcpuusage += *cpuusage; | ||
9314 | spin_unlock_irq(&cpu_rq(i)->lock); | ||
9315 | } | ||
9316 | 9512 | ||
9317 | return totalcpuusage; | 9513 | return totalcpuusage; |
9318 | } | 9514 | } |
@@ -9329,23 +9525,39 @@ static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype, | |||
9329 | goto out; | 9525 | goto out; |
9330 | } | 9526 | } |
9331 | 9527 | ||
9332 | for_each_possible_cpu(i) { | 9528 | for_each_present_cpu(i) |
9333 | u64 *cpuusage = percpu_ptr(ca->cpuusage, i); | 9529 | cpuacct_cpuusage_write(ca, i, 0); |
9334 | 9530 | ||
9335 | spin_lock_irq(&cpu_rq(i)->lock); | ||
9336 | *cpuusage = 0; | ||
9337 | spin_unlock_irq(&cpu_rq(i)->lock); | ||
9338 | } | ||
9339 | out: | 9531 | out: |
9340 | return err; | 9532 | return err; |
9341 | } | 9533 | } |
9342 | 9534 | ||
9535 | static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft, | ||
9536 | struct seq_file *m) | ||
9537 | { | ||
9538 | struct cpuacct *ca = cgroup_ca(cgroup); | ||
9539 | u64 percpu; | ||
9540 | int i; | ||
9541 | |||
9542 | for_each_present_cpu(i) { | ||
9543 | percpu = cpuacct_cpuusage_read(ca, i); | ||
9544 | seq_printf(m, "%llu ", (unsigned long long) percpu); | ||
9545 | } | ||
9546 | seq_printf(m, "\n"); | ||
9547 | return 0; | ||
9548 | } | ||
9549 | |||
9343 | static struct cftype files[] = { | 9550 | static struct cftype files[] = { |
9344 | { | 9551 | { |
9345 | .name = "usage", | 9552 | .name = "usage", |
9346 | .read_u64 = cpuusage_read, | 9553 | .read_u64 = cpuusage_read, |
9347 | .write_u64 = cpuusage_write, | 9554 | .write_u64 = cpuusage_write, |
9348 | }, | 9555 | }, |
9556 | { | ||
9557 | .name = "usage_percpu", | ||
9558 | .read_seq_string = cpuacct_percpu_seq_read, | ||
9559 | }, | ||
9560 | |||
9349 | }; | 9561 | }; |
9350 | 9562 | ||
9351 | static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) | 9563 | static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) |
@@ -9361,14 +9573,16 @@ static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) | |||
9361 | static void cpuacct_charge(struct task_struct *tsk, u64 cputime) | 9573 | static void cpuacct_charge(struct task_struct *tsk, u64 cputime) |
9362 | { | 9574 | { |
9363 | struct cpuacct *ca; | 9575 | struct cpuacct *ca; |
9576 | int cpu; | ||
9364 | 9577 | ||
9365 | if (!cpuacct_subsys.active) | 9578 | if (!cpuacct_subsys.active) |
9366 | return; | 9579 | return; |
9367 | 9580 | ||
9581 | cpu = task_cpu(tsk); | ||
9368 | ca = task_ca(tsk); | 9582 | ca = task_ca(tsk); |
9369 | if (ca) { | ||
9370 | u64 *cpuusage = percpu_ptr(ca->cpuusage, task_cpu(tsk)); | ||
9371 | 9583 | ||
9584 | for (; ca; ca = ca->parent) { | ||
9585 | u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); | ||
9372 | *cpuusage += cputime; | 9586 | *cpuusage += cputime; |
9373 | } | 9587 | } |
9374 | } | 9588 | } |
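
A userspace analogue of the hierarchical charging loop added to cpuacct_charge(): the usage is added to the task's own group and to every ancestor by following the new ->parent pointer. The struct, field and function names here are illustrative, not the kernel's.

#include <stdio.h>
#include <stdint.h>

struct acct_group {
	const char *name;
	uint64_t usage;
	struct acct_group *parent;
};

static void charge(struct acct_group *ca, uint64_t cputime)
{
	for (; ca; ca = ca->parent)	/* walk up to the root group */
		ca->usage += cputime;
}

int main(void)
{
	struct acct_group root  = { "root",  0, NULL  };
	struct acct_group child = { "child", 0, &root };

	charge(&child, 1000);		/* both levels see the charge */
	printf("%s=%llu %s=%llu\n",
	       child.name, (unsigned long long)child.usage,
	       root.name,  (unsigned long long)root.usage);
	return 0;
}
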