Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	393
1 file changed, 214 insertions, 179 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index dcb39bc88f6c..748ff924a290 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -267,6 +267,10 @@ struct task_group {
 	struct cgroup_subsys_state css;
 #endif
 
+#ifdef CONFIG_USER_SCHED
+	uid_t uid;
+#endif
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* schedulable entities of this group on each cpu */
 	struct sched_entity **se;
@@ -292,6 +296,12 @@ struct task_group {
 
 #ifdef CONFIG_USER_SCHED
 
+/* Helper function to pass uid information to create_sched_user() */
+void set_tg_uid(struct user_struct *user)
+{
+	user->tg->uid = user->uid;
+}
+
 /*
  * Root task group.
  * Every UID task group (including init_task_group aka UID-0) will
@@ -351,7 +361,9 @@ static inline struct task_group *task_group(struct task_struct *p)
 	struct task_group *tg;
 
 #ifdef CONFIG_USER_SCHED
-	tg = p->user->tg;
+	rcu_read_lock();
+	tg = __task_cred(p)->user->tg;
+	rcu_read_unlock();
 #elif defined(CONFIG_CGROUP_SCHED)
 	tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
 			  struct task_group, css);
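The hunk above is part of the cred refactor: the per-task user pointer is now reached through the task's RCU-protected credentials, so the dereference has to sit inside an RCU read-side critical section. A minimal sketch of the general pattern (uid_of() is a hypothetical helper, not part of this patch):

    /* needs <linux/sched.h> and <linux/cred.h> */
    static uid_t uid_of(struct task_struct *p)
    {
            uid_t uid;

            rcu_read_lock();
            uid = __task_cred(p)->uid; /* only valid inside the read section */
            rcu_read_unlock();
            return uid;
    }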
@@ -592,6 +604,8 @@ struct rq {
 #ifdef CONFIG_SCHEDSTATS
 	/* latency stats */
 	struct sched_info rq_sched_info;
+	unsigned long long rq_cpu_time;
+	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
 
 	/* sys_sched_yield() stats */
 	unsigned int yld_exp_empty;
@@ -709,45 +723,18 @@ static __read_mostly char *sched_feat_names[] = {
 
 #undef SCHED_FEAT
 
-static int sched_feat_open(struct inode *inode, struct file *filp)
-{
-	filp->private_data = inode->i_private;
-	return 0;
-}
-
-static ssize_t
-sched_feat_read(struct file *filp, char __user *ubuf,
-		size_t cnt, loff_t *ppos)
+static int sched_feat_show(struct seq_file *m, void *v)
 {
-	char *buf;
-	int r = 0;
-	int len = 0;
 	int i;
 
 	for (i = 0; sched_feat_names[i]; i++) {
-		len += strlen(sched_feat_names[i]);
-		len += 4;
+		if (!(sysctl_sched_features & (1UL << i)))
+			seq_puts(m, "NO_");
+		seq_printf(m, "%s ", sched_feat_names[i]);
 	}
+	seq_puts(m, "\n");
 
-	buf = kmalloc(len + 2, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	for (i = 0; sched_feat_names[i]; i++) {
-		if (sysctl_sched_features & (1UL << i))
-			r += sprintf(buf + r, "%s ", sched_feat_names[i]);
-		else
-			r += sprintf(buf + r, "NO_%s ", sched_feat_names[i]);
-	}
-
-	r += sprintf(buf + r, "\n");
-	WARN_ON(r >= len + 2);
-
-	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-
-	kfree(buf);
-
-	return r;
+	return 0;
 }
 
 static ssize_t
@@ -792,10 +779,17 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static int sched_feat_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, sched_feat_show, NULL);
+}
+
 static struct file_operations sched_feat_fops = {
 	.open = sched_feat_open,
-	.read = sched_feat_read,
-	.write = sched_feat_write,
+	.write = sched_feat_write,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
 };
 
 static __init int sched_init_debug(void)
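The two hunks above replace a hand-rolled kmalloc()/sprintf() read path for the sched_features debugfs file with the seq_file single_open() idiom, which takes care of buffer sizing, partial reads and lseek. The general shape of that idiom, as a sketch with hypothetical my_* names:

    /* needs <linux/fs.h> and <linux/seq_file.h> */
    static int my_show(struct seq_file *m, void *v)
    {
            seq_puts(m, "hello\n"); /* seq_file grows the buffer as needed */
            return 0;
    }

    static int my_open(struct inode *inode, struct file *filp)
    {
            return single_open(filp, my_show, inode->i_private);
    }

    static const struct file_operations my_fops = {
            .open = my_open,
            .read = seq_read,
            .llseek = seq_lseek,
            .release = single_release,
    };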
@@ -1480,27 +1474,13 @@ static void
 update_group_shares_cpu(struct task_group *tg, int cpu,
 			unsigned long sd_shares, unsigned long sd_rq_weight)
 {
-	int boost = 0;
 	unsigned long shares;
 	unsigned long rq_weight;
 
 	if (!tg->se[cpu])
 		return;
 
-	rq_weight = tg->cfs_rq[cpu]->load.weight;
-
-	/*
-	 * If there are currently no tasks on the cpu pretend there is one of
-	 * average load so that when a new task gets to run here it will not
-	 * get delayed by group starvation.
-	 */
-	if (!rq_weight) {
-		boost = 1;
-		rq_weight = NICE_0_LOAD;
-	}
-
-	if (unlikely(rq_weight > sd_rq_weight))
-		rq_weight = sd_rq_weight;
+	rq_weight = tg->cfs_rq[cpu]->rq_weight;
 
 	/*
 	 *		\Sum shares * rq_weight
@@ -1508,7 +1488,7 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
 	 *		\Sum rq_weight
 	 *
 	 */
-	shares = (sd_shares * rq_weight) / (sd_rq_weight + 1);
+	shares = (sd_shares * rq_weight) / sd_rq_weight;
 	shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
 
 	if (abs(shares - tg->se[cpu]->load.weight) >
@@ -1517,11 +1497,7 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
 		unsigned long flags;
 
 		spin_lock_irqsave(&rq->lock, flags);
-		/*
-		 * record the actual number of shares, not the boosted amount.
-		 */
-		tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
-		tg->cfs_rq[cpu]->rq_weight = rq_weight;
+		tg->cfs_rq[cpu]->shares = shares;
 
 		__set_se_shares(tg->se[cpu], shares);
 		spin_unlock_irqrestore(&rq->lock, flags);
@@ -1535,13 +1511,23 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
 */
 static int tg_shares_up(struct task_group *tg, void *data)
 {
-	unsigned long rq_weight = 0;
+	unsigned long weight, rq_weight = 0;
 	unsigned long shares = 0;
 	struct sched_domain *sd = data;
 	int i;
 
 	for_each_cpu_mask(i, sd->span) {
-		rq_weight += tg->cfs_rq[i]->load.weight;
+		/*
+		 * If there are currently no tasks on the cpu pretend there
+		 * is one of average load so that when a new task gets to
+		 * run here it will not get delayed by group starvation.
+		 */
+		weight = tg->cfs_rq[i]->load.weight;
+		if (!weight)
+			weight = NICE_0_LOAD;
+
+		tg->cfs_rq[i]->rq_weight = weight;
+		rq_weight += weight;
 		shares += tg->cfs_rq[i]->shares;
 	}
 
@@ -1551,9 +1537,6 @@ static int tg_shares_up(struct task_group *tg, void *data)
 	if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
 		shares = tg->shares;
 
-	if (!rq_weight)
-		rq_weight = cpus_weight(sd->span) * NICE_0_LOAD;
-
 	for_each_cpu_mask(i, sd->span)
 		update_group_shares_cpu(tg, i, shares, rq_weight);
 
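Taken together, the shares hunks move the "pretend an idle cpu carries one task of average load" fallback from the per-cpu distribution step into the summation loop, caching the result in cfs_rq->rq_weight; the division shares = sd_shares * rq_weight / \Sum rq_weight then no longer needs the (sd_rq_weight + 1) guard, because the sum now always includes at least NICE_0_LOAD per cpu. A worked example with assumed numbers: for tg->shares = 1024 over two cpus with rq_weight 2048 and 1024, cpu 0 gets 1024 * 2048 / 3072 = 682 shares and cpu 1 gets 1024 * 1024 / 3072 = 341, before clamping to [MIN_SHARES, MAX_SHARES].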
@@ -1618,6 +1601,39 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 
 #endif
 
+/*
+ * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
+ */
+static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(this_rq->lock)
+	__acquires(busiest->lock)
+	__acquires(this_rq->lock)
+{
+	int ret = 0;
+
+	if (unlikely(!irqs_disabled())) {
+		/* printk() doesn't work good under rq->lock */
+		spin_unlock(&this_rq->lock);
+		BUG_ON(1);
+	}
+	if (unlikely(!spin_trylock(&busiest->lock))) {
+		if (busiest < this_rq) {
+			spin_unlock(&this_rq->lock);
+			spin_lock(&busiest->lock);
+			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
+			ret = 1;
+		} else
+			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
+	}
+	return ret;
+}
+
+static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
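double_lock_balance() is moved up here (its old copy is removed further down) so earlier code can use it; it avoids AB-BA deadlock between two runqueue locks by falling back to a fixed lock order (lower kernel address first) when the trylock fails, and returns 1 when this_rq->lock had to be dropped and retaken. A sketch of the expected caller pattern, under that assumption:

    if (double_lock_balance(this_rq, busiest)) {
            /* this_rq->lock was dropped and reacquired:
             * anything read under it must be revalidated */
    }
    /* ... migrate tasks with both locks held ... */
    double_unlock_balance(this_rq, busiest);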
@@ -2262,6 +2278,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 
 	smp_wmb();
 	rq = task_rq_lock(p, &flags);
+	update_rq_clock(rq);
 	old_state = p->state;
 	if (!(old_state & state))
 		goto out;
@@ -2319,7 +2336,6 @@ out_activate:
 		schedstat_inc(p, se.nr_wakeups_local);
 	else
 		schedstat_inc(p, se.nr_wakeups_remote);
-	update_rq_clock(rq);
 	activate_task(rq, p, 1);
 	success = 1;
 
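These two hunks hoist update_rq_clock() to the top of try_to_wake_up(), right after the runqueue lock is taken, so every path through the wakeup code sees a freshly updated rq clock instead of only the branch that reached activate_task().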
@@ -2820,40 +2836,6 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 }
 
 /*
- * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
- */
-static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
-	__releases(this_rq->lock)
-	__acquires(busiest->lock)
-	__acquires(this_rq->lock)
-{
-	int ret = 0;
-
-	if (unlikely(!irqs_disabled())) {
-		/* printk() doesn't work good under rq->lock */
-		spin_unlock(&this_rq->lock);
-		BUG_ON(1);
-	}
-	if (unlikely(!spin_trylock(&busiest->lock))) {
-		if (busiest < this_rq) {
-			spin_unlock(&this_rq->lock);
-			spin_lock(&busiest->lock);
-			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
-			ret = 1;
-		} else
-			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
-	}
-	return ret;
-}
-
-static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
-	__releases(busiest->lock)
-{
-	spin_unlock(&busiest->lock);
-	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
-}
-
-/*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
  * allow dest_cpu, which will force the cpu onto dest_cpu. Then
@@ -3714,7 +3696,7 @@ out_balanced:
 static void idle_balance(int this_cpu, struct rq *this_rq)
 {
 	struct sched_domain *sd;
-	int pulled_task = -1;
+	int pulled_task = 0;
 	unsigned long next_balance = jiffies + HZ;
 	cpumask_t tmpmask;
 
@@ -5141,6 +5123,22 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
 	set_load_weight(p);
 }
 
+/*
+ * check the target process has a UID that matches the current process's
+ */
+static bool check_same_owner(struct task_struct *p)
+{
+	const struct cred *cred = current_cred(), *pcred;
+	bool match;
+
+	rcu_read_lock();
+	pcred = __task_cred(p);
+	match = (cred->euid == pcred->euid ||
+		 cred->euid == pcred->uid);
+	rcu_read_unlock();
+	return match;
+}
+
 static int __sched_setscheduler(struct task_struct *p, int policy,
 				struct sched_param *param, bool user)
 {
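check_same_owner() centralizes the two open-coded "current's euid matches the target's uid or euid" tests patched below, and, like the task_group() hunk earlier in this diff, reads the target's IDs from the RCU-protected struct cred rather than from task_struct directly. Concretely: a process with euid 1000 may act on a task whose real or effective UID is 1000; otherwise it needs a capability such as CAP_SYS_NICE, which the setaffinity path checks separately.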
@@ -5200,8 +5198,7 @@ recheck:
 			return -EPERM;
 
 		/* can't change other user's priorities */
-		if ((current->euid != p->euid) &&
-		    (current->euid != p->uid))
+		if (!check_same_owner(p))
 			return -EPERM;
 	}
 
@@ -5433,8 +5430,7 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask)
 	read_unlock(&tasklist_lock);
 
 	retval = -EPERM;
-	if ((current->euid != p->euid) && (current->euid != p->uid) &&
-	    !capable(CAP_SYS_NICE))
+	if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
 		goto out_unlock;
 
 	retval = security_task_setscheduler(p, 0, NULL);
@@ -6134,7 +6130,6 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
 
 /*
  * Figure out where task on dead CPU should go, use force if necessary.
- * NOTE: interrupts should be disabled by the caller
 */
 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 {
@@ -6646,28 +6641,6 @@ early_initcall(migration_init);
 
 #ifdef CONFIG_SCHED_DEBUG
 
-static inline const char *sd_level_to_string(enum sched_domain_level lvl)
-{
-	switch (lvl) {
-	case SD_LV_NONE:
-		return "NONE";
-	case SD_LV_SIBLING:
-		return "SIBLING";
-	case SD_LV_MC:
-		return "MC";
-	case SD_LV_CPU:
-		return "CPU";
-	case SD_LV_NODE:
-		return "NODE";
-	case SD_LV_ALLNODES:
-		return "ALLNODES";
-	case SD_LV_MAX:
-		return "MAX";
-
-	}
-	return "MAX";
-}
-
 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 				  cpumask_t *groupmask)
 {
@@ -6687,8 +6660,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 		return -1;
 	}
 
-	printk(KERN_CONT "span %s level %s\n",
-		str, sd_level_to_string(sd->level));
+	printk(KERN_CONT "span %s level %s\n", str, sd->name);
 
 	if (!cpu_isset(cpu, sd->span)) {
 		printk(KERN_ERR "ERROR: domain->span does not contain "
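With every sched_domain carrying a name string (sd->name, filled in when the domains are initialized), the hand-maintained sd_level_to_string() switch above becomes redundant and the debug printk can use the name directly.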
@@ -6824,6 +6796,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 				SD_BALANCE_EXEC |
 				SD_SHARE_CPUPOWER |
 				SD_SHARE_PKG_RESOURCES);
+		if (nr_node_ids == 1)
+			pflags &= ~SD_SERIALIZE;
 	}
 	if (~cflags & pflags)
 		return 0;
@@ -7344,13 +7318,21 @@ struct allmasks {
 };
 
 #if NR_CPUS > 128
-#define SCHED_CPUMASK_ALLOC		1
-#define SCHED_CPUMASK_FREE(v)		kfree(v)
-#define SCHED_CPUMASK_DECLARE(v)	struct allmasks *v
+#define SCHED_CPUMASK_DECLARE(v)	struct allmasks *v
+static inline void sched_cpumask_alloc(struct allmasks **masks)
+{
+	*masks = kmalloc(sizeof(**masks), GFP_KERNEL);
+}
+static inline void sched_cpumask_free(struct allmasks *masks)
+{
+	kfree(masks);
+}
 #else
-#define SCHED_CPUMASK_ALLOC		0
-#define SCHED_CPUMASK_FREE(v)
-#define SCHED_CPUMASK_DECLARE(v)	struct allmasks _v, *v = &_v
+#define SCHED_CPUMASK_DECLARE(v)	struct allmasks _v, *v = &_v
+static inline void sched_cpumask_alloc(struct allmasks **masks)
+{ }
+static inline void sched_cpumask_free(struct allmasks *masks)
+{ }
 #endif
 
 #define SCHED_CPUMASK_VAR(v, a)	cpumask_t *v = (cpumask_t *) \
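Turning the SCHED_CPUMASK_ALLOC/SCHED_CPUMASK_FREE macro pair into always-defined inline helpers lets the caller lose its #if blocks (see the __build_sched_domains() hunks below) while both configurations stay visible to the compiler for type checking. The idiom in isolation, as a sketch with hypothetical names:

    /* needs <linux/slab.h>; CONFIG_NEED_HEAP and struct scratch are made up */
    #ifdef CONFIG_NEED_HEAP
    #define DECLARE_SCRATCH(v)	struct scratch *v
    static inline void scratch_alloc(struct scratch **s)
    {
            *s = kmalloc(sizeof(**s), GFP_KERNEL); /* caller checks for NULL */
    }
    static inline void scratch_free(struct scratch *s)
    {
            kfree(s);
    }
    #else	/* small config: the object lives on the stack */
    #define DECLARE_SCRATCH(v)	struct scratch _##v, *v = &_##v
    static inline void scratch_alloc(struct scratch **s) { }
    static inline void scratch_free(struct scratch *s) { }
    #endif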
@@ -7426,9 +7408,8 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		return -ENOMEM;
 	}
 
-#if SCHED_CPUMASK_ALLOC
 	/* get space for all scratch cpumask variables */
-	allmasks = kmalloc(sizeof(*allmasks), GFP_KERNEL);
+	sched_cpumask_alloc(&allmasks);
 	if (!allmasks) {
 		printk(KERN_WARNING "Cannot alloc cpumask array\n");
 		kfree(rd);
@@ -7437,7 +7418,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 		return -ENOMEM;
 	}
-#endif
+
 	tmpmask = (cpumask_t *)allmasks;
 
 
@@ -7691,13 +7672,13 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		cpu_attach_domain(sd, rd, i);
 	}
 
-	SCHED_CPUMASK_FREE((void *)allmasks);
+	sched_cpumask_free(allmasks);
 	return 0;
 
 #ifdef CONFIG_NUMA
 error:
 	free_sched_groups(cpu_map, tmpmask);
-	SCHED_CPUMASK_FREE((void *)allmasks);
+	sched_cpumask_free(allmasks);
 	kfree(rd);
 	return -ENOMEM;
 #endif
@@ -7720,8 +7701,14 @@ static struct sched_domain_attr *dattr_cur;
 */
 static cpumask_t fallback_doms;
 
-void __attribute__((weak)) arch_update_cpu_topology(void)
+/*
+ * arch_update_cpu_topology lets virtualized architectures update the
+ * cpu core maps. It is supposed to return 1 if the topology changed
+ * or 0 if it stayed the same.
+ */
+int __attribute__((weak)) arch_update_cpu_topology(void)
 {
+	return 0;
 }
 
 /*
@@ -7761,8 +7748,6 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
 	cpumask_t tmpmask;
 	int i;
 
-	unregister_sched_domain_sysctl();
-
 	for_each_cpu_mask_nr(i, *cpu_map)
 		cpu_attach_domain(NULL, &def_root_domain, i);
 	synchronize_sched();
@@ -7815,17 +7800,21 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
 			     struct sched_domain_attr *dattr_new)
 {
 	int i, j, n;
+	int new_topology;
 
 	mutex_lock(&sched_domains_mutex);
 
 	/* always unregister in case we don't destroy any domains */
 	unregister_sched_domain_sysctl();
 
+	/* Let architecture update cpu core mappings. */
+	new_topology = arch_update_cpu_topology();
+
 	n = doms_new ? ndoms_new : 0;
 
 	/* Destroy deleted domains */
 	for (i = 0; i < ndoms_cur; i++) {
-		for (j = 0; j < n; j++) {
+		for (j = 0; j < n && !new_topology; j++) {
 			if (cpus_equal(doms_cur[i], doms_new[j])
 			    && dattrs_equal(dattr_cur, i, dattr_new, j))
 				goto match1;
@@ -7840,12 +7829,12 @@ match1:
 		ndoms_cur = 0;
 		doms_new = &fallback_doms;
 		cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
-		dattr_new = NULL;
+		WARN_ON_ONCE(dattr_new);
 	}
 
 	/* Build new domains */
 	for (i = 0; i < ndoms_new; i++) {
-		for (j = 0; j < ndoms_cur; j++) {
+		for (j = 0; j < ndoms_cur && !new_topology; j++) {
 			if (cpus_equal(doms_new[i], doms_cur[j])
 			    && dattrs_equal(dattr_new, i, dattr_cur, j))
 				goto match2;
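When arch_update_cpu_topology() reports a changed topology (for example, a virtualized machine migrated to a host with a different core layout), the !new_topology guards in both matching loops force a full teardown and rebuild of the scheduler domains instead of reusing entries whose cpu masks merely happen to match.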
@@ -8500,7 +8489,7 @@ static
 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 {
 	struct cfs_rq *cfs_rq;
-	struct sched_entity *se, *parent_se;
+	struct sched_entity *se;
 	struct rq *rq;
 	int i;
 
@@ -8516,18 +8505,17 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 	for_each_possible_cpu(i) {
 		rq = cpu_rq(i);
 
-		cfs_rq = kmalloc_node(sizeof(struct cfs_rq),
-				      GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
+				      GFP_KERNEL, cpu_to_node(i));
 		if (!cfs_rq)
 			goto err;
 
-		se = kmalloc_node(sizeof(struct sched_entity),
-				  GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+		se = kzalloc_node(sizeof(struct sched_entity),
+				  GFP_KERNEL, cpu_to_node(i));
 		if (!se)
 			goto err;
 
-		parent_se = parent ? parent->se[i] : NULL;
-		init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent_se);
+		init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
 	}
 
 	return 1;
@@ -8588,7 +8576,7 @@ static
 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 {
 	struct rt_rq *rt_rq;
-	struct sched_rt_entity *rt_se, *parent_se;
+	struct sched_rt_entity *rt_se;
 	struct rq *rq;
 	int i;
 
@@ -8605,18 +8593,17 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 	for_each_possible_cpu(i) {
 		rq = cpu_rq(i);
 
-		rt_rq = kmalloc_node(sizeof(struct rt_rq),
-				     GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+		rt_rq = kzalloc_node(sizeof(struct rt_rq),
+				     GFP_KERNEL, cpu_to_node(i));
 		if (!rt_rq)
 			goto err;
 
-		rt_se = kmalloc_node(sizeof(struct sched_rt_entity),
-				     GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
+				     GFP_KERNEL, cpu_to_node(i));
 		if (!rt_se)
 			goto err;
 
-		parent_se = parent ? parent->rt_se[i] : NULL;
-		init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent_se);
+		init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
 	}
 
 	return 1;
@@ -9259,11 +9246,12 @@ struct cgroup_subsys cpu_cgroup_subsys = {
 * (balbir@in.ibm.com).
 */
 
-/* track cpu usage of a group of tasks */
+/* track cpu usage of a group of tasks and its child groups */
 struct cpuacct {
 	struct cgroup_subsys_state css;
 	/* cpuusage holds pointer to a u64-type object on every cpu */
 	u64 *cpuusage;
+	struct cpuacct *parent;
 };
 
 struct cgroup_subsys cpuacct_subsys;
@@ -9297,6 +9285,9 @@ static struct cgroup_subsys_state *cpuacct_create(
 		return ERR_PTR(-ENOMEM);
 	}
 
+	if (cgrp->parent)
+		ca->parent = cgroup_ca(cgrp->parent);
+
 	return &ca->css;
 }
 
@@ -9310,6 +9301,41 @@ cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 	kfree(ca);
 }
 
+static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
+{
+	u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+	u64 data;
+
+#ifndef CONFIG_64BIT
+	/*
+	 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
+	 */
+	spin_lock_irq(&cpu_rq(cpu)->lock);
+	data = *cpuusage;
+	spin_unlock_irq(&cpu_rq(cpu)->lock);
+#else
+	data = *cpuusage;
+#endif
+
+	return data;
+}
+
+static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
+{
+	u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+
+#ifndef CONFIG_64BIT
+	/*
+	 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
+	 */
+	spin_lock_irq(&cpu_rq(cpu)->lock);
+	*cpuusage = val;
+	spin_unlock_irq(&cpu_rq(cpu)->lock);
+#else
+	*cpuusage = val;
+#endif
+}
+
 /* return total cpu usage (in nanoseconds) of a group */
 static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
 {
@@ -9317,17 +9343,8 @@ static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
 	u64 totalcpuusage = 0;
 	int i;
 
-	for_each_possible_cpu(i) {
-		u64 *cpuusage = percpu_ptr(ca->cpuusage, i);
-
-		/*
-		 * Take rq->lock to make 64-bit addition safe on 32-bit
-		 * platforms.
-		 */
-		spin_lock_irq(&cpu_rq(i)->lock);
-		totalcpuusage += *cpuusage;
-		spin_unlock_irq(&cpu_rq(i)->lock);
-	}
+	for_each_present_cpu(i)
+		totalcpuusage += cpuacct_cpuusage_read(ca, i);
 
 	return totalcpuusage;
 }
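The rq->lock taken in the helpers above guards against tearing: on a 32-bit machine a u64 access compiles to two 32-bit memory operations, so an unlocked reader that races with an increment of 0x00000000ffffffff to 0x0000000100000000 can combine the new low word with the old high word and read 0. On 64-bit the access is a single operation and the lock is skipped. Note also the switch from for_each_possible_cpu() to for_each_present_cpu(), which restricts the walk to cpus actually present in the system.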
@@ -9344,23 +9361,39 @@ static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
 		goto out;
 	}
 
-	for_each_possible_cpu(i) {
-		u64 *cpuusage = percpu_ptr(ca->cpuusage, i);
+	for_each_present_cpu(i)
+		cpuacct_cpuusage_write(ca, i, 0);
 
-		spin_lock_irq(&cpu_rq(i)->lock);
-		*cpuusage = 0;
-		spin_unlock_irq(&cpu_rq(i)->lock);
-	}
 out:
 	return err;
 }
 
+static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
+				   struct seq_file *m)
+{
+	struct cpuacct *ca = cgroup_ca(cgroup);
+	u64 percpu;
+	int i;
+
+	for_each_present_cpu(i) {
+		percpu = cpuacct_cpuusage_read(ca, i);
+		seq_printf(m, "%llu ", (unsigned long long) percpu);
+	}
+	seq_printf(m, "\n");
+	return 0;
+}
+
 static struct cftype files[] = {
 	{
 		.name = "usage",
 		.read_u64 = cpuusage_read,
 		.write_u64 = cpuusage_write,
 	},
+	{
+		.name = "usage_percpu",
+		.read_seq_string = cpuacct_percpu_seq_read,
+	},
+
 };
 
 static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
@@ -9376,14 +9409,16 @@ static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
 static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
 {
 	struct cpuacct *ca;
+	int cpu;
 
 	if (!cpuacct_subsys.active)
 		return;
 
+	cpu = task_cpu(tsk);
 	ca = task_ca(tsk);
-	if (ca) {
-		u64 *cpuusage = percpu_ptr(ca->cpuusage, task_cpu(tsk));
 
+	for (; ca; ca = ca->parent) {
+		u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
 		*cpuusage += cputime;
 	}
 }
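With the parent pointer wired up in cpuacct_create(), each charge now walks up the cgroup hierarchy, so a parent group's cpuacct.usage accounts for everything its descendant groups consumed; task_cpu() is sampled once before the walk since it is the same for every level. The new cpuacct.usage_percpu file, backed by cpuacct_percpu_seq_read(), prints one nanosecond counter per present cpu on a single line.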
