Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile       |   3
-rw-r--r--  kernel/sched.c        | 271
-rw-r--r--  kernel/sched_debug.c  |  57
-rw-r--r--  kernel/sched_rt.c     |   7
-rw-r--r--  kernel/user.c         |   2
5 files changed, 166 insertions(+), 174 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 19fad003b19d..6a212b842d86 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -19,7 +19,6 @@ CFLAGS_REMOVE_mutex-debug.o = -pg
 CFLAGS_REMOVE_rtmutex-debug.o = -pg
 CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_sched_clock.o = -pg
-CFLAGS_REMOVE_sched.o = -pg
 endif
 
 obj-$(CONFIG_FREEZER) += freezer.o
@@ -90,7 +89,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace/
 obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 
-ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y)
+ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
 # needed for x86 only. Why this used to be enabled for all architectures is beyond
 # me. I suspect most platforms don't need this, but until we know that for sure
diff --git a/kernel/sched.c b/kernel/sched.c
index e4bb1dd7b308..ef212da928e8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -261,6 +261,10 @@ struct task_group {
 	struct cgroup_subsys_state css;
 #endif
 
+#ifdef CONFIG_USER_SCHED
+	uid_t uid;
+#endif
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* schedulable entities of this group on each cpu */
 	struct sched_entity **se;
@@ -286,6 +290,12 @@ struct task_group {
 
 #ifdef CONFIG_USER_SCHED
 
+/* Helper function to pass uid information to create_sched_user() */
+void set_tg_uid(struct user_struct *user)
+{
+	user->tg->uid = user->uid;
+}
+
 /*
  * Root task group.
  * Every UID task group (including init_task_group aka UID-0) will
@@ -703,45 +713,18 @@ static __read_mostly char *sched_feat_names[] = {
 
 #undef SCHED_FEAT
 
-static int sched_feat_open(struct inode *inode, struct file *filp)
+static int sched_feat_show(struct seq_file *m, void *v)
 {
-	filp->private_data = inode->i_private;
-	return 0;
-}
-
-static ssize_t
-sched_feat_read(struct file *filp, char __user *ubuf,
-		size_t cnt, loff_t *ppos)
-{
-	char *buf;
-	int r = 0;
-	int len = 0;
 	int i;
 
 	for (i = 0; sched_feat_names[i]; i++) {
-		len += strlen(sched_feat_names[i]);
-		len += 4;
-	}
-
-	buf = kmalloc(len + 2, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	for (i = 0; sched_feat_names[i]; i++) {
-		if (sysctl_sched_features & (1UL << i))
-			r += sprintf(buf + r, "%s ", sched_feat_names[i]);
-		else
-			r += sprintf(buf + r, "NO_%s ", sched_feat_names[i]);
+		if (!(sysctl_sched_features & (1UL << i)))
+			seq_puts(m, "NO_");
+		seq_printf(m, "%s ", sched_feat_names[i]);
 	}
+	seq_puts(m, "\n");
 
-	r += sprintf(buf + r, "\n");
-	WARN_ON(r >= len + 2);
-
-	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-
-	kfree(buf);
-
-	return r;
+	return 0;
 }
 
 static ssize_t
@@ -786,10 +769,17 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static int sched_feat_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, sched_feat_show, NULL);
+}
+
 static struct file_operations sched_feat_fops = {
 	.open		= sched_feat_open,
-	.read		= sched_feat_read,
-	.write		= sched_feat_write,
+	.write		= sched_feat_write,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
 };
 
 static __init int sched_init_debug(void)
@@ -1474,27 +1464,13 @@ static void
 update_group_shares_cpu(struct task_group *tg, int cpu,
 			unsigned long sd_shares, unsigned long sd_rq_weight)
 {
-	int boost = 0;
 	unsigned long shares;
 	unsigned long rq_weight;
 
 	if (!tg->se[cpu])
 		return;
 
-	rq_weight = tg->cfs_rq[cpu]->load.weight;
-
-	/*
-	 * If there are currently no tasks on the cpu pretend there is one of
-	 * average load so that when a new task gets to run here it will not
-	 * get delayed by group starvation.
-	 */
-	if (!rq_weight) {
-		boost = 1;
-		rq_weight = NICE_0_LOAD;
-	}
-
-	if (unlikely(rq_weight > sd_rq_weight))
-		rq_weight = sd_rq_weight;
+	rq_weight = tg->cfs_rq[cpu]->rq_weight;
 
 	/*
 	 * \Sum shares * rq_weight
@@ -1502,7 +1478,7 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
 	 * \Sum rq_weight
 	 *
 	 */
-	shares = (sd_shares * rq_weight) / (sd_rq_weight + 1);
+	shares = (sd_shares * rq_weight) / sd_rq_weight;
 	shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
 
 	if (abs(shares - tg->se[cpu]->load.weight) >
@@ -1511,11 +1487,7 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
 		unsigned long flags;
 
 		spin_lock_irqsave(&rq->lock, flags);
-		/*
-		 * record the actual number of shares, not the boosted amount.
-		 */
-		tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
-		tg->cfs_rq[cpu]->rq_weight = rq_weight;
+		tg->cfs_rq[cpu]->shares = shares;
 
 		__set_se_shares(tg->se[cpu], shares);
 		spin_unlock_irqrestore(&rq->lock, flags);
@@ -1529,13 +1501,23 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
  */
 static int tg_shares_up(struct task_group *tg, void *data)
 {
-	unsigned long rq_weight = 0;
+	unsigned long weight, rq_weight = 0;
 	unsigned long shares = 0;
 	struct sched_domain *sd = data;
 	int i;
 
 	for_each_cpu_mask(i, sd->span) {
-		rq_weight += tg->cfs_rq[i]->load.weight;
+		/*
+		 * If there are currently no tasks on the cpu pretend there
+		 * is one of average load so that when a new task gets to
+		 * run here it will not get delayed by group starvation.
+		 */
+		weight = tg->cfs_rq[i]->load.weight;
+		if (!weight)
+			weight = NICE_0_LOAD;
+
+		tg->cfs_rq[i]->rq_weight = weight;
+		rq_weight += weight;
 		shares += tg->cfs_rq[i]->shares;
 	}
 
@@ -1545,9 +1527,6 @@ static int tg_shares_up(struct task_group *tg, void *data)
 	if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
 		shares = tg->shares;
 
-	if (!rq_weight)
-		rq_weight = cpus_weight(sd->span) * NICE_0_LOAD;
-
 	for_each_cpu_mask(i, sd->span)
 		update_group_shares_cpu(tg, i, shares, rq_weight);
 
@@ -1612,6 +1591,39 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 
 #endif
 
+/*
+ * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
+ */
+static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(this_rq->lock)
+	__acquires(busiest->lock)
+	__acquires(this_rq->lock)
+{
+	int ret = 0;
+
+	if (unlikely(!irqs_disabled())) {
+		/* printk() doesn't work good under rq->lock */
+		spin_unlock(&this_rq->lock);
+		BUG_ON(1);
+	}
+	if (unlikely(!spin_trylock(&busiest->lock))) {
+		if (busiest < this_rq) {
+			spin_unlock(&this_rq->lock);
+			spin_lock(&busiest->lock);
+			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
+			ret = 1;
+		} else
+			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
+	}
+	return ret;
+}
+
+static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
+
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -2812,40 +2824,6 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 }
 
 /*
- * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
- */
-static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
-	__releases(this_rq->lock)
-	__acquires(busiest->lock)
-	__acquires(this_rq->lock)
-{
-	int ret = 0;
-
-	if (unlikely(!irqs_disabled())) {
-		/* printk() doesn't work good under rq->lock */
-		spin_unlock(&this_rq->lock);
-		BUG_ON(1);
-	}
-	if (unlikely(!spin_trylock(&busiest->lock))) {
-		if (busiest < this_rq) {
-			spin_unlock(&this_rq->lock);
-			spin_lock(&busiest->lock);
-			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
-			ret = 1;
-		} else
-			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
-	}
-	return ret;
-}
-
-static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
-	__releases(busiest->lock)
-{
-	spin_unlock(&busiest->lock);
-	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
-}
-
-/*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
  * allow dest_cpu, which will force the cpu onto dest_cpu. Then
@@ -3707,7 +3685,7 @@ out_balanced:
 static void idle_balance(int this_cpu, struct rq *this_rq)
 {
 	struct sched_domain *sd;
-	int pulled_task = -1;
+	int pulled_task = 0;
 	unsigned long next_balance = jiffies + HZ;
 	cpumask_t tmpmask;
 
@@ -6126,7 +6104,6 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
 
 /*
  * Figure out where task on dead CPU should go, use force if necessary.
- * NOTE: interrupts should be disabled by the caller
  */
 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 {
@@ -6638,28 +6615,6 @@ early_initcall(migration_init);
 
 #ifdef CONFIG_SCHED_DEBUG
 
-static inline const char *sd_level_to_string(enum sched_domain_level lvl)
-{
-	switch (lvl) {
-	case SD_LV_NONE:
-		return "NONE";
-	case SD_LV_SIBLING:
-		return "SIBLING";
-	case SD_LV_MC:
-		return "MC";
-	case SD_LV_CPU:
-		return "CPU";
-	case SD_LV_NODE:
-		return "NODE";
-	case SD_LV_ALLNODES:
-		return "ALLNODES";
-	case SD_LV_MAX:
-		return "MAX";
-
-	}
-	return "MAX";
-}
-
 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 				  cpumask_t *groupmask)
 {
@@ -6679,8 +6634,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 		return -1;
 	}
 
-	printk(KERN_CONT "span %s level %s\n",
-		str, sd_level_to_string(sd->level));
+	printk(KERN_CONT "span %s level %s\n", str, sd->name);
 
 	if (!cpu_isset(cpu, sd->span)) {
 		printk(KERN_ERR "ERROR: domain->span does not contain "
@@ -6816,6 +6770,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 				SD_BALANCE_EXEC |
 				SD_SHARE_CPUPOWER |
 				SD_SHARE_PKG_RESOURCES);
+		if (nr_node_ids == 1)
+			pflags &= ~SD_SERIALIZE;
 	}
 	if (~cflags & pflags)
 		return 0;
@@ -7336,13 +7292,21 @@ struct allmasks {
 };
 
 #if NR_CPUS > 128
-#define SCHED_CPUMASK_ALLOC		1
-#define SCHED_CPUMASK_FREE(v)		kfree(v)
-#define SCHED_CPUMASK_DECLARE(v)	struct allmasks *v
+#define SCHED_CPUMASK_DECLARE(v)	struct allmasks *v
+static inline void sched_cpumask_alloc(struct allmasks **masks)
+{
+	*masks = kmalloc(sizeof(**masks), GFP_KERNEL);
+}
+static inline void sched_cpumask_free(struct allmasks *masks)
+{
+	kfree(masks);
+}
 #else
-#define SCHED_CPUMASK_ALLOC		0
-#define SCHED_CPUMASK_FREE(v)
-#define SCHED_CPUMASK_DECLARE(v)	struct allmasks _v, *v = &_v
+#define SCHED_CPUMASK_DECLARE(v)	struct allmasks _v, *v = &_v
+static inline void sched_cpumask_alloc(struct allmasks **masks)
+{ }
+static inline void sched_cpumask_free(struct allmasks *masks)
+{ }
 #endif
 
 #define SCHED_CPUMASK_VAR(v, a)		cpumask_t *v = (cpumask_t *) \
@@ -7418,9 +7382,8 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		return -ENOMEM;
 	}
 
-#if SCHED_CPUMASK_ALLOC
 	/* get space for all scratch cpumask variables */
-	allmasks = kmalloc(sizeof(*allmasks), GFP_KERNEL);
+	sched_cpumask_alloc(&allmasks);
 	if (!allmasks) {
 		printk(KERN_WARNING "Cannot alloc cpumask array\n");
 		kfree(rd);
@@ -7429,7 +7392,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 		return -ENOMEM;
 	}
-#endif
+
 	tmpmask = (cpumask_t *)allmasks;
 
 
@@ -7683,13 +7646,13 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		cpu_attach_domain(sd, rd, i);
 	}
 
-	SCHED_CPUMASK_FREE((void *)allmasks);
+	sched_cpumask_free(allmasks);
 	return 0;
 
 #ifdef CONFIG_NUMA
 error:
 	free_sched_groups(cpu_map, tmpmask);
-	SCHED_CPUMASK_FREE((void *)allmasks);
+	sched_cpumask_free(allmasks);
 	kfree(rd);
 	return -ENOMEM;
 #endif
@@ -7753,8 +7716,6 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
 	cpumask_t tmpmask;
 	int i;
 
-	unregister_sched_domain_sysctl();
-
 	for_each_cpu_mask_nr(i, *cpu_map)
 		cpu_attach_domain(NULL, &def_root_domain, i);
 	synchronize_sched();
@@ -7832,7 +7793,7 @@ match1:
 		ndoms_cur = 0;
 		doms_new = &fallback_doms;
 		cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
-		dattr_new = NULL;
+		WARN_ON_ONCE(dattr_new);
 	}
 
 	/* Build new domains */
@@ -8492,7 +8453,7 @@ static
 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 {
 	struct cfs_rq *cfs_rq;
-	struct sched_entity *se, *parent_se;
+	struct sched_entity *se;
 	struct rq *rq;
 	int i;
 
@@ -8508,18 +8469,17 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 	for_each_possible_cpu(i) {
 		rq = cpu_rq(i);
 
-		cfs_rq = kmalloc_node(sizeof(struct cfs_rq),
-				      GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
+				      GFP_KERNEL, cpu_to_node(i));
 		if (!cfs_rq)
 			goto err;
 
-		se = kmalloc_node(sizeof(struct sched_entity),
-				  GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+		se = kzalloc_node(sizeof(struct sched_entity),
+				  GFP_KERNEL, cpu_to_node(i));
 		if (!se)
 			goto err;
 
-		parent_se = parent ? parent->se[i] : NULL;
-		init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent_se);
+		init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
 	}
 
 	return 1;
@@ -8580,7 +8540,7 @@ static
 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 {
 	struct rt_rq *rt_rq;
-	struct sched_rt_entity *rt_se, *parent_se;
+	struct sched_rt_entity *rt_se;
 	struct rq *rq;
 	int i;
 
@@ -8597,18 +8557,17 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 	for_each_possible_cpu(i) {
 		rq = cpu_rq(i);
 
-		rt_rq = kmalloc_node(sizeof(struct rt_rq),
-				     GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+		rt_rq = kzalloc_node(sizeof(struct rt_rq),
+				     GFP_KERNEL, cpu_to_node(i));
 		if (!rt_rq)
 			goto err;
 
-		rt_se = kmalloc_node(sizeof(struct sched_rt_entity),
-				     GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
+				     GFP_KERNEL, cpu_to_node(i));
 		if (!rt_se)
 			goto err;
 
-		parent_se = parent ? parent->rt_se[i] : NULL;
-		init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent_se);
+		init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
 	}
 
 	return 1;
@@ -9251,11 +9210,12 @@ struct cgroup_subsys cpu_cgroup_subsys = {
  * (balbir@in.ibm.com).
  */
 
-/* track cpu usage of a group of tasks */
+/* track cpu usage of a group of tasks and its child groups */
 struct cpuacct {
 	struct cgroup_subsys_state css;
 	/* cpuusage holds pointer to a u64-type object on every cpu */
 	u64 *cpuusage;
+	struct cpuacct *parent;
 };
 
 struct cgroup_subsys cpuacct_subsys;
@@ -9289,6 +9249,9 @@ static struct cgroup_subsys_state *cpuacct_create(
 		return ERR_PTR(-ENOMEM);
 	}
 
+	if (cgrp->parent)
+		ca->parent = cgroup_ca(cgrp->parent);
+
 	return &ca->css;
 }
 
@@ -9368,14 +9331,16 @@ static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
 static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
 {
 	struct cpuacct *ca;
+	int cpu;
 
 	if (!cpuacct_subsys.active)
 		return;
 
+	cpu = task_cpu(tsk);
 	ca = task_ca(tsk);
-	if (ca) {
-		u64 *cpuusage = percpu_ptr(ca->cpuusage, task_cpu(tsk));
 
+	for (; ca; ca = ca->parent) {
+		u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
 		*cpuusage += cputime;
 	}
 }
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 26ed8e3d1c15..4293cfa9681d 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -53,6 +53,40 @@ static unsigned long nsec_low(unsigned long long nsec)
 
 #define SPLIT_NS(x) nsec_high(x), nsec_low(x)
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static void print_cfs_group_stats(struct seq_file *m, int cpu,
+		struct task_group *tg)
+{
+	struct sched_entity *se = tg->se[cpu];
+	if (!se)
+		return;
+
+#define P(F) \
+	SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F)
+#define PN(F) \
+	SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
+
+	PN(se->exec_start);
+	PN(se->vruntime);
+	PN(se->sum_exec_runtime);
+#ifdef CONFIG_SCHEDSTATS
+	PN(se->wait_start);
+	PN(se->sleep_start);
+	PN(se->block_start);
+	PN(se->sleep_max);
+	PN(se->block_max);
+	PN(se->exec_max);
+	PN(se->slice_max);
+	PN(se->wait_max);
+	PN(se->wait_sum);
+	P(se->wait_count);
+#endif
+	P(se->load.weight);
+#undef PN
+#undef P
+}
+#endif
+
 static void
 print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 {
@@ -121,20 +155,19 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 
 #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
 	char path[128] = "";
-	struct cgroup *cgroup = NULL;
 	struct task_group *tg = cfs_rq->tg;
 
-	if (tg)
-		cgroup = tg->css.cgroup;
-
-	if (cgroup)
-		cgroup_path(cgroup, path, sizeof(path));
+	cgroup_path(tg->css.cgroup, path, sizeof(path));
 
 	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
+#elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
+	{
+		uid_t uid = cfs_rq->tg->uid;
+		SEQ_printf(m, "\ncfs_rq[%d] for UID: %u\n", cpu, uid);
+	}
 #else
 	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
 #endif
-
 	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
 			SPLIT_NS(cfs_rq->exec_clock));
 
@@ -168,6 +201,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #ifdef CONFIG_SMP
 	SEQ_printf(m, " .%-30s: %lu\n", "shares", cfs_rq->shares);
 #endif
+	print_cfs_group_stats(m, cpu, cfs_rq->tg);
 #endif
 }
 
@@ -175,14 +209,9 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 {
 #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED)
 	char path[128] = "";
-	struct cgroup *cgroup = NULL;
 	struct task_group *tg = rt_rq->tg;
 
-	if (tg)
-		cgroup = tg->css.cgroup;
-
-	if (cgroup)
-		cgroup_path(cgroup, path, sizeof(path));
+	cgroup_path(tg->css.cgroup, path, sizeof(path));
 
 	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, path);
 #else
@@ -272,7 +301,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
 	u64 now = ktime_to_ns(ktime_get());
 	int cpu;
 
-	SEQ_printf(m, "Sched Debug Version: v0.07, %s %.*s\n",
+	SEQ_printf(m, "Sched Debug Version: v0.08, %s %.*s\n",
 		init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
 		init_utsname()->version);
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index d9ba9d5f99d6..587a16e2a8f5 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -537,13 +537,13 @@ static void update_curr_rt(struct rq *rq)
 	for_each_sched_rt_entity(rt_se) {
 		rt_rq = rt_rq_of_se(rt_se);
 
-		spin_lock(&rt_rq->rt_runtime_lock);
 		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
+			spin_lock(&rt_rq->rt_runtime_lock);
 			rt_rq->rt_time += delta_exec;
 			if (sched_rt_runtime_exceeded(rt_rq))
 				resched_task(curr);
+			spin_unlock(&rt_rq->rt_runtime_lock);
 		}
-		spin_unlock(&rt_rq->rt_runtime_lock);
 	}
 }
 
@@ -909,9 +909,6 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 /* Only try algorithms three times */
 #define RT_MAX_TRIES 3
 
-static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
-static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
-
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
diff --git a/kernel/user.c b/kernel/user.c
index 39d6159fae43..cec2224bc9f5 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -101,6 +101,8 @@ static int sched_create_user(struct user_struct *up)
 	if (IS_ERR(up->tg))
 		rc = -ENOMEM;
 
+	set_tg_uid(up);
+
 	return rc;
 }
 