Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile             |   3
-rw-r--r--  kernel/sched.c              | 192
-rw-r--r--  kernel/sched_debug.c        |  51
-rw-r--r--  kernel/sched_rt.c           |   7
-rw-r--r--  kernel/trace/ftrace.c       | 113
-rw-r--r--  kernel/trace/ring_buffer.c  |   1
-rw-r--r--  kernel/trace/trace.c        |   1
7 files changed, 172 insertions(+), 196 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 19fad003b19d..6a212b842d86 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -19,7 +19,6 @@ CFLAGS_REMOVE_mutex-debug.o = -pg
 CFLAGS_REMOVE_rtmutex-debug.o = -pg
 CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_sched_clock.o = -pg
-CFLAGS_REMOVE_sched.o = -pg
 endif
 
 obj-$(CONFIG_FREEZER) += freezer.o
@@ -90,7 +89,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace/
 obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 
-ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y)
+ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
 # needed for x86 only. Why this used to be enabled for all architectures is beyond
 # me. I suspect most platforms don't need this, but until we know that for sure
diff --git a/kernel/sched.c b/kernel/sched.c
index 700aa9a1413f..3d1ee429219b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -703,45 +703,18 @@ static __read_mostly char *sched_feat_names[] = {
 
 #undef SCHED_FEAT
 
-static int sched_feat_open(struct inode *inode, struct file *filp)
-{
-	filp->private_data = inode->i_private;
-	return 0;
-}
-
-static ssize_t
-sched_feat_read(struct file *filp, char __user *ubuf,
-		size_t cnt, loff_t *ppos)
+static int sched_feat_show(struct seq_file *m, void *v)
 {
-	char *buf;
-	int r = 0;
-	int len = 0;
 	int i;
 
 	for (i = 0; sched_feat_names[i]; i++) {
-		len += strlen(sched_feat_names[i]);
-		len += 4;
-	}
-
-	buf = kmalloc(len + 2, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	for (i = 0; sched_feat_names[i]; i++) {
-		if (sysctl_sched_features & (1UL << i))
-			r += sprintf(buf + r, "%s ", sched_feat_names[i]);
-		else
-			r += sprintf(buf + r, "NO_%s ", sched_feat_names[i]);
+		if (!(sysctl_sched_features & (1UL << i)))
+			seq_puts(m, "NO_");
+		seq_printf(m, "%s ", sched_feat_names[i]);
 	}
+	seq_puts(m, "\n");
 
-	r += sprintf(buf + r, "\n");
-	WARN_ON(r >= len + 2);
-
-	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-
-	kfree(buf);
-
-	return r;
+	return 0;
 }
 
 static ssize_t
@@ -786,10 +759,17 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static int sched_feat_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, sched_feat_show, NULL);
+}
+
 static struct file_operations sched_feat_fops = {
 	.open		= sched_feat_open,
-	.read		= sched_feat_read,
 	.write		= sched_feat_write,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
 };
 
 static __init int sched_init_debug(void)
@@ -1474,27 +1454,13 @@ static void
 update_group_shares_cpu(struct task_group *tg, int cpu,
 			unsigned long sd_shares, unsigned long sd_rq_weight)
 {
-	int boost = 0;
 	unsigned long shares;
 	unsigned long rq_weight;
 
 	if (!tg->se[cpu])
 		return;
 
-	rq_weight = tg->cfs_rq[cpu]->load.weight;
-
-	/*
-	 * If there are currently no tasks on the cpu pretend there is one of
-	 * average load so that when a new task gets to run here it will not
-	 * get delayed by group starvation.
-	 */
-	if (!rq_weight) {
-		boost = 1;
-		rq_weight = NICE_0_LOAD;
-	}
-
-	if (unlikely(rq_weight > sd_rq_weight))
-		rq_weight = sd_rq_weight;
+	rq_weight = tg->cfs_rq[cpu]->rq_weight;
 
 	/*
 	 *           \Sum shares * rq_weight
@@ -1502,7 +1468,7 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
 	 *               \Sum rq_weight
 	 *
 	 */
-	shares = (sd_shares * rq_weight) / (sd_rq_weight + 1);
+	shares = (sd_shares * rq_weight) / sd_rq_weight;
 	shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
 
 	if (abs(shares - tg->se[cpu]->load.weight) >
@@ -1511,11 +1477,7 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
 		unsigned long flags;
 
 		spin_lock_irqsave(&rq->lock, flags);
-		/*
-		 * record the actual number of shares, not the boosted amount.
-		 */
-		tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
-		tg->cfs_rq[cpu]->rq_weight = rq_weight;
+		tg->cfs_rq[cpu]->shares = shares;
 
 		__set_se_shares(tg->se[cpu], shares);
 		spin_unlock_irqrestore(&rq->lock, flags);
@@ -1529,13 +1491,23 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
 	 */
 static int tg_shares_up(struct task_group *tg, void *data)
 {
-	unsigned long rq_weight = 0;
+	unsigned long weight, rq_weight = 0;
 	unsigned long shares = 0;
 	struct sched_domain *sd = data;
 	int i;
 
 	for_each_cpu_mask(i, sd->span) {
-		rq_weight += tg->cfs_rq[i]->load.weight;
+		/*
+		 * If there are currently no tasks on the cpu pretend there
+		 * is one of average load so that when a new task gets to
+		 * run here it will not get delayed by group starvation.
+		 */
+		weight = tg->cfs_rq[i]->load.weight;
+		if (!weight)
+			weight = NICE_0_LOAD;
+
+		tg->cfs_rq[i]->rq_weight = weight;
+		rq_weight += weight;
 		shares += tg->cfs_rq[i]->shares;
 	}
 
@@ -1545,9 +1517,6 @@ static int tg_shares_up(struct task_group *tg, void *data)
 	if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
 		shares = tg->shares;
 
-	if (!rq_weight)
-		rq_weight = cpus_weight(sd->span) * NICE_0_LOAD;
-
 	for_each_cpu_mask(i, sd->span)
 		update_group_shares_cpu(tg, i, shares, rq_weight);
 
@@ -2838,7 +2807,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	return ret;
 }
 
-static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
 	__releases(busiest->lock)
 {
 	spin_unlock(&busiest->lock);
@@ -6126,7 +6095,6 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
 
 /*
  * Figure out where task on dead CPU should go, use force if necessary.
- * NOTE: interrupts should be disabled by the caller
  */
 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 {
@@ -6636,28 +6604,6 @@ early_initcall(migration_init);
 
 #ifdef CONFIG_SCHED_DEBUG
 
-static inline const char *sd_level_to_string(enum sched_domain_level lvl)
-{
-	switch (lvl) {
-	case SD_LV_NONE:
-		return "NONE";
-	case SD_LV_SIBLING:
-		return "SIBLING";
-	case SD_LV_MC:
-		return "MC";
-	case SD_LV_CPU:
-		return "CPU";
-	case SD_LV_NODE:
-		return "NODE";
-	case SD_LV_ALLNODES:
-		return "ALLNODES";
-	case SD_LV_MAX:
-		return "MAX";
-
-	}
-	return "MAX";
-}
-
 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 				  cpumask_t *groupmask)
 {
@@ -6677,8 +6623,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 		return -1;
 	}
 
-	printk(KERN_CONT "span %s level %s\n",
-		str, sd_level_to_string(sd->level));
+	printk(KERN_CONT "span %s level %s\n", str, sd->name);
 
 	if (!cpu_isset(cpu, sd->span)) {
 		printk(KERN_ERR "ERROR: domain->span does not contain "
@@ -7334,13 +7279,21 @@ struct allmasks {
 };
 
 #if NR_CPUS > 128
-#define SCHED_CPUMASK_ALLOC		1
-#define SCHED_CPUMASK_FREE(v)		kfree(v)
-#define SCHED_CPUMASK_DECLARE(v)	struct allmasks *v
+#define SCHED_CPUMASK_DECLARE(v)	struct allmasks *v
+static inline void sched_cpumask_alloc(struct allmasks **masks)
+{
+	*masks = kmalloc(sizeof(**masks), GFP_KERNEL);
+}
+static inline void sched_cpumask_free(struct allmasks *masks)
+{
+	kfree(masks);
+}
 #else
-#define SCHED_CPUMASK_ALLOC		0
-#define SCHED_CPUMASK_FREE(v)
-#define SCHED_CPUMASK_DECLARE(v)	struct allmasks _v, *v = &_v
+#define SCHED_CPUMASK_DECLARE(v)	struct allmasks _v, *v = &_v
+static inline void sched_cpumask_alloc(struct allmasks **masks)
+{ }
+static inline void sched_cpumask_free(struct allmasks *masks)
+{ }
 #endif
 
 #define SCHED_CPUMASK_VAR(v, a)		cpumask_t *v = (cpumask_t *) \
@@ -7416,9 +7369,8 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		return -ENOMEM;
 	}
 
-#if SCHED_CPUMASK_ALLOC
 	/* get space for all scratch cpumask variables */
-	allmasks = kmalloc(sizeof(*allmasks), GFP_KERNEL);
+	sched_cpumask_alloc(&allmasks);
 	if (!allmasks) {
 		printk(KERN_WARNING "Cannot alloc cpumask array\n");
 		kfree(rd);
@@ -7427,7 +7379,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 		return -ENOMEM;
 	}
-#endif
+
 	tmpmask = (cpumask_t *)allmasks;
 
 
@@ -7681,13 +7633,13 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		cpu_attach_domain(sd, rd, i);
 	}
 
-	SCHED_CPUMASK_FREE((void *)allmasks);
+	sched_cpumask_free(allmasks);
 	return 0;
 
 #ifdef CONFIG_NUMA
 error:
 	free_sched_groups(cpu_map, tmpmask);
-	SCHED_CPUMASK_FREE((void *)allmasks);
+	sched_cpumask_free(allmasks);
 	kfree(rd);
 	return -ENOMEM;
 #endif
@@ -7751,8 +7703,6 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
 	cpumask_t tmpmask;
 	int i;
 
-	unregister_sched_domain_sysctl();
-
 	for_each_cpu_mask_nr(i, *cpu_map)
 		cpu_attach_domain(NULL, &def_root_domain, i);
 	synchronize_sched();
@@ -7830,7 +7780,7 @@ match1:
 		ndoms_cur = 0;
 		doms_new = &fallback_doms;
 		cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
-		dattr_new = NULL;
+		WARN_ON_ONCE(dattr_new);
 	}
 
 	/* Build new domains */
@@ -8490,7 +8440,7 @@ static
 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 {
 	struct cfs_rq *cfs_rq;
-	struct sched_entity *se, *parent_se;
+	struct sched_entity *se;
 	struct rq *rq;
 	int i;
 
@@ -8506,18 +8456,17 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 	for_each_possible_cpu(i) {
 		rq = cpu_rq(i);
 
-		cfs_rq = kmalloc_node(sizeof(struct cfs_rq),
-				      GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
+				      GFP_KERNEL, cpu_to_node(i));
 		if (!cfs_rq)
 			goto err;
 
-		se = kmalloc_node(sizeof(struct sched_entity),
-				  GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+		se = kzalloc_node(sizeof(struct sched_entity),
+				  GFP_KERNEL, cpu_to_node(i));
 		if (!se)
 			goto err;
 
-		parent_se = parent ? parent->se[i] : NULL;
-		init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent_se);
+		init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
 	}
 
 	return 1;
@@ -8578,7 +8527,7 @@ static
 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 {
 	struct rt_rq *rt_rq;
-	struct sched_rt_entity *rt_se, *parent_se;
+	struct sched_rt_entity *rt_se;
 	struct rq *rq;
 	int i;
 
@@ -8595,18 +8544,17 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 	for_each_possible_cpu(i) {
 		rq = cpu_rq(i);
 
-		rt_rq = kmalloc_node(sizeof(struct rt_rq),
-				     GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+		rt_rq = kzalloc_node(sizeof(struct rt_rq),
+				     GFP_KERNEL, cpu_to_node(i));
 		if (!rt_rq)
 			goto err;
 
-		rt_se = kmalloc_node(sizeof(struct sched_rt_entity),
-				     GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
+				     GFP_KERNEL, cpu_to_node(i));
 		if (!rt_se)
 			goto err;
 
-		parent_se = parent ? parent->rt_se[i] : NULL;
-		init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent_se);
+		init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
 	}
 
 	return 1;
@@ -9249,11 +9197,12 @@ struct cgroup_subsys cpu_cgroup_subsys = {
  * (balbir@in.ibm.com).
  */
 
-/* track cpu usage of a group of tasks */
+/* track cpu usage of a group of tasks and its child groups */
 struct cpuacct {
 	struct cgroup_subsys_state css;
 	/* cpuusage holds pointer to a u64-type object on every cpu */
 	u64 *cpuusage;
+	struct cpuacct *parent;
 };
 
 struct cgroup_subsys cpuacct_subsys;
@@ -9287,6 +9236,9 @@ static struct cgroup_subsys_state *cpuacct_create(
 		return ERR_PTR(-ENOMEM);
 	}
 
+	if (cgrp->parent)
+		ca->parent = cgroup_ca(cgrp->parent);
+
 	return &ca->css;
 }
 
@@ -9366,14 +9318,16 @@ static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
 static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
 {
 	struct cpuacct *ca;
+	int cpu;
 
 	if (!cpuacct_subsys.active)
 		return;
 
+	cpu = task_cpu(tsk);
 	ca = task_ca(tsk);
-	if (ca) {
-		u64 *cpuusage = percpu_ptr(ca->cpuusage, task_cpu(tsk));
 
+	for (; ca; ca = ca->parent) {
+		u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
 		*cpuusage += cputime;
 	}
 }
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 26ed8e3d1c15..baf2f17af462 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -53,6 +53,40 @@ static unsigned long nsec_low(unsigned long long nsec)
 
 #define SPLIT_NS(x) nsec_high(x), nsec_low(x)
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static void print_cfs_group_stats(struct seq_file *m, int cpu,
+		struct task_group *tg)
+{
+	struct sched_entity *se = tg->se[cpu];
+	if (!se)
+		return;
+
+#define P(F) \
+	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
+#define PN(F) \
+	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
+
+	PN(se->exec_start);
+	PN(se->vruntime);
+	PN(se->sum_exec_runtime);
+#ifdef CONFIG_SCHEDSTATS
+	PN(se->wait_start);
+	PN(se->sleep_start);
+	PN(se->block_start);
+	PN(se->sleep_max);
+	PN(se->block_max);
+	PN(se->exec_max);
+	PN(se->slice_max);
+	PN(se->wait_max);
+	PN(se->wait_sum);
+	P(se->wait_count);
+#endif
+	P(se->load.weight);
+#undef PN
+#undef P
+}
+#endif
+
 static void
 print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 {
@@ -121,14 +155,9 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 
 #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
 	char path[128] = "";
-	struct cgroup *cgroup = NULL;
 	struct task_group *tg = cfs_rq->tg;
 
-	if (tg)
-		cgroup = tg->css.cgroup;
-
-	if (cgroup)
-		cgroup_path(cgroup, path, sizeof(path));
+	cgroup_path(tg->css.cgroup, path, sizeof(path));
 
 	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
 #else
@@ -168,6 +197,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #ifdef CONFIG_SMP
 	SEQ_printf(m, "  .%-30s: %lu\n", "shares", cfs_rq->shares);
 #endif
+	print_cfs_group_stats(m, cpu, cfs_rq->tg);
 #endif
 }
 
@@ -175,14 +205,9 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 {
 #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED)
 	char path[128] = "";
-	struct cgroup *cgroup = NULL;
 	struct task_group *tg = rt_rq->tg;
 
-	if (tg)
-		cgroup = tg->css.cgroup;
-
-	if (cgroup)
-		cgroup_path(cgroup, path, sizeof(path));
+	cgroup_path(tg->css.cgroup, path, sizeof(path));
 
 	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, path);
 #else
@@ -272,7 +297,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
 	u64 now = ktime_to_ns(ktime_get());
 	int cpu;
 
-	SEQ_printf(m, "Sched Debug Version: v0.07, %s %.*s\n",
+	SEQ_printf(m, "Sched Debug Version: v0.08, %s %.*s\n",
 		init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
 		init_utsname()->version);
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index d9ba9d5f99d6..2bdd44423599 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -537,13 +537,13 @@ static void update_curr_rt(struct rq *rq)
 	for_each_sched_rt_entity(rt_se) {
 		rt_rq = rt_rq_of_se(rt_se);
 
-		spin_lock(&rt_rq->rt_runtime_lock);
 		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
+			spin_lock(&rt_rq->rt_runtime_lock);
 			rt_rq->rt_time += delta_exec;
 			if (sched_rt_runtime_exceeded(rt_rq))
 				resched_task(curr);
+			spin_unlock(&rt_rq->rt_runtime_lock);
 		}
-		spin_unlock(&rt_rq->rt_runtime_lock);
 	}
 }
 
@@ -910,7 +910,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 #define RT_MAX_TRIES 3
 
 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
-static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
+static inline void double_unlock_balance(struct rq *this_rq,
+					 struct rq *busiest);
 
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index e60205722d0c..78db083390f0 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -326,96 +326,89 @@ ftrace_record_ip(unsigned long ip)
 
 static int
 __ftrace_replace_code(struct dyn_ftrace *rec,
-		      unsigned char *old, unsigned char *new, int enable)
+		      unsigned char *nop, int enable)
 {
 	unsigned long ip, fl;
+	unsigned char *call, *old, *new;
 
 	ip = rec->ip;
 
-	if (ftrace_filtered && enable) {
+	/*
+	 * If this record is not to be traced and
+	 * it is not enabled then do nothing.
+	 *
+	 * If this record is not to be traced and
+	 * it is enabled then disabled it.
+	 *
+	 */
+	if (rec->flags & FTRACE_FL_NOTRACE) {
+		if (rec->flags & FTRACE_FL_ENABLED)
+			rec->flags &= ~FTRACE_FL_ENABLED;
+		else
+			return 0;
+
+	} else if (ftrace_filtered && enable) {
 		/*
-		 * If filtering is on:
-		 *
-		 * If this record is set to be filtered and
-		 * is enabled then do nothing.
-		 *
-		 * If this record is set to be filtered and
-		 * it is not enabled, enable it.
-		 *
-		 * If this record is not set to be filtered
-		 * and it is not enabled do nothing.
-		 *
-		 * If this record is set not to trace then
-		 * do nothing.
-		 *
-		 * If this record is set not to trace and
-		 * it is enabled then disable it.
-		 *
-		 * If this record is not set to be filtered and
-		 * it is enabled, disable it.
+		 * Filtering is on:
 		 */
 
-		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
-				   FTRACE_FL_ENABLED);
+		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
 
-		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
-		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
-		    !fl || (fl == FTRACE_FL_NOTRACE))
+		/* Record is filtered and enabled, do nothing */
+		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
 			return 0;
 
-		/*
-		 * If it is enabled disable it,
-		 * otherwise enable it!
-		 */
-		if (fl & FTRACE_FL_ENABLED) {
-			/* swap new and old */
-			new = old;
-			old = ftrace_call_replace(ip, FTRACE_ADDR);
+		/* Record is not filtered and is not enabled do nothing */
+		if (!fl)
+			return 0;
+
+		/* Record is not filtered but enabled, disable it */
+		if (fl == FTRACE_FL_ENABLED)
 			rec->flags &= ~FTRACE_FL_ENABLED;
-		} else {
-			new = ftrace_call_replace(ip, FTRACE_ADDR);
+		else
+		/* Otherwise record is filtered but not enabled, enable it */
 			rec->flags |= FTRACE_FL_ENABLED;
-		}
 	} else {
+		/* Disable or not filtered */
 
 		if (enable) {
-			/*
-			 * If this record is set not to trace and is
-			 * not enabled, do nothing.
-			 */
-			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
-			if (fl == FTRACE_FL_NOTRACE)
-				return 0;
-
-			new = ftrace_call_replace(ip, FTRACE_ADDR);
-		} else
-			old = ftrace_call_replace(ip, FTRACE_ADDR);
-
-		if (enable) {
+			/* if record is enabled, do nothing */
 			if (rec->flags & FTRACE_FL_ENABLED)
 				return 0;
+
 			rec->flags |= FTRACE_FL_ENABLED;
+
 		} else {
+
+			/* if record is not enabled do nothing */
 			if (!(rec->flags & FTRACE_FL_ENABLED))
 				return 0;
+
 			rec->flags &= ~FTRACE_FL_ENABLED;
 		}
 	}
 
+	call = ftrace_call_replace(ip, FTRACE_ADDR);
+
+	if (rec->flags & FTRACE_FL_ENABLED) {
+		old = nop;
+		new = call;
+	} else {
+		old = call;
+		new = nop;
+	}
+
 	return ftrace_modify_code(ip, old, new);
 }
 
 static void ftrace_replace_code(int enable)
 {
 	int i, failed;
-	unsigned char *new = NULL, *old = NULL;
+	unsigned char *nop = NULL;
 	struct dyn_ftrace *rec;
 	struct ftrace_page *pg;
 
-	if (enable)
-		old = ftrace_nop_replace();
-	else
-		new = ftrace_nop_replace();
+	nop = ftrace_nop_replace();
 
 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
 		for (i = 0; i < pg->index; i++) {
@@ -433,7 +426,7 @@ static void ftrace_replace_code(int enable)
 				unfreeze_record(rec);
 			}
 
-			failed = __ftrace_replace_code(rec, old, new, enable);
+			failed = __ftrace_replace_code(rec, nop, enable);
 			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
 				rec->flags |= FTRACE_FL_FAILED;
 				if ((system_state == SYSTEM_BOOTING) ||
@@ -534,8 +527,7 @@ static void ftrace_startup(void)
 
 	mutex_lock(&ftrace_start_lock);
 	ftrace_start++;
-	if (ftrace_start == 1)
-		command |= FTRACE_ENABLE_CALLS;
+	command |= FTRACE_ENABLE_CALLS;
 
 	if (saved_ftrace_func != ftrace_trace_function) {
 		saved_ftrace_func = ftrace_trace_function;
@@ -734,6 +726,9 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 		    ((iter->flags & FTRACE_ITER_FAILURES) &&
 		     !(rec->flags & FTRACE_FL_FAILED)) ||
 
+		    ((iter->flags & FTRACE_ITER_FILTER) &&
+		     !(rec->flags & FTRACE_FL_FILTER)) ||
+
 		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
 		     !(rec->flags & FTRACE_FL_NOTRACE))) {
 			rec = NULL;
@@ -1186,7 +1181,7 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
 
 	mutex_lock(&ftrace_sysctl_lock);
 	mutex_lock(&ftrace_start_lock);
-	if (iter->filtered && ftrace_start && ftrace_enabled)
+	if (ftrace_start && ftrace_enabled)
 		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
 	mutex_unlock(&ftrace_start_lock);
 	mutex_unlock(&ftrace_sysctl_lock);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 036456cbb4f7..f780e9552f91 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -617,6 +617,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 		list_del_init(&page->list);
 		free_buffer_page(page);
 	}
+	mutex_unlock(&buffer->mutex);
 	return -ENOMEM;
 }
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 697eda36b86a..d86e3252f300 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1936,6 +1936,7 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
 		ring_buffer_read_finish(iter->buffer_iter[cpu]);
 	}
 	mutex_unlock(&trace_types_lock);
+	kfree(iter);
 
 	return ERR_PTR(-ENOMEM);
 }
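
For reference, the sched_feat_fops change in kernel/sched.c above follows the standard seq_file single_open() pattern. A minimal sketch of that pattern, outside the patch; the demo_show/demo_open/demo_fops names are hypothetical, not from the kernel tree:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	/* seq_puts()/seq_printf() handle the buffering that the old
	 * sched_feat_read() did by hand with kmalloc()/sprintf()/
	 * simple_read_from_buffer(). */
	seq_puts(m, "hello from seq_file\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, demo_show, NULL);
}

static const struct file_operations demo_fops = {
	.open		= demo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* e.g. registered with:
 * debugfs_create_file("demo", 0444, NULL, NULL, &demo_fops);
 */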