Diffstat (limited to 'kernel')
-rw-r--r--   kernel/Makefile       |   3
-rw-r--r--   kernel/sched.c        | 148
-rw-r--r--   kernel/sched_debug.c  |  51
3 files changed, 96 insertions, 106 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 19fad003b19d..6a212b842d86 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -19,7 +19,6 @@ CFLAGS_REMOVE_mutex-debug.o = -pg
 CFLAGS_REMOVE_rtmutex-debug.o = -pg
 CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_sched_clock.o = -pg
-CFLAGS_REMOVE_sched.o = -pg
 endif
 
 obj-$(CONFIG_FREEZER) += freezer.o
@@ -90,7 +89,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace/
 obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 
-ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y)
+ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
 # needed for x86 only. Why this used to be enabled for all architectures is beyond
 # me. I suspect most platforms don't need this, but until we know that for sure
diff --git a/kernel/sched.c b/kernel/sched.c
index 9b1e79371c20..a4c156d9a4a5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -703,45 +703,18 @@ static __read_mostly char *sched_feat_names[] = {
 
 #undef SCHED_FEAT
 
-static int sched_feat_open(struct inode *inode, struct file *filp)
-{
-        filp->private_data = inode->i_private;
-        return 0;
-}
-
-static ssize_t
-sched_feat_read(struct file *filp, char __user *ubuf,
-                size_t cnt, loff_t *ppos)
+static int sched_feat_show(struct seq_file *m, void *v)
 {
-        char *buf;
-        int r = 0;
-        int len = 0;
         int i;
 
         for (i = 0; sched_feat_names[i]; i++) {
-                len += strlen(sched_feat_names[i]);
-                len += 4;
+                if (!(sysctl_sched_features & (1UL << i)))
+                        seq_puts(m, "NO_");
+                seq_printf(m, "%s ", sched_feat_names[i]);
         }
+        seq_puts(m, "\n");
 
-        buf = kmalloc(len + 2, GFP_KERNEL);
-        if (!buf)
-                return -ENOMEM;
-
-        for (i = 0; sched_feat_names[i]; i++) {
-                if (sysctl_sched_features & (1UL << i))
-                        r += sprintf(buf + r, "%s ", sched_feat_names[i]);
-                else
-                        r += sprintf(buf + r, "NO_%s ", sched_feat_names[i]);
-        }
-
-        r += sprintf(buf + r, "\n");
-        WARN_ON(r >= len + 2);
-
-        r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-
-        kfree(buf);
-
-        return r;
+        return 0;
 }
 
 static ssize_t
@@ -786,10 +759,17 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
         return cnt;
 }
 
+static int sched_feat_open(struct inode *inode, struct file *filp)
+{
+        return single_open(filp, sched_feat_show, NULL);
+}
+
 static struct file_operations sched_feat_fops = {
         .open = sched_feat_open,
-        .read = sched_feat_read,
-        .write = sched_feat_write,
+        .write = sched_feat_write,
+        .read = seq_read,
+        .llseek = seq_lseek,
+        .release = single_release,
 };
 
 static __init int sched_init_debug(void)
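
The two hunks above replace the hand-rolled read() implementation for the sched_features control file with the seq_file single_open() helpers: the show callback only emits text, and seq_read()/seq_lseek()/single_release() take care of buffering and positioning. For reference only, a minimal sketch of the same pattern applied to a hypothetical debugfs file (all names below are illustrative, not part of this patch):

    /* Illustrative sketch, not from this patch: a debugfs file backed by
     * the same single_open()/seq_file pattern adopted above. */
    #include <linux/debugfs.h>
    #include <linux/module.h>
    #include <linux/seq_file.h>

    static int example_show(struct seq_file *m, void *v)
    {
            /* seq_file does the buffer management; just print. */
            seq_printf(m, "hello from %s\n", KBUILD_MODNAME);
            return 0;
    }

    static int example_open(struct inode *inode, struct file *filp)
    {
            return single_open(filp, example_show, inode->i_private);
    }

    static const struct file_operations example_fops = {
            .open = example_open,
            .read = seq_read,
            .llseek = seq_lseek,
            .release = single_release,
    };

    static struct dentry *example_dentry;

    static int __init example_init(void)
    {
            example_dentry = debugfs_create_file("example", 0444, NULL, NULL,
                                                 &example_fops);
            return 0;
    }

    static void __exit example_exit(void)
    {
            debugfs_remove(example_dentry);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");

The write() side of sched_feat_fops is untouched by this pattern; only the read path goes through seq_file, which is what lets the patch drop the manual length accounting and the WARN_ON() in the removed sched_feat_read().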
@@ -6635,28 +6615,6 @@ early_initcall(migration_init);
 
 #ifdef CONFIG_SCHED_DEBUG
 
-static inline const char *sd_level_to_string(enum sched_domain_level lvl)
-{
-        switch (lvl) {
-        case SD_LV_NONE:
-                return "NONE";
-        case SD_LV_SIBLING:
-                return "SIBLING";
-        case SD_LV_MC:
-                return "MC";
-        case SD_LV_CPU:
-                return "CPU";
-        case SD_LV_NODE:
-                return "NODE";
-        case SD_LV_ALLNODES:
-                return "ALLNODES";
-        case SD_LV_MAX:
-                return "MAX";
-
-        }
-        return "MAX";
-}
-
 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                                   cpumask_t *groupmask)
 {
@@ -6676,8 +6634,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                 return -1;
         }
 
-        printk(KERN_CONT "span %s level %s\n",
-                str, sd_level_to_string(sd->level));
+        printk(KERN_CONT "span %s level %s\n", str, sd->name);
 
         if (!cpu_isset(cpu, sd->span)) {
                 printk(KERN_ERR "ERROR: domain->span does not contain "
@@ -7333,13 +7290,21 @@ struct allmasks {
 };
 
 #if NR_CPUS > 128
-#define SCHED_CPUMASK_ALLOC 1
-#define SCHED_CPUMASK_FREE(v) kfree(v)
-#define SCHED_CPUMASK_DECLARE(v) struct allmasks *v
+#define SCHED_CPUMASK_DECLARE(v) struct allmasks *v
+static inline void sched_cpumask_alloc(struct allmasks **masks)
+{
+        *masks = kmalloc(sizeof(**masks), GFP_KERNEL);
+}
+static inline void sched_cpumask_free(struct allmasks *masks)
+{
+        kfree(masks);
+}
 #else
-#define SCHED_CPUMASK_ALLOC 0
-#define SCHED_CPUMASK_FREE(v)
-#define SCHED_CPUMASK_DECLARE(v) struct allmasks _v, *v = &_v
+#define SCHED_CPUMASK_DECLARE(v) struct allmasks _v, *v = &_v
+static inline void sched_cpumask_alloc(struct allmasks **masks)
+{ }
+static inline void sched_cpumask_free(struct allmasks *masks)
+{ }
 #endif
 
 #define SCHED_CPUMASK_VAR(v, a) cpumask_t *v = (cpumask_t *) \
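
The hunks that follow convert __build_sched_domains() to these helpers. Condensed into a rough usage sketch (error paths and the real cpumask bookkeeping are omitted, so this is not a literal copy of the kernel code):

    /* Rough usage sketch of the helpers defined above; illustrative only. */
    static int example_build(void)
    {
            SCHED_CPUMASK_DECLARE(allmasks);  /* pointer, or on-stack struct plus pointer */

            sched_cpumask_alloc(&allmasks);   /* kmalloc() only when NR_CPUS > 128 */
            if (!allmasks)
                    return -ENOMEM;

            /* ... carve SCHED_CPUMASK_VAR() views out of *allmasks ... */

            sched_cpumask_free(allmasks);     /* kfree() only when NR_CPUS > 128 */
            return 0;
    }

Compared with the old SCHED_CPUMASK_ALLOC/SCHED_CPUMASK_FREE macros, both configurations are now ordinary inline functions, which is why the callers below can drop their #if/#endif blocks.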
@@ -7415,9 +7380,8 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
                 return -ENOMEM;
         }
 
-#if SCHED_CPUMASK_ALLOC
         /* get space for all scratch cpumask variables */
-        allmasks = kmalloc(sizeof(*allmasks), GFP_KERNEL);
+        sched_cpumask_alloc(&allmasks);
         if (!allmasks) {
                 printk(KERN_WARNING "Cannot alloc cpumask array\n");
                 kfree(rd);
@@ -7426,7 +7390,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
                 return -ENOMEM;
         }
-#endif
+
         tmpmask = (cpumask_t *)allmasks;
 
 
@@ -7680,13 +7644,13 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
                 cpu_attach_domain(sd, rd, i);
         }
 
-        SCHED_CPUMASK_FREE((void *)allmasks);
+        sched_cpumask_free(allmasks);
         return 0;
 
 #ifdef CONFIG_NUMA
 error:
         free_sched_groups(cpu_map, tmpmask);
-        SCHED_CPUMASK_FREE((void *)allmasks);
+        sched_cpumask_free(allmasks);
         kfree(rd);
         return -ENOMEM;
 #endif
@@ -7750,8 +7714,6 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
         cpumask_t tmpmask;
         int i;
 
-        unregister_sched_domain_sysctl();
-
         for_each_cpu_mask_nr(i, *cpu_map)
                 cpu_attach_domain(NULL, &def_root_domain, i);
         synchronize_sched();
@@ -7829,7 +7791,7 @@ match1:
                 ndoms_cur = 0;
                 doms_new = &fallback_doms;
                 cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
-                dattr_new = NULL;
+                WARN_ON_ONCE(dattr_new);
         }
 
         /* Build new domains */
@@ -8489,7 +8451,7 @@ static
 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 {
         struct cfs_rq *cfs_rq;
-        struct sched_entity *se, *parent_se;
+        struct sched_entity *se;
         struct rq *rq;
         int i;
 
@@ -8505,18 +8467,17 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
         for_each_possible_cpu(i) {
                 rq = cpu_rq(i);
 
-                cfs_rq = kmalloc_node(sizeof(struct cfs_rq),
-                                      GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+                cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
+                                      GFP_KERNEL, cpu_to_node(i));
                 if (!cfs_rq)
                         goto err;
 
-                se = kmalloc_node(sizeof(struct sched_entity),
-                                  GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+                se = kzalloc_node(sizeof(struct sched_entity),
+                                  GFP_KERNEL, cpu_to_node(i));
                 if (!se)
                         goto err;
 
-                parent_se = parent ? parent->se[i] : NULL;
-                init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent_se);
+                init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
         }
 
         return 1;
@@ -8577,7 +8538,7 @@ static
 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 {
         struct rt_rq *rt_rq;
-        struct sched_rt_entity *rt_se, *parent_se;
+        struct sched_rt_entity *rt_se;
         struct rq *rq;
         int i;
 
@@ -8594,18 +8555,17 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
         for_each_possible_cpu(i) {
                 rq = cpu_rq(i);
 
-                rt_rq = kmalloc_node(sizeof(struct rt_rq),
-                                     GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+                rt_rq = kzalloc_node(sizeof(struct rt_rq),
+                                     GFP_KERNEL, cpu_to_node(i));
                 if (!rt_rq)
                         goto err;
 
-                rt_se = kmalloc_node(sizeof(struct sched_rt_entity),
-                                     GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+                rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
+                                     GFP_KERNEL, cpu_to_node(i));
                 if (!rt_se)
                         goto err;
 
-                parent_se = parent ? parent->rt_se[i] : NULL;
-                init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent_se);
+                init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
         }
 
         return 1;
@@ -9248,11 +9208,12 @@ struct cgroup_subsys cpu_cgroup_subsys = {
  * (balbir@in.ibm.com).
  */
 
-/* track cpu usage of a group of tasks */
+/* track cpu usage of a group of tasks and its child groups */
 struct cpuacct {
         struct cgroup_subsys_state css;
         /* cpuusage holds pointer to a u64-type object on every cpu */
         u64 *cpuusage;
+        struct cpuacct *parent;
 };
 
 struct cgroup_subsys cpuacct_subsys;
@@ -9286,6 +9247,9 @@ static struct cgroup_subsys_state *cpuacct_create(
                 return ERR_PTR(-ENOMEM);
         }
 
+        if (cgrp->parent)
+                ca->parent = cgroup_ca(cgrp->parent);
+
         return &ca->css;
 }
 
@@ -9365,14 +9329,16 @@ static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
 static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
 {
         struct cpuacct *ca;
+        int cpu;
 
         if (!cpuacct_subsys.active)
                 return;
 
+        cpu = task_cpu(tsk);
         ca = task_ca(tsk);
-        if (ca) {
-                u64 *cpuusage = percpu_ptr(ca->cpuusage, task_cpu(tsk));
 
+        for (; ca; ca = ca->parent) {
+                u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
                 *cpuusage += cputime;
         }
 }
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 26ed8e3d1c15..baf2f17af462 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -53,6 +53,40 @@ static unsigned long nsec_low(unsigned long long nsec)
 
 #define SPLIT_NS(x) nsec_high(x), nsec_low(x)
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static void print_cfs_group_stats(struct seq_file *m, int cpu,
+                struct task_group *tg)
+{
+        struct sched_entity *se = tg->se[cpu];
+        if (!se)
+                return;
+
+#define P(F) \
+        SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F)
+#define PN(F) \
+        SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
+
+        PN(se->exec_start);
+        PN(se->vruntime);
+        PN(se->sum_exec_runtime);
+#ifdef CONFIG_SCHEDSTATS
+        PN(se->wait_start);
+        PN(se->sleep_start);
+        PN(se->block_start);
+        PN(se->sleep_max);
+        PN(se->block_max);
+        PN(se->exec_max);
+        PN(se->slice_max);
+        PN(se->wait_max);
+        PN(se->wait_sum);
+        P(se->wait_count);
+#endif
+        P(se->load.weight);
+#undef PN
+#undef P
+}
+#endif
+
 static void
 print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 {
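
print_cfs_group_stats() above leans on the preprocessor's stringizing operator (#F), so each field expression doubles as its own label in the output. As an illustration of the expansion (not code from the patch), PN(se->vruntime) becomes roughly:

    SEQ_printf(m, " .%-30s: %lld.%06ld\n", "se->vruntime",
               SPLIT_NS((long long)se->vruntime));

and SPLIT_NS() in turn splits the value into the nsec_high()/nsec_low() pair declared earlier in this file.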
@@ -121,14 +155,9 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 
 #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
         char path[128] = "";
-        struct cgroup *cgroup = NULL;
         struct task_group *tg = cfs_rq->tg;
 
-        if (tg)
-                cgroup = tg->css.cgroup;
-
-        if (cgroup)
-                cgroup_path(cgroup, path, sizeof(path));
+        cgroup_path(tg->css.cgroup, path, sizeof(path));
 
         SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
 #else
@@ -168,6 +197,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #ifdef CONFIG_SMP
         SEQ_printf(m, " .%-30s: %lu\n", "shares", cfs_rq->shares);
 #endif
+        print_cfs_group_stats(m, cpu, cfs_rq->tg);
 #endif
 }
 
@@ -175,14 +205,9 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 {
 #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED)
         char path[128] = "";
-        struct cgroup *cgroup = NULL;
         struct task_group *tg = rt_rq->tg;
 
-        if (tg)
-                cgroup = tg->css.cgroup;
-
-        if (cgroup)
-                cgroup_path(cgroup, path, sizeof(path));
+        cgroup_path(tg->css.cgroup, path, sizeof(path));
 
         SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, path);
 #else
@@ -272,7 +297,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
         u64 now = ktime_to_ns(ktime_get());
         int cpu;
 
-        SEQ_printf(m, "Sched Debug Version: v0.07, %s %.*s\n",
+        SEQ_printf(m, "Sched Debug Version: v0.08, %s %.*s\n",
                 init_utsname()->release,
                 (int)strcspn(init_utsname()->version, " "),
                 init_utsname()->version);