-rw-r--r--  Documentation/scheduler/sched-arch.txt |   4
-rw-r--r--  arch/x86/Kconfig                       |   2
-rw-r--r--  kernel/Makefile                        |   1
-rw-r--r--  kernel/sched.c                         | 107
-rw-r--r--  kernel/sched_debug.c                   |  14
5 files changed, 35 insertions, 93 deletions
diff --git a/Documentation/scheduler/sched-arch.txt b/Documentation/scheduler/sched-arch.txt
index 941615a9769b..d43dbcbd163b 100644
--- a/Documentation/scheduler/sched-arch.txt
+++ b/Documentation/scheduler/sched-arch.txt
@@ -8,7 +8,7 @@ Context switch
 By default, the switch_to arch function is called with the runqueue
 locked. This is usually not a problem unless switch_to may need to
 take the runqueue lock. This is usually due to a wake up operation in
-the context switch. See include/asm-ia64/system.h for an example.
+the context switch. See arch/ia64/include/asm/system.h for an example.
 
 To request the scheduler call switch_to with the runqueue unlocked,
 you must `#define __ARCH_WANT_UNLOCKED_CTXSW` in a header file
@@ -23,7 +23,7 @@ disabled. Interrupts may be enabled over the call if it is likely to
 introduce a significant interrupt latency by adding the line
 `#define __ARCH_WANT_INTERRUPTS_ON_CTXSW` in the same place as for
 unlocked context switches. This define also implies
-`__ARCH_WANT_UNLOCKED_CTXSW`. See include/asm-arm/system.h for an
+`__ARCH_WANT_UNLOCKED_CTXSW`. See arch/arm/include/asm/system.h for an
 example.
 
 
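Note: the two macros discussed in the documentation hunks above are opt-in defines provided by each architecture's system header. A minimal, hypothetical header fragment for illustration only (not part of this patch; the guard name is made up):

/* Hypothetical arch/<arch>/include/asm/system.h fragment. */
#ifndef _ASM_EXAMPLE_SYSTEM_H
#define _ASM_EXAMPLE_SYSTEM_H

/* Ask the scheduler to call switch_to() with the runqueue unlocked. */
#define __ARCH_WANT_UNLOCKED_CTXSW

/*
 * Keep interrupts enabled across the context switch; as the text above
 * notes, this also implies __ARCH_WANT_UNLOCKED_CTXSW.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

#endif /* _ASM_EXAMPLE_SYSTEM_H */
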
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4cf0ab13d187..1d5550d19b66 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -367,7 +367,7 @@ config X86_RDC321X
 config SCHED_NO_NO_OMIT_FRAME_POINTER
 	def_bool y
 	prompt "Single-depth WCHAN output"
-	depends on X86_32
+	depends on X86
 	help
 	  Calculate simpler /proc/<PID>/wchan values. If this option
 	  is disabled then wchan values will recurse back to the
diff --git a/kernel/Makefile b/kernel/Makefile
index 9a3ec66a9d84..e1af03972148 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -21,7 +21,6 @@ CFLAGS_REMOVE_mutex-debug.o = -pg
 CFLAGS_REMOVE_rtmutex-debug.o = -pg
 CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_sched_clock.o = -pg
-CFLAGS_REMOVE_sched.o = -mno-spe -pg
 endif
 
 obj-$(CONFIG_FREEZER) += freezer.o
diff --git a/kernel/sched.c b/kernel/sched.c
index 57c933ffbee1..b24e57a10f6f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -703,45 +703,18 @@ static __read_mostly char *sched_feat_names[] = {
 
 #undef SCHED_FEAT
 
-static int sched_feat_open(struct inode *inode, struct file *filp)
-{
-	filp->private_data = inode->i_private;
-	return 0;
-}
-
-static ssize_t
-sched_feat_read(struct file *filp, char __user *ubuf,
-		size_t cnt, loff_t *ppos)
+static int sched_feat_show(struct seq_file *m, void *v)
 {
-	char *buf;
-	int r = 0;
-	int len = 0;
 	int i;
 
 	for (i = 0; sched_feat_names[i]; i++) {
-		len += strlen(sched_feat_names[i]);
-		len += 4;
+		if (!(sysctl_sched_features & (1UL << i)))
+			seq_puts(m, "NO_");
+		seq_printf(m, "%s ", sched_feat_names[i]);
 	}
+	seq_puts(m, "\n");
 
-	buf = kmalloc(len + 2, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	for (i = 0; sched_feat_names[i]; i++) {
-		if (sysctl_sched_features & (1UL << i))
-			r += sprintf(buf + r, "%s ", sched_feat_names[i]);
-		else
-			r += sprintf(buf + r, "NO_%s ", sched_feat_names[i]);
-	}
-
-	r += sprintf(buf + r, "\n");
-	WARN_ON(r >= len + 2);
-
-	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-
-	kfree(buf);
-
-	return r;
+	return 0;
 }
 
 static ssize_t
@@ -786,10 +759,17 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static int sched_feat_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, sched_feat_show, NULL);
+}
+
 static struct file_operations sched_feat_fops = {
 	.open = sched_feat_open,
-	.read = sched_feat_read,
-	.write = sched_feat_write,
+	.write = sched_feat_write,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
 };
 
 static __init int sched_init_debug(void)
@@ -6624,28 +6604,6 @@ early_initcall(migration_init);
 
 #ifdef CONFIG_SCHED_DEBUG
 
-static inline const char *sd_level_to_string(enum sched_domain_level lvl)
-{
-	switch (lvl) {
-	case SD_LV_NONE:
-		return "NONE";
-	case SD_LV_SIBLING:
-		return "SIBLING";
-	case SD_LV_MC:
-		return "MC";
-	case SD_LV_CPU:
-		return "CPU";
-	case SD_LV_NODE:
-		return "NODE";
-	case SD_LV_ALLNODES:
-		return "ALLNODES";
-	case SD_LV_MAX:
-		return "MAX";
-
-	}
-	return "MAX";
-}
-
 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 				  cpumask_t *groupmask)
 {
@@ -6665,8 +6623,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 		return -1;
 	}
 
-	printk(KERN_CONT "span %s level %s\n",
-		str, sd_level_to_string(sd->level));
+	printk(KERN_CONT "span %s level %s\n", str, sd->name);
 
 	if (!cpu_isset(cpu, sd->span)) {
 		printk(KERN_ERR "ERROR: domain->span does not contain "
@@ -7739,8 +7696,6 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
 	cpumask_t tmpmask;
 	int i;
 
-	unregister_sched_domain_sysctl();
-
 	for_each_cpu_mask_nr(i, *cpu_map)
 		cpu_attach_domain(NULL, &def_root_domain, i);
 	synchronize_sched();
@@ -7817,7 +7772,7 @@ match1:
 		ndoms_cur = 0;
 		doms_new = &fallback_doms;
 		cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
-		dattr_new = NULL;
+		WARN_ON_ONCE(dattr_new);
 	}
 
 	/* Build new domains */
@@ -8477,7 +8432,7 @@ static
 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 {
 	struct cfs_rq *cfs_rq;
-	struct sched_entity *se, *parent_se;
+	struct sched_entity *se;
 	struct rq *rq;
 	int i;
 
@@ -8493,18 +8448,17 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 	for_each_possible_cpu(i) {
 		rq = cpu_rq(i);
 
-		cfs_rq = kmalloc_node(sizeof(struct cfs_rq),
-				      GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
+				      GFP_KERNEL, cpu_to_node(i));
 		if (!cfs_rq)
 			goto err;
 
-		se = kmalloc_node(sizeof(struct sched_entity),
-				  GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+		se = kzalloc_node(sizeof(struct sched_entity),
+				  GFP_KERNEL, cpu_to_node(i));
 		if (!se)
 			goto err;
 
-		parent_se = parent ? parent->se[i] : NULL;
-		init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent_se);
+		init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
 	}
 
 	return 1;
@@ -8565,7 +8519,7 @@ static
 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 {
 	struct rt_rq *rt_rq;
-	struct sched_rt_entity *rt_se, *parent_se;
+	struct sched_rt_entity *rt_se;
 	struct rq *rq;
 	int i;
 
@@ -8582,18 +8536,17 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 	for_each_possible_cpu(i) {
 		rq = cpu_rq(i);
 
-		rt_rq = kmalloc_node(sizeof(struct rt_rq),
-				     GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+		rt_rq = kzalloc_node(sizeof(struct rt_rq),
+				     GFP_KERNEL, cpu_to_node(i));
 		if (!rt_rq)
 			goto err;
 
-		rt_se = kmalloc_node(sizeof(struct sched_rt_entity),
-				     GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
+				     GFP_KERNEL, cpu_to_node(i));
 		if (!rt_se)
 			goto err;
 
-		parent_se = parent ? parent->rt_se[i] : NULL;
-		init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent_se);
+		init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
 	}
 
 	return 1;
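Note: the sched_features hunks above replace a hand-rolled read() handler with the seq_file single_open() pattern. A minimal sketch of that pattern, assuming a hypothetical debugfs file named "example" (the example_* names and the init hook are made up; only the seq_file/debugfs calls mirror what the patch does):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	/* Emit everything in one pass; seq_file handles buffering and offsets. */
	seq_puts(m, "hello from seq_file\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, example_show, NULL);
}

static const struct file_operations example_fops = {
	.open = example_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init example_init(void)
{
	/* Expose the file under debugfs, as sched_features is. */
	debugfs_create_file("example", 0444, NULL, NULL, &example_fops);
	return 0;
}
late_initcall(example_init);
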
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 5ae17762ec32..d25cefe3f0eb 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -121,14 +121,9 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 
 #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
 	char path[128] = "";
-	struct cgroup *cgroup = NULL;
 	struct task_group *tg = cfs_rq->tg;
 
-	if (tg)
-		cgroup = tg->css.cgroup;
-
-	if (cgroup)
-		cgroup_path(cgroup, path, sizeof(path));
+	cgroup_path(tg->css.cgroup, path, sizeof(path));
 
 	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
 #else
@@ -193,14 +188,9 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 {
 #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED)
 	char path[128] = "";
-	struct cgroup *cgroup = NULL;
 	struct task_group *tg = rt_rq->tg;
 
-	if (tg)
-		cgroup = tg->css.cgroup;
-
-	if (cgroup)
-		cgroup_path(cgroup, path, sizeof(path));
+	cgroup_path(tg->css.cgroup, path, sizeof(path));
 
 	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, path);
 #else
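Note on the kernel/sched.c allocation hunks above: kzalloc_node() is simply the zeroing form of kmalloc_node(), so dropping __GFP_ZERO while switching allocators keeps behaviour identical. A small illustration (the helper name is hypothetical):

#include <linux/slab.h>

/*
 * kzalloc_node(size, flags, node) is equivalent to
 * kmalloc_node(size, flags | __GFP_ZERO, node).
 */
static void *zeroed_node_alloc(size_t size, int node)
{
	return kzalloc_node(size, GFP_KERNEL, node);
}
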