author	Linus Torvalds <torvalds@linux-foundation.org>	2008-03-21 13:05:19 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-03-21 13:05:19 -0400
commit	028011e1391eab27e7bc113c2ac08d4f55584a75 (patch)
tree	679c03b4cd668f07901b2ebafcaf5de7ab7cbe14
parent	37bff62e98f675777e1e76173fe320f04062841d (diff)
parent	22e52b072dd87faa9b2559fe89d4e8f2370f81ca (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-devel
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-devel:
  sched: add arch_update_cpu_topology hook.
  sched: add exported arch_reinit_sched_domains() to header file.
  sched: remove double unlikely from schedule()
  sched: cleanup old and rarely used 'debug' features.
-rw-r--r--	include/linux/sched.h	|  1 +
-rw-r--r--	include/linux/topology.h	|  2 ++
-rw-r--r--	kernel/sched.c	| 17 +++++++++--------
-rw-r--r--	kernel/sched_fair.c	| 14 --------------
4 files changed, 12 insertions(+), 22 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3625fcaf5d0f..fed07d03364e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -790,6 +790,7 @@ struct sched_domain {
 };
 
 extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new);
+extern int arch_reinit_sched_domains(void);
 
 #endif	/* CONFIG_SMP */
 
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 2d8dac8799cf..bd14f8b30f09 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -50,6 +50,8 @@
 	for_each_online_node(node)			\
 		if (nr_cpus_node(node))
 
+void arch_update_cpu_topology(void);
+
 /* Conform to ACPI 2.0 SLIT distance definitions */
 #define LOCAL_DISTANCE		10
 #define REMOTE_DISTANCE		20
diff --git a/kernel/sched.c b/kernel/sched.c
index 3f7c5eb254e2..28c73f07efb2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -594,18 +594,14 @@ enum {
 	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 1,
 	SCHED_FEAT_WAKEUP_PREEMPT	= 2,
 	SCHED_FEAT_START_DEBIT		= 4,
-	SCHED_FEAT_TREE_AVG		= 8,
-	SCHED_FEAT_APPROX_AVG		= 16,
-	SCHED_FEAT_HRTICK		= 32,
-	SCHED_FEAT_DOUBLE_TICK		= 64,
+	SCHED_FEAT_HRTICK		= 8,
+	SCHED_FEAT_DOUBLE_TICK		= 16,
 };
 
 const_debug unsigned int sysctl_sched_features =
 		SCHED_FEAT_NEW_FAIR_SLEEPERS	* 1 |
 		SCHED_FEAT_WAKEUP_PREEMPT	* 1 |
 		SCHED_FEAT_START_DEBIT		* 1 |
-		SCHED_FEAT_TREE_AVG		* 0 |
-		SCHED_FEAT_APPROX_AVG		* 0 |
 		SCHED_FEAT_HRTICK		* 1 |
 		SCHED_FEAT_DOUBLE_TICK		* 0;
 
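Aside: the enum above encodes each scheduler feature as a distinct power-of-two bit, and sysctl_sched_features builds the default mask by multiplying each bit by 1 (on) or 0 (off) before OR-ing; that is why deleting TREE_AVG and APPROX_AVG renumbers HRTICK and DOUBLE_TICK from 32/64 down to 8/16. A minimal standalone sketch of the same idiom (the FEAT_* names and feat_enabled() here are illustrative, not the kernel's):

#include <stdio.h>

enum {
	FEAT_NEW_FAIR_SLEEPERS	= 1,
	FEAT_WAKEUP_PREEMPT	= 2,
	FEAT_START_DEBIT	= 4,
	FEAT_HRTICK		= 8,
	FEAT_DOUBLE_TICK	= 16,
};

/* Default mask, built exactly like sysctl_sched_features above. */
static unsigned int features =
		FEAT_NEW_FAIR_SLEEPERS	* 1 |
		FEAT_WAKEUP_PREEMPT	* 1 |
		FEAT_START_DEBIT	* 1 |
		FEAT_HRTICK		* 1 |
		FEAT_DOUBLE_TICK	* 0;

/* Testing a feature reduces to a single AND against the mask. */
#define feat_enabled(x) ((features & (FEAT_##x)) != 0)

int main(void)
{
	printf("HRTICK: %d\n", feat_enabled(HRTICK));		/* prints 1 */
	printf("DOUBLE_TICK: %d\n", feat_enabled(DOUBLE_TICK));	/* prints 0 */
	return 0;
}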
@@ -3886,7 +3882,7 @@ need_resched_nonpreemptible:
 
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
 		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
-				unlikely(signal_pending(prev)))) {
+				signal_pending(prev))) {
 			prev->state = TASK_RUNNING;
 		} else {
 			deactivate_task(rq, prev, 1);
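The hunk above drops a redundant nested hint: the outer unlikely() already covers the whole conjunction, so wrapping signal_pending(prev) a second time buys nothing. These hints compile down to GCC's __builtin_expect; a short sketch of essentially how the kernel defines and uses them:

/* Branch-prediction hints, in the style of the kernel's macros. */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

int needs_wakeup(int interruptible, int signal_pending)
{
	/*
	 * One hint on the whole condition is enough; nesting another
	 * unlikely() around signal_pending would be redundant, which is
	 * exactly what the change above removes.
	 */
	if (unlikely(interruptible && signal_pending))
		return 1;
	return 0;
}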
@@ -6811,6 +6807,10 @@ static int ndoms_cur; /* number of sched domains in 'doms_cur' */
  */
 static cpumask_t fallback_doms;
 
+void __attribute__((weak)) arch_update_cpu_topology(void)
+{
+}
+
 /*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
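arch_update_cpu_topology() is added here as a weak symbol: common scheduler code links against this empty stub unless an architecture supplies a strong definition of its own, with no #ifdef or registration step. A hedged sketch of what such an override could look like (the arch/foo path and the body are hypothetical, for illustration only):

/* Hypothetical arch file, e.g. arch/foo/kernel/topology.c. */
#include <linux/topology.h>

/*
 * A strong definition with the same signature silently replaces the
 * weak no-op in kernel/sched.c at link time.
 */
void arch_update_cpu_topology(void)
{
	/* Refresh the arch's cpu-to-core/node maps from firmware here. */
}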
@@ -6820,6 +6820,7 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map)
 {
 	int err;
 
+	arch_update_cpu_topology();
 	ndoms_cur = 1;
 	doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
 	if (!doms_cur)
@@ -6924,7 +6925,7 @@ match2:
 }
 
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-static int arch_reinit_sched_domains(void)
+int arch_reinit_sched_domains(void)
 {
 	int err;
 
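Dropping static here pairs with the new extern declaration in include/linux/sched.h, so code outside kernel/sched.c can now trigger a rebuild of the sched-domain hierarchy. An illustrative caller, not part of this patch (foo_topology_event is a made-up name):

#include <linux/kernel.h>
#include <linux/sched.h>

/*
 * Hypothetical arch handler for a topology-change event: update the
 * arch's maps, then rebuild the scheduler domains via the export.
 */
void foo_topology_event(void)
{
	if (arch_reinit_sched_domains())
		printk(KERN_WARNING "sched domain rebuild failed\n");
}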
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index b85cac4b5e25..86a93376282c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -302,11 +302,6 @@ static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
 	return vslice;
 }
 
-static u64 sched_vslice(struct cfs_rq *cfs_rq)
-{
-	return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
-}
-
 static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	return __sched_vslice(cfs_rq->load.weight + se->load.weight,
@@ -504,15 +499,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 	} else
 		vruntime = cfs_rq->min_vruntime;
 
-	if (sched_feat(TREE_AVG)) {
-		struct sched_entity *last = __pick_last_entity(cfs_rq);
-		if (last) {
-			vruntime += last->vruntime;
-			vruntime >>= 1;
-		}
-	} else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
-		vruntime += sched_vslice(cfs_rq)/2;
-
 	/*
 	 * The 'current' period is already promised to the current tasks,
 	 * however the extra weight of the new task will slow them down a
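With APPROX_AVG gone, sched_vslice() lost its only caller, which is why it is deleted from sched_fair.c above. Both heuristics were debug-only and defaulted off (the * 0 entries removed from sysctl_sched_features earlier), so place_entity() now always takes the plain min_vruntime-based placement. For reference, the deleted TREE_AVG branch reduced to an unsigned midpoint between the computed vruntime and the rightmost entity's vruntime:

/* What the deleted TREE_AVG branch computed, extracted as a helper. */
static inline unsigned long long vruntime_midpoint(unsigned long long a,
						   unsigned long long b)
{
	return (a + b) >> 1;	/* wraps, like the original, if a + b overflows */
}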