author     Ingo Molnar <mingo@elte.hu>    2011-07-21 11:59:54 -0400
committer  Ingo Molnar <mingo@elte.hu>    2011-07-21 12:00:01 -0400
commit     994bf1c92270e3d7731ea08f1d1bd7a668314e60 (patch)
tree       4409a21eab486e53fbe350a66e8a4f28b7a720c0 /kernel/sched.c
parent     bd96efe17d945f0bad56d592f8686dc6309905e7 (diff)
parent     cf6ace16a3cd8b728fb0afa68368fd40bbeae19f (diff)
Merge branch 'linus' into sched/core
Merge reason: pick up the latest scheduler fixes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--    kernel/sched.c    245
1 files changed, 199 insertions, 46 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 4380a80c1e7a..b0e7ad796d3b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -292,8 +292,8 @@ static DEFINE_SPINLOCK(task_group_lock);
  * (The default weight is 1024 - so there's no practical
  * limitation from this.)
  */
-#define MIN_SHARES	2
-#define MAX_SHARES	(1UL << (18 + SCHED_LOAD_RESOLUTION))
+#define MIN_SHARES	(1UL << 1)
+#define MAX_SHARES	(1UL << 18)
 
 static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
 #endif
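The comment above argues that these bounds never bite in practice because the default weight is 1024; the new definitions merely spell the same limits as shifts. A trivial standalone check of the arithmetic (ordinary C, not kernel code):

#include <stdio.h>

int main(void)
{
	/* 1UL << 1 and 1UL << 18 are the merged MIN_SHARES/MAX_SHARES values */
	printf("MIN_SHARES = %lu, MAX_SHARES = %lu, default weight = %d\n",
	       1UL << 1, 1UL << 18, 1024);	/* prints 2, 262144, 1024 */
	return 0;
}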
@@ -2544,13 +2544,9 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
 }
 
 #ifdef CONFIG_SMP
-static void sched_ttwu_pending(void)
+static void sched_ttwu_do_pending(struct task_struct *list)
 {
 	struct rq *rq = this_rq();
-	struct task_struct *list = xchg(&rq->wake_list, NULL);
-
-	if (!list)
-		return;
 
 	raw_spin_lock(&rq->lock);
 
@@ -2563,9 +2559,45 @@ static void sched_ttwu_pending(void)
 	raw_spin_unlock(&rq->lock);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+
+static void sched_ttwu_pending(void)
+{
+	struct rq *rq = this_rq();
+	struct task_struct *list = xchg(&rq->wake_list, NULL);
+
+	if (!list)
+		return;
+
+	sched_ttwu_do_pending(list);
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
 void scheduler_ipi(void)
 {
-	sched_ttwu_pending();
+	struct rq *rq = this_rq();
+	struct task_struct *list = xchg(&rq->wake_list, NULL);
+
+	if (!list)
+		return;
+
+	/*
+	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
+	 * traditionally all their work was done from the interrupt return
+	 * path. Now that we actually do some work, we need to make sure
+	 * we do call them.
+	 *
+	 * Some archs already do call them, luckily irq_enter/exit nest
+	 * properly.
+	 *
+	 * Arguably we should visit all archs and update all handlers,
+	 * however a fair share of IPIs are still resched only so this would
+	 * somewhat pessimize the simple resched case.
+	 */
+	irq_enter();
+	sched_ttwu_do_pending(list);
+	irq_exit();
 }
 
 static void ttwu_queue_remote(struct task_struct *p, int cpu)
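The remote-wakeup path above hinges on a single atomic exchange: the IPI side grabs the entire rq->wake_list with xchg() and then walks it privately, so queueing and draining never contend on a lock. A rough userspace analogue of that handoff using C11 atomics (types and names here are invented for illustration, not kernel APIs):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct wake_node {
	int			task_id;
	struct wake_node	*next;
};

/* stand-in for rq->wake_list */
static _Atomic(struct wake_node *) wake_list;

/* stand-in for ttwu_queue_remote(): push one node onto the list */
static void queue_wakeup(struct wake_node *n)
{
	struct wake_node *head = atomic_load(&wake_list);

	do {
		n->next = head;
	} while (!atomic_compare_exchange_weak(&wake_list, &head, n));
}

/* stand-in for scheduler_ipi(): grab the whole list in one exchange, then drain */
static void drain_wakeups(void)
{
	struct wake_node *list = atomic_exchange(&wake_list, NULL);

	if (!list)
		return;		/* nothing queued: cheap early exit */

	while (list) {
		struct wake_node *next = list->next;

		printf("waking task %d\n", list->task_id);
		free(list);
		list = next;
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct wake_node *n = malloc(sizeof(*n));

		n->task_id = i;
		queue_wakeup(n);
	}
	drain_wakeups();
	return 0;
}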
@@ -6550,7 +6582,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 			break;
 		}
 
-		if (!group->cpu_power) {
+		if (!group->sgp->power) {
 			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: domain->cpu_power not "
 					"set\n");
@@ -6574,9 +6606,9 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
 
 		printk(KERN_CONT " %s", str);
-		if (group->cpu_power != SCHED_POWER_SCALE) {
+		if (group->sgp->power != SCHED_POWER_SCALE) {
 			printk(KERN_CONT " (cpu_power = %d)",
-				group->cpu_power);
+				group->sgp->power);
 		}
 
 		group = group->next;
@@ -6767,11 +6799,39 @@ static struct root_domain *alloc_rootdomain(void)
 	return rd;
 }
 
+static void free_sched_groups(struct sched_group *sg, int free_sgp)
+{
+	struct sched_group *tmp, *first;
+
+	if (!sg)
+		return;
+
+	first = sg;
+	do {
+		tmp = sg->next;
+
+		if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
+			kfree(sg->sgp);
+
+		kfree(sg);
+		sg = tmp;
+	} while (sg != first);
+}
+
 static void free_sched_domain(struct rcu_head *rcu)
 {
 	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
-	if (atomic_dec_and_test(&sd->groups->ref))
+
+	/*
+	 * If its an overlapping domain it has private groups, iterate and
+	 * nuke them all.
+	 */
+	if (sd->flags & SD_OVERLAP) {
+		free_sched_groups(sd->groups, 1);
+	} else if (atomic_dec_and_test(&sd->groups->ref)) {
+		kfree(sd->groups->sgp);
 		kfree(sd->groups);
+	}
 	kfree(sd);
 }
 
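free_sched_groups() above walks a circular, singly linked group list exactly once, dropping a shared, reference-counted power structure along the way. A small standalone model of that ownership scheme (plain C, with a plain int refcount standing in for atomic_t):

#include <stdio.h>
#include <stdlib.h>

struct power {
	int ref;		/* stands in for atomic_t sgp->ref */
};

struct group {
	struct power	*pwr;
	struct group	*next;	/* circular: last->next == first */
};

/* mirrors the shape of free_sched_groups(sg, free_sgp) */
static void free_groups(struct group *g, int free_pwr)
{
	struct group *tmp, *first;

	if (!g)
		return;

	first = g;
	do {
		tmp = g->next;

		if (free_pwr && --g->pwr->ref == 0)
			free(g->pwr);

		free(g);
		g = tmp;
		/* like the kernel loop, this only compares the stale 'first'
		 * pointer value; it never dereferences the freed node */
	} while (g != first);
}

int main(void)
{
	/* two groups sharing one power structure, as overlapping groups may */
	struct power *p = malloc(sizeof(*p));
	struct group *a = malloc(sizeof(*a));
	struct group *b = malloc(sizeof(*b));

	p->ref = 2;
	a->pwr = b->pwr = p;
	a->next = b;
	b->next = a;

	free_groups(a, 1);	/* frees a, b and, once ref hits 0, p */
	return 0;
}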
@@ -6938,6 +6998,7 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
 struct sd_data {
 	struct sched_domain **__percpu sd;
 	struct sched_group **__percpu sg;
+	struct sched_group_power **__percpu sgp;
 };
 
 struct s_data {
@@ -6957,15 +7018,73 @@ struct sched_domain_topology_level;
 typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
 
+#define SDTL_OVERLAP	0x01
+
 struct sched_domain_topology_level {
 	sched_domain_init_f init;
 	sched_domain_mask_f mask;
+	int		    flags;
 	struct sd_data      data;
 };
 
-/*
- * Assumes the sched_domain tree is fully constructed
- */
+static int
+build_overlap_sched_groups(struct sched_domain *sd, int cpu)
+{
+	struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
+	const struct cpumask *span = sched_domain_span(sd);
+	struct cpumask *covered = sched_domains_tmpmask;
+	struct sd_data *sdd = sd->private;
+	struct sched_domain *child;
+	int i;
+
+	cpumask_clear(covered);
+
+	for_each_cpu(i, span) {
+		struct cpumask *sg_span;
+
+		if (cpumask_test_cpu(i, covered))
+			continue;
+
+		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
+				GFP_KERNEL, cpu_to_node(i));
+
+		if (!sg)
+			goto fail;
+
+		sg_span = sched_group_cpus(sg);
+
+		child = *per_cpu_ptr(sdd->sd, i);
+		if (child->child) {
+			child = child->child;
+			cpumask_copy(sg_span, sched_domain_span(child));
+		} else
+			cpumask_set_cpu(i, sg_span);
+
+		cpumask_or(covered, covered, sg_span);
+
+		sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
+		atomic_inc(&sg->sgp->ref);
+
+		if (cpumask_test_cpu(cpu, sg_span))
+			groups = sg;
+
+		if (!first)
+			first = sg;
+		if (last)
+			last->next = sg;
+		last = sg;
+		last->next = first;
+	}
+	sd->groups = groups;
+
+	return 0;
+
+fail:
+	free_sched_groups(first, 0);
+
+	return -ENOMEM;
+}
+
 static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
 {
 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
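build_overlap_sched_groups() partitions the domain span by visiting each CPU, skipping CPUs that are already covered, taking the child domain's span as the new group, and ORing that span into a "covered" mask. The same covering idea, sketched with 64-bit bitmasks instead of struct cpumask (the child spans below are made-up values for illustration):

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 8

int main(void)
{
	/* hypothetical child-domain span for each CPU (bit i = CPU i) */
	uint64_t child_span[NR_CPUS] = {
		0x0f, 0x0f, 0x0f, 0x0f,		/* CPUs 0-3 share a child domain */
		0xf0, 0xf0, 0xf0, 0xf0,		/* CPUs 4-7 share another */
	};
	uint64_t span = 0xff;			/* domain span: CPUs 0-7 */
	uint64_t covered = 0;

	for (int i = 0; i < NR_CPUS; i++) {
		if (!(span & (1ULL << i)))
			continue;
		if (covered & (1ULL << i))	/* already part of some group */
			continue;

		uint64_t sg_span = child_span[i];	/* the new group's CPUs */
		covered |= sg_span;

		printf("group led by CPU %d covers mask 0x%02llx\n",
		       i, (unsigned long long)sg_span);
	}
	return 0;
}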
@@ -6974,24 +7093,24 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
 	if (child)
 		cpu = cpumask_first(sched_domain_span(child));
 
-	if (sg)
+	if (sg) {
 		*sg = *per_cpu_ptr(sdd->sg, cpu);
+		(*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
+		atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
+	}
 
 	return cpu;
 }
 
 /*
- * build_sched_groups takes the cpumask we wish to span, and a pointer
- * to a function which identifies what group (along with sched group) a CPU
- * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids
- * (due to the fact that we keep track of groups covered with a struct cpumask).
- *
  * build_sched_groups will build a circular linked list of the groups
  * covered by the given span, and will set each group's ->cpumask correctly,
  * and ->cpu_power to 0.
+ *
+ * Assumes the sched_domain tree is fully constructed
  */
-static void
-build_sched_groups(struct sched_domain *sd)
+static int
+build_sched_groups(struct sched_domain *sd, int cpu)
 {
 	struct sched_group *first = NULL, *last = NULL;
 	struct sd_data *sdd = sd->private;
@@ -6999,6 +7118,12 @@ build_sched_groups(struct sched_domain *sd)
 	struct cpumask *covered;
 	int i;
 
+	get_group(cpu, sdd, &sd->groups);
+	atomic_inc(&sd->groups->ref);
+
+	if (cpu != cpumask_first(sched_domain_span(sd)))
+		return 0;
+
 	lockdep_assert_held(&sched_domains_mutex);
 	covered = sched_domains_tmpmask;
 
@@ -7013,7 +7138,7 @@ build_sched_groups(struct sched_domain *sd)
 			continue;
 
 		cpumask_clear(sched_group_cpus(sg));
-		sg->cpu_power = 0;
+		sg->sgp->power = 0;
 
 		for_each_cpu(j, span) {
 			if (get_group(j, sdd, NULL) != group)
@@ -7030,6 +7155,8 @@ build_sched_groups(struct sched_domain *sd)
 		last = sg;
 	}
 	last->next = first;
+
+	return 0;
 }
 
 /*
@@ -7044,12 +7171,17 @@ build_sched_groups(struct sched_domain *sd)
  */
 static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 {
-	WARN_ON(!sd || !sd->groups);
+	struct sched_group *sg = sd->groups;
 
-	if (cpu != group_first_cpu(sd->groups))
-		return;
+	WARN_ON(!sd || !sg);
+
+	do {
+		sg->group_weight = cpumask_weight(sched_group_cpus(sg));
+		sg = sg->next;
+	} while (sg != sd->groups);
 
-	sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
+	if (cpu != group_first_cpu(sg))
+		return;
 
 	update_group_power(sd, cpu);
 }
@@ -7170,15 +7302,15 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 static void claim_allocations(int cpu, struct sched_domain *sd)
 {
 	struct sd_data *sdd = sd->private;
-	struct sched_group *sg = sd->groups;
 
 	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
 	*per_cpu_ptr(sdd->sd, cpu) = NULL;
 
-	if (cpu == cpumask_first(sched_group_cpus(sg))) {
-		WARN_ON_ONCE(*per_cpu_ptr(sdd->sg, cpu) != sg);
+	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
 		*per_cpu_ptr(sdd->sg, cpu) = NULL;
-	}
+
+	if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
+		*per_cpu_ptr(sdd->sgp, cpu) = NULL;
 }
 
 #ifdef CONFIG_SCHED_SMT
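claim_allocations() now uses the reference counts themselves to decide ownership: per-cpu scratch entries that picked up a reference during group construction are NULLed so that the generic __sdt_free() path leaves them alone. A toy version of that claim-or-free split (plain C, invented names):

#include <stdio.h>
#include <stdlib.h>

#define NR 4

struct obj {
	int ref;	/* stands in for atomic_t ref */
};

int main(void)
{
	struct obj *scratch[NR];

	for (int i = 0; i < NR; i++)
		scratch[i] = calloc(1, sizeof(struct obj));

	/* "construction" takes a reference on the entries it actually uses */
	scratch[0]->ref = 1;
	scratch[2]->ref = 1;

	/* claim phase: referenced entries leave the scratch array; in the real
	 * code they are now owned (and later freed) by the domain tree */
	for (int i = 0; i < NR; i++) {
		if (scratch[i]->ref)
			scratch[i] = NULL;
	}

	/* generic teardown: frees only the unclaimed entries */
	for (int i = 0; i < NR; i++) {
		printf("slot %d: %s\n", i, scratch[i] ? "freed" : "claimed");
		free(scratch[i]);	/* free(NULL) is a no-op */
	}
	return 0;
}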
@@ -7203,7 +7335,7 @@ static struct sched_domain_topology_level default_topology[] = {
 #endif
 	{ sd_init_CPU, cpu_cpu_mask, },
 #ifdef CONFIG_NUMA
-	{ sd_init_NODE, cpu_node_mask, },
+	{ sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, },
 	{ sd_init_ALLNODES, cpu_allnodes_mask, },
 #endif
 	{ NULL, },
@@ -7227,9 +7359,14 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 		if (!sdd->sg)
 			return -ENOMEM;
 
+		sdd->sgp = alloc_percpu(struct sched_group_power *);
+		if (!sdd->sgp)
+			return -ENOMEM;
+
 		for_each_cpu(j, cpu_map) {
 			struct sched_domain *sd;
 			struct sched_group *sg;
+			struct sched_group_power *sgp;
 
 			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
 					GFP_KERNEL, cpu_to_node(j));
@@ -7244,6 +7381,13 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 				return -ENOMEM;
 
 			*per_cpu_ptr(sdd->sg, j) = sg;
+
+			sgp = kzalloc_node(sizeof(struct sched_group_power),
+					GFP_KERNEL, cpu_to_node(j));
+			if (!sgp)
+				return -ENOMEM;
+
+			*per_cpu_ptr(sdd->sgp, j) = sgp;
 		}
 	}
 
@@ -7259,11 +7403,15 @@ static void __sdt_free(const struct cpumask *cpu_map)
 		struct sd_data *sdd = &tl->data;
 
 		for_each_cpu(j, cpu_map) {
-			kfree(*per_cpu_ptr(sdd->sd, j));
+			struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
+			if (sd && (sd->flags & SD_OVERLAP))
+				free_sched_groups(sd->groups, 0);
 			kfree(*per_cpu_ptr(sdd->sg, j));
+			kfree(*per_cpu_ptr(sdd->sgp, j));
 		}
 		free_percpu(sdd->sd);
 		free_percpu(sdd->sg);
+		free_percpu(sdd->sgp);
 	}
 }
 
@@ -7309,8 +7457,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 		struct sched_domain_topology_level *tl;
 
 		sd = NULL;
-		for (tl = sched_domain_topology; tl->init; tl++)
+		for (tl = sched_domain_topology; tl->init; tl++) {
 			sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
+			if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
+				sd->flags |= SD_OVERLAP;
+			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
+				break;
+		}
 
 		while (sd->child)
 			sd = sd->child;
@@ -7322,13 +7475,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 	for_each_cpu(i, cpu_map) {
 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
 			sd->span_weight = cpumask_weight(sched_domain_span(sd));
-			get_group(i, sd->private, &sd->groups);
-			atomic_inc(&sd->groups->ref);
-
-			if (i != cpumask_first(sched_domain_span(sd)))
-				continue;
-
-			build_sched_groups(sd);
+			if (sd->flags & SD_OVERLAP) {
+				if (build_overlap_sched_groups(sd, i))
+					goto error;
+			} else {
+				if (build_sched_groups(sd, i))
+					goto error;
+			}
 		}
 	}
 
@@ -7750,6 +7903,9 @@ static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 #endif
 #endif
 	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
+#ifndef CONFIG_64BIT
+	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
+#endif
 }
 
 static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
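min_vruntime_copy exists because a 64-bit min_vruntime cannot be loaded atomically on 32-bit: the updater stores the value, then the copy, with a write barrier in between, and readers retry until both reads agree. A userspace sketch of that retry handshake using C11 fences (the kernel uses smp_wmb()/smp_rmb() and its own accessors; this is only an analogy):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t min_vruntime;
static uint64_t min_vruntime_copy;

/* writer: value first, then the copy, ordered by a release fence */
static void update_min_vruntime(uint64_t v)
{
	min_vruntime = v;
	atomic_thread_fence(memory_order_release);
	min_vruntime_copy = v;
}

/* reader: retry until the value and its copy match */
static uint64_t read_min_vruntime(void)
{
	uint64_t v, copy;

	do {
		copy = min_vruntime_copy;
		atomic_thread_fence(memory_order_acquire);
		v = min_vruntime;
	} while (v != copy);

	return v;
}

int main(void)
{
	update_min_vruntime((uint64_t)(-(1LL << 20)));	/* same initial value as init_cfs_rq() */
	printf("min_vruntime = %llu\n", (unsigned long long)read_min_vruntime());
	return 0;
}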
@@ -8441,10 +8597,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 	if (!tg->se[0])
 		return -EINVAL;
 
-	if (shares < MIN_SHARES)
-		shares = MIN_SHARES;
-	else if (shares > MAX_SHARES)
-		shares = MAX_SHARES;
+	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
 
 	mutex_lock(&shares_mutex);
 	if (tg->shares == shares)
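The open-coded bounds check becomes a single clamp() of the user-supplied value into [MIN_SHARES, MAX_SHARES], with scale_load() applied to both bounds. A standalone approximation with a local clamp macro and scale_load() stubbed out as the identity (an assumption made for the sketch; the kernel helper scales by the configured load resolution):

#include <stdio.h>

#define MIN_SHARES	(1UL << 1)
#define MAX_SHARES	(1UL << 18)

/* local stand-ins: the kernel has its own clamp() and scale_load() */
#define scale_load(x)	(x)
#define clamp(val, lo, hi) \
	((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

int main(void)
{
	unsigned long requests[] = { 0, 2, 1024, 1UL << 20 };

	for (int i = 0; i < 4; i++) {
		unsigned long shares = clamp(requests[i],
					     scale_load(MIN_SHARES),
					     scale_load(MAX_SHARES));
		printf("requested %lu -> clamped to %lu\n", requests[i], shares);
	}
	return 0;	/* 0 and 2 clamp to 2, 1024 passes through, 1<<20 clamps to 262144 */
}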