Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 32
1 file changed, 17 insertions(+), 15 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 461ee900d1ac..23b9925a1dfb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -235,17 +235,17 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares);
  * Every task in system belong to this group at bootup.
  */
 struct task_group init_task_group = {
 	.se     = init_sched_entity_p,
 	.cfs_rq = init_cfs_rq_p,
 };
 
 #ifdef CONFIG_FAIR_USER_SCHED
-# define INIT_TASK_GROUP_LOAD	2*NICE_0_LOAD
+# define INIT_TASK_GROUP_LOAD	(2*NICE_0_LOAD)
 #else
 # define INIT_TASK_GROUP_LOAD	NICE_0_LOAD
 #endif
 
 #define MIN_GROUP_SHARES	2
 
 static int init_task_group_load = INIT_TASK_GROUP_LOAD;
 
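
The one visible change in this hunk parenthesizes the INIT_TASK_GROUP_LOAD expansion. A minimal sketch of why that matters, using a nominal stand-in value for NICE_0_LOAD rather than the kernel's real definition:

/* Illustrative only, not part of the patch: an unparenthesized macro body
 * interacts with the caller's operators through precedence/associativity.
 */
#define NICE_0_LOAD		1024		/* nominal value for the sketch */
#define GROUP_LOAD_UNSAFE	2*NICE_0_LOAD	/* old style */
#define GROUP_LOAD_SAFE		(2*NICE_0_LOAD)	/* new style */

int a = 4096 / GROUP_LOAD_UNSAFE;	/* 4096 / 2 * 1024 == 2097152 */
int b = 4096 / GROUP_LOAD_SAFE;		/* 4096 / (2 * 1024) == 2     */
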
@@ -352,8 +352,8 @@ struct rt_rq {
 
 /*
  * We add the notion of a root-domain which will be used to define per-domain
  * variables. Each exclusive cpuset essentially defines an island domain by
  * fully partitioning the member cpus from any other cpuset. Whenever a new
  * exclusive cpuset is created, we also create and attach a new root-domain
  * object.
  *
@@ -365,12 +365,12 @@ struct root_domain {
 	cpumask_t span;
 	cpumask_t online;
 
 	/*
 	 * The "RT overload" flag: it gets set if a CPU has more than
 	 * one runnable RT task.
 	 */
 	cpumask_t rto_mask;
 	atomic_t rto_count;
 };
 
 static struct root_domain def_root_domain;
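
For orientation, a hedged sketch of how the rto_mask/rto_count pair in struct root_domain is meant to be used; the helper names below are invented for illustration and are not part of this patch:

/* Illustrative sketch only.  The idea: a CPU marks itself in rd->rto_mask and
 * bumps rd->rto_count when it has more than one runnable RT task; pull-side
 * code can then cheaply test the counter before scanning the mask.
 */
static inline int rt_overloaded(struct root_domain *rd)
{
	return atomic_read(&rd->rto_count);
}

static inline void rt_set_overload(struct root_domain *rd, int cpu)
{
	cpu_set(cpu, rd->rto_mask);	/* make this CPU visible to pullers */
	atomic_inc(&rd->rto_count);	/* counter is checked before the mask */
}
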
@@ -434,7 +434,7 @@ struct rq {
 	atomic_t nr_iowait;
 
 #ifdef CONFIG_SMP
 	struct root_domain *rd;
 	struct sched_domain *sd;
 
 	/* For active balancing */
@@ -5066,7 +5066,7 @@ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 	if (p->sched_class->set_cpus_allowed)
 		p->sched_class->set_cpus_allowed(p, &new_mask);
 	else {
 		p->cpus_allowed = new_mask;
 		p->nr_cpus_allowed = cpus_weight(new_mask);
 	}
 
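
A hedged usage sketch for the interface touched in this hunk, with the era's pass-by-value cpumask_t API; the task pointer and target CPU are invented for illustration:

/* Illustrative only: pin an existing task to CPU 1.  'some_task' is a
 * hypothetical struct task_struct pointer the caller already holds.
 */
cpumask_t mask = CPU_MASK_NONE;

cpu_set(1, mask);
if (set_cpus_allowed(some_task, mask))
	printk(KERN_WARNING "failed to pin task to CPU 1\n");
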
@@ -5847,9 +5847,10 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	if (rq->rd) {
 		struct root_domain *old_rd = rq->rd;
 
-		for (class = sched_class_highest; class; class = class->next)
+		for (class = sched_class_highest; class; class = class->next) {
 			if (class->leave_domain)
 				class->leave_domain(rq);
+		}
 
 		if (atomic_dec_and_test(&old_rd->refcount))
 			kfree(old_rd);
@@ -5858,9 +5859,10 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	atomic_inc(&rd->refcount);
 	rq->rd = rd;
 
-	for (class = sched_class_highest; class; class = class->next)
+	for (class = sched_class_highest; class; class = class->next) {
 		if (class->join_domain)
 			class->join_domain(rq);
+	}
 
 	spin_unlock_irqrestore(&rq->lock, flags);
 }
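
The two hunks above only add braces; the surrounding logic is a reference-counted handoff of rq->rd between root domains. A condensed, illustrative sketch of that pattern (the helper itself is invented, not the patch's code):

/* Illustrative only: the new domain gains a reference before it is published
 * through rq->rd; the old one is freed only when its last runqueue detaches.
 */
static void switch_root_domain(struct rq *rq, struct root_domain *new_rd)
{
	struct root_domain *old_rd = rq->rd;

	atomic_inc(&new_rd->refcount);
	rq->rd = new_rd;

	if (old_rd && atomic_dec_and_test(&old_rd->refcount))
		kfree(old_rd);
}
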
@@ -5895,11 +5897,11 @@ static struct root_domain *alloc_rootdomain(const cpumask_t *map)
 }
 
 /*
  * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
  * hold the hotplug lock.
  */
-static void cpu_attach_domain(struct sched_domain *sd,
-		struct root_domain *rd, int cpu)
+static void
+cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	struct sched_domain *tmp;
@@ -7095,7 +7097,7 @@ static int rebalance_shares(struct sched_domain *sd, int this_cpu)
 		for_each_cpu_mask(i, sdspan)
 			total_load += tg->cfs_rq[i]->load.weight;
 
 		/* Nothing to do if this group has no load */
 		if (!total_load)
 			continue;
 
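
Each task group keeps one cfs_rq per CPU, so its load on a sched-domain span is the sum of those per-CPU weights, as the loop above computes. A small illustrative sketch of that aggregation, with the group structure trimmed to the single field used here:

/* Illustrative only: 'tg_sketch' is a stand-in, not the kernel's task_group. */
struct tg_sketch {
	struct cfs_rq **cfs_rq;		/* indexed by CPU number */
};

static unsigned long group_load_on_span(struct tg_sketch *tg, cpumask_t sdspan)
{
	unsigned long total_load = 0;
	int i;

	for_each_cpu_mask(i, sdspan)
		total_load += tg->cfs_rq[i]->load.weight;

	return total_load;
}
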