Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 80
1 file changed, 52 insertions, 28 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index e4bf4477aee5..ef3f28b334ea 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -67,6 +67,7 @@
 #include <linux/pagemap.h>
 #include <linux/hrtimer.h>
 #include <linux/tick.h>
+#include <linux/bootmem.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
@@ -276,17 +277,11 @@ struct task_group {
 static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
 /* Default task group's cfs_rq on each cpu */
 static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
-
-static struct sched_entity *init_sched_entity_p[NR_CPUS];
-static struct cfs_rq *init_cfs_rq_p[NR_CPUS];
 #endif
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
 static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
-
-static struct sched_rt_entity *init_sched_rt_entity_p[NR_CPUS];
-static struct rt_rq *init_rt_rq_p[NR_CPUS];
 #endif
 
 /* task_group_lock serializes add/remove of task groups and also changes to
@@ -310,17 +305,7 @@ static int init_task_group_load = INIT_TASK_GROUP_LOAD;
 /* Default task group.
  *	Every task in system belong to this group at bootup.
  */
-struct task_group init_task_group = {
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	.se	= init_sched_entity_p,
-	.cfs_rq = init_cfs_rq_p,
-#endif
-
-#ifdef CONFIG_RT_GROUP_SCHED
-	.rt_se	= init_sched_rt_entity_p,
-	.rt_rq	= init_rt_rq_p,
-#endif
-};
+struct task_group init_task_group;
 
 /* return group to which a task belongs */
 static inline struct task_group *task_group(struct task_struct *p)
@@ -3720,7 +3705,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu)
 		 */
 		int ilb = first_cpu(nohz.cpu_mask);
 
-		if (ilb != NR_CPUS)
+		if (ilb < nr_cpu_ids)
 			resched_cpu(ilb);
 	}
 }
@@ -5671,11 +5656,11 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 	dest_cpu = any_online_cpu(mask);
 
 	/* On any allowed CPU? */
-	if (dest_cpu == NR_CPUS)
+	if (dest_cpu >= nr_cpu_ids)
 		dest_cpu = any_online_cpu(p->cpus_allowed);
 
 	/* No more Mr. Nice Guy. */
-	if (dest_cpu == NR_CPUS) {
+	if (dest_cpu >= nr_cpu_ids) {
 		cpumask_t cpus_allowed = cpuset_cpus_allowed_locked(p);
 		/*
 		 * Try to stay on the same cpuset, where the
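
A minimal userspace sketch, not from the patch, of the convention that this hunk and the previous one rely on (the toy_* names and TOY_NR_CPUS are invented): the cpumask search helpers report "no CPU found" with a value at or beyond the valid index range, so once only nr_cpu_ids slots are guaranteed to exist, the safe test is a range check against nr_cpu_ids rather than an equality check against the compile-time NR_CPUS.

#include <stdio.h>

#define TOY_NR_CPUS 64			/* compile-time maximum, like NR_CPUS */
static int toy_nr_cpu_ids = 4;		/* runtime count, like nr_cpu_ids */

/* Return the first set bit, or a value >= toy_nr_cpu_ids when none is set. */
static int toy_first_cpu(unsigned long mask)
{
	for (int cpu = 0; cpu < toy_nr_cpu_ids; cpu++)
		if (mask & (1UL << cpu))
			return cpu;
	return TOY_NR_CPUS;		/* "not found": beyond any valid index */
}

int main(void)
{
	int cpu = toy_first_cpu(0);	/* empty mask */

	/* The range check works whether "not found" is NR_CPUS or nr_cpu_ids. */
	if (cpu >= toy_nr_cpu_ids)
		printf("no cpu found\n");
	return 0;
}
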
@@ -6134,9 +6119,9 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level)
 {
 	struct sched_group *group = sd->groups;
 	cpumask_t groupmask;
-	char str[NR_CPUS];
+	char str[256];
 
-	cpumask_scnprintf(str, NR_CPUS, sd->span);
+	cpulist_scnprintf(str, sizeof(str), sd->span);
 	cpus_clear(groupmask);
 
 	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
@@ -6189,7 +6174,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level)
 
 		cpus_or(groupmask, groupmask, group->cpumask);
 
-		cpumask_scnprintf(str, NR_CPUS, group->cpumask);
+		cpulist_scnprintf(str, sizeof(str), group->cpumask);
 		printk(KERN_CONT " %s", str);
 
 		group = group->next;
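
A rough userspace model, not the kernel implementation (toy_cpulist is an invented name), of why the two debug hunks above switch formatting helpers: the old char str[NR_CPUS] buffer puts NR_CPUS bytes on the stack, which is costly with NR_CPUS configured to 4096, while cpulist_scnprintf() emits a compact range list that fits comfortably in a fixed 256-byte buffer.

#include <stdio.h>

/* Toy model of cpulist_scnprintf(): format set bits as "start-end" ranges.
 * (cpumask_scnprintf() would print the raw hex bitmap instead.) */
static int toy_cpulist(char *buf, size_t len, unsigned long mask, int nbits)
{
	size_t n = 0;
	int cpu = 0;

	buf[0] = '\0';
	while (cpu < nbits && n < len) {
		int start;

		if (!(mask & (1UL << cpu))) {
			cpu++;
			continue;
		}
		start = cpu;
		while (cpu + 1 < nbits && (mask & (1UL << (cpu + 1))))
			cpu++;
		n += snprintf(buf + n, len - n, "%s%d", n ? "," : "", start);
		if (cpu > start && n < len)
			n += snprintf(buf + n, len - n, "-%d", cpu);
		cpu++;
	}
	return (int)n;
}

int main(void)
{
	char str[256];	/* fixed-size buffer, as in the patched debug code */

	toy_cpulist(str, sizeof(str), 0x10f, 16);	/* bits 0-3 and 8 set */
	printf("%s\n", str);				/* prints 0-3,8 */
	return 0;
}
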
@@ -6601,7 +6586,7 @@ cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
  * gets dynamically allocated.
  */
 static DEFINE_PER_CPU(struct sched_domain, node_domains);
-static struct sched_group **sched_group_nodes_bycpu[NR_CPUS];
+static struct sched_group ***sched_group_nodes_bycpu;
 
 static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
 static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes);
@@ -7244,6 +7229,11 @@ void __init sched_init_smp(void)
 {
 	cpumask_t non_isolated_cpus;
 
+#if defined(CONFIG_NUMA)
+	sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
+						GFP_KERNEL);
+	BUG_ON(sched_group_nodes_bycpu == NULL);
+#endif
 	get_online_cpus();
 	arch_init_sched_domains(&cpu_online_map);
 	cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
@@ -7261,6 +7251,11 @@ void __init sched_init_smp(void)
 #else
 void __init sched_init_smp(void)
 {
+#if defined(CONFIG_NUMA)
+	sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
+						GFP_KERNEL);
+	BUG_ON(sched_group_nodes_bycpu == NULL);
+#endif
 	sched_init_granularity();
 }
 #endif /* CONFIG_SMP */
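
Not from the patch: a sketch, with invented names and calloc() standing in for kzalloc(), of the conversion the three hunks above apply to sched_group_nodes_bycpu. The fixed NR_CPUS-sized pointer table becomes a bare pointer that sched_init_smp() sizes at boot from nr_cpu_ids, so a kernel built with NR_CPUS=4096 no longer pays statically for slots it will never use.

#include <stdlib.h>
#include <assert.h>

struct sched_group;			/* opaque in this sketch */

static int nr_cpu_ids_runtime = 8;	/* stand-in for nr_cpu_ids */

/* Before: static struct sched_group **nodes_bycpu[NR_CPUS];
 * After:  one more level of indirection, sized at init time. */
static struct sched_group ***nodes_bycpu;

static void toy_sched_init_smp(void)
{
	/* calloc() stands in for kzalloc(..., GFP_KERNEL) */
	nodes_bycpu = calloc(nr_cpu_ids_runtime, sizeof(*nodes_bycpu));
	assert(nodes_bycpu != NULL);	/* the patch uses BUG_ON() */
}

int main(void)
{
	toy_sched_init_smp();
	free(nodes_bycpu);
	return 0;
}
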
@@ -7358,6 +7353,35 @@ void __init sched_init(void)
 {
 	int highest_cpu = 0;
 	int i, j;
+	unsigned long alloc_size = 0, ptr;
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
+	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
+#endif
+	/*
+	 * As sched_init() is called before page_alloc is setup,
+	 * we use alloc_bootmem().
+	 */
+	if (alloc_size) {
+		ptr = (unsigned long)alloc_bootmem_low(alloc_size);
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+		init_task_group.se = (struct sched_entity **)ptr;
+		ptr += nr_cpu_ids * sizeof(void **);
+
+		init_task_group.cfs_rq = (struct cfs_rq **)ptr;
+		ptr += nr_cpu_ids * sizeof(void **);
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
+		init_task_group.rt_se = (struct sched_rt_entity **)ptr;
+		ptr += nr_cpu_ids * sizeof(void **);
+
+		init_task_group.rt_rq = (struct rt_rq **)ptr;
+#endif
+	}
 
 #ifdef CONFIG_SMP
 	init_defrootdomain();
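
Not from the patch: a userspace sketch of the carve-out pattern used in the sched_init() hunk above, with invented toy_* names, malloc() standing in for alloc_bootmem_low(), and uintptr_t standing in for the patch's unsigned long cursor. The idea is to sum the sizes of the per-CPU pointer arrays that init_task_group needs (the static initializer deleted earlier in the patch used to supply them), make a single allocation while the page allocator is not yet up, and hand out consecutive slices by bumping a cursor.

#include <stdint.h>
#include <stdlib.h>
#include <assert.h>

struct toy_se;				/* stands in for struct sched_entity */
struct toy_cfs_rq;			/* stands in for struct cfs_rq */

static int toy_nr_cpu_ids = 8;		/* stands in for nr_cpu_ids */

static struct toy_se **group_se;	/* like init_task_group.se */
static struct toy_cfs_rq **group_cfs_rq;	/* like init_task_group.cfs_rq */

static void toy_sched_init(void)
{
	/* Two arrays of nr_cpu_ids pointers each, carved from one block. */
	size_t alloc_size = 2 * toy_nr_cpu_ids * sizeof(void **);
	uintptr_t ptr = (uintptr_t)malloc(alloc_size);

	assert(ptr != 0);

	group_se = (struct toy_se **)ptr;
	ptr += toy_nr_cpu_ids * sizeof(void **);

	group_cfs_rq = (struct toy_cfs_rq **)ptr;
}

int main(void)
{
	toy_sched_init();
	free(group_se);		/* the block begins at the first slice */
	return 0;
}
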
@@ -7610,10 +7634,10 @@ static int alloc_fair_sched_group(struct task_group *tg)
 	struct rq *rq;
 	int i;
 
-	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * NR_CPUS, GFP_KERNEL);
+	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
 	if (!tg->cfs_rq)
 		goto err;
-	tg->se = kzalloc(sizeof(se) * NR_CPUS, GFP_KERNEL);
+	tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
 	if (!tg->se)
 		goto err;
 
@@ -7695,10 +7719,10 @@ static int alloc_rt_sched_group(struct task_group *tg)
 	struct rq *rq;
 	int i;
 
-	tg->rt_rq = kzalloc(sizeof(rt_rq) * NR_CPUS, GFP_KERNEL);
+	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
 	if (!tg->rt_rq)
 		goto err;
-	tg->rt_se = kzalloc(sizeof(rt_se) * NR_CPUS, GFP_KERNEL);
+	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
 	if (!tg->rt_se)
 		goto err;
 
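
Not from the patch: a sketch (invented toy_* names, calloc() for kzalloc()) of the allocation shape in the last two hunks. sizeof(cfs_rq) there is the size of a local pointer variable, so each task group gets an array of nr_cpu_ids pointers sized at run time; the per-CPU structures behind those slots are still allocated individually further down in the same functions.

#include <stdlib.h>

struct toy_cfs_rq { int dummy; };

struct toy_task_group {
	struct toy_cfs_rq **cfs_rq;	/* one pointer per possible CPU id */
};

static int toy_nr_cpu_ids = 4;		/* stands in for nr_cpu_ids */

/* Shape of alloc_fair_sched_group(): a pointer array sized by the runtime
 * CPU count, then one per-CPU structure hung off each slot. */
static int toy_alloc_group(struct toy_task_group *tg)
{
	struct toy_cfs_rq *cfs_rq;	/* sizeof(cfs_rq) == sizeof a pointer */

	tg->cfs_rq = calloc(toy_nr_cpu_ids, sizeof(cfs_rq));
	if (!tg->cfs_rq)
		return -1;

	for (int i = 0; i < toy_nr_cpu_ids; i++) {
		cfs_rq = calloc(1, sizeof(*cfs_rq));
		if (!cfs_rq)
			return -1;	/* the real code unwinds via goto err */
		tg->cfs_rq[i] = cfs_rq;
	}
	return 0;
}

int main(void)
{
	struct toy_task_group tg;

	return toy_alloc_group(&tg);
}
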