Diffstat (limited to 'kernel/sched.c')
 -rw-r--r--  kernel/sched.c | 273
 1 file changed, 176 insertions(+), 97 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index b60ba7475574..e7f2cfa6a257 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -535,14 +535,12 @@ struct rq {
 #define CPU_LOAD_IDX_MAX 5
 	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 #ifdef CONFIG_NO_HZ
-	unsigned long last_tick_seen;
 	unsigned char in_nohz_recently;
 #endif
 	/* capture load from *all* tasks on this cpu: */
 	struct load_weight load;
 	unsigned long nr_load_updates;
 	u64 nr_switches;
-	u64 nr_migrations_in;
 
 	struct cfs_rq cfs;
 	struct rt_rq rt;
@@ -591,6 +589,8 @@ struct rq {
 
 	u64 rt_avg;
 	u64 age_stamp;
+	u64 idle_stamp;
+	u64 avg_idle;
 #endif
 
 	/* calc_load related fields */
@@ -772,7 +772,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
 	if (!sched_feat_names[i])
 		return -EINVAL;
 
-	filp->f_pos += cnt;
+	*ppos += cnt;
 
 	return cnt;
 }
@@ -2017,6 +2017,7 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
 	}
 
 	spin_lock_irqsave(&rq->lock, flags);
+	update_rq_clock(rq);
 	set_task_cpu(p, cpu);
 	p->cpus_allowed = cpumask_of_cpu(cpu);
 	p->rt.nr_cpus_allowed = 1;
@@ -2078,7 +2079,6 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 #endif
 	if (old_cpu != new_cpu) {
 		p->se.nr_migrations++;
-		new_rq->nr_migrations_in++;
 #ifdef CONFIG_SCHEDSTATS
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
@@ -2115,6 +2115,7 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 	 * it is sufficient to simply update the task's cpu field.
 	 */
 	if (!p->se.on_rq && !task_running(rq, p)) {
+		update_rq_clock(rq);
 		set_task_cpu(p, dest_cpu);
 		return 0;
 	}
@@ -2376,14 +2377,15 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	task_rq_unlock(rq, &flags);
 
 	cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-	if (cpu != orig_cpu)
+	if (cpu != orig_cpu) {
+		local_irq_save(flags);
+		rq = cpu_rq(cpu);
+		update_rq_clock(rq);
 		set_task_cpu(p, cpu);
-
+		local_irq_restore(flags);
+	}
 	rq = task_rq_lock(p, &flags);
 
-	if (rq != orig_rq)
-		update_rq_clock(rq);
-
 	WARN_ON(p->state != TASK_WAKING);
 	cpu = task_cpu(p);
 
@@ -2440,6 +2442,17 @@ out_running:
 #ifdef CONFIG_SMP
 	if (p->sched_class->task_wake_up)
 		p->sched_class->task_wake_up(rq, p);
+
+	if (unlikely(rq->idle_stamp)) {
+		u64 delta = rq->clock - rq->idle_stamp;
+		u64 max = 2*sysctl_sched_migration_cost;
+
+		if (delta > max)
+			rq->avg_idle = max;
+		else
+			update_avg(&rq->avg_idle, delta);
+		rq->idle_stamp = 0;
+	}
 #endif
 out:
 	task_rq_unlock(rq, &flags);
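
Note: update_avg(), called above, is the pre-existing helper in kernel/sched.c; it keeps rq->avg_idle as a simple exponential moving average that moves 1/8 of the way toward each new idle-period sample, roughly:

/* Existing helper in kernel/sched.c (shown for reference, roughly): each
 * new sample pulls the average 1/8 of the way toward itself. */
static void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;

	*avg += diff >> 3;
}

Each wakeup therefore nudges avg_idle toward the length of the idle period that just ended, while the clamp to 2*sysctl_sched_migration_cost keeps a single long idle stretch from inflating it.
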
@@ -2545,6 +2558,7 @@ static void __sched_fork(struct task_struct *p)
 void sched_fork(struct task_struct *p, int clone_flags)
 {
 	int cpu = get_cpu();
+	unsigned long flags;
 
 	__sched_fork(p);
 
@@ -2581,7 +2595,10 @@ void sched_fork(struct task_struct *p, int clone_flags)
 #ifdef CONFIG_SMP
 	cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
 #endif
+	local_irq_save(flags);
+	update_rq_clock(cpu_rq(cpu));
 	set_task_cpu(p, cpu);
+	local_irq_restore(flags);
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	if (likely(sched_info_on()))
@@ -2848,14 +2865,14 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	 */
 	arch_start_context_switch(prev);
 
-	if (unlikely(!mm)) {
+	if (likely(!mm)) {
 		next->active_mm = oldmm;
 		atomic_inc(&oldmm->mm_count);
 		enter_lazy_tlb(oldmm, next);
 	} else
 		switch_mm(oldmm, mm, next);
 
-	if (unlikely(!prev->mm)) {
+	if (likely(!prev->mm)) {
 		prev->active_mm = NULL;
 		rq->prev_mm = oldmm;
 	}
@@ -3018,15 +3035,6 @@ static void calc_load_account_active(struct rq *this_rq)
 }
 
 /*
- * Externally visible per-cpu scheduler statistics:
- * cpu_nr_migrations(cpu) - number of migrations into that cpu
- */
-u64 cpu_nr_migrations(int cpu)
-{
-	return cpu_rq(cpu)->nr_migrations_in;
-}
-
-/*
  * Update rq->cpu_load[] statistics. This function is usually called every
  * scheduler tick (TICK_NSEC).
  */
@@ -4126,7 +4134,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	unsigned long flags;
 	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
-	cpumask_setall(cpus);
+	cpumask_copy(cpus, cpu_online_mask);
 
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
@@ -4289,7 +4297,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 	int all_pinned = 0;
 	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
-	cpumask_setall(cpus);
+	cpumask_copy(cpus, cpu_online_mask);
 
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
@@ -4429,6 +4437,11 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 	int pulled_task = 0;
 	unsigned long next_balance = jiffies + HZ;
 
+	this_rq->idle_stamp = this_rq->clock;
+
+	if (this_rq->avg_idle < sysctl_sched_migration_cost)
+		return;
+
 	for_each_domain(this_cpu, sd) {
 		unsigned long interval;
 
@@ -4443,8 +4456,10 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 		interval = msecs_to_jiffies(sd->balance_interval);
 		if (time_after(next_balance, sd->last_balance + interval))
 			next_balance = sd->last_balance + interval;
-		if (pulled_task)
+		if (pulled_task) {
+			this_rq->idle_stamp = 0;
 			break;
+		}
 	}
 	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
 		/*
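
The avg_idle bookkeeping and the early return added to idle_balance() work together: newidle balancing is attempted only when this CPU's average idle period exceeds sysctl_sched_migration_cost (500000 ns by default), on the theory that a very short expected idle time is not worth the cost of pulling a task over. A distilled sketch of that decision, using a hypothetical helper name:

/* Hypothetical helper, not in the patch: newidle balancing pays off only
 * if this CPU tends to stay idle longer than a task migration costs. */
static inline int newidle_balance_worthwhile(struct rq *rq)
{
	return rq->avg_idle >= sysctl_sched_migration_cost;
}
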
@@ -5046,8 +5061,13 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime,
 	p->gtime = cputime_add(p->gtime, cputime);
 
 	/* Add guest time to cpustat. */
-	cpustat->user = cputime64_add(cpustat->user, tmp);
-	cpustat->guest = cputime64_add(cpustat->guest, tmp);
+	if (TASK_NICE(p) > 0) {
+		cpustat->nice = cputime64_add(cpustat->nice, tmp);
+		cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
+	} else {
+		cpustat->user = cputime64_add(cpustat->user, tmp);
+		cpustat->guest = cputime64_add(cpustat->guest, tmp);
+	}
 }
 
 /*
@@ -5162,60 +5182,86 @@ void account_idle_ticks(unsigned long ticks)
  * Use precise platform statistics if available:
  */
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-cputime_t task_utime(struct task_struct *p)
+void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-	return p->utime;
+	*ut = p->utime;
+	*st = p->stime;
 }
 
-cputime_t task_stime(struct task_struct *p)
+void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-	return p->stime;
+	struct task_cputime cputime;
+
+	thread_group_cputime(p, &cputime);
+
+	*ut = cputime.utime;
+	*st = cputime.stime;
 }
 #else
-cputime_t task_utime(struct task_struct *p)
+
+#ifndef nsecs_to_cputime
+# define nsecs_to_cputime(__nsecs)	nsecs_to_jiffies(__nsecs)
+#endif
+
+void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-	clock_t utime = cputime_to_clock_t(p->utime),
-		total = utime + cputime_to_clock_t(p->stime);
-	u64 temp;
+	cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
 
 	/*
 	 * Use CFS's precise accounting:
 	 */
-	temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
+	rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
 
 	if (total) {
-		temp *= utime;
+		u64 temp;
+
+		temp = (u64)(rtime * utime);
 		do_div(temp, total);
-	}
-	utime = (clock_t)temp;
+		utime = (cputime_t)temp;
+	} else
+		utime = rtime;
+
+	/*
+	 * Compare with previous values, to keep monotonicity:
+	 */
+	p->prev_utime = max(p->prev_utime, utime);
+	p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
 
-	p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
-	return p->prev_utime;
+	*ut = p->prev_utime;
+	*st = p->prev_stime;
 }
 
-cputime_t task_stime(struct task_struct *p)
+/*
+ * Must be called with siglock held.
+ */
+void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-	clock_t stime;
+	struct signal_struct *sig = p->signal;
+	struct task_cputime cputime;
+	cputime_t rtime, utime, total;
 
-	/*
-	 * Use CFS's precise accounting. (we subtract utime from
-	 * the total, to make sure the total observed by userspace
-	 * grows monotonically - apps rely on that):
-	 */
-	stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
-			cputime_to_clock_t(task_utime(p));
+	thread_group_cputime(p, &cputime);
 
-	if (stime >= 0)
-		p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
+	total = cputime_add(cputime.utime, cputime.stime);
+	rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
 
-	return p->prev_stime;
-}
-#endif
+	if (total) {
+		u64 temp;
 
-inline cputime_t task_gtime(struct task_struct *p)
-{
-	return p->gtime;
+		temp = (u64)(rtime * cputime.utime);
+		do_div(temp, total);
+		utime = (cputime_t)temp;
+	} else
+		utime = rtime;
+
+	sig->prev_utime = max(sig->prev_utime, utime);
+	sig->prev_stime = max(sig->prev_stime,
+			cputime_sub(rtime, sig->prev_utime));
+
+	*ut = sig->prev_utime;
+	*st = sig->prev_stime;
 }
+#endif
 
 /*
  * This function gets called by the timer code, with HZ frequency.
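
The rework above makes the precise CFS runtime (p->se.sum_exec_runtime, converted via nsecs_to_cputime()) the authoritative total and splits it between user and system time in proportion to the tick-sampled utime/stime, then clamps against prev_utime/prev_stime so the values reported to user space never go backwards. A minimal illustration of the proportional split (local names, not kernel API):

/* Illustrative only: scaled user time = rtime * utime / (utime + stime);
 * system time is whatever remains after the monotonic clamp. */
static unsigned long long scaled_utime(unsigned long long rtime,
				       unsigned long long utime,
				       unsigned long long stime)
{
	unsigned long long total = utime + stime;

	return total ? (rtime * utime) / total : rtime;
}

thread_group_times() applies the same split to the summed thread-group numbers and caches the result in signal_struct, which is why it must be called with siglock held.
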
@@ -5481,7 +5527,7 @@ need_resched_nonpreemptible:
 }
 EXPORT_SYMBOL(schedule);
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 /*
  * Look out! "owner" is an entirely speculative pointer
  * access and not reliable.
@@ -6175,22 +6221,14 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
 	BUG_ON(p->se.on_rq);
 
 	p->policy = policy;
-	switch (p->policy) {
-	case SCHED_NORMAL:
-	case SCHED_BATCH:
-	case SCHED_IDLE:
-		p->sched_class = &fair_sched_class;
-		break;
-	case SCHED_FIFO:
-	case SCHED_RR:
-		p->sched_class = &rt_sched_class;
-		break;
-	}
-
 	p->rt_priority = prio;
 	p->normal_prio = normal_prio(p);
 	/* we are holding p->pi_lock already */
 	p->prio = rt_mutex_getprio(p);
+	if (rt_prio(p->prio))
+		p->sched_class = &rt_sched_class;
+	else
+		p->sched_class = &fair_sched_class;
 	set_load_weight(p);
 }
 
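
With this change __setscheduler() derives the scheduling class from the effective priority rather than from the policy alone, so a task whose priority is boosted by rt_mutex priority inheritance is handled by the RT class even while its policy is SCHED_NORMAL. For reference, rt_prio() (from include/linux/sched.h at the time) is just a range check, roughly:

/* Reference sketch: a priority in 0..MAX_RT_PRIO-1 is a real-time priority. */
static inline int rt_prio(int prio)
{
	if (unlikely(prio < MAX_RT_PRIO))
		return 1;
	return 0;
}
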
@@ -6935,7 +6973,7 @@ void show_state_filter(unsigned long state_filter)
 	/*
 	 * Only show locks if all tasks are dumped:
 	 */
-	if (state_filter == -1)
+	if (!state_filter)
 		debug_show_all_locks();
 }
 
@@ -7739,6 +7777,16 @@ early_initcall(migration_init);
 
 #ifdef CONFIG_SCHED_DEBUG
 
+static __read_mostly int sched_domain_debug_enabled;
+
+static int __init sched_domain_debug_setup(char *str)
+{
+	sched_domain_debug_enabled = 1;
+
+	return 0;
+}
+early_param("sched_debug", sched_domain_debug_setup);
+
 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 				  struct cpumask *groupmask)
 {
@@ -7825,6 +7873,9 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 	cpumask_var_t groupmask;
 	int level = 0;
 
+	if (!sched_domain_debug_enabled)
+		return;
+
 	if (!sd) {
 		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
 		return;
@@ -7904,6 +7955,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 
 static void free_rootdomain(struct root_domain *rd)
 {
+	synchronize_sched();
+
 	cpupri_cleanup(&rd->cpupri);
 
 	free_cpumask_var(rd->rto_mask);
@@ -8044,6 +8097,7 @@ static cpumask_var_t cpu_isolated_map;
 /* Setup the mask of cpus configured for isolated domains */
 static int __init isolated_cpu_setup(char *str)
 {
+	alloc_bootmem_cpumask_var(&cpu_isolated_map);
 	cpulist_parse(str, cpu_isolated_map);
 	return 1;
 }
@@ -8880,7 +8934,7 @@ static int build_sched_domains(const struct cpumask *cpu_map)
 	return __build_sched_domains(cpu_map, NULL);
 }
 
-static struct cpumask *doms_cur;	/* current sched domains */
+static cpumask_var_t *doms_cur;	/* current sched domains */
 static int ndoms_cur;		/* number of sched domains in 'doms_cur' */
 static struct sched_domain_attr *dattr_cur;
 				/* attribues of custom domains in 'doms_cur' */
@@ -8902,6 +8956,31 @@ int __attribute__((weak)) arch_update_cpu_topology(void)
 	return 0;
 }
 
+cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
+{
+	int i;
+	cpumask_var_t *doms;
+
+	doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
+	if (!doms)
+		return NULL;
+	for (i = 0; i < ndoms; i++) {
+		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
+			free_sched_domains(doms, i);
+			return NULL;
+		}
+	}
+	return doms;
+}
+
+void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
+{
+	unsigned int i;
+	for (i = 0; i < ndoms; i++)
+		free_cpumask_var(doms[i]);
+	kfree(doms);
+}
+
 /*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
@@ -8913,12 +8992,12 @@ static int arch_init_sched_domains(const struct cpumask *cpu_map)
 
 	arch_update_cpu_topology();
 	ndoms_cur = 1;
-	doms_cur = kmalloc(cpumask_size(), GFP_KERNEL);
+	doms_cur = alloc_sched_domains(ndoms_cur);
 	if (!doms_cur)
-		doms_cur = fallback_doms;
-	cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map);
+		doms_cur = &fallback_doms;
+	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
 	dattr_cur = NULL;
-	err = build_sched_domains(doms_cur);
+	err = build_sched_domains(doms_cur[0]);
 	register_sched_domain_sysctl();
 
 	return err;
@@ -8968,19 +9047,19 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  * doms_new[] to the current sched domain partitioning, doms_cur[].
  * It destroys each deleted domain and builds each new domain.
  *
- * 'doms_new' is an array of cpumask's of length 'ndoms_new'.
+ * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
  * The masks don't intersect (don't overlap.) We should setup one
  * sched domain for each mask. CPUs not in any of the cpumasks will
  * not be load balanced. If the same cpumask appears both in the
  * current 'doms_cur' domains and in the new 'doms_new', we can leave
  * it as it is.
  *
- * The passed in 'doms_new' should be kmalloc'd. This routine takes
- * ownership of it and will kfree it when done with it. If the caller
- * failed the kmalloc call, then it can pass in doms_new == NULL &&
- * ndoms_new == 1, and partition_sched_domains() will fallback to
- * the single partition 'fallback_doms', it also forces the domains
- * to be rebuilt.
+ * The passed in 'doms_new' should be allocated using
+ * alloc_sched_domains.  This routine takes ownership of it and will
+ * free_sched_domains it when done with it. If the caller failed the
+ * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
+ * and partition_sched_domains() will fallback to the single partition
+ * 'fallback_doms', it also forces the domains to be rebuilt.
  *
 * If doms_new == NULL it will be replaced with cpu_online_mask.
 * ndoms_new == 0 is a special case for destroying existing domains,
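
A caller-side sketch of the new interface (illustrative only, not part of this patch): allocate the array with alloc_sched_domains(), fill in each mask, and hand it to partition_sched_domains(), which takes ownership and will release it with free_sched_domains() when it is replaced.

/* Illustrative caller: rebuild a single partition covering 'span'.
 * Ownership of 'doms' passes to partition_sched_domains(), so the
 * caller must not free it afterwards. */
static void example_repartition(const struct cpumask *span)
{
	cpumask_var_t *doms = alloc_sched_domains(1);

	if (!doms)
		return;	/* or fall back with partition_sched_domains(1, NULL, NULL) */

	cpumask_copy(doms[0], span);

	get_online_cpus();
	partition_sched_domains(1, doms, NULL);
	put_online_cpus();
}
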
@@ -8988,8 +9067,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  *
  * Call with hotplug lock held
  */
-/* FIXME: Change to struct cpumask *doms_new[] */
-void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
+void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 			     struct sched_domain_attr *dattr_new)
 {
 	int i, j, n;
@@ -9008,40 +9086,40 @@ void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 	/* Destroy deleted domains */
 	for (i = 0; i < ndoms_cur; i++) {
 		for (j = 0; j < n && !new_topology; j++) {
-			if (cpumask_equal(&doms_cur[i], &doms_new[j])
+			if (cpumask_equal(doms_cur[i], doms_new[j])
 			    && dattrs_equal(dattr_cur, i, dattr_new, j))
 				goto match1;
 		}
 		/* no match - a current sched domain not in new doms_new[] */
-		detach_destroy_domains(doms_cur + i);
+		detach_destroy_domains(doms_cur[i]);
 match1:
 		;
 	}
 
 	if (doms_new == NULL) {
 		ndoms_cur = 0;
-		doms_new = fallback_doms;
-		cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
+		doms_new = &fallback_doms;
+		cpumask_andnot(doms_new[0], cpu_online_mask, cpu_isolated_map);
 		WARN_ON_ONCE(dattr_new);
 	}
 
 	/* Build new domains */
 	for (i = 0; i < ndoms_new; i++) {
 		for (j = 0; j < ndoms_cur && !new_topology; j++) {
-			if (cpumask_equal(&doms_new[i], &doms_cur[j])
+			if (cpumask_equal(doms_new[i], doms_cur[j])
 			    && dattrs_equal(dattr_new, i, dattr_cur, j))
 				goto match2;
 		}
 		/* no match - add a new doms_new */
-		__build_sched_domains(doms_new + i,
+		__build_sched_domains(doms_new[i],
 					dattr_new ? dattr_new + i : NULL);
 match2:
 		;
 	}
 
 	/* Remember the new sched domains */
-	if (doms_cur != fallback_doms)
-		kfree(doms_cur);
+	if (doms_cur != &fallback_doms)
+		free_sched_domains(doms_cur, ndoms_cur);
 	kfree(dattr_cur);	/* kfree(NULL) is safe */
 	doms_cur = doms_new;
 	dattr_cur = dattr_new;
@@ -9363,10 +9441,6 @@ void __init sched_init(void)
 #ifdef CONFIG_CPUMASK_OFFSTACK
 	alloc_size += num_possible_cpus() * cpumask_size();
 #endif
-	/*
-	 * As sched_init() is called before page_alloc is setup,
-	 * we use alloc_bootmem().
-	 */
 	if (alloc_size) {
 		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
 
@@ -9521,6 +9595,8 @@ void __init sched_init(void)
 		rq->cpu = i;
 		rq->online = 0;
 		rq->migration_thread = NULL;
+		rq->idle_stamp = 0;
+		rq->avg_idle = 2*sysctl_sched_migration_cost;
 		INIT_LIST_HEAD(&rq->migration_queue);
 		rq_attach_root(rq, &def_root_domain);
 #endif
@@ -9570,7 +9646,9 @@ void __init sched_init(void)
 	zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
 	alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
 #endif
-	zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
+	/* May be allocated at isolcpus cmdline parse time */
+	if (cpu_isolated_map == NULL)
+		zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */
 
 	perf_event_init();
@@ -10900,6 +10978,7 @@ void synchronize_sched_expedited(void)
 		spin_unlock_irqrestore(&rq->lock, flags);
 	}
 	rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+	synchronize_sched_expedited_count++;
 	mutex_unlock(&rcu_sched_expedited_mutex);
 	put_online_cpus();
 	if (need_full_sync)