path: root/kernel/sched.c
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	336
1 files changed, 224 insertions, 112 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index e88689522e66..aa31244caa9f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -309,6 +309,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
  */
 static DEFINE_SPINLOCK(task_group_lock);
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
 #ifdef CONFIG_SMP
 static int root_task_group_empty(void)
 {
@@ -316,7 +318,6 @@ static int root_task_group_empty(void)
 }
 #endif
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_USER_SCHED
 # define INIT_TASK_GROUP_LOAD	(2*NICE_0_LOAD)
 #else /* !CONFIG_USER_SCHED */
@@ -534,14 +535,12 @@ struct rq {
 	#define CPU_LOAD_IDX_MAX 5
 	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 #ifdef CONFIG_NO_HZ
-	unsigned long last_tick_seen;
 	unsigned char in_nohz_recently;
 #endif
 	/* capture load from *all* tasks on this cpu: */
 	struct load_weight load;
 	unsigned long nr_load_updates;
 	u64 nr_switches;
-	u64 nr_migrations_in;
 
 	struct cfs_rq cfs;
 	struct rt_rq rt;
@@ -590,6 +589,8 @@ struct rq {
 
 	u64 rt_avg;
 	u64 age_stamp;
+	u64 idle_stamp;
+	u64 avg_idle;
 #endif
 
 	/* calc_load related fields */
@@ -771,7 +772,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
 	if (!sched_feat_names[i])
 		return -EINVAL;
 
-	filp->f_pos += cnt;
+	*ppos += cnt;
 
 	return cnt;
 }
@@ -1564,11 +1565,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-struct update_shares_data {
-	unsigned long rq_weight[NR_CPUS];
-};
-
-static DEFINE_PER_CPU(struct update_shares_data, update_shares_data);
+static __read_mostly unsigned long *update_shares_data;
 
 static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 
@@ -1578,12 +1575,12 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 static void update_group_shares_cpu(struct task_group *tg, int cpu,
 				    unsigned long sd_shares,
 				    unsigned long sd_rq_weight,
-				    struct update_shares_data *usd)
+				    unsigned long *usd_rq_weight)
 {
 	unsigned long shares, rq_weight;
 	int boost = 0;
 
-	rq_weight = usd->rq_weight[cpu];
+	rq_weight = usd_rq_weight[cpu];
 	if (!rq_weight) {
 		boost = 1;
 		rq_weight = NICE_0_LOAD;
@@ -1618,7 +1615,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
 static int tg_shares_up(struct task_group *tg, void *data)
 {
 	unsigned long weight, rq_weight = 0, shares = 0;
-	struct update_shares_data *usd;
+	unsigned long *usd_rq_weight;
 	struct sched_domain *sd = data;
 	unsigned long flags;
 	int i;
@@ -1627,11 +1624,11 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		return 0;
 
 	local_irq_save(flags);
-	usd = &__get_cpu_var(update_shares_data);
+	usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id());
 
 	for_each_cpu(i, sched_domain_span(sd)) {
 		weight = tg->cfs_rq[i]->load.weight;
-		usd->rq_weight[i] = weight;
+		usd_rq_weight[i] = weight;
 
 		/*
 		 * If there are currently no tasks on the cpu pretend there
@@ -1652,7 +1649,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		shares = tg->shares;
 
 	for_each_cpu(i, sched_domain_span(sd))
-		update_group_shares_cpu(tg, i, shares, rq_weight, usd);
+		update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight);
 
 	local_irq_restore(flags);
 
@@ -1996,6 +1993,39 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 		p->sched_class->prio_changed(rq, p, oldprio, running);
 }
 
+/**
+ * kthread_bind - bind a just-created kthread to a cpu.
+ * @p: thread created by kthread_create().
+ * @cpu: cpu (might not be online, must be possible) for @k to run on.
+ *
+ * Description: This function is equivalent to set_cpus_allowed(),
+ * except that @cpu doesn't need to be online, and the thread must be
+ * stopped (i.e., just returned from kthread_create()).
+ *
+ * Function lives here instead of kthread.c because it messes with
+ * scheduler internals which require locking.
+ */
+void kthread_bind(struct task_struct *p, unsigned int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	/* Must have done schedule() in kthread() before we set_task_cpu */
+	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
+		WARN_ON(1);
+		return;
+	}
+
+	spin_lock_irqsave(&rq->lock, flags);
+	update_rq_clock(rq);
+	set_task_cpu(p, cpu);
+	p->cpus_allowed = cpumask_of_cpu(cpu);
+	p->rt.nr_cpus_allowed = 1;
+	p->flags |= PF_THREAD_BOUND;
+	spin_unlock_irqrestore(&rq->lock, flags);
+}
+EXPORT_SYMBOL(kthread_bind);
+
 #ifdef CONFIG_SMP
 /*
  * Is this task likely cache-hot:
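(Aside, not part of the diff: the kthread_bind() kerneldoc above implies the usual create/bind/wake sequence for a per-cpu kernel thread. A minimal sketch of a caller, where my_thread_fn() is a hypothetical thread function and cpu is any possible, not necessarily online, cpu:)

	struct task_struct *p;

	p = kthread_create(my_thread_fn, NULL, "mythread/%d", cpu);
	if (IS_ERR(p))
		return PTR_ERR(p);
	kthread_bind(p, cpu);	/* bind before the thread is first woken */
	wake_up_process(p);
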
@@ -2008,7 +2038,7 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 	/*
 	 * Buddy candidates are cache hot:
 	 */
-	if (sched_feat(CACHE_HOT_BUDDY) &&
+	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
 			(&p->se == cfs_rq_of(&p->se)->next ||
 			 &p->se == cfs_rq_of(&p->se)->last))
 		return 1;
@@ -2049,7 +2079,6 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 #endif
 	if (old_cpu != new_cpu) {
 		p->se.nr_migrations++;
-		new_rq->nr_migrations_in++;
 #ifdef CONFIG_SCHEDSTATS
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
@@ -2086,6 +2115,7 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 	 * it is sufficient to simply update the task's cpu field.
 	 */
 	if (!p->se.on_rq && !task_running(rq, p)) {
+		update_rq_clock(rq);
 		set_task_cpu(p, dest_cpu);
 		return 0;
 	}
@@ -2347,14 +2377,15 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	task_rq_unlock(rq, &flags);
 
 	cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-	if (cpu != orig_cpu)
+	if (cpu != orig_cpu) {
+		local_irq_save(flags);
+		rq = cpu_rq(cpu);
+		update_rq_clock(rq);
 		set_task_cpu(p, cpu);
-
+		local_irq_restore(flags);
+	}
 	rq = task_rq_lock(p, &flags);
 
-	if (rq != orig_rq)
-		update_rq_clock(rq);
-
 	WARN_ON(p->state != TASK_WAKING);
 	cpu = task_cpu(p);
 
@@ -2411,6 +2442,17 @@ out_running:
 #ifdef CONFIG_SMP
 	if (p->sched_class->task_wake_up)
 		p->sched_class->task_wake_up(rq, p);
+
+	if (unlikely(rq->idle_stamp)) {
+		u64 delta = rq->clock - rq->idle_stamp;
+		u64 max = 2*sysctl_sched_migration_cost;
+
+		if (delta > max)
+			rq->avg_idle = max;
+		else
+			update_avg(&rq->avg_idle, delta);
+		rq->idle_stamp = 0;
+	}
 #endif
 out:
 	task_rq_unlock(rq, &flags);
@@ -2516,6 +2558,7 @@ static void __sched_fork(struct task_struct *p)
 void sched_fork(struct task_struct *p, int clone_flags)
 {
 	int cpu = get_cpu();
+	unsigned long flags;
 
 	__sched_fork(p);
 
@@ -2552,7 +2595,10 @@ void sched_fork(struct task_struct *p, int clone_flags)
 #ifdef CONFIG_SMP
 	cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
 #endif
+	local_irq_save(flags);
+	update_rq_clock(cpu_rq(cpu));
 	set_task_cpu(p, cpu);
+	local_irq_restore(flags);
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	if (likely(sched_info_on()))
@@ -2819,14 +2865,14 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	 */
 	arch_start_context_switch(prev);
 
-	if (unlikely(!mm)) {
+	if (likely(!mm)) {
 		next->active_mm = oldmm;
 		atomic_inc(&oldmm->mm_count);
 		enter_lazy_tlb(oldmm, next);
 	} else
 		switch_mm(oldmm, mm, next);
 
-	if (unlikely(!prev->mm)) {
+	if (likely(!prev->mm)) {
 		prev->active_mm = NULL;
 		rq->prev_mm = oldmm;
 	}
@@ -2989,15 +3035,6 @@ static void calc_load_account_active(struct rq *this_rq)
 }
 
 /*
- * Externally visible per-cpu scheduler statistics:
- * cpu_nr_migrations(cpu) - number of migrations into that cpu
- */
-u64 cpu_nr_migrations(int cpu)
-{
-	return cpu_rq(cpu)->nr_migrations_in;
-}
-
-/*
  * Update rq->cpu_load[] statistics. This function is usually called every
  * scheduler tick (TICK_NSEC).
  */
@@ -4097,7 +4134,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	unsigned long flags;
 	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
-	cpumask_setall(cpus);
+	cpumask_copy(cpus, cpu_online_mask);
 
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
@@ -4260,7 +4297,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 	int all_pinned = 0;
 	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
-	cpumask_setall(cpus);
+	cpumask_copy(cpus, cpu_online_mask);
 
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
@@ -4400,6 +4437,11 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 	int pulled_task = 0;
 	unsigned long next_balance = jiffies + HZ;
 
+	this_rq->idle_stamp = this_rq->clock;
+
+	if (this_rq->avg_idle < sysctl_sched_migration_cost)
+		return;
+
 	for_each_domain(this_cpu, sd) {
 		unsigned long interval;
 
@@ -4414,8 +4456,10 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 		interval = msecs_to_jiffies(sd->balance_interval);
 		if (time_after(next_balance, sd->last_balance + interval))
 			next_balance = sd->last_balance + interval;
-		if (pulled_task)
+		if (pulled_task) {
+			this_rq->idle_stamp = 0;
 			break;
+		}
 	}
 	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
 		/*
@@ -5017,8 +5061,13 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime,
 	p->gtime = cputime_add(p->gtime, cputime);
 
 	/* Add guest time to cpustat. */
-	cpustat->user = cputime64_add(cpustat->user, tmp);
-	cpustat->guest = cputime64_add(cpustat->guest, tmp);
+	if (TASK_NICE(p) > 0) {
+		cpustat->nice = cputime64_add(cpustat->nice, tmp);
+		cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
+	} else {
+		cpustat->user = cputime64_add(cpustat->user, tmp);
+		cpustat->guest = cputime64_add(cpustat->guest, tmp);
+	}
 }
 
 /*
@@ -5133,60 +5182,86 @@ void account_idle_ticks(unsigned long ticks)
  * Use precise platform statistics if available:
  */
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-cputime_t task_utime(struct task_struct *p)
+void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-	return p->utime;
+	*ut = p->utime;
+	*st = p->stime;
 }
 
-cputime_t task_stime(struct task_struct *p)
+void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-	return p->stime;
+	struct task_cputime cputime;
+
+	thread_group_cputime(p, &cputime);
+
+	*ut = cputime.utime;
+	*st = cputime.stime;
 }
 #else
-cputime_t task_utime(struct task_struct *p)
+
+#ifndef nsecs_to_cputime
+# define nsecs_to_cputime(__nsecs)	nsecs_to_jiffies(__nsecs)
+#endif
+
+void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-	clock_t utime = cputime_to_clock_t(p->utime),
-		total = utime + cputime_to_clock_t(p->stime);
-	u64 temp;
+	cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
 
 	/*
 	 * Use CFS's precise accounting:
 	 */
-	temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
+	rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
 
 	if (total) {
-		temp *= utime;
+		u64 temp;
+
+		temp = (u64)(rtime * utime);
 		do_div(temp, total);
-	}
-	utime = (clock_t)temp;
+		utime = (cputime_t)temp;
+	} else
+		utime = rtime;
+
+	/*
+	 * Compare with previous values, to keep monotonicity:
+	 */
+	p->prev_utime = max(p->prev_utime, utime);
+	p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
 
-	p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
-	return p->prev_utime;
+	*ut = p->prev_utime;
+	*st = p->prev_stime;
 }
 
-cputime_t task_stime(struct task_struct *p)
+/*
+ * Must be called with siglock held.
+ */
+void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-	clock_t stime;
+	struct signal_struct *sig = p->signal;
+	struct task_cputime cputime;
+	cputime_t rtime, utime, total;
 
-	/*
-	 * Use CFS's precise accounting. (we subtract utime from
-	 * the total, to make sure the total observed by userspace
-	 * grows monotonically - apps rely on that):
-	 */
-	stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
-			cputime_to_clock_t(task_utime(p));
+	thread_group_cputime(p, &cputime);
 
-	if (stime >= 0)
-		p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
+	total = cputime_add(cputime.utime, cputime.stime);
+	rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
 
-	return p->prev_stime;
-}
-#endif
+	if (total) {
+		u64 temp;
 
-inline cputime_t task_gtime(struct task_struct *p)
-{
-	return p->gtime;
+		temp = (u64)(rtime * cputime.utime);
+		do_div(temp, total);
+		utime = (cputime_t)temp;
+	} else
+		utime = rtime;
+
+	sig->prev_utime = max(sig->prev_utime, utime);
+	sig->prev_stime = max(sig->prev_stime,
+			      cputime_sub(rtime, sig->prev_utime));
+
+	*ut = sig->prev_utime;
+	*st = sig->prev_stime;
 }
+#endif
 
 /*
  * This function gets called by the timer code, with HZ frequency.
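(Aside, not part of the diff: the new task_times() splits the CFS runtime in proportion to the tick-sampled utime/total. As a worked example with illustrative numbers only: if p->utime = 30, p->stime = 10 and rtime = nsecs_to_cputime(p->se.sum_exec_runtime) = 48, then total = 40, the scaled utime is 48 * 30 / 40 = 36, and prev_stime becomes 48 - 36 = 12; the max() against prev_utime/prev_stime keeps both reported values monotonic across calls.)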
@@ -5452,7 +5527,7 @@ need_resched_nonpreemptible:
 }
 EXPORT_SYMBOL(schedule);
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 /*
  * Look out! "owner" is an entirely speculative pointer
  * access and not reliable.
@@ -6146,22 +6221,14 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
 	BUG_ON(p->se.on_rq);
 
 	p->policy = policy;
-	switch (p->policy) {
-	case SCHED_NORMAL:
-	case SCHED_BATCH:
-	case SCHED_IDLE:
-		p->sched_class = &fair_sched_class;
-		break;
-	case SCHED_FIFO:
-	case SCHED_RR:
-		p->sched_class = &rt_sched_class;
-		break;
-	}
-
 	p->rt_priority = prio;
 	p->normal_prio = normal_prio(p);
 	/* we are holding p->pi_lock already */
 	p->prio = rt_mutex_getprio(p);
+	if (rt_prio(p->prio))
+		p->sched_class = &rt_sched_class;
+	else
+		p->sched_class = &fair_sched_class;
 	set_load_weight(p);
 }
 
@@ -6906,7 +6973,7 @@ void show_state_filter(unsigned long state_filter)
 	/*
 	 * Only show locks if all tasks are dumped:
 	 */
-	if (state_filter == -1)
+	if (!state_filter)
 		debug_show_all_locks();
 }
 
@@ -7711,6 +7778,16 @@ early_initcall(migration_init);
 
 #ifdef CONFIG_SCHED_DEBUG
 
+static __read_mostly int sched_domain_debug_enabled;
+
+static int __init sched_domain_debug_setup(char *str)
+{
+	sched_domain_debug_enabled = 1;
+
+	return 0;
+}
+early_param("sched_debug", sched_domain_debug_setup);
+
 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 				  struct cpumask *groupmask)
 {
@@ -7797,6 +7874,9 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 	cpumask_var_t groupmask;
 	int level = 0;
 
+	if (!sched_domain_debug_enabled)
+		return;
+
 	if (!sd) {
 		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
 		return;
@@ -7876,6 +7956,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 
 static void free_rootdomain(struct root_domain *rd)
 {
+	synchronize_sched();
+
 	cpupri_cleanup(&rd->cpupri);
 
 	free_cpumask_var(rd->rto_mask);
@@ -8016,6 +8098,7 @@ static cpumask_var_t cpu_isolated_map;
 /* Setup the mask of cpus configured for isolated domains */
 static int __init isolated_cpu_setup(char *str)
 {
+	alloc_bootmem_cpumask_var(&cpu_isolated_map);
 	cpulist_parse(str, cpu_isolated_map);
 	return 1;
 }
@@ -8852,7 +8935,7 @@ static int build_sched_domains(const struct cpumask *cpu_map)
 	return __build_sched_domains(cpu_map, NULL);
 }
 
-static struct cpumask *doms_cur;	/* current sched domains */
+static cpumask_var_t *doms_cur;	/* current sched domains */
 static int ndoms_cur;		/* number of sched domains in 'doms_cur' */
 static struct sched_domain_attr *dattr_cur;
 				/* attribues of custom domains in 'doms_cur' */
@@ -8874,6 +8957,31 @@ int __attribute__((weak)) arch_update_cpu_topology(void)
 	return 0;
 }
 
+cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
+{
+	int i;
+	cpumask_var_t *doms;
+
+	doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
+	if (!doms)
+		return NULL;
+	for (i = 0; i < ndoms; i++) {
+		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
+			free_sched_domains(doms, i);
+			return NULL;
+		}
+	}
+	return doms;
+}
+
+void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
+{
+	unsigned int i;
+	for (i = 0; i < ndoms; i++)
+		free_cpumask_var(doms[i]);
+	kfree(doms);
+}
+
 /*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
@@ -8885,12 +8993,12 @@ static int arch_init_sched_domains(const struct cpumask *cpu_map)
 
 	arch_update_cpu_topology();
 	ndoms_cur = 1;
-	doms_cur = kmalloc(cpumask_size(), GFP_KERNEL);
+	doms_cur = alloc_sched_domains(ndoms_cur);
 	if (!doms_cur)
-		doms_cur = fallback_doms;
-	cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map);
+		doms_cur = &fallback_doms;
+	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
 	dattr_cur = NULL;
-	err = build_sched_domains(doms_cur);
+	err = build_sched_domains(doms_cur[0]);
 	register_sched_domain_sysctl();
 
 	return err;
@@ -8940,19 +9048,19 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  * doms_new[] to the current sched domain partitioning, doms_cur[].
  * It destroys each deleted domain and builds each new domain.
  *
- * 'doms_new' is an array of cpumask's of length 'ndoms_new'.
+ * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
  * The masks don't intersect (don't overlap.) We should setup one
  * sched domain for each mask. CPUs not in any of the cpumasks will
  * not be load balanced. If the same cpumask appears both in the
  * current 'doms_cur' domains and in the new 'doms_new', we can leave
  * it as it is.
  *
- * The passed in 'doms_new' should be kmalloc'd. This routine takes
- * ownership of it and will kfree it when done with it. If the caller
- * failed the kmalloc call, then it can pass in doms_new == NULL &&
- * ndoms_new == 1, and partition_sched_domains() will fallback to
- * the single partition 'fallback_doms', it also forces the domains
- * to be rebuilt.
+ * The passed in 'doms_new' should be allocated using
+ * alloc_sched_domains. This routine takes ownership of it and will
+ * free_sched_domains it when done with it. If the caller failed the
+ * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
+ * and partition_sched_domains() will fallback to the single partition
+ * 'fallback_doms', it also forces the domains to be rebuilt.
  *
  * If doms_new == NULL it will be replaced with cpu_online_mask.
  * ndoms_new == 0 is a special case for destroying existing domains,
@@ -8960,8 +9068,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  *
  * Call with hotplug lock held
  */
-/* FIXME: Change to struct cpumask *doms_new[] */
-void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
+void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 			     struct sched_domain_attr *dattr_new)
 {
 	int i, j, n;
@@ -8980,40 +9087,40 @@ void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 	/* Destroy deleted domains */
 	for (i = 0; i < ndoms_cur; i++) {
 		for (j = 0; j < n && !new_topology; j++) {
-			if (cpumask_equal(&doms_cur[i], &doms_new[j])
+			if (cpumask_equal(doms_cur[i], doms_new[j])
 			    && dattrs_equal(dattr_cur, i, dattr_new, j))
 				goto match1;
 		}
 		/* no match - a current sched domain not in new doms_new[] */
-		detach_destroy_domains(doms_cur + i);
+		detach_destroy_domains(doms_cur[i]);
 match1:
 		;
 	}
 
 	if (doms_new == NULL) {
 		ndoms_cur = 0;
-		doms_new = fallback_doms;
-		cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
+		doms_new = &fallback_doms;
+		cpumask_andnot(doms_new[0], cpu_online_mask, cpu_isolated_map);
 		WARN_ON_ONCE(dattr_new);
 	}
 
 	/* Build new domains */
 	for (i = 0; i < ndoms_new; i++) {
 		for (j = 0; j < ndoms_cur && !new_topology; j++) {
-			if (cpumask_equal(&doms_new[i], &doms_cur[j])
+			if (cpumask_equal(doms_new[i], doms_cur[j])
 			    && dattrs_equal(dattr_new, i, dattr_cur, j))
 				goto match2;
 		}
 		/* no match - add a new doms_new */
-		__build_sched_domains(doms_new + i,
+		__build_sched_domains(doms_new[i],
 					dattr_new ? dattr_new + i : NULL);
 match2:
 		;
 	}
 
 	/* Remember the new sched domains */
-	if (doms_cur != fallback_doms)
-		kfree(doms_cur);
+	if (doms_cur != &fallback_doms)
+		free_sched_domains(doms_cur, ndoms_cur);
 	kfree(dattr_cur);	/* kfree(NULL) is safe */
 	doms_cur = doms_new;
 	dattr_cur = dattr_new;
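(Aside, not part of the diff: the alloc_sched_domains()/free_sched_domains() helpers added above pair with the new partition_sched_domains() signature. A minimal sketch of a hypothetical caller, error handling elided and the hotplug lock assumed held, passing a single domain spanning the online cpus:)

	cpumask_var_t *doms;

	doms = alloc_sched_domains(1);
	if (!doms)
		return;
	cpumask_copy(doms[0], cpu_online_mask);
	/* partition_sched_domains() takes ownership of doms */
	partition_sched_domains(1, doms, NULL);
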
@@ -9335,10 +9442,6 @@ void __init sched_init(void)
 #ifdef CONFIG_CPUMASK_OFFSTACK
 	alloc_size += num_possible_cpus() * cpumask_size();
 #endif
-	/*
-	 * As sched_init() is called before page_alloc is setup,
-	 * we use alloc_bootmem().
-	 */
 	if (alloc_size) {
 		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
 
@@ -9407,6 +9510,10 @@ void __init sched_init(void)
 #endif /* CONFIG_USER_SCHED */
 #endif /* CONFIG_GROUP_SCHED */
 
+#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
+	update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
+					    __alignof__(unsigned long));
+#endif
 	for_each_possible_cpu(i) {
 		struct rq *rq;
 
@@ -9489,6 +9596,8 @@ void __init sched_init(void)
 		rq->cpu = i;
 		rq->online = 0;
 		rq->migration_thread = NULL;
+		rq->idle_stamp = 0;
+		rq->avg_idle = 2*sysctl_sched_migration_cost;
 		INIT_LIST_HEAD(&rq->migration_queue);
 		rq_attach_root(rq, &def_root_domain);
 #endif
@@ -9532,13 +9641,15 @@ void __init sched_init(void)
 	current->sched_class = &fair_sched_class;
 
 	/* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
-	alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
+	zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
 #ifdef CONFIG_SMP
 #ifdef CONFIG_NO_HZ
-	alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
+	zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
 	alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
 #endif
-	alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
+	/* May be allocated at isolcpus cmdline parse time */
+	if (cpu_isolated_map == NULL)
+		zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */
 
 	perf_event_init();
@@ -10868,6 +10979,7 @@ void synchronize_sched_expedited(void)
 		spin_unlock_irqrestore(&rq->lock, flags);
 	}
 	rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+	synchronize_sched_expedited_count++;
 	mutex_unlock(&rcu_sched_expedited_mutex);
 	put_online_cpus();
 	if (need_full_sync)