Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  180
1 file changed, 112 insertions(+), 68 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 3c91f110fc62..315ba4059f93 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -535,14 +535,12 @@ struct rq {
 #define CPU_LOAD_IDX_MAX 5
         unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 #ifdef CONFIG_NO_HZ
-        unsigned long last_tick_seen;
         unsigned char in_nohz_recently;
 #endif
         /* capture load from *all* tasks on this cpu: */
         struct load_weight load;
         unsigned long nr_load_updates;
         u64 nr_switches;
-        u64 nr_migrations_in;
 
         struct cfs_rq cfs;
         struct rt_rq rt;
@@ -591,6 +589,8 @@ struct rq {
 
         u64 rt_avg;
         u64 age_stamp;
+        u64 idle_stamp;
+        u64 avg_idle;
 #endif
 
         /* calc_load related fields */
@@ -772,7 +772,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
         if (!sched_feat_names[i])
                 return -EINVAL;
 
-        filp->f_pos += cnt;
+        *ppos += cnt;
 
         return cnt;
 }
@@ -2079,7 +2079,6 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 #endif
         if (old_cpu != new_cpu) {
                 p->se.nr_migrations++;
-                new_rq->nr_migrations_in++;
 #ifdef CONFIG_SCHEDSTATS
                 if (task_hot(p, old_rq->clock, NULL))
                         schedstat_inc(p, se.nr_forced2_migrations);
@@ -2443,6 +2442,17 @@ out_running:
 #ifdef CONFIG_SMP
         if (p->sched_class->task_wake_up)
                 p->sched_class->task_wake_up(rq, p);
+
+        if (unlikely(rq->idle_stamp)) {
+                u64 delta = rq->clock - rq->idle_stamp;
+                u64 max = 2*sysctl_sched_migration_cost;
+
+                if (delta > max)
+                        rq->avg_idle = max;
+                else
+                        update_avg(&rq->avg_idle, delta);
+                rq->idle_stamp = 0;
+        }
 #endif
 out:
         task_rq_unlock(rq, &flags);
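
The wakeup path above folds each measured idle period into rq->avg_idle through update_avg(), which this patch does not touch. A minimal sketch of that helper, assuming the usual definition in kernel/sched.c of this era (a 1/8-weight exponential moving average), for reference only:

        /* not part of this patch; illustrative reconstruction of the helper */
        static void update_avg(u64 *avg, u64 sample)
        {
                s64 diff = sample - *avg;

                /* move the average 1/8th of the way toward the new sample */
                *avg += diff >> 3;
        }

Combined with the idle_balance() hunks below, avg_idle lets the scheduler skip newidle balancing on a CPU whose recent idle periods are shorter than sysctl_sched_migration_cost, since a freshly pulled task would not run long enough to pay for the migration.
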
@@ -2855,14 +2865,14 @@ context_switch(struct rq *rq, struct task_struct *prev,
          */
         arch_start_context_switch(prev);
 
-        if (unlikely(!mm)) {
+        if (likely(!mm)) {
                 next->active_mm = oldmm;
                 atomic_inc(&oldmm->mm_count);
                 enter_lazy_tlb(oldmm, next);
         } else
                 switch_mm(oldmm, mm, next);
 
-        if (unlikely(!prev->mm)) {
+        if (likely(!prev->mm)) {
                 prev->active_mm = NULL;
                 rq->prev_mm = oldmm;
         }
@@ -3025,15 +3035,6 @@ static void calc_load_account_active(struct rq *this_rq)
 }
 
 /*
- * Externally visible per-cpu scheduler statistics:
- * cpu_nr_migrations(cpu) - number of migrations into that cpu
- */
-u64 cpu_nr_migrations(int cpu)
-{
-        return cpu_rq(cpu)->nr_migrations_in;
-}
-
-/*
  * Update rq->cpu_load[] statistics. This function is usually called every
  * scheduler tick (TICK_NSEC).
  */
@@ -4133,7 +4134,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
         unsigned long flags;
         struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
-        cpumask_setall(cpus);
+        cpumask_copy(cpus, cpu_online_mask);
 
         /*
          * When power savings policy is enabled for the parent domain, idle
@@ -4296,7 +4297,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
         int all_pinned = 0;
         struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
-        cpumask_setall(cpus);
+        cpumask_copy(cpus, cpu_online_mask);
 
         /*
          * When power savings policy is enabled for the parent domain, idle
@@ -4436,6 +4437,11 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
         int pulled_task = 0;
         unsigned long next_balance = jiffies + HZ;
 
+        this_rq->idle_stamp = this_rq->clock;
+
+        if (this_rq->avg_idle < sysctl_sched_migration_cost)
+                return;
+
         for_each_domain(this_cpu, sd) {
                 unsigned long interval;
 
@@ -4450,8 +4456,10 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
                 interval = msecs_to_jiffies(sd->balance_interval);
                 if (time_after(next_balance, sd->last_balance + interval))
                         next_balance = sd->last_balance + interval;
-                if (pulled_task)
+                if (pulled_task) {
+                        this_rq->idle_stamp = 0;
                         break;
+                }
         }
         if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
                 /*
@@ -5053,8 +5061,13 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime,
         p->gtime = cputime_add(p->gtime, cputime);
 
         /* Add guest time to cpustat. */
-        cpustat->user = cputime64_add(cpustat->user, tmp);
-        cpustat->guest = cputime64_add(cpustat->guest, tmp);
+        if (TASK_NICE(p) > 0) {
+                cpustat->nice = cputime64_add(cpustat->nice, tmp);
+                cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
+        } else {
+                cpustat->user = cputime64_add(cpustat->user, tmp);
+                cpustat->guest = cputime64_add(cpustat->guest, tmp);
+        }
 }
 
 /*
@@ -5179,41 +5192,45 @@ cputime_t task_stime(struct task_struct *p)
         return p->stime;
 }
 #else
+
+#ifndef nsecs_to_cputime
+# define nsecs_to_cputime(__nsecs) \
+        msecs_to_cputime(div_u64((__nsecs), NSEC_PER_MSEC))
+#endif
+
 cputime_t task_utime(struct task_struct *p)
 {
-        clock_t utime = cputime_to_clock_t(p->utime),
-                total = utime + cputime_to_clock_t(p->stime);
+        cputime_t utime = p->utime, total = utime + p->stime;
         u64 temp;
 
         /*
          * Use CFS's precise accounting:
          */
-        temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
+        temp = (u64)nsecs_to_cputime(p->se.sum_exec_runtime);
 
         if (total) {
                 temp *= utime;
                 do_div(temp, total);
         }
-        utime = (clock_t)temp;
+        utime = (cputime_t)temp;
 
-        p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
+        p->prev_utime = max(p->prev_utime, utime);
         return p->prev_utime;
 }
 
 cputime_t task_stime(struct task_struct *p)
 {
-        clock_t stime;
+        cputime_t stime;
 
         /*
          * Use CFS's precise accounting. (we subtract utime from
          * the total, to make sure the total observed by userspace
          * grows monotonically - apps rely on that):
          */
-        stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
-                        cputime_to_clock_t(task_utime(p));
+        stime = nsecs_to_cputime(p->se.sum_exec_runtime) - task_utime(p);
 
         if (stime >= 0)
-                p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
+                p->prev_stime = max(p->prev_stime, stime);
 
         return p->prev_stime;
 }
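
The reworked task_utime() above keeps CFS's precise sum_exec_runtime as the total and splits it in the ratio suggested by the tick-sampled utime/stime, now entirely in cputime units rather than clock_t:

        utime_scaled = nsecs_to_cputime(sum_exec_runtime) * utime / (utime + stime)

A worked example with made-up numbers (not from the patch): if the sampled utime is 30 and stime is 10 cputime units while sum_exec_runtime converts to 100 units, task_utime() returns 100 * 30 / 40 = 75 and task_stime() returns 100 - 75 = 25; the prev_utime/prev_stime maxima then keep both values monotonic as seen from userspace.
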
@@ -6182,22 +6199,14 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
         BUG_ON(p->se.on_rq);
 
         p->policy = policy;
-        switch (p->policy) {
-        case SCHED_NORMAL:
-        case SCHED_BATCH:
-        case SCHED_IDLE:
-                p->sched_class = &fair_sched_class;
-                break;
-        case SCHED_FIFO:
-        case SCHED_RR:
-                p->sched_class = &rt_sched_class;
-                break;
-        }
-
         p->rt_priority = prio;
         p->normal_prio = normal_prio(p);
         /* we are holding p->pi_lock already */
         p->prio = rt_mutex_getprio(p);
+        if (rt_prio(p->prio))
+                p->sched_class = &rt_sched_class;
+        else
+                p->sched_class = &fair_sched_class;
         set_load_weight(p);
 }
 
@@ -6942,7 +6951,7 @@ void show_state_filter(unsigned long state_filter)
         /*
          * Only show locks if all tasks are dumped:
          */
-        if (state_filter == -1)
+        if (!state_filter)
                 debug_show_all_locks();
 }
 
@@ -7747,6 +7756,16 @@ early_initcall(migration_init);
 
 #ifdef CONFIG_SCHED_DEBUG
 
+static __read_mostly int sched_domain_debug_enabled;
+
+static int __init sched_domain_debug_setup(char *str)
+{
+        sched_domain_debug_enabled = 1;
+
+        return 0;
+}
+early_param("sched_debug", sched_domain_debug_setup);
+
 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                                   struct cpumask *groupmask)
 {
@@ -7833,6 +7852,9 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
         cpumask_var_t groupmask;
         int level = 0;
 
+        if (!sched_domain_debug_enabled)
+                return;
+
         if (!sd) {
                 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
                 return;
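
With the two hunks above, the verbose sched-domain dump becomes opt-in: sched_domain_debug() now returns immediately unless the kernel was booted with the new early parameter, i.e. something like the following appended to the kernel command line (illustrative, not taken from the patch):

        ... sched_debug

Only then, and only under CONFIG_SCHED_DEBUG, are the per-CPU domain and group printouts emitted.
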
@@ -8890,7 +8912,7 @@ static int build_sched_domains(const struct cpumask *cpu_map)
         return __build_sched_domains(cpu_map, NULL);
 }
 
-static struct cpumask *doms_cur;        /* current sched domains */
+static cpumask_var_t *doms_cur;        /* current sched domains */
 static int ndoms_cur;                /* number of sched domains in 'doms_cur' */
 static struct sched_domain_attr *dattr_cur;
                                 /* attribues of custom domains in 'doms_cur' */
@@ -8912,6 +8934,31 @@ int __attribute__((weak)) arch_update_cpu_topology(void)
         return 0;
 }
 
+cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
+{
+        int i;
+        cpumask_var_t *doms;
+
+        doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
+        if (!doms)
+                return NULL;
+        for (i = 0; i < ndoms; i++) {
+                if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
+                        free_sched_domains(doms, i);
+                        return NULL;
+                }
+        }
+        return doms;
+}
+
+void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
+{
+        unsigned int i;
+        for (i = 0; i < ndoms; i++)
+                free_cpumask_var(doms[i]);
+        kfree(doms);
+}
+
 /*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
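
The two helpers above become the intended way to build the doms_new[] argument for partition_sched_domains(), per the updated comment further down. A sketch of a caller, assuming the API exactly as added in this hunk (illustrative only; the hotplug lock must be held, as the existing comment requires):

        cpumask_var_t *doms = alloc_sched_domains(1);

        if (doms) {
                /* one partition covering every online CPU */
                cpumask_copy(doms[0], cpu_online_mask);
                /* partition_sched_domains() takes ownership and later
                 * frees the array with free_sched_domains() */
                partition_sched_domains(1, doms, NULL);
        } else {
                /* allocation failed: request the single fallback partition */
                partition_sched_domains(1, NULL, NULL);
        }
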
@@ -8923,12 +8970,12 @@ static int arch_init_sched_domains(const struct cpumask *cpu_map)
 
         arch_update_cpu_topology();
         ndoms_cur = 1;
-        doms_cur = kmalloc(cpumask_size(), GFP_KERNEL);
+        doms_cur = alloc_sched_domains(ndoms_cur);
         if (!doms_cur)
-                doms_cur = fallback_doms;
-        cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map);
+                doms_cur = &fallback_doms;
+        cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
         dattr_cur = NULL;
-        err = build_sched_domains(doms_cur);
+        err = build_sched_domains(doms_cur[0]);
         register_sched_domain_sysctl();
 
         return err;
@@ -8978,19 +9025,19 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  * doms_new[] to the current sched domain partitioning, doms_cur[].
  * It destroys each deleted domain and builds each new domain.
  *
- * 'doms_new' is an array of cpumask's of length 'ndoms_new'.
+ * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
  * The masks don't intersect (don't overlap.) We should setup one
  * sched domain for each mask. CPUs not in any of the cpumasks will
  * not be load balanced. If the same cpumask appears both in the
  * current 'doms_cur' domains and in the new 'doms_new', we can leave
  * it as it is.
  *
- * The passed in 'doms_new' should be kmalloc'd. This routine takes
- * ownership of it and will kfree it when done with it. If the caller
- * failed the kmalloc call, then it can pass in doms_new == NULL &&
- * ndoms_new == 1, and partition_sched_domains() will fallback to
- * the single partition 'fallback_doms', it also forces the domains
- * to be rebuilt.
+ * The passed in 'doms_new' should be allocated using
+ * alloc_sched_domains.  This routine takes ownership of it and will
+ * free_sched_domains it when done with it. If the caller failed the
+ * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
+ * and partition_sched_domains() will fallback to the single partition
+ * 'fallback_doms', it also forces the domains to be rebuilt.
  *
  * If doms_new == NULL it will be replaced with cpu_online_mask.
  * ndoms_new == 0 is a special case for destroying existing domains,
@@ -8998,8 +9045,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  *
  * Call with hotplug lock held
  */
-/* FIXME: Change to struct cpumask *doms_new[] */
-void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
+void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
                              struct sched_domain_attr *dattr_new)
 {
         int i, j, n;
@@ -9018,40 +9064,40 @@ void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
         /* Destroy deleted domains */
         for (i = 0; i < ndoms_cur; i++) {
                 for (j = 0; j < n && !new_topology; j++) {
-                        if (cpumask_equal(&doms_cur[i], &doms_new[j])
+                        if (cpumask_equal(doms_cur[i], doms_new[j])
                             && dattrs_equal(dattr_cur, i, dattr_new, j))
                                 goto match1;
                 }
                 /* no match - a current sched domain not in new doms_new[] */
-                detach_destroy_domains(doms_cur + i);
+                detach_destroy_domains(doms_cur[i]);
 match1:
                 ;
         }
 
         if (doms_new == NULL) {
                 ndoms_cur = 0;
-                doms_new = fallback_doms;
-                cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
+                doms_new = &fallback_doms;
+                cpumask_andnot(doms_new[0], cpu_online_mask, cpu_isolated_map);
                 WARN_ON_ONCE(dattr_new);
         }
 
         /* Build new domains */
         for (i = 0; i < ndoms_new; i++) {
                 for (j = 0; j < ndoms_cur && !new_topology; j++) {
-                        if (cpumask_equal(&doms_new[i], &doms_cur[j])
+                        if (cpumask_equal(doms_new[i], doms_cur[j])
                             && dattrs_equal(dattr_new, i, dattr_cur, j))
                                 goto match2;
                 }
                 /* no match - add a new doms_new */
-                __build_sched_domains(doms_new + i,
+                __build_sched_domains(doms_new[i],
                                        dattr_new ? dattr_new + i : NULL);
 match2:
                 ;
         }
 
         /* Remember the new sched domains */
-        if (doms_cur != fallback_doms)
-                kfree(doms_cur);
+        if (doms_cur != &fallback_doms)
+                free_sched_domains(doms_cur, ndoms_cur);
         kfree(dattr_cur);        /* kfree(NULL) is safe */
         doms_cur = doms_new;
         dattr_cur = dattr_new;
@@ -9373,10 +9419,6 @@ void __init sched_init(void)
 #ifdef CONFIG_CPUMASK_OFFSTACK
         alloc_size += num_possible_cpus() * cpumask_size();
 #endif
-        /*
-         * As sched_init() is called before page_alloc is setup,
-         * we use alloc_bootmem().
-         */
         if (alloc_size) {
                 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
 
@@ -9531,6 +9573,8 @@ void __init sched_init(void)
                 rq->cpu = i;
                 rq->online = 0;
                 rq->migration_thread = NULL;
+                rq->idle_stamp = 0;
+                rq->avg_idle = 2*sysctl_sched_migration_cost;
                 INIT_LIST_HEAD(&rq->migration_queue);
                 rq_attach_root(rq, &def_root_domain);
 #endif