Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 44
1 file changed, 30 insertions(+), 14 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index e8819bc6f462..558e5f284269 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -397,9 +397,9 @@ struct cfs_rq {
 	 * 'curr' points to currently running entity on this cfs_rq.
 	 * It is set to NULL otherwise (i.e when none are currently running).
 	 */
-	struct sched_entity *curr, *next;
+	struct sched_entity *curr, *next, *last;
 
-	unsigned long nr_spread_over;
+	unsigned int nr_spread_over;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
@@ -969,6 +969,14 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	}
 }
 
+void task_rq_unlock_wait(struct task_struct *p)
+{
+	struct rq *rq = task_rq(p);
+
+	smp_mb(); /* spin-unlock-wait is not a full memory barrier */
+	spin_unlock_wait(&rq->lock);
+}
+
 static void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
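The helper added above waits for whoever currently holds the task's runqueue lock to drop it, with an explicit smp_mb() because spin_unlock_wait() alone is not a full barrier. A minimal user-space sketch of that pattern, assuming a C11-atomics spinlock analogue (none of these names are kernel code):

    #include <stdatomic.h>

    struct demo_lock { atomic_int locked; };         /* stand-in for rq->lock */

    /* Analogue of task_rq_unlock_wait(): spin until the current holder,
     * if any, has released the lock. */
    static void demo_unlock_wait(struct demo_lock *l)
    {
        atomic_thread_fence(memory_order_seq_cst);   /* analogue of smp_mb() */
        while (atomic_load_explicit(&l->locked, memory_order_acquire))
            ;   /* just observing the lock word is not a full memory barrier */
    }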
@@ -1448,6 +1456,8 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 	if (rq->nr_running)
 		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
+	else
+		rq->avg_load_per_task = 0;
 
 	return rq->avg_load_per_task;
 }
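The new else branch exists because rq->avg_load_per_task is a cached quotient: without it, an empty runqueue keeps reporting the average from its last busy period. A standalone sketch of the same rule (hypothetical names, plain C):

    /* A cached derived value must be cleared when its inputs drop to zero,
     * otherwise readers keep seeing the last non-zero average. */
    static unsigned long demo_avg_load(unsigned long weight,
                                       unsigned long nr_running,
                                       unsigned long *cached_avg)
    {
        if (nr_running)
            *cached_avg = weight / nr_running;
        else
            *cached_avg = 0;        /* the line this hunk adds */
        return *cached_avg;
    }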
@@ -1805,7 +1815,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 	/*
 	 * Buddy candidates are cache hot:
 	 */
-	if (sched_feat(CACHE_HOT_BUDDY) && (&p->se == cfs_rq_of(&p->se)->next))
+	if (sched_feat(CACHE_HOT_BUDDY) &&
+			(&p->se == cfs_rq_of(&p->se)->next ||
+			 &p->se == cfs_rq_of(&p->se)->last))
 		return 1;
 
 	if (p->sched_class != &fair_sched_class)
@@ -4190,7 +4202,6 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
 
 	if (p == rq->idle) {
 		p->stime = cputime_add(p->stime, steal);
-		account_group_system_time(p, steal);
 		if (atomic_read(&rq->nr_iowait) > 0)
 			cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
 		else
@@ -4326,7 +4337,7 @@ void __kprobes sub_preempt_count(int val)
 	/*
 	 * Underflow?
 	 */
-	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
+	if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked())))
 		return;
 	/*
 	 * Is the spinlock portion underflowing?
@@ -5858,6 +5869,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
+	spin_lock_irqsave(&rq->lock, flags);
+
 	__sched_fork(idle);
 	idle->se.exec_start = sched_clock();
 
@@ -5865,7 +5878,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	idle->cpus_allowed = cpumask_of_cpu(cpu);
 	__set_task_cpu(idle, cpu);
 
-	spin_lock_irqsave(&rq->lock, flags);
 	rq->curr = rq->idle = idle;
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	idle->oncpu = 1;
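Taken together, the two init_idle() hunks only move the spin_lock_irqsave() earlier, so __sched_fork() and the cpu/runqueue assignments now run with rq->lock already held. A user-space analogue of widening a critical section to cover initialization (the pthread names and the struct are illustrative assumptions):

    #include <pthread.h>

    struct demo_rq {
        pthread_mutex_t lock;
        int curr;                        /* stand-in for rq->curr / rq->idle */
    };

    static void demo_init_idle(struct demo_rq *rq, int idle)
    {
        pthread_mutex_lock(&rq->lock);   /* taken before, not after, the setup */
        /* ... initialize the idle task's scheduler state ... */
        rq->curr = idle;
        pthread_mutex_unlock(&rq->lock);
    }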
@@ -6875,15 +6887,17 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 	struct sched_domain *tmp;
 
 	/* Remove the sched domains which do not contribute to scheduling. */
-	for (tmp = sd; tmp; tmp = tmp->parent) {
+	for (tmp = sd; tmp; ) {
 		struct sched_domain *parent = tmp->parent;
 		if (!parent)
 			break;
+
 		if (sd_parent_degenerate(tmp, parent)) {
 			tmp->parent = parent->parent;
 			if (parent->parent)
 				parent->parent->child = tmp;
-		}
+		} else
+			tmp = tmp->parent;
 	}
 
 	if (sd && sd_degenerate(sd)) {
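The rewritten loop only advances to the parent when nothing was unlinked; previously, after a degenerate parent was spliced out, the unconditional tmp = tmp->parent step meant tmp was never re-tested against its new parent, so a run of consecutive degenerate levels was only partially collapsed. A self-contained sketch of the corrected walk (hypothetical types; a plain flag stands in for sd_parent_degenerate() and the ->child fix-up is omitted):

    struct demo_dom {
        struct demo_dom *parent;
        int degenerate;
    };

    static void demo_collapse(struct demo_dom *sd)
    {
        struct demo_dom *tmp;

        for (tmp = sd; tmp; ) {
            struct demo_dom *parent = tmp->parent;

            if (!parent)
                break;

            if (parent->degenerate)
                tmp->parent = parent->parent;   /* unlink; re-test tmp's new parent */
            else
                tmp = tmp->parent;              /* nothing removed: move up */
        }
    }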
@@ -7672,6 +7686,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 error:
 	free_sched_groups(cpu_map, tmpmask);
 	SCHED_CPUMASK_FREE((void *)allmasks);
+	kfree(rd);
 	return -ENOMEM;
 #endif
 }
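The added kfree(rd) closes a leak on the failure path: the root domain allocated earlier in __build_sched_domains() was not released when the function bailed out through the error label. A standalone sketch of the rule (malloc/free standing in for the kernel allocators, all names hypothetical):

    #include <stdlib.h>

    static int demo_build(void)
    {
        void *rd = malloc(64);       /* allocated first, like the root domain */
        void *masks = malloc(128);

        if (!rd || !masks)
            goto error;
        /* ... build, then hand rd off or release it ... */
        free(masks);
        free(rd);
        return 0;
    error:
        free(masks);                 /* free(NULL) is a no-op, so this is safe */
        free(rd);                    /* counterpart of the kfree(rd) added above */
        return -1;
    }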
@@ -7773,13 +7788,14 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  *
  * The passed in 'doms_new' should be kmalloc'd. This routine takes
  * ownership of it and will kfree it when done with it. If the caller
- * failed the kmalloc call, then it can pass in doms_new == NULL,
- * and partition_sched_domains() will fallback to the single partition
- * 'fallback_doms', it also forces the domains to be rebuilt.
+ * failed the kmalloc call, then it can pass in doms_new == NULL &&
+ * ndoms_new == 1, and partition_sched_domains() will fallback to
+ * the single partition 'fallback_doms', it also forces the domains
+ * to be rebuilt.
  *
- * If doms_new==NULL it will be replaced with cpu_online_map.
- * ndoms_new==0 is a special case for destroying existing domains.
- * It will not create the default domain.
+ * If doms_new == NULL it will be replaced with cpu_online_map.
+ * ndoms_new == 0 is a special case for destroying existing domains,
+ * and it will not create the default domain.
  *
  * Call with hotplug lock held
  */
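The reworded comment distinguishes two special calls, sketched below for illustration; the wrapper function and the cpu-hotplug locking shown are assumptions drawn from the comment, not part of this patch.

    void demo_rebuild_domains(void)
    {
        get_online_cpus();           /* "Call with hotplug lock held" */

        /* doms_new == NULL && ndoms_new == 1: fall back to the single
         * default partition ('fallback_doms') and force a rebuild. */
        partition_sched_domains(1, NULL, NULL);

        /* ndoms_new == 0: destroy the existing domains and do not
         * create the default domain in their place. */
        partition_sched_domains(0, NULL, NULL);

        put_online_cpus();
    }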