-rw-r--r--   include/linux/sched.h   |  1
-rw-r--r--   kernel/exit.c           |  5
-rw-r--r--   kernel/sched.c          | 10
-rw-r--r--   kernel/sched_debug.c    | 41
-rw-r--r--   kernel/sched_fair.c     | 17
5 files changed, 48 insertions, 26 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 295b7c756ca6..644ffbda17ca 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -247,6 +247,7 @@ extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
 extern int runqueue_is_locked(void);
+extern void task_rq_unlock_wait(struct task_struct *p);
 
 extern cpumask_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
diff --git a/kernel/exit.c b/kernel/exit.c
index 80137a5d9467..ae2b92be5fae 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -141,6 +141,11 @@ static void __exit_signal(struct task_struct *tsk)
 	if (sig) {
 		flush_sigqueue(&sig->shared_pending);
 		taskstats_tgid_free(sig);
+		/*
+		 * Make sure ->signal can't go away under rq->lock,
+		 * see account_group_exec_runtime().
+		 */
+		task_rq_unlock_wait(tsk);
 		__cleanup_signal(sig);
 	}
 }
diff --git a/kernel/sched.c b/kernel/sched.c
index 57c933ffbee1..50a21f964679 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -399,7 +399,7 @@ struct cfs_rq {
 	 */
 	struct sched_entity *curr, *next, *last;
 
-	unsigned long nr_spread_over;
+	unsigned int nr_spread_over;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
@@ -969,6 +969,14 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	}
 }
 
+void task_rq_unlock_wait(struct task_struct *p)
+{
+	struct rq *rq = task_rq(p);
+
+	smp_mb(); /* spin-unlock-wait is not a full memory barrier */
+	spin_unlock_wait(&rq->lock);
+}
+
 static void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
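
The exit.c and sched.c hunks above work together: account_group_exec_runtime() dereferences tsk->signal while holding rq->lock (see the comment added in __exit_signal()), so the exit path must not free the signal structure until any CPU currently inside that rq->lock section has released the lock, which is what the new task_rq_unlock_wait() provides. The following is a minimal userspace sketch of that unlock-wait idiom, not kernel code; the struct and thread names are invented, and a pthread mutex lock/unlock pair stands in for spin_unlock_wait().

/*
 * Minimal userspace sketch of the unlock-wait idiom above -- not kernel
 * code; all names here are invented for illustration.  tick() plays the
 * account_group_exec_runtime() role: it only touches task.signal while
 * holding rq_lock.  exiter() plays __exit_signal(): it detaches the
 * pointer, then takes and releases rq_lock (standing in for
 * task_rq_unlock_wait()) so any tick that saw the old pointer has
 * finished before the memory is freed.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct signal_stats { unsigned long long sum_exec_runtime; };
struct task_model { _Atomic(struct signal_stats *) signal; };

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
static struct task_model task;

static void *tick(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&rq_lock);
		struct signal_stats *sig = atomic_load(&task.signal);
		if (sig)			/* safe: exiter waits for the lock */
			sig->sum_exec_runtime += 1;
		pthread_mutex_unlock(&rq_lock);
	}
	return NULL;
}

static void *exiter(void *arg)
{
	(void)arg;
	/* Detach the pointer first, as __exit_signal() does. */
	struct signal_stats *sig = atomic_exchange(&task.signal, NULL);

	/* The "unlock wait": let any current rq_lock holder finish. */
	pthread_mutex_lock(&rq_lock);
	pthread_mutex_unlock(&rq_lock);

	free(sig);			/* no tick can still be using it */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	atomic_store(&task.signal, calloc(1, sizeof(struct signal_stats)));
	pthread_create(&a, NULL, tick, NULL);
	pthread_create(&b, NULL, exiter, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

In the kernel version an explicit smp_mb() is still needed because, as the comment in task_rq_unlock_wait() notes, spin_unlock_wait() alone is not a full memory barrier; the mutex operations and seq_cst atomics in this sketch already provide the equivalent ordering.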
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 5ae17762ec32..48ecc51e7701 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -144,7 +144,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	last = __pick_last_entity(cfs_rq);
 	if (last)
 		max_vruntime = last->vruntime;
-	min_vruntime = rq->cfs.min_vruntime;
+	min_vruntime = cfs_rq->min_vruntime;
 	rq0_min_vruntime = per_cpu(runqueues, 0).cfs.min_vruntime;
 	spin_unlock_irqrestore(&rq->lock, flags);
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
@@ -161,26 +161,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 			SPLIT_NS(spread0));
 	SEQ_printf(m, "  .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
-#ifdef CONFIG_SCHEDSTATS
-#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
-
-	P(yld_exp_empty);
-	P(yld_act_empty);
-	P(yld_both_empty);
-	P(yld_count);
 
-	P(sched_switch);
-	P(sched_count);
-	P(sched_goidle);
-
-	P(ttwu_count);
-	P(ttwu_local);
-
-	P(bkl_count);
-
-#undef P
-#endif
-	SEQ_printf(m, "  .%-30s: %ld\n", "nr_spread_over",
+	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
 			cfs_rq->nr_spread_over);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_SMP
@@ -260,6 +242,25 @@ static void print_cpu(struct seq_file *m, int cpu)
 #undef P
 #undef PN
 
+#ifdef CONFIG_SCHEDSTATS
+#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
+
+	P(yld_exp_empty);
+	P(yld_act_empty);
+	P(yld_both_empty);
+	P(yld_count);
+
+	P(sched_switch);
+	P(sched_count);
+	P(sched_goidle);
+
+	P(ttwu_count);
+	P(ttwu_local);
+
+	P(bkl_count);
+
+#undef P
+#endif
 	print_cfs_stats(m, cpu);
 	print_rt_stats(m, cpu);
 
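
The block of P(...) counters removed from print_cfs_rq() and re-added in print_cpu() prints fields of the per-CPU struct rq (the macro expands to rq->n), so emitting it from print_cfs_rq(), which runs for every cfs_rq, repeated per-runqueue data; print_cpu() runs once per CPU, which is where these counters belong. The nr_spread_over format also changes from %ld to %d to match the switch to unsigned int in kernel/sched.c. For reference, here is a small standalone illustration of the P(n) stringify idiom, with a made-up struct and values rather than the real schedstats fields.

/*
 * Standalone illustration of the P(n) macro idiom used above -- the
 * struct and values are made up, not the real schedstats fields.  The
 * preprocessor stringify operator #n turns the field name into its
 * printed label, so each counter is listed exactly once.
 */
#include <stdio.h>

struct rq_stats {
	unsigned int yld_count;
	unsigned int sched_count;
	unsigned int ttwu_count;
};

int main(void)
{
	struct rq_stats stats = { .yld_count = 3, .sched_count = 42, .ttwu_count = 7 };
	struct rq_stats *rq = &stats;

#define P(n) printf("  .%-30s: %u\n", #n, rq->n)
	P(yld_count);
	P(sched_count);
	P(ttwu_count);
#undef P

	return 0;
}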
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 51aa3e102acb..98345e45b059 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -716,6 +716,15 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 		__enqueue_entity(cfs_rq, se);
 }
 
+static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	if (cfs_rq->last == se)
+		cfs_rq->last = NULL;
+
+	if (cfs_rq->next == se)
+		cfs_rq->next = NULL;
+}
+
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 {
@@ -738,11 +747,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 #endif
 	}
 
-	if (cfs_rq->last == se)
-		cfs_rq->last = NULL;
-
-	if (cfs_rq->next == se)
-		cfs_rq->next = NULL;
+	clear_buddies(cfs_rq, se);
 
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
@@ -977,6 +982,8 @@ static void yield_task_fair(struct rq *rq)
 	if (unlikely(cfs_rq->nr_running == 1))
 		return;
 
+	clear_buddies(cfs_rq, se);
+
 	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
 		update_rq_clock(rq);
 		/*
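
clear_buddies() factors out the buddy invalidation that dequeue_entity() already performed, and yield_task_fair() now calls it as well: if the yielding entity stayed cached as cfs_rq->next or cfs_rq->last, the buddy-preferring pick path could select it again immediately, defeating the yield. The sketch below is a deliberately reduced standalone model of that effect; its pick_next() is not the kernel's pick_next_entity(), and the entities are invented.

/*
 * Reduced standalone model of the buddy problem fixed above -- not the
 * kernel's pick_next_entity(); entities and the pick logic are invented
 * for illustration.  Buddies (next/last) are preferred over the leftmost
 * (smallest-vruntime) entity, so a yielding task that is still cached as
 * a buddy gets the CPU right back unless the buddies are cleared.
 */
#include <stdio.h>
#include <stddef.h>

struct entity { const char *name; };

struct cfs_rq {
	struct entity *leftmost;	/* what a plain vruntime pick would choose */
	struct entity *next, *last;	/* buddy hints */
};

static struct entity *pick_next(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->next)
		return cfs_rq->next;
	if (cfs_rq->last)
		return cfs_rq->last;
	return cfs_rq->leftmost;
}

static void clear_buddies(struct cfs_rq *cfs_rq, struct entity *se)
{
	if (cfs_rq->last == se)
		cfs_rq->last = NULL;
	if (cfs_rq->next == se)
		cfs_rq->next = NULL;
}

int main(void)
{
	struct entity yielder = { "yielder" }, waiter = { "waiter" };
	struct cfs_rq cfs_rq = { .leftmost = &waiter, .next = &yielder, .last = NULL };

	/* Without the new call: the task that just yielded is picked again. */
	printf("buddies kept:    next pick is %s\n", pick_next(&cfs_rq)->name);

	/* yield_task_fair() now clears the buddies first. */
	clear_buddies(&cfs_rq, &yielder);
	printf("buddies cleared: next pick is %s\n", pick_next(&cfs_rq)->name);
	return 0;
}

Run as written, the first line reports the yielder being picked again and the second reports the waiter, which is the behavioural difference the new clear_buddies() call in yield_task_fair() is meant to make.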