path: root/kernel/sched/sched.h
author		Kirill Tkhai <tkhai@yandex.ru>	2014-05-08 19:00:14 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-05-22 05:16:33 -0400
commit		72465447867b9de6b5cdea5d10f9781585136270 (patch)
tree		2067c7c82f389c91c325d04797ced6b851ba1691 /kernel/sched/sched.h
parent		52a08ef1f13a11289c9e18cd4cfb4e51c024058b (diff)
sched, nohz: Change rq->nr_running to always use wrappers
Sometimes ->nr_running may cross 2, but no interrupt is sent to the
rq's CPU. In this case we do not re-enable the timer. This looks like
a possible reason for rare unexpected effects when nohz is enabled.

The patch replaces all places that change nr_running directly and
makes add_nr_running() take care of crossing the boundary.

Signed-off-by: Kirill Tkhai <tkhai@yandex.ru>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20140508225830.2469.97461.stgit@localhost
Signed-off-by: Ingo Molnar <mingo@kernel.org>
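To make the failure mode concrete: with a batched update, nr_running can jump from 1 straight to 3 without ever equalling 2, so an "== 2" test misses the crossing. Below is a minimal user-space sketch of the range check the patch introduces; fake_rq and kick_tick() are illustrative stand-ins, not kernel APIs.

/*
 * Minimal user-space sketch of the crossing check; fake_rq and
 * kick_tick() are illustrative stand-ins, not kernel APIs.
 */
#include <stdio.h>

struct fake_rq {
	unsigned nr_running;
};

static void kick_tick(void)
{
	/* Stands in for the IPI that restarts the tick on a nohz_full CPU. */
	printf("tick restarted\n");
}

static void add_nr_running(struct fake_rq *rq, unsigned count)
{
	unsigned prev_nr = rq->nr_running;

	rq->nr_running = prev_nr + count;
	/* Fire on any crossing of 2, not only on an exact hit. */
	if (prev_nr < 2 && rq->nr_running >= 2)
		kick_tick();
}

int main(void)
{
	struct fake_rq old_rq = { .nr_running = 1 };
	struct fake_rq new_rq = { .nr_running = 1 };

	/*
	 * Old world: a batched site modified the counter directly and
	 * bypassed the "== 2" check in inc_nr_running() entirely.
	 */
	old_rq.nr_running += 2;		/* 1 -> 3, no kick: tick stays off */

	/* New world: the same update goes through the wrapper. */
	add_nr_running(&new_rq, 2);	/* 1 -> 3, prints "tick restarted" */
	return 0;
}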
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--	kernel/sched/sched.h	12
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b2cbe81308af..600e2291a75c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1206,12 +1206,14 @@ extern void update_idle_cpu_load(struct rq *this_rq);
 
 extern void init_task_runnable_average(struct task_struct *p);
 
-static inline void inc_nr_running(struct rq *rq)
+static inline void add_nr_running(struct rq *rq, unsigned count)
 {
-	rq->nr_running++;
+	unsigned prev_nr = rq->nr_running;
+
+	rq->nr_running = prev_nr + count;
 
 #ifdef CONFIG_NO_HZ_FULL
-	if (rq->nr_running == 2) {
+	if (prev_nr < 2 && rq->nr_running >= 2) {
 		if (tick_nohz_full_cpu(rq->cpu)) {
 			/* Order rq->nr_running write against the IPI */
 			smp_wmb();
@@ -1221,9 +1223,9 @@ static inline void inc_nr_running(struct rq *rq)
 #endif
 }
 
-static inline void dec_nr_running(struct rq *rq)
+static inline void sub_nr_running(struct rq *rq, unsigned count)
 {
-	rq->nr_running--;
+	rq->nr_running -= count;
 }
 
 static inline void rq_last_tick_reset(struct rq *rq)
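The smp_wmb() in the first hunk orders the rq->nr_running store before the IPI, so the kicked CPU observes the new count when it handles the interrupt. A rough user-space analogy of that ordering, using C11 release/acquire in place of the kernel primitives; kick and remote_cpu() are stand-ins for the IPI and its handler, not kernel code. Build with: cc -pthread demo.c

#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

static unsigned nr_running;	/* plain data, like rq->nr_running */
static atomic_int kick;		/* stands in for the reschedule IPI */

static void *remote_cpu(void *arg)
{
	(void)arg;
	while (!atomic_load_explicit(&kick, memory_order_acquire))
		;		/* "interrupt handler" waiting for the kick */
	/* Release/acquire guarantees we observe nr_running == 2 here. */
	printf("saw nr_running = %u, restarting tick\n", nr_running);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, remote_cpu, NULL);

	nr_running = 2;		/* the counter write... */
	/* ...made visible before the kick, as smp_wmb() orders it
	 * before the IPI in add_nr_running(). */
	atomic_store_explicit(&kick, 1, memory_order_release);

	pthread_join(&t, NULL);
	return 0;
}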