author     Kirill Tkhai <tkhai@yandex.ru>    2014-05-08 19:00:14 -0400
committer  Ingo Molnar <mingo@kernel.org>    2014-05-22 05:16:33 -0400
commit     72465447867b9de6b5cdea5d10f9781585136270 (patch)
tree       2067c7c82f389c91c325d04797ced6b851ba1691 /kernel/sched
parent     52a08ef1f13a11289c9e18cd4cfb4e51c024058b (diff)
sched, nohz: Change rq->nr_running to always use wrappers
Sometimes ->nr_running may cross 2 without an interrupt being sent to the rq's CPU; in that case we do not re-enable the timer. This may be the reason for rare unexpected effects when nohz is enabled.

Replace all direct modifications of nr_running with the add_nr_running()/sub_nr_running() wrappers and make add_nr_running() take care of crossing the boundary.

Signed-off-by: Kirill Tkhai <tkhai@yandex.ru>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20140508225830.2469.97461.stgit@localhost
Signed-off-by: Ingo Molnar <mingo@kernel.org>
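Why the wrapper matters: a bulk update (e.g. unthrottling a whole group) can move nr_running from below 2 straight past it in one step, so testing the new value with "== 2" misses the transition and the tick is never restarted on a nohz_full CPU. Below is a minimal standalone C sketch of the boundary check added in sched.h; tick_nohz_full_cpu() and kick_cpu() are simplified stand-ins here, not the real kernel interfaces.

#include <stdio.h>

struct rq { unsigned nr_running; int cpu; };

/* Stubs for illustration only. */
static int tick_nohz_full_cpu(int cpu) { (void)cpu; return 1; }
static void kick_cpu(int cpu) { printf("IPI to cpu %d: restart the tick\n", cpu); }

static void add_nr_running(struct rq *rq, unsigned count)
{
	unsigned prev_nr = rq->nr_running;

	rq->nr_running = prev_nr + count;

	/*
	 * Test the "below 2 -> at least 2" transition rather than the new
	 * value itself, so bulk increments that jump straight past 2 are
	 * not missed.
	 */
	if (prev_nr < 2 && rq->nr_running >= 2 && tick_nohz_full_cpu(rq->cpu))
		kick_cpu(rq->cpu);
}

static void sub_nr_running(struct rq *rq, unsigned count)
{
	rq->nr_running -= count;
}

int main(void)
{
	struct rq rq = { .nr_running = 0, .cpu = 3 };

	add_nr_running(&rq, 3);	/* 0 -> 3 crosses 2: tick must be restarted */
	add_nr_running(&rq, 1);	/* already >= 2: no kick */
	sub_nr_running(&rq, 2);
	return 0;
}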
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/deadline.c     4
-rw-r--r--  kernel/sched/fair.c         8
-rw-r--r--  kernel/sched/rt.c           4
-rw-r--r--  kernel/sched/sched.h       12
-rw-r--r--  kernel/sched/stop_task.c    4
5 files changed, 17 insertions(+), 15 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 800e99b99075..e0a04ae1e0dd 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -741,7 +741,7 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 
 	WARN_ON(!dl_prio(prio));
 	dl_rq->dl_nr_running++;
-	inc_nr_running(rq_of_dl_rq(dl_rq));
+	add_nr_running(rq_of_dl_rq(dl_rq), 1);
 
 	inc_dl_deadline(dl_rq, deadline);
 	inc_dl_migration(dl_se, dl_rq);
@@ -755,7 +755,7 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 	WARN_ON(!dl_prio(prio));
 	WARN_ON(!dl_rq->dl_nr_running);
 	dl_rq->dl_nr_running--;
-	dec_nr_running(rq_of_dl_rq(dl_rq));
+	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
 
 	dec_dl_deadline(dl_rq, dl_se->deadline);
 	dec_dl_migration(dl_se, dl_rq);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 26ec6686a00b..f7cac2ba62ea 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3325,7 +3325,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	}
 
 	if (!se)
-		rq->nr_running -= task_delta;
+		sub_nr_running(rq, task_delta);
 
 	cfs_rq->throttled = 1;
 	cfs_rq->throttled_clock = rq_clock(rq);
@@ -3376,7 +3376,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	}
 
 	if (!se)
-		rq->nr_running += task_delta;
+		add_nr_running(rq, task_delta);
 
 	/* determine whether we need to wake up potentially idle cpu */
 	if (rq->curr == rq->idle && rq->cfs.nr_running)
@@ -3908,7 +3908,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 	if (!se) {
 		update_rq_runnable_avg(rq, rq->nr_running);
-		inc_nr_running(rq);
+		add_nr_running(rq, 1);
 	}
 	hrtick_update(rq);
 }
@@ -3968,7 +3968,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	}
 
 	if (!se) {
-		dec_nr_running(rq);
+		sub_nr_running(rq, 1);
 		update_rq_runnable_avg(rq, 1);
 	}
 	hrtick_update(rq);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 7795e292f4c9..0ebfd7a29472 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -973,7 +973,7 @@ dequeue_top_rt_rq(struct rt_rq *rt_rq)
 
 	BUG_ON(!rq->nr_running);
 
-	rq->nr_running -= rt_rq->rt_nr_running;
+	sub_nr_running(rq, rt_rq->rt_nr_running);
 	rt_rq->rt_queued = 0;
 }
 
@@ -989,7 +989,7 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq)
 	if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
 		return;
 
-	rq->nr_running += rt_rq->rt_nr_running;
+	add_nr_running(rq, rt_rq->rt_nr_running);
 	rt_rq->rt_queued = 1;
 }
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b2cbe81308af..600e2291a75c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1206,12 +1206,14 @@ extern void update_idle_cpu_load(struct rq *this_rq);
 
 extern void init_task_runnable_average(struct task_struct *p);
 
-static inline void inc_nr_running(struct rq *rq)
+static inline void add_nr_running(struct rq *rq, unsigned count)
 {
-	rq->nr_running++;
+	unsigned prev_nr = rq->nr_running;
+
+	rq->nr_running = prev_nr + count;
 
 #ifdef CONFIG_NO_HZ_FULL
-	if (rq->nr_running == 2) {
+	if (prev_nr < 2 && rq->nr_running >= 2) {
 		if (tick_nohz_full_cpu(rq->cpu)) {
 			/* Order rq->nr_running write against the IPI */
 			smp_wmb();
@@ -1221,9 +1223,9 @@ static inline void inc_nr_running(struct rq *rq)
 #endif
 }
 
-static inline void dec_nr_running(struct rq *rq)
+static inline void sub_nr_running(struct rq *rq, unsigned count)
 {
-	rq->nr_running--;
+	rq->nr_running -= count;
 }
 
 static inline void rq_last_tick_reset(struct rq *rq)
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index d6ce65dde541..bfe0edadbfbb 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -41,13 +41,13 @@ pick_next_task_stop(struct rq *rq, struct task_struct *prev)
 static void
 enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
 {
-	inc_nr_running(rq);
+	add_nr_running(rq, 1);
 }
 
 static void
 dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
 {
-	dec_nr_running(rq);
+	sub_nr_running(rq, 1);
 }
 
 static void yield_task_stop(struct rq *rq)