author     Peter Zijlstra <peterz@infradead.org>    2014-02-12 09:47:29 -0500
committer  Thomas Gleixner <tglx@linutronix.de>    2014-02-21 15:43:18 -0500
commit     dc87734106bb6e97c92d8bd81f261fb71976ec2c (patch)
tree       42df15a40c0c96470f70b2582383aa00d2bb60c9 /kernel/sched
parent     3f1d2a318171bf61850d4e5a72031271e5aada76 (diff)
sched: Remove some #ifdeffery
Remove a few gratuitous #ifdefs in pick_next_task*().

Cc: Ingo Molnar <mingo@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Juri Lelli <juri.lelli@gmail.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-nnzddp5c4fijyzzxxrwlxghf@git.kernel.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
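The pattern used throughout the patch is worth spelling out: rather than wrapping each call site in #ifdef CONFIG_SMP, the helper is defined twice, a real version under CONFIG_SMP and an empty static inline stub under #else. The call sites then compile unconditionally, and on UP builds the compiler inlines the stubs and discards the calls, so the cleanup costs nothing at runtime. Below is a minimal, self-contained sketch of the idiom; the struct layout and helper names are illustrative stand-ins that mirror the diff, not the kernel's actual definitions.

/*
 * sketch.c - build the "SMP" variant with `gcc -DCONFIG_SMP sketch.c`,
 * the "UP" variant with plain `gcc sketch.c`.
 */
#include <stdbool.h>
#include <stdio.h>

struct rq {
	int post_schedule;
	int nr_pushable;
};

#ifdef CONFIG_SMP
static inline bool need_pull_task(struct rq *rq)
{
	return rq->nr_pushable > 0;
}

static inline void set_post_schedule(struct rq *rq)
{
	rq->post_schedule = rq->nr_pushable > 0;
}
#else
/* UP stubs: inlined and discarded entirely by the compiler. */
static inline bool need_pull_task(struct rq *rq) { return false; }
static inline void set_post_schedule(struct rq *rq) { }
#endif

/* The hot path reads identically under both configurations. */
static void pick_next_task(struct rq *rq)
{
	if (need_pull_task(rq))
		printf("pulling tasks onto this rq\n");
	set_post_schedule(rq);
}

int main(void)
{
	struct rq rq = { .post_schedule = 0, .nr_pushable = 1 };

	pick_next_task(&rq);
	printf("post_schedule = %d\n", rq.post_schedule);
	return 0;
}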
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/deadline.c   | 31
-rw-r--r--  kernel/sched/idle_task.c  |  4
-rw-r--r--  kernel/sched/rt.c         | 41
-rw-r--r--  kernel/sched/sched.h      |  5

4 files changed, 60 insertions(+), 21 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index bfeb84ecc32b..3185b775dbf7 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -214,6 +214,16 @@ static inline int has_pushable_dl_tasks(struct rq *rq)
 
 static int push_dl_task(struct rq *rq);
 
+static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
+{
+	return dl_task(prev);
+}
+
+static inline void set_post_schedule(struct rq *rq)
+{
+	rq->post_schedule = has_pushable_dl_tasks(rq);
+}
+
 #else
 
 static inline
@@ -236,6 +246,19 @@ void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 }
 
+static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
+{
+	return false;
+}
+
+static inline int pull_dl_task(struct rq *rq)
+{
+	return 0;
+}
+
+static inline void set_post_schedule(struct rq *rq)
+{
+}
 #endif /* CONFIG_SMP */
 
 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
@@ -1000,10 +1023,8 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
 
 	dl_rq = &rq->dl;
 
-#ifdef CONFIG_SMP
-	if (dl_task(prev))
+	if (need_pull_dl_task(rq, prev))
 		pull_dl_task(rq);
-#endif
 
 	if (unlikely(!dl_rq->dl_nr_running))
 		return NULL;
@@ -1024,9 +1045,7 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
 		start_hrtick_dl(rq, p);
 #endif
 
-#ifdef CONFIG_SMP
-	rq->post_schedule = has_pushable_dl_tasks(rq);
-#endif /* CONFIG_SMP */
+	set_post_schedule(rq);
 
 	return p;
 }
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 53ff9e7c76d2..1f3725882838 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -29,9 +29,7 @@ pick_next_task_idle(struct rq *rq, struct task_struct *prev)
 	put_prev_task(rq, prev);
 
 	schedstat_inc(rq, sched_goidle);
-#ifdef CONFIG_SMP
 	idle_enter_fair(rq);
-#endif
 	return rq->idle;
 }
 
@@ -50,10 +48,8 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
 
 static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 {
-#ifdef CONFIG_SMP
 	idle_exit_fair(rq);
 	rq_last_tick_reset(rq);
-#endif
 }
 
 static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
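Note that the two idle_task.c hunks above only keep building on !CONFIG_SMP because of the sched.h hunk at the end of this patch, which adds empty static inline stubs for idle_enter_fair() and idle_exit_fair(); rq_last_tick_reset() presumably already has a UP-safe definition, since nothing here adds one.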
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 65c2d6881ac3..3e488ca6050d 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -231,6 +231,12 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 
 static int pull_rt_task(struct rq *this_rq);
 
+static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
+{
+	/* Try to pull RT tasks here if we lower this rq's prio */
+	return rq->rt.highest_prio.curr > prev->prio;
+}
+
 static inline int rt_overloaded(struct rq *rq)
 {
 	return atomic_read(&rq->rd->rto_count);
@@ -317,6 +323,15 @@ static inline int has_pushable_tasks(struct rq *rq)
 	return !plist_head_empty(&rq->rt.pushable_tasks);
 }
 
+static inline void set_post_schedule(struct rq *rq)
+{
+	/*
+	 * We detect this state here so that we can avoid taking the RQ
+	 * lock again later if there is no need to push
+	 */
+	rq->post_schedule = has_pushable_tasks(rq);
+}
+
 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 {
 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
@@ -361,6 +376,19 @@ void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
 }
 
+static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
+{
+	return false;
+}
+
+static inline int pull_rt_task(struct rq *this_rq)
+{
+	return 0;
+}
+
+static inline void set_post_schedule(struct rq *rq)
+{
+}
 #endif /* CONFIG_SMP */
 
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
@@ -1332,11 +1360,8 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 	struct task_struct *p;
 	struct rt_rq *rt_rq = &rq->rt;
 
-#ifdef CONFIG_SMP
-	/* Try to pull RT tasks here if we lower this rq's prio */
-	if (rq->rt.highest_prio.curr > prev->prio)
+	if (need_pull_rt_task(rq, prev))
 		pull_rt_task(rq);
-#endif
 
 	if (!rt_rq->rt_nr_running)
 		return NULL;
@@ -1352,13 +1377,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 	if (p)
 		dequeue_pushable_task(rq, p);
 
-#ifdef CONFIG_SMP
-	/*
-	 * We detect this state here so that we can avoid taking the RQ
-	 * lock again later if there is no need to push
-	 */
-	rq->post_schedule = has_pushable_tasks(rq);
-#endif
+	set_post_schedule(rq);
 
 	return p;
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d276147ba5e4..caf4abda45e3 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1172,6 +1172,11 @@ extern void trigger_load_balance(struct rq *rq);
 extern void idle_enter_fair(struct rq *this_rq);
 extern void idle_exit_fair(struct rq *this_rq);
 
+#else
+
+static inline void idle_enter_fair(struct rq *rq) { }
+static inline void idle_exit_fair(struct rq *rq) { }
+
 #endif
 
 extern void sysrq_sched_debug_show(void);