aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorIngo Molnar <mingo@kernel.org>2018-03-03 10:27:54 -0500
committerIngo Molnar <mingo@kernel.org>2018-03-04 06:39:34 -0500
commit02d8ec9456f47b8865f1ff3fbb532e12a760d3b5 (patch)
tree002d9266267930e3c629d58da315c51bf4c9c01b
parenta92057e14beb233e8c891f4de075f2a468c71f15 (diff)
sched/deadline, rt: Rename queue_push_tasks/queue_pull_task to create separate namespace
There are similarly named functions in both of these modules:

  kernel/sched/deadline.c:static inline void queue_push_tasks(struct rq *rq)
  kernel/sched/deadline.c:static inline void queue_pull_task(struct rq *rq)
  kernel/sched/deadline.c:static inline void queue_push_tasks(struct rq *rq)
  kernel/sched/deadline.c:static inline void queue_pull_task(struct rq *rq)
  kernel/sched/deadline.c:	queue_push_tasks(rq);
  kernel/sched/deadline.c:		queue_pull_task(rq);
  kernel/sched/deadline.c:			queue_push_tasks(rq);
  kernel/sched/deadline.c:		queue_pull_task(rq);
  kernel/sched/rt.c:static inline void queue_push_tasks(struct rq *rq)
  kernel/sched/rt.c:static inline void queue_pull_task(struct rq *rq)
  kernel/sched/rt.c:static inline void queue_push_tasks(struct rq *rq)
  kernel/sched/rt.c:	queue_push_tasks(rq);
  kernel/sched/rt.c:		queue_pull_task(rq);
  kernel/sched/rt.c:			queue_push_tasks(rq);
  kernel/sched/rt.c:		queue_pull_task(rq);

... which makes it harder to grep for them. Prefix them with deadline_ and rt_, respectively.

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--kernel/sched/deadline.c16
-rw-r--r--kernel/sched/rt.c14
2 files changed, 15 insertions, 15 deletions
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index af491f537636..8b7c2b35bec9 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -511,7 +511,7 @@ static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
511static void push_dl_tasks(struct rq *); 511static void push_dl_tasks(struct rq *);
512static void pull_dl_task(struct rq *); 512static void pull_dl_task(struct rq *);
513 513
514static inline void queue_push_tasks(struct rq *rq) 514static inline void deadline_queue_push_tasks(struct rq *rq)
515{ 515{
516 if (!has_pushable_dl_tasks(rq)) 516 if (!has_pushable_dl_tasks(rq))
517 return; 517 return;
@@ -519,7 +519,7 @@ static inline void queue_push_tasks(struct rq *rq)
519 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks); 519 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
520} 520}
521 521
522static inline void queue_pull_task(struct rq *rq) 522static inline void deadline_queue_pull_task(struct rq *rq)
523{ 523{
524 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task); 524 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
525} 525}
@@ -594,11 +594,11 @@ static inline void pull_dl_task(struct rq *rq)
594{ 594{
595} 595}
596 596
597static inline void queue_push_tasks(struct rq *rq) 597static inline void deadline_queue_push_tasks(struct rq *rq)
598{ 598{
599} 599}
600 600
601static inline void queue_pull_task(struct rq *rq) 601static inline void deadline_queue_pull_task(struct rq *rq)
602{ 602{
603} 603}
604#endif /* CONFIG_SMP */ 604#endif /* CONFIG_SMP */
@@ -1759,7 +1759,7 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
1759 if (hrtick_enabled(rq)) 1759 if (hrtick_enabled(rq))
1760 start_hrtick_dl(rq, p); 1760 start_hrtick_dl(rq, p);
1761 1761
1762 queue_push_tasks(rq); 1762 deadline_queue_push_tasks(rq);
1763 1763
1764 return p; 1764 return p;
1765} 1765}
@@ -2309,7 +2309,7 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
2309 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running) 2309 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2310 return; 2310 return;
2311 2311
2312 queue_pull_task(rq); 2312 deadline_queue_pull_task(rq);
2313} 2313}
2314 2314
2315/* 2315/*
@@ -2331,7 +2331,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
2331 if (rq->curr != p) { 2331 if (rq->curr != p) {
2332#ifdef CONFIG_SMP 2332#ifdef CONFIG_SMP
2333 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded) 2333 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2334 queue_push_tasks(rq); 2334 deadline_queue_push_tasks(rq);
2335#endif 2335#endif
2336 if (dl_task(rq->curr)) 2336 if (dl_task(rq->curr))
2337 check_preempt_curr_dl(rq, p, 0); 2337 check_preempt_curr_dl(rq, p, 0);
@@ -2356,7 +2356,7 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2356 * or lowering its prio, so... 2356 * or lowering its prio, so...
2357 */ 2357 */
2358 if (!rq->dl.overloaded) 2358 if (!rq->dl.overloaded)
2359 queue_pull_task(rq); 2359 deadline_queue_pull_task(rq);
2360 2360
2361 /* 2361 /*
2362 * If we now have a earlier deadline task than p, 2362 * If we now have a earlier deadline task than p,
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index a3d438fec46c..4f4fd3b157f1 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -355,7 +355,7 @@ static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
355static void push_rt_tasks(struct rq *); 355static void push_rt_tasks(struct rq *);
356static void pull_rt_task(struct rq *); 356static void pull_rt_task(struct rq *);
357 357
358static inline void queue_push_tasks(struct rq *rq) 358static inline void rt_queue_push_tasks(struct rq *rq)
359{ 359{
360 if (!has_pushable_tasks(rq)) 360 if (!has_pushable_tasks(rq))
361 return; 361 return;
@@ -363,7 +363,7 @@ static inline void queue_push_tasks(struct rq *rq)
363 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks); 363 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
364} 364}
365 365
366static inline void queue_pull_task(struct rq *rq) 366static inline void rt_queue_pull_task(struct rq *rq)
367{ 367{
368 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task); 368 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
369} 369}
@@ -421,7 +421,7 @@ static inline void pull_rt_task(struct rq *this_rq)
421{ 421{
422} 422}
423 423
424static inline void queue_push_tasks(struct rq *rq) 424static inline void rt_queue_push_tasks(struct rq *rq)
425{ 425{
426} 426}
427#endif /* CONFIG_SMP */ 427#endif /* CONFIG_SMP */
@@ -1565,7 +1565,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
1565 /* The running task is never eligible for pushing */ 1565 /* The running task is never eligible for pushing */
1566 dequeue_pushable_task(rq, p); 1566 dequeue_pushable_task(rq, p);
1567 1567
1568 queue_push_tasks(rq); 1568 rt_queue_push_tasks(rq);
1569 1569
1570 return p; 1570 return p;
1571} 1571}
@@ -2185,7 +2185,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
2185 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running) 2185 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2186 return; 2186 return;
2187 2187
2188 queue_pull_task(rq); 2188 rt_queue_pull_task(rq);
2189} 2189}
2190 2190
2191void __init init_sched_rt_class(void) 2191void __init init_sched_rt_class(void)
@@ -2216,7 +2216,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
2216 if (task_on_rq_queued(p) && rq->curr != p) { 2216 if (task_on_rq_queued(p) && rq->curr != p) {
2217#ifdef CONFIG_SMP 2217#ifdef CONFIG_SMP
2218 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) 2218 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2219 queue_push_tasks(rq); 2219 rt_queue_push_tasks(rq);
2220#endif /* CONFIG_SMP */ 2220#endif /* CONFIG_SMP */
2221 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq))) 2221 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
2222 resched_curr(rq); 2222 resched_curr(rq);
@@ -2240,7 +2240,7 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2240 * may need to pull tasks to this runqueue. 2240 * may need to pull tasks to this runqueue.
2241 */ 2241 */
2242 if (oldprio < p->prio) 2242 if (oldprio < p->prio)
2243 queue_pull_task(rq); 2243 rt_queue_pull_task(rq);
2244 2244
2245 /* 2245 /*
2246 * If there's a higher priority task waiting to run 2246 * If there's a higher priority task waiting to run