author     Kirill Tkhai <tkhai@yandex.ru>    2014-06-28 16:03:57 -0400
committer  Ingo Molnar <mingo@kernel.org>    2014-07-16 07:38:19 -0400
commit     8875125efe8402c4d84b08291e68f1281baba8e2 (patch)
tree       2957c181dd06189a1499e09836c5fe5c3932a0b3 /kernel/sched/core.c
parent     466af29bf4270e84261712428a1304c28e3743fa (diff)
sched: Transform resched_task() into resched_curr()
We always use resched_task() with rq->curr as its argument; it is not
possible to reschedule any task other than the runqueue's current one.
This patch introduces resched_curr(struct rq *) to replace all of the
repeating patterns. The main aim is cleanup, but there is a small size
benefit too:

  (before)
    $ size kernel/sched/built-in.o
       text    data     bss     dec     hex filename
     155274   16445    7042  178761   2ba49 kernel/sched/built-in.o

    $ size vmlinux
       text    data     bss     dec     hex filename
    7411490 1178376  991232 9581098  92322a vmlinux

  (after)
    $ size kernel/sched/built-in.o
       text    data     bss     dec     hex filename
     155130   16445    7042  178617   2b9b9 kernel/sched/built-in.o

    $ size vmlinux
       text    data     bss     dec     hex filename
    7411362 1178376  991232 9580970  9231aa vmlinux

I was choosing between resched_curr() and resched_rq(), and the first
name looks better to me.

There is a small fib in Documentation/trace/ftrace.txt: I have not
re-collected the traces shown there, in the hope that this patch does
not make the execution times much worse :)

Signed-off-by: Kirill Tkhai <tkhai@yandex.ru>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20140628200219.1778.18735.stgit@localhost
Signed-off-by: Ingo Molnar <mingo@kernel.org>
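As an aside for readers following the conversion, the sketch below mirrors the pattern the patch applies: call sites stop dereferencing rq->curr just to hand it back, and resched_curr() derives the current task from the runqueue itself. It is a minimal user-space mock, not kernel code; the struct definitions and the boolean need_resched flag are simplified stand-ins, and the real resched_curr() additionally asserts that rq->lock is held, handles polling idle CPUs, and may send a cross-CPU IPI, as the diff below shows.

/* illustrative mock, NOT kernel code: names mirror the patch but the
 * types and flag handling are simplified stand-ins */
#include <stdbool.h>
#include <stdio.h>

struct task_struct {
        bool need_resched;              /* stand-in for TIF_NEED_RESCHED */
};

struct rq {
        struct task_struct *curr;       /* task currently running on this rq */
};

/* old interface: every caller had to know (and pass) rq->curr */
static void resched_task(struct task_struct *p)
{
        p->need_resched = true;
}

/* new interface: the runqueue is enough, the current task is derived here */
static void resched_curr(struct rq *rq)
{
        struct task_struct *curr = rq->curr;

        if (curr->need_resched)         /* already marked, nothing to do */
                return;
        curr->need_resched = true;
}

int main(void)
{
        struct task_struct task = { .need_resched = false };
        struct rq rq = { .curr = &task };

        resched_task(rq.curr);          /* old, repeated call-site pattern */
        task.need_resched = false;

        resched_curr(&rq);              /* new, callers just pass the rq */
        printf("need_resched = %d\n", task.need_resched);
        return 0;
}

Running the mock just prints need_resched = 1, which is all it is meant to demonstrate: both variants mark the current task, the difference is who looks it up.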
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--   kernel/sched/core.c   25
1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index cf7695a6c1d2..2f960813c582 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -589,30 +589,31 @@ static bool set_nr_if_polling(struct task_struct *p)
 #endif
 
 /*
- * resched_task - mark a task 'to be rescheduled now'.
+ * resched_curr - mark rq's current task 'to be rescheduled now'.
  *
  * On UP this means the setting of the need_resched flag, on SMP it
  * might also involve a cross-CPU call to trigger the scheduler on
  * the target CPU.
  */
-void resched_task(struct task_struct *p)
+void resched_curr(struct rq *rq)
 {
+        struct task_struct *curr = rq->curr;
         int cpu;
 
-        lockdep_assert_held(&task_rq(p)->lock);
+        lockdep_assert_held(&rq->lock);
 
-        if (test_tsk_need_resched(p))
+        if (test_tsk_need_resched(curr))
                 return;
 
-        cpu = task_cpu(p);
+        cpu = cpu_of(rq);
 
         if (cpu == smp_processor_id()) {
-                set_tsk_need_resched(p);
+                set_tsk_need_resched(curr);
                 set_preempt_need_resched();
                 return;
         }
 
-        if (set_nr_and_not_polling(p))
+        if (set_nr_and_not_polling(curr))
                 smp_send_reschedule(cpu);
         else
                 trace_sched_wake_idle_without_ipi(cpu);
@@ -625,7 +626,7 @@ void resched_cpu(int cpu)
 
         if (!raw_spin_trylock_irqsave(&rq->lock, flags))
                 return;
-        resched_task(cpu_curr(cpu));
+        resched_curr(rq);
         raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
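The hunk above shows only the middle of resched_cpu(). For context, here is how the whole function reads after the patch; the two declaration lines are not visible in the diff and are reconstructed from the identifiers used below, so treat them as an assumption rather than a verbatim quote.

/* resched_cpu() after this patch; the rq/flags declarations are
 * reconstructed from context (they are not part of the hunk above) */
void resched_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

        /* best effort: if the rq lock is contended, skip the reschedule */
        if (!raw_spin_trylock_irqsave(&rq->lock, flags))
                return;
        resched_curr(rq);
        raw_spin_unlock_irqrestore(&rq->lock, flags);
}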
@@ -1027,7 +1028,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
                 if (class == rq->curr->sched_class)
                         break;
                 if (class == p->sched_class) {
-                        resched_task(rq->curr);
+                        resched_curr(rq);
                         break;
                 }
         }
@@ -3073,7 +3074,7 @@ void set_user_nice(struct task_struct *p, long nice)
                  * lowered its priority, then reschedule its CPU:
                  */
                 if (delta < 0 || (delta > 0 && task_running(rq, p)))
-                        resched_task(rq->curr);
+                        resched_curr(rq);
         }
 out_unlock:
         task_rq_unlock(rq, p, &flags);
@@ -4299,7 +4300,7 @@ again:
                  * fairness.
                  */
                 if (preempt && rq != p_rq)
-                        resched_task(p_rq->curr);
+                        resched_curr(p_rq);
         }
 
 out_unlock:
@@ -7106,7 +7107,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
         __setscheduler(rq, p, &attr);
         if (on_rq) {
                 enqueue_task(rq, p, 0);
-                resched_task(rq->curr);
+                resched_curr(rq);
         }
 
         check_class_changed(rq, p, prev_class, old_prio);