author    Ingo Molnar <mingo@elte.hu>  2007-07-09 12:51:59 -0400
committer Ingo Molnar <mingo@elte.hu>  2007-07-09 12:51:59 -0400
commit    c24d20dbef948487cd14f15dbf04644142e9f886 (patch)
tree      afdf0f55982daeb07fad9953e2f9285861d8857e
parent    62480d13d5d1812176e969a47e2db78a5398d02e (diff)
sched: move around resched_task()
Move resched_task()/resched_cpu() into the 'public interfaces' section of
sched.c, for use by kernel/sched_fair/rt/idletask.c.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  kernel/sched.c  104
1 file changed, 52 insertions(+), 52 deletions(-)
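The point of the move: the per-class scheduler files named above are built by
being #included into sched.c further down, so resched_task() must already be
defined by the time their code appears. As a hypothetical caller sketch (the
function name and the priority test below are illustrative, not part of this
patch; only resched_task() and its rq->lock rule come from the code being
moved), a scheduling-class hook could then do:

static void check_preempt_example(struct rq *rq, struct task_struct *p)
{
	/* resched_task() asserts that task_rq(p)->lock is held */
	if (p->prio < rq->curr->prio)
		resched_task(rq->curr);	/* mark 'current' to be rescheduled */
}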
diff --git a/kernel/sched.c b/kernel/sched.c
index 53c0ee742f69..e642bfa61fe3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -617,6 +617,58 @@ static inline struct rq *this_rq_lock(void)
 	return rq;
 }
 
+/*
+ * resched_task - mark a task 'to be rescheduled now'.
+ *
+ * On UP this means the setting of the need_resched flag, on SMP it
+ * might also involve a cross-CPU call to trigger the scheduler on
+ * the target CPU.
+ */
+#ifdef CONFIG_SMP
+
+#ifndef tsk_is_polling
+#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
+#endif
+
+static void resched_task(struct task_struct *p)
+{
+	int cpu;
+
+	assert_spin_locked(&task_rq(p)->lock);
+
+	if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
+		return;
+
+	set_tsk_thread_flag(p, TIF_NEED_RESCHED);
+
+	cpu = task_cpu(p);
+	if (cpu == smp_processor_id())
+		return;
+
+	/* NEED_RESCHED must be visible before we test polling */
+	smp_mb();
+	if (!tsk_is_polling(p))
+		smp_send_reschedule(cpu);
+}
+
+static void resched_cpu(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	if (!spin_trylock_irqsave(&rq->lock, flags))
+		return;
+	resched_task(cpu_curr(cpu));
+	spin_unlock_irqrestore(&rq->lock, flags);
+}
+#else
+static inline void resched_task(struct task_struct *p)
+{
+	assert_spin_locked(&task_rq(p)->lock);
+	set_tsk_need_resched(p);
+}
+#endif
+
 #include "sched_stats.h"
 
 /*
@@ -953,58 +1005,6 @@ static void deactivate_task(struct task_struct *p, struct rq *rq)
 	p->array = NULL;
 }
 
-/*
- * resched_task - mark a task 'to be rescheduled now'.
- *
- * On UP this means the setting of the need_resched flag, on SMP it
- * might also involve a cross-CPU call to trigger the scheduler on
- * the target CPU.
- */
-#ifdef CONFIG_SMP
-
-#ifndef tsk_is_polling
-#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
-#endif
-
-static void resched_task(struct task_struct *p)
-{
-	int cpu;
-
-	assert_spin_locked(&task_rq(p)->lock);
-
-	if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
-		return;
-
-	set_tsk_thread_flag(p, TIF_NEED_RESCHED);
-
-	cpu = task_cpu(p);
-	if (cpu == smp_processor_id())
-		return;
-
-	/* NEED_RESCHED must be visible before we test polling */
-	smp_mb();
-	if (!tsk_is_polling(p))
-		smp_send_reschedule(cpu);
-}
-
-static void resched_cpu(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-	unsigned long flags;
-
-	if (!spin_trylock_irqsave(&rq->lock, flags))
-		return;
-	resched_task(cpu_curr(cpu));
-	spin_unlock_irqrestore(&rq->lock, flags);
-}
-#else
-static inline void resched_task(struct task_struct *p)
-{
-	assert_spin_locked(&task_rq(p)->lock);
-	set_tsk_need_resched(p);
-}
-#endif
-
 /**
  * task_curr - is this task currently executing on a CPU?
  * @p: the task in question.
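A note on the "NEED_RESCHED must be visible before we test polling" barrier:
the waker stores TIF_NEED_RESCHED and then loads TIF_POLLING_NRFLAG, while a
polling idle CPU does the mirror image, storing TIF_POLLING_NRFLAG and then
watching TIF_NEED_RESCHED. With a full barrier between the store and the load
on each side, at least one side must observe the other's store, so the target
either notices the flag while polling or receives the IPI from
smp_send_reschedule(). A minimal userspace analogue of that handshake, using
C11 fences in place of smp_mb() (illustrative only, not kernel code; the
matching ordering on the idle side is assumed, as it is not shown in this
patch):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool need_resched;	/* stands in for TIF_NEED_RESCHED */
static atomic_bool polling;		/* stands in for TIF_POLLING_NRFLAG */

/* waker side, mirroring resched_task(): set the flag, then decide
 * whether a cross-CPU kick is needed */
static bool waker_needs_ipi(void)
{
	atomic_store_explicit(&need_resched, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* the smp_mb() */
	return !atomic_load_explicit(&polling, memory_order_relaxed);
}

/* idle side: announce that we are polling, then check the flag */
static bool idle_sees_resched(void)
{
	atomic_store_explicit(&polling, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load_explicit(&need_resched, memory_order_relaxed);
}

Without the fences, both loads could return false (the classic
store-buffering outcome): the waker would skip the IPI and the idle CPU would
keep polling without ever seeing the flag. The smp_mb() in resched_task()
closes exactly that window.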