Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 31 +++++++++++++++++++++++++++++--
 1 file changed, 29 insertions(+), 2 deletions(-)
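
The patch below adds need_resched debugging instrumentation to the core scheduler of a LITMUS^RT kernel. A new per-runqueue flag, rq->dbg_in_schedule, is set once schedule() takes rq->lock and cleared on both exit paths (the context-switch path and the no-switch path); resched_task() then reports, via the LITMUS^RT TRACE_TASK() macro, any attempt to set need_resched while the flag is raised, including the caller's return address. Further TRACE_TASK() probes record when a task already has need_resched set at each stage of schedule() (before and after pre_schedule(), after idle_balance(), after put_prev_task()), when it resumes in check_preempt_curr(), and when a sleeping workqueue worker triggers try_to_wake_up_local().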
diff --git a/kernel/sched.c b/kernel/sched.c
index e4cfd193e76b..3682adadf6a7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -79,6 +79,7 @@
 #include "sched_cpupri.h"
 #include "workqueue_sched.h"
 
+#include <litmus/litmus.h>
 #include <litmus/debug_trace.h>
 #include <litmus/sched_trace.h>
 #include <litmus/trace.h>
@@ -462,6 +463,8 @@ struct rq {
 	/* runqueue lock: */
 	raw_spinlock_t lock;
 
+	int dbg_in_schedule;
+
 	/*
 	 * nr_running and cpu_load should be in the same cacheline because
 	 * remote CPUs use both these fields when doing load calculation.
@@ -573,6 +576,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 static inline
 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 {
+	if (test_tsk_need_resched(p))
+		TRACE_TASK(p, "already need_resched when resuming\n");
 	rq->curr->sched_class->check_preempt_curr(rq, p, flags);
 
 	/*
@@ -1193,6 +1198,10 @@ static void resched_task(struct task_struct *p)
 	if (test_tsk_need_resched(p))
 		return;
 
+	if (task_rq(p)->dbg_in_schedule)
+		TRACE_TASK(p, "XXXX calling resched_task() during schedule() from ret:0x%p [rt:%d nr_running:%d]\n",
+			   __builtin_return_address(0), is_realtime(p), task_rq(p)->nr_running);
+
 	set_tsk_need_resched(p);
 
 	cpu = task_cpu(p);
@@ -3823,6 +3832,7 @@ need_resched_nonpreemptible:
 	hrtick_clear(rq);
 
 	raw_spin_lock_irq(&rq->lock);
+	rq->dbg_in_schedule = 1;
 	clear_tsk_need_resched(prev);
 
 	switch_count = &prev->nivcsw;
@@ -3840,20 +3850,35 @@ need_resched_nonpreemptible:
 				struct task_struct *to_wakeup;
 
 				to_wakeup = wq_worker_sleeping(prev, cpu);
-				if (to_wakeup)
+				if (to_wakeup) {
+					TRACE_TASK(prev, "try_to_wake_up_local(%s/%d)\n", to_wakeup->comm, to_wakeup->pid);
 					try_to_wake_up_local(to_wakeup);
+				}
 			}
 			deactivate_task(rq, prev, DEQUEUE_SLEEP);
 		}
 		switch_count = &prev->nvcsw;
 	}
 
+	if (test_tsk_need_resched(prev))
+		TRACE_TASK(prev, "need_resched before pre_schedule()\n");
+
 	pre_schedule(rq, prev);
 
-	if (unlikely(!rq->nr_running))
+	if (test_tsk_need_resched(prev))
+		TRACE_TASK(prev, "need_resched after pre_schedule()\n");
+
+	if (unlikely(!rq->nr_running)) {
 		idle_balance(cpu, rq);
+		if (test_tsk_need_resched(prev))
+			TRACE_TASK(prev, "need_resched after idle_balance\n");
+	}
 
 	put_prev_task(rq, prev);
+
+	if (test_tsk_need_resched(prev))
+		TRACE_TASK(prev, "need_resched after put_prev\n");
+
 	next = pick_next_task(rq);
 
 	if (likely(prev != next)) {
@@ -3866,6 +3891,7 @@ need_resched_nonpreemptible:
 
 	TS_SCHED_END(next);
 	TS_CXS_START(next);
+	rq->dbg_in_schedule = 0;
 	context_switch(rq, prev, next); /* unlocks the rq */
 	TS_CXS_END(current);
 	/*
@@ -3878,6 +3904,7 @@ need_resched_nonpreemptible:
 		rq = cpu_rq(cpu);
 	} else {
 		TS_SCHED_END(prev);
+		rq->dbg_in_schedule = 0;
 		raw_spin_unlock_irq(&rq->lock);
 	}
 
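
For illustration, here is a minimal, self-contained user-space sketch of the debugging technique the patch uses: a flag brackets a critical section, and a probe logs its caller's return address when it is invoked while the section is active. Every identifier in the sketch (struct fake_rq, fake_schedule, fake_resched_task, dbg_trace) is a hypothetical stand-in for rq, schedule(), resched_task(), and TRACE_TASK(); only the flag-bracketing idea and __builtin_return_address(0) come from the patch itself.

/* Build with: gcc -O0 -o dbg_sketch dbg_sketch.c (GCC extensions used). */
#include <stdio.h>

struct fake_rq {
	int dbg_in_schedule;	/* mirrors rq->dbg_in_schedule */
	int nr_running;
};

static struct fake_rq rq;

/* Stand-in for TRACE_TASK(): just prints to stderr. */
#define dbg_trace(fmt, ...) \
	fprintf(stderr, "dbg: " fmt, ##__VA_ARGS__)

/*
 * Mirrors the check added to resched_task(): complain if we are
 * entered while the "scheduler" flag is raised, and report the
 * caller via __builtin_return_address(0). noinline keeps the
 * return address meaningful.
 */
static void __attribute__((noinline)) fake_resched_task(void)
{
	if (rq.dbg_in_schedule)
		dbg_trace("resched during schedule() from ret:%p [nr_running:%d]\n",
			  __builtin_return_address(0), rq.nr_running);
}

/* Mirrors the bracketing in schedule(): raise the flag on entry,
 * clear it again on the way out. */
static void fake_schedule(void)
{
	rq.dbg_in_schedule = 1;
	fake_resched_task();	/* flag set: warning printed */
	rq.dbg_in_schedule = 0;
}

int main(void)
{
	rq.nr_running = 1;
	fake_resched_task();	/* flag clear: silent */
	fake_schedule();
	return 0;
}

Note that in the patch above the flag can be a plain int because both the writes in schedule() and the read in resched_task() happen under rq->lock; it is a debugging aid, not a synchronization mechanism.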