author     Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-28 10:25:34 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-29 17:21:11 -0400
commit     e68debebdc2983600063cd6b04c6a51c4b7ddcc1 (patch)
tree       60eb377f29b67cbd7b22e46e41d2d2e0b1ec22c1 /kernel/sched.c
parent     9ac80419f88f192cdf586da3df585c224ef27773 (diff)
Integrate litmus_tick() in task_tick_litmus()
- remove the call to litmus_tick() that scheduler_tick() made right after
  invoking the class task_tick(), and integrate litmus_tick() into
  task_tick_litmus()
- task_tick_litmus() is the handler for the litmus class task_tick() method;
  it is called in non-queued mode from scheduler_tick()
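
For orientation, here is a minimal sketch of what the reworked class handler could look like on the litmus/sched_litmus.c side. That file is not part of this diff; the body below is an assumption derived only from the message above, not the actual implementation.

/* Sketch only: assumes the 2.6.3x ->task_tick(rq, p, queued) signature;
 * litmus_tick() and the TS_PLUGIN_TICK_* timestamps are the existing
 * LITMUS^RT symbols this commit refers to.
 */
static void task_tick_litmus(struct rq *rq, struct task_struct *p, int queued)
{
	/* scheduler_tick() invokes task_tick() in non-queued mode only */
	if (queued)
		return;
	TS_PLUGIN_TICK_START;
	litmus_tick(rq, p);	/* may set TIF_NEED_RESCHED on p */
	TS_PLUGIN_TICK_END;
}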
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index ee894ee8a0bb..9ad41979c0b2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -485,6 +485,11 @@ struct rt_rq {
 #endif
 };
 
+/* Litmus related fields in a runqueue */
+struct litmus_rq {
+	struct task_struct *prev;
+};
+
 #ifdef CONFIG_SMP
 
 /*
@@ -549,6 +554,7 @@ struct rq {
 
 	struct cfs_rq cfs;
 	struct rt_rq rt;
+	struct litmus_rq litmus;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* list of leaf cfs_rq on this cpu: */
@@ -574,8 +580,6 @@ struct rq {
 
 	atomic_t nr_iowait;
 
-	struct task_struct *litmus_next;
-
 #ifdef CONFIG_SMP
 	struct root_domain *rd;
 	struct sched_domain *sd;
@@ -2786,6 +2790,15 @@ static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
 {
 	if (prev->sched_class->pre_schedule)
 		prev->sched_class->pre_schedule(rq, prev);
+
+	/* LITMUS^RT: not a very clean hack, but we need to save the prev task
+	 * as our scheduling decisions rely on it (as we drop the rq lock
+	 * something in prev can change...); there is no way to escape
+	 * this hack apart from modifying pick_next_task(rq, _prev_) or
+	 * falling back on the previous solution of decoupling
+	 * scheduling decisions
+	 */
+	rq->litmus.prev = prev;
 }
 
 /* rq->lock is NOT held, but preemption is disabled */
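
The stashed rq->litmus.prev is presumably consumed on the litmus/sched_litmus.c side, since the ->pick_next_task(rq) hook of this kernel takes no prev argument, which is exactly what the comment above laments. A hypothetical sketch; pick_next_task_litmus() and the plugin ->schedule(prev) callback are assumptions, not shown in this diff:

static struct task_struct *pick_next_task_litmus(struct rq *rq)
{
	/* hypothetical: read the task stashed by pre_schedule() above and
	 * let the active plugin base its decision on it
	 */
	struct task_struct *prev = rq->litmus.prev;

	return litmus->schedule(prev);
}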
@@ -5252,13 +5265,8 @@ void scheduler_tick(void)
 	update_cpu_load(rq);
 	curr->sched_class->task_tick(rq, curr, 0);
 
-	/*
-	 * LITMUS_TODO: can we move litmus_tick inside task_tick
-	 * or will deadlock ?
-	 */
-	TS_PLUGIN_TICK_START;
+	/* litmus_tick may force current to resched */
 	litmus_tick(rq, curr);
-	TS_PLUGIN_TICK_END;
 
 	spin_unlock(&rq->lock);
 
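
As an illustration of the new comment, "forcing current to resched" would amount to the plugin tick setting TIF_NEED_RESCHED on the running task so that the return path of the tick interrupt re-enters schedule(). A sketch under that assumption; demo_plugin_tick() is a made-up name, and is_realtime()/budget_exhausted() are LITMUS^RT helpers used here only illustratively:

/* Hypothetical plugin-side tick, not part of this diff */
static void demo_plugin_tick(struct task_struct *t)
{
	if (is_realtime(t) && budget_exhausted(t))
		set_tsk_need_resched(t);	/* honoured on return from the tick IRQ */
}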
@@ -5470,14 +5478,6 @@ need_resched_nonpreemptible:
 	update_rq_clock(rq);
 	clear_tsk_need_resched(prev);
 
-	/*
-	 * LITMUS_TODO: can we integrate litmus_schedule in
-	 * pick_next_task?
-	 */
-	TS_PLUGIN_SCHED_START;
-	litmus_schedule(rq, prev);
-	TS_PLUGIN_SCHED_END;
-
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
 		if (unlikely(signal_pending_state(prev->state, prev)))
 			prev->state = TASK_RUNNING;
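
With the explicit litmus_schedule() call gone from schedule(), the LITMUS^RT decision is reached through the ordinary scheduling-class walk instead, assuming litmus_sched_class is installed as the highest class. The existing loop in kernel/sched.c has roughly this shape (trimmed for context, CFS fast path omitted):

static inline struct task_struct *pick_next_task(struct rq *rq)
{
	const struct sched_class *class;
	struct task_struct *p;

	class = sched_class_highest;
	for ( ; ; ) {
		p = class->pick_next_task(rq);
		if (p)
			return p;
		class = class->next;	/* the idle class always returns a task */
	}
}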