author		Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-28 10:25:34 -0400
committer	Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-29 17:21:11 -0400
commit		e68debebdc2983600063cd6b04c6a51c4b7ddcc1 (patch)
tree		60eb377f29b67cbd7b22e46e41d2d2e0b1ec22c1
parent		9ac80419f88f192cdf586da3df585c224ef27773 (diff)
Integrate litmus_tick() in task_tick_litmus()

- remove the call to litmus_tick() from scheduler_tick() just after the
  class task_tick() method has been performed, and integrate
  litmus_tick() into task_tick_litmus()
- task_tick_litmus() is the handler for the litmus class task_tick()
  method; it is called in non-queued mode from scheduler_tick()
-rw-r--r--	kernel/sched.c		32
-rw-r--r--	litmus/sched_litmus.c	117
2 files changed, 91 insertions, 58 deletions
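For context, "non-queued mode" refers to the third argument of the task_tick() class hook: scheduler_tick() invokes it with queued == 0. A minimal, illustrative sketch of the tick dispatch as it stands after this patch (names from kernel/sched.c; locking and statistics omitted, and the function name is made up for the sketch):

/* Simplified tick path after this patch: the class hook resolves to
 * task_tick_litmus() for LITMUS^RT tasks, while litmus_tick() still
 * runs from scheduler_tick() itself and now takes the
 * TS_PLUGIN_TICK_START timestamp internally (see the
 * litmus/sched_litmus.c hunk below).
 */
static void tick_path_sketch(struct rq *rq)
{
	struct task_struct *curr = rq->curr;

	curr->sched_class->task_tick(rq, curr, 0);	/* queued == 0 */
	litmus_tick(rq, curr);	/* may set the need_resched flag */
}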
diff --git a/kernel/sched.c b/kernel/sched.c
index ee894ee8a0bb..9ad41979c0b2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -485,6 +485,11 @@ struct rt_rq {
 #endif
 };
 
+/* Litmus related fields in a runqueue */
+struct litmus_rq {
+	struct task_struct *prev;
+};
+
 #ifdef CONFIG_SMP
 
 /*
@@ -549,6 +554,7 @@ struct rq {
 
 	struct cfs_rq cfs;
 	struct rt_rq rt;
+	struct litmus_rq litmus;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* list of leaf cfs_rq on this cpu: */
@@ -574,8 +580,6 @@ struct rq {
 
 	atomic_t nr_iowait;
 
-	struct task_struct *litmus_next;
-
 #ifdef CONFIG_SMP
 	struct root_domain *rd;
 	struct sched_domain *sd;
@@ -2786,6 +2790,15 @@ static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
 {
 	if (prev->sched_class->pre_schedule)
 		prev->sched_class->pre_schedule(rq, prev);
+
+	/* LITMUS^RT: a not-so-clean hack. We need to save the prev task
+	 * here, since our scheduling decisions rely on it (as we drop the
+	 * rq lock, something in prev can change...); there is no way to
+	 * avoid this hack short of modifying pick_next_task(rq, _prev_)
+	 * or falling back on the previous solution of decoupling the
+	 * scheduling decisions.
+	 */
+	rq->litmus.prev = prev;
 }
 
 /* rq->lock is NOT held, but preemption is disabled */
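The saved rq->litmus.prev is consumed further down, in pick_next_task_litmus(). For orientation, a condensed and purely illustrative view of the ordering inside this kernel's schedule() path (not the literal source; it only shows why the handoff works: both hooks run back to back under rq->lock):

/* Illustrative ordering only; the real schedule() does much more. */
static void schedule_sketch(struct rq *rq)
{
	struct task_struct *prev = rq->curr, *next;

	spin_lock_irq(&rq->lock);
	pre_schedule(rq, prev);		/* stores rq->litmus.prev = prev */
	put_prev_task(rq, prev);
	next = pick_next_task(rq);	/* pick_next_task_litmus() consumes
					 * and clears rq->litmus.prev */
	spin_unlock_irq(&rq->lock);
	/* context_switch(rq, prev, next) would follow */
}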
@@ -5252,13 +5265,8 @@ void scheduler_tick(void)
 	update_cpu_load(rq);
 	curr->sched_class->task_tick(rq, curr, 0);
 
-	/*
-	 * LITMUS_TODO: can we move litmus_tick inside task_tick
-	 * or will deadlock ?
-	 */
-	TS_PLUGIN_TICK_START;
+	/* litmus_tick may force current to resched */
 	litmus_tick(rq, curr);
-	TS_PLUGIN_TICK_END;
 
 	spin_unlock(&rq->lock);
 
@@ -5470,14 +5478,6 @@ need_resched_nonpreemptible:
 	update_rq_clock(rq);
 	clear_tsk_need_resched(prev);
 
-	/*
-	 * LITMUS_TODO: can we integrate litmus_schedule in
-	 * pick_next_task?
-	 */
-	TS_PLUGIN_SCHED_START;
-	litmus_schedule(rq, prev);
-	TS_PLUGIN_SCHED_END;
-
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
 		if (unlikely(signal_pending_state(prev->state, prev)))
 			prev->state = TASK_RUNNING;
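The replacement comment in scheduler_tick() ("litmus_tick may force current to resched") refers to the usual tick-time preemption mechanism: a tick handler cannot call schedule() directly, so it marks the current task and lets the interrupt-return path reschedule. A hedged sketch of what a plugin tick hook might do (the budget check is an illustrative example, not taken from this patch; set_tsk_need_resched() is the kernel primitive involved):

/* Illustrative plugin tick hook: if the current job has exhausted its
 * budget, request a reschedule; schedule() then runs on the way out of
 * the timer interrupt.
 */
static void demo_plugin_tick(struct task_struct *t)
{
	if (is_realtime(t) && budget_exhausted(t))	/* example condition */
		set_tsk_need_resched(t);
}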
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c
index ccedd3670ac5..9906e059879a 100644
--- a/litmus/sched_litmus.c
+++ b/litmus/sched_litmus.c
@@ -20,33 +20,40 @@ static void update_time_litmus(struct rq *rq, struct task_struct *p)
 static void double_rq_lock(struct rq *rq1, struct rq *rq2);
 static void double_rq_unlock(struct rq *rq1, struct rq *rq2);
 
+/*
+ * litmus_tick() is called by scheduler_tick() with HZ frequency;
+ * interrupts are disabled
+ */
 static void litmus_tick(struct rq *rq, struct task_struct *p)
 {
+	TS_PLUGIN_TICK_START;
+
 	if (is_realtime(p))
 		update_time_litmus(rq, p);
+
+	/* plugin tick */
 	litmus->tick(p);
+
+	return;
 }
 
-static void litmus_schedule(struct rq *rq, struct task_struct *prev)
+static struct task_struct *
+litmus_schedule(struct rq *rq, struct task_struct *prev)
 {
 	struct rq* other_rq;
+	struct task_struct *next;
+
 	long was_running;
 	lt_t _maybe_deadlock = 0;
-	/* WARNING: rq is _not_ locked! */
-	if (is_realtime(prev)) {
-		update_time_litmus(rq, prev);
-		if (!is_running(prev))
-			tsk_rt(prev)->present = 0;
-	}
 
 	/* let the plugin schedule */
-	rq->litmus_next = litmus->schedule(prev);
+	next = litmus->schedule(prev);
 
 	/* check if a global plugin pulled a task from a different RQ */
-	if (rq->litmus_next && task_rq(rq->litmus_next) != rq) {
+	if (next && task_rq(next) != rq) {
 		/* we need to migrate the task */
-		other_rq = task_rq(rq->litmus_next);
-		TRACE_TASK(rq->litmus_next, "migrate from %d\n", other_rq->cpu);
+		other_rq = task_rq(next);
+		TRACE_TASK(next, "migrate from %d\n", other_rq->cpu);
 
 		/* while we drop the lock, the prev task could change its
 		 * state
@@ -59,18 +66,18 @@ static void litmus_schedule(struct rq *rq, struct task_struct *prev)
 		 * the case of cross or circular migrations. It's the job of
 		 * the plugin to make sure that doesn't happen.
 		 */
-		TRACE_TASK(rq->litmus_next, "stack_in_use=%d\n",
-			   rq->litmus_next->rt_param.stack_in_use);
-		if (rq->litmus_next->rt_param.stack_in_use != NO_CPU) {
-			TRACE_TASK(rq->litmus_next, "waiting to deschedule\n");
+		TRACE_TASK(next, "stack_in_use=%d\n",
+			   next->rt_param.stack_in_use);
+		if (next->rt_param.stack_in_use != NO_CPU) {
+			TRACE_TASK(next, "waiting to deschedule\n");
 			_maybe_deadlock = litmus_clock();
 		}
-		while (rq->litmus_next->rt_param.stack_in_use != NO_CPU) {
+		while (next->rt_param.stack_in_use != NO_CPU) {
 			cpu_relax();
 			mb();
-			if (rq->litmus_next->rt_param.stack_in_use == NO_CPU)
-				TRACE_TASK(rq->litmus_next,
-					   "descheduled. Proceeding.\n");
+			if (next->rt_param.stack_in_use == NO_CPU)
+				TRACE_TASK(next, "descheduled. Proceeding.\n");
+
 			if (lt_before(_maybe_deadlock + 10000000,
 				      litmus_clock())) {
 				/* We've been spinning for 10ms.
@@ -79,20 +86,19 @@ static void litmus_schedule(struct rq *rq, struct task_struct *prev)
 				 * we will have debug info instead of a hard
 				 * deadlock.
 				 */
-				TRACE_TASK(rq->litmus_next,
-					   "stack too long in use. "
+				TRACE_TASK(next, "stack too long in use. "
 					   "Deadlock?\n");
-				rq->litmus_next = NULL;
+				next = NULL;
 
 				/* bail out */
 				spin_lock(&rq->lock);
-				return;
+				return next;
 			}
 		}
 #ifdef __ARCH_WANT_UNLOCKED_CTXSW
-	if (rq->litmus_next->oncpu)
-		TRACE_TASK(rq->litmus_next, "waiting for !oncpu");
-	while (rq->litmus_next->oncpu) {
+	if (next->oncpu)
+		TRACE_TASK(next, "waiting for !oncpu");
+	while (next->oncpu) {
 		cpu_relax();
 		mb();
 	}
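The constant 10000000 above is 10 ms expressed in nanoseconds (lt_t values come from litmus_clock()). The surrounding loop is a bounded busy-wait: spin until the target task's stack is free, but record a start timestamp so that a stuck wait produces a trace message instead of a silent hard deadlock. The idiom, restated as a self-contained sketch (wait_condition() is a hypothetical stand-in for the real next->rt_param.stack_in_use != NO_CPU check):

#define STACK_WATCHDOG_NS 10000000ULL	/* 10 ms in nanoseconds */

/* Sketch of the bounded busy-wait idiom used above. Returns 0 once the
 * condition clears, -1 if we have apparently deadlocked.
 */
static int spin_with_watchdog(struct task_struct *t)
{
	lt_t start = litmus_clock();

	while (wait_condition(t)) {		/* hypothetical predicate */
		cpu_relax();	/* yield pipeline resources while spinning */
		mb();		/* force a fresh read of the flag */
		if (lt_before(start + STACK_WATCHDOG_NS, litmus_clock()))
			return -1;	/* bail out with debug info instead */
	}
	return 0;
}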
@@ -114,7 +120,7 @@ static void litmus_schedule(struct rq *rq, struct task_struct *prev)
 		}
 	}
 
-	set_task_cpu(rq->litmus_next, smp_processor_id());
+	set_task_cpu(next, smp_processor_id());
 
 	/* DEBUG: now that we have the lock we need to make sure a
 	 * couple of things still hold:
@@ -123,22 +129,24 @@ static void litmus_schedule(struct rq *rq, struct task_struct *prev)
 		 * If either is violated, then the active plugin is
 		 * doing something wrong.
 		 */
-		if (!is_realtime(rq->litmus_next) ||
-		    !is_running(rq->litmus_next)) {
+		if (!is_realtime(next) || !is_running(next)) {
 			/* BAD BAD BAD */
-			TRACE_TASK(rq->litmus_next,
-				   "BAD: migration invariant FAILED: "
+			TRACE_TASK(next, "BAD: migration invariant FAILED: "
 				   "rt=%d running=%d\n",
-				   is_realtime(rq->litmus_next),
-				   is_running(rq->litmus_next));
+				   is_realtime(next),
+				   is_running(next));
 			/* drop the task */
-			rq->litmus_next = NULL;
+			next = NULL;
 		}
 		/* release the other CPU's runqueue, but keep ours */
 		spin_unlock(&other_rq->lock);
 	}
-	if (rq->litmus_next)
-		rq->litmus_next->rt_param.stack_in_use = rq->cpu;
+	if (next) {
+		next->rt_param.stack_in_use = rq->cpu;
+		next->se.exec_start = rq->clock;
+	}
+
+	return next;
 }
 
 static void enqueue_task_litmus(struct rq *rq, struct task_struct *p,
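rt_param.stack_in_use tracks which CPU currently owns a task's kernel stack; the assignment next->rt_param.stack_in_use = rq->cpu above is the claim side of the protocol whose wait side is the watchdog loop earlier. Both halves, as hypothetical helpers (the claim matches this patch; the release point is an assumption about the context-switch completion path, which this diff does not show):

/* Hypothetical helpers for the stack-ownership protocol. */
static inline void claim_stack(struct task_struct *t, int cpu)
{
	t->rt_param.stack_in_use = cpu;		/* done under rq->lock */
}

static inline void release_stack(struct task_struct *t)
{
	mb();					/* pairs with the waiter's mb() */
	t->rt_param.stack_in_use = NO_CPU;	/* waiters may now proceed */
}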
@@ -174,22 +182,46 @@ static void check_preempt_curr_litmus(struct rq *rq, struct task_struct *p, int
 {
 }
 
-/* has already been taken care of */
 static void put_prev_task_litmus(struct rq *rq, struct task_struct *p)
 {
 }
 
+static void pre_schedule_litmus(struct rq *rq, struct task_struct *prev)
+{
+	update_time_litmus(rq, prev);
+	if (!is_running(prev))
+		tsk_rt(prev)->present = 0;
+}
+
+/* pick_next_task_litmus() - the litmus_schedule() entry point
+ *
+ * returns the next task to be scheduled
+ */
 static struct task_struct *pick_next_task_litmus(struct rq *rq)
 {
-	struct task_struct* picked = rq->litmus_next;
-	rq->litmus_next = NULL;
-	if (picked)
-		picked->se.exec_start = rq->clock;
-	return picked;
+	/* get the to-be-switched-out task (prev) */
+	struct task_struct *prev = rq->litmus.prev;
+	struct task_struct *next;
+
+	/* if not called from schedule() but from somewhere
+	 * else (e.g., migration), return now!
+	 */
+	if (!rq->litmus.prev)
+		return NULL;
+
+	rq->litmus.prev = NULL;
+
+	TS_PLUGIN_SCHED_START;
+	next = litmus_schedule(rq, prev);
+	TS_PLUGIN_SCHED_END;
+
+	return next;
 }
 
 static void task_tick_litmus(struct rq *rq, struct task_struct *p, int queued)
 {
+	/* nothing to do; all tick-related work is done by litmus_tick() */
+	return;
 }
 
 static void switched_to_litmus(struct rq *rq, struct task_struct *p, int running)
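The early return NULL matters because of how the core scheduler consumes class hooks: pick_next_task() walks the class list from highest to lowest priority and takes the first non-NULL answer, so bailing out when rq->litmus.prev is unset simply defers the decision to the lower classes. A condensed, illustrative version of that loop (not the literal kernel/sched.c code):

/* Simplified class-iteration loop: a NULL return from one class hands
 * the decision to the next one down.
 */
static struct task_struct *pick_next_task_sketch(struct rq *rq)
{
	const struct sched_class *class;
	struct task_struct *p;

	for (class = sched_class_highest; class; class = class->next) {
		p = class->pick_next_task(rq);
		if (p)
			return p;
	}
	BUG();	/* unreachable: the idle class always yields a task */
}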
@@ -263,6 +295,7 @@ const struct sched_class litmus_sched_class = {
 
 	.load_balance = load_balance_litmus,
 	.move_one_task = move_one_task_litmus,
+	.pre_schedule = pre_schedule_litmus,
 #endif
 
 	.set_curr_task = set_curr_task_litmus,