author    Andrea Bastoni <bastoni@cs.unc.edu>    2010-09-21 21:07:29 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>    2010-09-21 21:07:29 -0400
commit    844cba1fd1189a755e60317a3f2b4403e7908b5b (patch)
tree      24c4baccae9478026ec4c5552a26fffcb8ca9f55 /litmus
parent    31354ae224da034d067e2f05eeb604802c5fd7cf (diff)
[EDF-fm] Remove code ugliness and properly initialize jobno

- Remove stale comments in the code.
- Remove unneeded checks and add proper invariants.
- Increment the per-cpu job number when a task is released (task_new).
  This removes the need to detect the 'first' job.
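The last point can be illustrated with a minimal user-space sketch (not LITMUS^RT code; the struct and helper names below are simplified stand-ins for rt_param.job_params and the task_new()/release path): the per-CPU job counter is charged once at release time, so no later check for the 'first' job is needed.

#include <stdio.h>

/* Simplified, hypothetical model of the job accounting touched by this
 * patch; it is not the kernel data structure. */
struct job_params {
	unsigned int job_no;        /* overall job number of the task */
	unsigned int cpu_job_no[2]; /* jobs charged to each of the two CPUs */
};

/* After the patch: unconditionally charge the job to its CPU slot;
 * no "are both counters still zero?" special case remains. */
static void update_job_counter(struct job_params *p, int cpu_pos)
{
	p->cpu_job_no[cpu_pos]++;
}

/* task_new path after the patch: release the first job and count it
 * right away, so the 'first' job never needs to be detected later. */
static void task_new(struct job_params *p, int cpu_pos)
{
	p->job_no = 1;                 /* stands in for release_at() */
	update_job_counter(p, cpu_pos);
}

int main(void)
{
	struct job_params p = { 0, { 0, 0 } };

	task_new(&p, 0);
	printf("job_no=%u cpu_job_no={%u,%u}\n",
	       p.job_no, p.cpu_job_no[0], p.cpu_job_no[1]);
	return 0;
}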
Diffstat (limited to 'litmus')
-rw-r--r--	litmus/sched_edf_fm.c	63
1 file changed, 9 insertions, 54 deletions
diff --git a/litmus/sched_edf_fm.c b/litmus/sched_edf_fm.c
index 8eb7b9c12237..70c5d289288f 100644
--- a/litmus/sched_edf_fm.c
+++ b/litmus/sched_edf_fm.c
@@ -91,8 +91,7 @@ int edffm_higher_prio(struct task_struct* first, struct task_struct* second)
 }
 
 /* need_to_preempt - check whether the task t needs to be preempted
  * call only with irqs disabled and with ready_lock acquired
- * THIS DOES NOT TAKE NON-PREEMPTIVE SECTIONS INTO ACCOUNT!
  */
 int edffm_preemption_needed(rt_domain_t* rt, struct task_struct *t)
 {
@@ -187,10 +186,6 @@ static noinline void insert_task_in_remote_ready(struct task_struct *task)
 {
 	edffm_domain_t *this = remote_edffm(task_cpu(task));
 	edffm_domain_t *remote = remote_edffm(get_partition(task));
-	/* FIXME we don't drop the rqlock, so this task shouldn't change
-	 * This function shouldn't be called by a wakeup (as the job hasn't
-	 * completed yet) */
-	struct task_struct *old;
 
 	BUG_ON(get_partition(task) != remote->cpu);
 
@@ -198,7 +193,6 @@ static noinline void insert_task_in_remote_ready(struct task_struct *task)
 			this->cpu, remote->cpu);
 	TRACE_TASK(task, "Inserting in remote ready queue\n");
 
-	old = task;
 	WARN_ON(!irqs_disabled());
 
 	raw_spin_unlock(&this->slock);
@@ -215,13 +209,6 @@ static noinline void insert_task_in_remote_ready(struct task_struct *task)
 	raw_spin_unlock(&remote->slock);
 	TRACE_TASK(task,"edffm_lock %d released\n", remote->cpu);
 
-	if (old != task) {
-		TRACE_TASK(task,"Task changed while we dropped"
-				" the lock: old = (0x%p),"
-				" cur = (0x%p)\n",
-				old, task);
-	}
-
 	/* ask remote cpu to reschedule, we are already rescheduling on this */
 	preempt(remote);
 }
@@ -262,23 +249,12 @@ static void requeue(struct task_struct* t, rt_domain_t *edf)
 /* Update statistics for the _current_ job.
  * - job_no was incremented _before_ starting this job
  *   (release_at / prepare_for_next_period)
- * - job_no can be incremented several times before what we consider
- *   to be the 'true' release. FIXME: this has the side effect of
- *   discarding the first job...
+ * - cpu_job_no is incremented when the job completes
  */
 static void update_job_counter(struct task_struct *t)
 {
 	int cpu_pos;
 
-	/* We need a precise accounting for job numbers; when the first real
-	 * job completes, both job CPU conters are 0; we reset job_no counter
-	 */
-	if (tsk_rt(t)->job_params.cpu_job_no[0] == 0 &&
-			tsk_rt(t)->job_params.cpu_job_no[1] == 0) {
-		/* first "real job" has executed once */
-		t->rt_param.job_params.job_no = 1;
-	}
-
 	/* Which CPU counter should be incremented? */
 	cpu_pos = edffm_cpu_pos(t->rt_param.task_params.cpu, t);
 	t->rt_param.job_params.cpu_job_no[cpu_pos]++;
@@ -288,14 +264,11 @@ static void update_job_counter(struct task_struct *t)
 			t->rt_param.task_params.cpu);
 }
 
-/* What is the next cpu for this job? (eq. 8) */
+/* What is the next cpu for this job? (eq. 8, in EDF-Fm paper) */
 static int next_cpu_for_job(struct task_struct *t)
 {
 	BUG_ON(!is_migrat_task(t));
 
-	/* update_job_counter() reset job_no to 1
-	 * when the first "real" job is detected
-	 */
 	if ((t->rt_param.job_params.job_no) ==
 			(((lt_t) cur_cpu_job_no(t) * cur_cpu_fract_den(t)) /
 			cur_cpu_fract_num(t)))
@@ -365,10 +338,6 @@ static struct task_struct* edffm_schedule(struct task_struct * prev)
 
 	raw_spin_lock(&edffm->slock);
 
-	/* sanity checking
-	 * differently from gedf, when a task exits (dead)
-	 * edffm->schedule may be null and prev _is_ realtime
-	 */
 	BUG_ON(edffm->scheduled && edffm->scheduled != prev);
 	BUG_ON(edffm->scheduled && !is_realtime(prev));
 
@@ -382,10 +351,7 @@ static struct task_struct* edffm_schedule(struct task_struct * prev)
 	change_cpu = exists && wrong_cpu(edffm->scheduled);
 	preempt = edffm_preemption_needed(edf, prev);
 
-	/* FIXME can this happen??? */
-	if(blocks && change_cpu) {
-		TRACE_TASK(prev, "WARN: blocked, but should change CPU!\n");
-	}
+	BUG_ON(blocks && change_cpu);
 
 	if (exists)
 		TRACE_TASK(prev,
@@ -394,14 +360,10 @@ static struct task_struct* edffm_schedule(struct task_struct * prev)
 			blocks, out_of_time, sleep, preempt,
 			change_cpu, prev->state, signal_pending(prev));
 
-	/* If we need to preempt do so.
-	 * The following checks set resched to 1 in case of special
-	 * circumstances.
-	 */
+	/* If we need to preempt do so. */
 	resched = preempt;
 
-	/* If a task blocks we have no choice but to reschedule.
-	 */
+	/* If a task blocks we have no choice but to reschedule. */
 	if (blocks)
 		resched = 1;
 
@@ -428,9 +390,7 @@ static struct task_struct* edffm_schedule(struct task_struct * prev)
 	 */
 	next = NULL;
 	if (resched || !exists) {
-		/* Take care of a previously scheduled
-		 * job by taking it out of the Linux runqueue.
-		 */
+
 		if (edffm->scheduled && !blocks)
 			requeue(edffm->scheduled, edf);
 		next = __take_ready(edf);
@@ -465,9 +425,8 @@ static void edffm_task_new(struct task_struct * t, int on_rq, int running)
 	TRACE_TASK(t, "EDF-fm: task new, cpu = %d\n",
 		   t->rt_param.task_params.cpu);
 
-	/* FIXME: setup job parameters, we will start counting 'real' jobs
-	 * after the first completion */
 	release_at(t, litmus_clock());
+	update_job_counter(t);
 
 	/* The task should be running in the queue, otherwise signal
 	 * code will try to wake it up with fatal consequences.
@@ -531,7 +490,6 @@ static void edffm_task_wake_up(struct task_struct *task)
 
 static void edffm_task_block(struct task_struct *t)
 {
-	/* only running tasks can block, thus t is in no queue */
 	TRACE_TASK(t, "block at %llu, state=%d\n", litmus_clock(), t->state);
 
 	BUG_ON(!is_realtime(t));
@@ -587,10 +545,7 @@ static int __init init_edffm(void)
 	int i;
 	edffm_domain_t *edffm;
 
-	/* We do not really want to support cpu hotplug, do we? ;)
-	 * However, if we are so crazy to do so,
-	 * we cannot use num_online_cpu()
-	 */
+	/* Note, broken if num_online_cpus() may change */
 	for (i = 0; i < num_online_cpus(); i++) {
 		edffm = remote_edffm(i);
 		edffm->cpu = i;
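
For reference, the condition kept in the next_cpu_for_job() hunk above implements the eq. 8 test of the EDF-Fm paper: the task's job_no is compared against floor(cpu_job_no * den / num), where num/den is the fraction of jobs assigned to the current CPU. The stand-alone sketch below only tabulates that arithmetic; the fraction values and helper name are invented for illustration, and which CPU each branch of the kernel code selects lies outside the quoted hunk.

#include <stdio.h>

typedef unsigned long long lt_t;

/* Invented example fraction: the current CPU should receive
 * num/den = 2/3 of this migratory task's jobs (stand-ins for
 * cur_cpu_fract_num()/cur_cpu_fract_den() in the patch). */
static const unsigned int fract_num = 2;
static const unsigned int fract_den = 3;

/* The value job_no is compared against in next_cpu_for_job(). */
static lt_t eq8_threshold(unsigned int cpu_job_no)
{
	return ((lt_t)cpu_job_no * fract_den) / fract_num;
}

int main(void)
{
	unsigned int cpu_job_no;

	for (cpu_job_no = 0; cpu_job_no <= 5; cpu_job_no++)
		printf("cpu_job_no=%u -> compare job_no with %llu\n",
		       cpu_job_no, eq8_threshold(cpu_job_no));
	return 0;
}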