aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorGlenn Elliott <gelliott@cs.unc.edu>2014-02-19 15:31:18 -0500
committerGlenn Elliott <gelliott@cs.unc.edu>2014-02-19 15:33:42 -0500
commit8b10d67cc74a4dbe67aa3cb9e17281710f0d6017 (patch)
tree470e510e42fb5ccc73029ca938816cc568ac158f
parent8a74106e25e8e4d38bc1ad651258de170262ee73 (diff)
Port C-FL PGM algorithms to C-EDF.
-rw-r--r--litmus/sched_cedf.c49
1 file changed, 25 insertions, 24 deletions
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index ca4cd0c1d3f0..33ea89e81eff 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -456,9 +456,10 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
456 if (exists) 456 if (exists)
457 TRACE_TASK(prev, 457 TRACE_TASK(prev,
458 "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " 458 "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d "
459 "state:%d sig:%d\n", 459 "state:%d sig:%d boosted:%d\n",
460 blocks, out_of_time, np, sleep, preempt, 460 blocks, out_of_time, np, sleep, preempt,
461 prev->state, signal_pending(prev)); 461 prev->state, signal_pending(prev),
462 is_priority_boosted(entry->scheduled));
462 if (entry->linked && preempt) 463 if (entry->linked && preempt)
463 TRACE_TASK(prev, "will be preempted by %s/%d\n", 464 TRACE_TASK(prev, "will be preempted by %s/%d\n",
464 entry->linked->comm, entry->linked->pid); 465 entry->linked->comm, entry->linked->pid);
@@ -473,17 +474,24 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
473 474
474 /* We are either sending tokens or waiting for tokens. 475 /* We are either sending tokens or waiting for tokens.
475 If waiting: Boost priority so we'll be scheduled 476 If waiting: Boost priority so we'll be scheduled
476 immediately when needed tokens arrive. 477 immediately when needed tokens arrive.
477 If sending: Boost priority so no one (specifically, our 478 If sending: Boost priority so no one (specifically, our
478 consumers) will preempt us while signalling the token 479 consumers) will preempt us while signalling the token
479 transmission. 480 transmission.
480 */ 481 */
481 tsk_rt(entry->scheduled)->priority_boosted = 1; 482 tsk_rt(entry->scheduled)->priority_boosted = 1;
482 tsk_rt(entry->scheduled)->boost_start_time = litmus_clock(); 483 tsk_rt(entry->scheduled)->boost_start_time = litmus_clock();
483 484
484 if (likely(!blocks)) { 485 if (likely(!blocks)) {
485 unlink(entry->scheduled); 486 unlink(entry->scheduled);
486 cedf_job_arrival(entry->scheduled); 487 cedf_job_arrival(entry->scheduled);
488 /* we may regain the processor */
489 if (preempt) {
490 preempt = entry->scheduled != entry->linked;
491 if (!preempt) {
492 TRACE_TASK(entry->scheduled, "blocked preemption by lazy boosting.\n");
493 }
494 }
487 } 495 }
488 } 496 }
489 } 497 }
@@ -500,26 +508,17 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
500 /* recheck priority */ 508 /* recheck priority */
501 unlink(entry->scheduled); 509 unlink(entry->scheduled);
502 cedf_job_arrival(entry->scheduled); 510 cedf_job_arrival(entry->scheduled);
511 /* we may lose the processor */
512 if (!preempt) {
513 preempt = entry->scheduled != entry->linked;
514 if (preempt) {
515 TRACE_TASK(entry->scheduled, "preempted by lazy unboosting.\n");
516 }
517 }
503 } 518 }
504 } 519 }
505 } 520 }
506 } 521 }
507#if 0
508 else if(is_pgm_waiting(entry->scheduled)) {
509 int shifted_release;
510
511 TRACE_TASK(entry->scheduled, "is waiting for PGM tokens.\n");
512 /* release the next job if we have the tokens we need */
513 shifted_release = setup_pgm_release(entry->scheduled);
514
515 /* setup_pgm_release() can screw with our priority,
516 so recheck it */
517 if (shifted_release && likely(!blocks)) {
518 unlink(entry->scheduled);
519 cedf_job_arrival(entry->scheduled);
520 }
521 }
522#endif
523 } 522 }
524#endif 523#endif
525 524
@@ -533,10 +532,13 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
533 * that we are still linked. Multiple calls to request_exit_np() don't 532 * that we are still linked. Multiple calls to request_exit_np() don't
534 * hurt. 533 * hurt.
535 */ 534 */
536 if (np && (out_of_time || preempt || sleep)) { 535 if (np && (out_of_time || sleep)) {
537 unlink(entry->scheduled); 536 unlink(entry->scheduled);
538 request_exit_np(entry->scheduled); 537 request_exit_np(entry->scheduled);
539 } 538 }
539 else if(np && preempt) {
540 request_exit_np(entry->scheduled);
541 }
540 542
541 /* Any task that is preemptable and either exhausts its execution 543 /* Any task that is preemptable and either exhausts its execution
542 * budget or wants to sleep completes. We may have to reschedule after 544 * budget or wants to sleep completes. We may have to reschedule after
@@ -586,7 +588,6 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
586 TRACE("becomes idle at %llu.\n", litmus_clock()); 588 TRACE("becomes idle at %llu.\n", litmus_clock());
587#endif 589#endif
588 590
589
590 return next; 591 return next;
591} 592}
592 593