 litmus/sched_gsn_edf.c | 100 ++++++++++++++++++++++++++-------------
 1 file changed, 65 insertions(+), 35 deletions(-)

diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index c5c4d4947c5a..073f0f4db821 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -455,45 +455,67 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 	if (exists)
 		TRACE_TASK(prev,
 			   "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d "
-			   "state:%d sig:%d\n",
+			   "state:%d sig:%d boosted:%d\n",
 			   blocks, out_of_time, np, sleep, preempt,
-			   prev->state, signal_pending(prev));
+			   prev->state, signal_pending(prev),
+			   is_priority_boosted(entry->scheduled));
 	if (entry->linked && preempt)
 		TRACE_TASK(prev, "will be preempted by %s/%d\n",
 			   entry->linked->comm, entry->linked->pid);
 
 #ifdef CONFIG_SCHED_PGM
-	if (exists && is_pgm_waiting(entry->scheduled)) {
-		if (!is_priority_boosted(entry->scheduled)) {
-			TRACE_TASK(entry->scheduled, "is waiting for PGM tokens.\n");
-			BUG_ON(is_pgm_satisfied(entry->scheduled));
-
-			/* Boost priority so we'll be scheduled immediately
-			   when needed tokens arrive. */
-			tsk_rt(entry->scheduled)->priority_boosted = 1;
-			tsk_rt(entry->scheduled)->boost_start_time = litmus_clock();
-
-			if (unlikely(!blocks)) {
-				/* Task has probably blocked on an inbound token socket, but
-				   if not, re-evaluate scheduling decisions */
-				unlink(entry->scheduled);
-				gsnedf_job_arrival(entry->scheduled);
-			}
-		}
-		else if (is_pgm_satisfied(entry->scheduled)) {
-			TRACE_TASK(entry->scheduled, "is done waiting for PGM tokens.\n");
-			BUG_ON(!is_priority_boosted(entry->scheduled));
-
-			/* clear any boosting */
-			tsk_rt(entry->scheduled)->priority_boosted = 0;
-			setup_pgm_release(entry->scheduled);
-
-			if (likely(!blocks)) {
-				/* Task has probably called sched_yield(), so blocking is
-				   unlikely. Re-evaluate scheduling decisions because we
-				   still want to run. */
-				unlink(entry->scheduled);
-				gsnedf_job_arrival(entry->scheduled);
+	if (exists) {
+		if (is_pgm_sending(entry->scheduled)) {
+			if (!is_pgm_satisfied(entry->scheduled)) {
+				if (!is_priority_boosted(entry->scheduled)) {
+					TRACE_TASK(entry->scheduled, "is sending PGM tokens and needs boosting.\n");
+					BUG_ON(is_pgm_satisfied(entry->scheduled));
+
+					/* We are either sending tokens or waiting for tokens.
+					   If waiting: Boost priority so we'll be scheduled
+					   immediately when needed tokens arrive.
+					   If sending: Boost priority so no one (specifically, our
+					   consumers) will preempt us while signalling the token
+					   transmission.
+					*/
+					tsk_rt(entry->scheduled)->priority_boosted = 1;
+					tsk_rt(entry->scheduled)->boost_start_time = litmus_clock();
+
+					if (likely(!blocks)) {
+						unlink(entry->scheduled);
+						gsnedf_job_arrival(entry->scheduled);
+						/* we may regain the processor */
+						if (preempt) {
+							preempt = entry->scheduled != entry->linked;
+							if (!preempt) {
+								TRACE_TASK(entry->scheduled, "blocked preemption by lazy boosting.\n");
+							}
+						}
+					}
+				}
+			}
+			else { /* sending is satisfied */
+				tsk_rt(entry->scheduled)->ctrl_page->pgm_sending = 0;
+				tsk_rt(entry->scheduled)->ctrl_page->pgm_satisfied = 0;
+
+				if (is_priority_boosted(entry->scheduled)) {
+					TRACE_TASK(entry->scheduled,
+						"is done sending PGM tokens and must relinquish boosting.\n");
+					/* clear boosting */
+					tsk_rt(entry->scheduled)->priority_boosted = 0;
+					if (likely(!blocks)) {
+						/* recheck priority */
+						unlink(entry->scheduled);
+						gsnedf_job_arrival(entry->scheduled);
+						/* we may lose the processor */
+						if (!preempt) {
+							preempt = entry->scheduled != entry->linked;
+							if (preempt) {
+								TRACE_TASK(entry->scheduled, "preempted by lazy unboosting.\n");
+							}
+						}
+					}
+				}
+			}
 		}
 	}
 }
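
The interesting move in this hunk is the "lazy" boost: rather than reordering the ready queue directly, the patch flips priority_boosted, unlinks the task, and lets gsnedf_job_arrival() re-run the ordinary priority comparison, then recomputes preempt from the new link. For illustration only, here is a minimal standalone sketch (not LITMUS^RT code; the struct and field names are assumptions) of how boosting can fold into an EDF-style priority comparison:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t lt_t; /* time in nanoseconds, like lt_t in LITMUS^RT */

struct fake_task {
	const char *comm;
	lt_t deadline;          /* absolute EDF deadline */
	bool priority_boosted;  /* set while sending/waiting for PGM tokens */
	lt_t boost_start_time;  /* earlier boost wins among boosted tasks */
};

/* Return true if a has higher priority than b. */
static bool higher_prio(const struct fake_task *a, const struct fake_task *b)
{
	/* A boosted task beats any non-boosted task, so a token-sending
	   producer cannot be preempted by its own consumers. */
	if (a->priority_boosted != b->priority_boosted)
		return a->priority_boosted;
	/* Among boosted tasks, order FIFO by boost start time. */
	if (a->priority_boosted)
		return a->boost_start_time < b->boost_start_time;
	/* Plain EDF otherwise. */
	return a->deadline < b->deadline;
}

int main(void)
{
	struct fake_task producer = { "prod", 2000, true,  100 };
	struct fake_task consumer = { "cons", 1000, false, 0 };

	/* Despite its later deadline, the boosted producer wins: this is
	   the "blocked preemption by lazy boosting" case traced above. */
	printf("producer first: %d\n", higher_prio(&producer, &consumer));
	return 0;
}

Under such an ordering, re-running the arrival path after toggling the boost flag is enough to make the preempt decision come out right, which is why the hunk only needs unlink() plus gsnedf_job_arrival() instead of bespoke queue surgery.
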
@@ -509,10 +531,13 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 	 * that we are still linked. Multiple calls to request_exit_np() don't
 	 * hurt.
 	 */
-	if (np && (out_of_time || preempt || sleep)) {
+	if (np && (out_of_time || sleep)) {
 		unlink(entry->scheduled);
 		request_exit_np(entry->scheduled);
 	}
+	else if (np && preempt) {
+		request_exit_np(entry->scheduled);
+	}
 
 	/* Any task that is preemptable and either exhausts its execution
 	 * budget or wants to sleep completes. We may have to reschedule after
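
The split above distinguishes two ways out of a non-preemptive section: a job that is out of budget or sleeping is finished either way and can be unlinked immediately, while a merely preempted job must stay linked and keep the CPU until it voluntarily leaves the section. A small sketch of that protocol, with assumed names (request_exit_np() here is a stand-in for the real flag-setting helper, which the user-space np-section code polls):

#include <stdbool.h>
#include <stdio.h>

struct np_task {
	bool np;             /* inside a non-preemptive section */
	bool linked;         /* linked to a CPU by the global scheduler */
	bool exit_requested; /* flag polled when leaving the np-section */
};

static void request_exit_np(struct np_task *t) { t->exit_requested = true; }
static void unlink_task(struct np_task *t)     { t->linked = false; }

static void handle_np(struct np_task *t, bool out_of_time, bool sleep,
		      bool preempt)
{
	if (t->np && (out_of_time || sleep)) {
		/* The job is over either way: tear the link down now. */
		unlink_task(t);
		request_exit_np(t);
	} else if (t->np && preempt) {
		/* Merely preempted: stay linked and keep running until the
		   task leaves its np-section on its own. */
		request_exit_np(t);
	}
}

int main(void)
{
	struct np_task t = { .np = true, .linked = true };
	handle_np(&t, false, false, true);
	printf("linked=%d exit_requested=%d\n", t.linked, t.exit_requested);
	return 0;
}
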
@@ -543,12 +568,14 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 			entry->scheduled->rt_param.scheduled_on = NO_CPU;
 			TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
 		}
-	} else
+	}
+	else {
 		/* Only override Linux scheduler if we have a real-time task
 		 * scheduled that needs to continue.
 		 */
 		if (exists)
 			next = prev;
+	}
 
 	sched_state_task_picked();
 
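
The restructured else-branch is a formatting change, but it preserves the plugin's return-value convention: hand back the newly linked task if there is one, otherwise return prev so a still-eligible real-time task keeps running rather than falling back to the Linux scheduler. A minimal sketch of that decision (assumed types, not the plugin's actual signatures):

#include <stddef.h>
#include <stdio.h>

struct task { int pid; };

/* NULL means "let Linux pick"; anything else keeps the RT task running. */
static struct task *pick_next(struct task *linked, struct task *prev_rt)
{
	if (linked)
		return linked;   /* switch to the newly linked task */
	if (prev_rt)
		return prev_rt;  /* only override Linux if a RT task continues */
	return NULL;             /* idle as far as the plugin is concerned */
}

int main(void)
{
	struct task prev = { 42 };
	printf("next pid: %d\n", pick_next(NULL, &prev)->pid);
	return 0;
}
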
@@ -563,7 +590,6 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 		TRACE("becomes idle at %llu.\n", litmus_clock());
 #endif
 
-
 	return next;
 }
 
@@ -635,6 +661,10 @@ static void gsnedf_task_wake_up(struct task_struct *task)
 		release_at(task, now);
 		sched_trace_task_release(task);
 	}
+	if (is_pgm_waiting(task)) {
+		/* shift out release/deadline, if needed */
+		setup_pgm_release(task);
+	}
 	gsnedf_job_arrival(task);
 	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 }
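
The new wake-up hook calls setup_pgm_release() for a task still waiting on PGM tokens; per the comment, it may "shift out" the job's release and deadline. The body of that helper is not shown in this hunk, so the following is a hypothetical sketch of that kind of adjustment (all names and fields are assumptions): if a PGM consumer wakes after its nominal release because its tokens arrived late, move the release to "now" and push the deadline out by the relative deadline.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t lt_t;

struct fake_job {
	lt_t release;       /* absolute release time */
	lt_t deadline;      /* absolute deadline */
	lt_t rel_deadline;  /* relative deadline from the task parameters */
};

static void shift_pgm_release(struct fake_job *j, lt_t now)
{
	/* Only shift if the tokens arrived after the nominal release. */
	if (now > j->release) {
		j->release = now;
		j->deadline = now + j->rel_deadline;
	}
}

int main(void)
{
	struct fake_job j = { .release = 100, .deadline = 200,
			      .rel_deadline = 100 };
	shift_pgm_release(&j, 150); /* tokens arrived 50 time units late */
	printf("release=%llu deadline=%llu\n",
	       (unsigned long long)j.release,
	       (unsigned long long)j.deadline);
	return 0;
}
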