aboutsummaryrefslogtreecommitdiffstats
path: root/litmus/sched_gsn_edf.c
diff options
context:
space:
mode:
Diffstat (limited to 'litmus/sched_gsn_edf.c')
-rw-r--r--litmus/sched_gsn_edf.c434
1 files changed, 432 insertions, 2 deletions
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index ac7685fe69f0..b40ff7ba4f0e 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -35,6 +35,10 @@
35#include <litmus/litmus_softirq.h> 35#include <litmus/litmus_softirq.h>
36#endif 36#endif
37 37
38#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
39#include <linux/interrupt.h>
40#endif
41
38#ifdef CONFIG_LITMUS_NVIDIA 42#ifdef CONFIG_LITMUS_NVIDIA
39#include <litmus/nvidia_info.h> 43#include <litmus/nvidia_info.h>
40#endif 44#endif
@@ -126,6 +130,16 @@ static struct bheap gsnedf_cpu_heap;
126static rt_domain_t gsnedf; 130static rt_domain_t gsnedf;
127#define gsnedf_lock (gsnedf.ready_lock) 131#define gsnedf_lock (gsnedf.ready_lock)
128 132
133#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
/* Singly-linked list of pending PAI (process-aware interrupt) tasklets.
 * tail points at the ->next field of the last element — or at head when
 * the list is empty — giving O(1) append via *tail = t; tail = &t->next. */
134struct tasklet_head
135{
136	struct tasklet_struct *head;
137	struct tasklet_struct **tail;
138};
139
/* Global queue of tasklets awaiting PAI execution.
 * NOTE(review): all accesses below are made under gsnedf_lock — confirm
 * no lock-free reader exists before relying on that invariant. */
140struct tasklet_head gsnedf_pending_tasklets;
141#endif
142
129 143
130/* Uncomment this if you want to see all scheduling decisions in the 144/* Uncomment this if you want to see all scheduling decisions in the
131 * TRACE() log. 145 * TRACE() log.
@@ -393,6 +407,410 @@ static void gsnedf_tick(struct task_struct* t)
393 } 407 }
394} 408}
395 409
410
411
412
413
414
415
416
417
418
419
420
421#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
422
423
424static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
425{
426 if (!atomic_read(&tasklet->count)) {
427 sched_trace_tasklet_begin(tasklet->owner);
428
429 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state))
430 {
431 BUG();
432 }
433 TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n", __FUNCTION__, tasklet->owner->pid, flushed);
434 tasklet->func(tasklet->data);
435 tasklet_unlock(tasklet);
436
437 sched_trace_tasklet_end(tasklet->owner, flushed);
438 }
439 else {
440 BUG();
441 }
442}
443
444
445static void __extract_tasklets(struct task_struct* task, struct tasklet_head* task_tasklets)
446{
447 struct tasklet_struct* step;
448 struct tasklet_struct* tasklet;
449 struct tasklet_struct* prev;
450
451 task_tasklets->head = NULL;
452 task_tasklets->tail = &(task_tasklets->head);
453
454 prev = NULL;
455 for(step = gsnedf_pending_tasklets.head; step != NULL; step = step->next)
456 {
457 if(step->owner == task)
458 {
459 TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid);
460
461 tasklet = step;
462
463 if(prev) {
464 prev->next = tasklet->next;
465 }
466 else if(gsnedf_pending_tasklets.head == tasklet) {
467 // we're at the head.
468 gsnedf_pending_tasklets.head = tasklet->next;
469 }
470
471 if(gsnedf_pending_tasklets.tail == &tasklet) {
472 // we're at the tail
473 if(prev) {
474 gsnedf_pending_tasklets.tail = &prev;
475 }
476 else {
477 gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head);
478 }
479 }
480
481 tasklet->next = NULL;
482 *(task_tasklets->tail) = tasklet;
483 task_tasklets->tail = &(tasklet->next);
484 }
485 else {
486 prev = step;
487 }
488 }
489}
490
491static void flush_tasklets(struct task_struct* task)
492{
493 unsigned long flags;
494 struct tasklet_head task_tasklets;
495 struct tasklet_struct* step;
496
497 raw_spin_lock_irqsave(&gsnedf_lock, flags);
498 __extract_tasklets(task, &task_tasklets);
499 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
500
501 if(gsnedf_pending_tasklets.head != NULL) {
502 TRACE("%s: Flushing tasklets for %d...\n", __FUNCTION__, task->pid);
503 }
504
505 // now execute any flushed tasklets.
506 for(step = gsnedf_pending_tasklets.head; step != NULL; /**/)
507 {
508 struct tasklet_struct* temp = step->next;
509
510 step->next = NULL;
511 __do_lit_tasklet(step, 1ul);
512
513 step = temp;
514 }
515}
516
517
518static void do_lit_tasklets(struct task_struct* sched_task)
519{
520 int work_to_do = 1;
521 struct tasklet_struct *tasklet = NULL;
522 //struct tasklet_struct *step;
523 unsigned long flags;
524
525 while(work_to_do) {
526 // remove tasklet at head of list if it has higher priority.
527 raw_spin_lock_irqsave(&gsnedf_lock, flags);
528
529 /*
530 step = gsnedf_pending_tasklets.head;
531 TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__);
532 while(step != NULL){
533 TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
534 step = step->next;
535 }
536 TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(gsnedf_pending_tasklets.tail), (*(gsnedf_pending_tasklets.tail) != NULL) ? (*(gsnedf_pending_tasklets.tail))->owner->pid : -1);
537 TRACE("%s: done.\n", __FUNCTION__);
538 */
539
540
541 if(gsnedf_pending_tasklets.head != NULL) {
542 // remove tasklet at head.
543 tasklet = gsnedf_pending_tasklets.head;
544
545 if(edf_higher_prio(tasklet->owner, sched_task)) {
546
547 if(NULL == tasklet->next) {
548 // tasklet is at the head, list only has one element
549 TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
550 gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head);
551 }
552
553 // remove the tasklet from the queue
554 gsnedf_pending_tasklets.head = tasklet->next;
555
556 TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
557 }
558 else {
559 TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id());
560 tasklet = NULL;
561 }
562 }
563 else {
564 TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
565 }
566
567
568 /*
569 step = gsnedf_pending_tasklets.head;
570 TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__);
571 while(step != NULL){
572 TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
573 step = step->next;
574 }
575 TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(gsnedf_pending_tasklets.tail), (*(gsnedf_pending_tasklets.tail) != NULL) ? (*(gsnedf_pending_tasklets.tail))->owner->pid : -1);
576 TRACE("%s: done.\n", __FUNCTION__);
577 */
578
579
580 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
581
582 if(tasklet) {
583 __do_lit_tasklet(tasklet, 0ul);
584 tasklet = NULL;
585 }
586 else {
587 work_to_do = 0;
588 }
589 }
590
591 //TRACE("%s: exited.\n", __FUNCTION__);
592}
593
594
595static void run_tasklets(struct task_struct* sched_task)
596{
597#if 0
598 int task_is_rt = is_realtime(sched_task);
599 cedf_domain_t* cluster;
600
601 if(is_realtime(sched_task)) {
602 cluster = task_cpu_cluster(sched_task);
603 }
604 else {
605 cluster = remote_cluster(get_cpu());
606 }
607
608 if(cluster && gsnedf_pending_tasklets.head != NULL) {
609 TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
610
611 do_lit_tasklets(cluster, sched_task);
612 }
613
614 if(!task_is_rt) {
615 put_cpu_no_resched();
616 }
617#else
618
619 preempt_disable();
620
621 if(gsnedf_pending_tasklets.head != NULL) {
622 TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
623 do_lit_tasklets(sched_task);
624 }
625
626 preempt_enable_no_resched();
627
628#endif
629}
630
631
632static void __add_pai_tasklet(struct tasklet_struct* tasklet)
633{
634 struct tasklet_struct* step;
635
636 /*
637 step = gsnedf_pending_tasklets.head;
638 TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__);
639 while(step != NULL){
640 TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
641 step = step->next;
642 }
643 TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(gsnedf_pending_tasklets.tail), (*(gsnedf_pending_tasklets.tail) != NULL) ? (*(gsnedf_pending_tasklets.tail))->owner->pid : -1);
644 TRACE("%s: done.\n", __FUNCTION__);
645 */
646
647
648 tasklet->next = NULL; // make sure there are no old values floating around
649
650 step = gsnedf_pending_tasklets.head;
651 if(step == NULL) {
652 TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid);
653 // insert at tail.
654 *(gsnedf_pending_tasklets.tail) = tasklet;
655 gsnedf_pending_tasklets.tail = &(tasklet->next);
656 }
657 else if((*(gsnedf_pending_tasklets.tail) != NULL) &&
658 edf_higher_prio((*(gsnedf_pending_tasklets.tail))->owner, tasklet->owner)) {
659 // insert at tail.
660 TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid);
661
662 *(gsnedf_pending_tasklets.tail) = tasklet;
663 gsnedf_pending_tasklets.tail = &(tasklet->next);
664 }
665 else {
666
667 //WARN_ON(1 == 1);
668
669 // insert the tasklet somewhere in the middle.
670
671 TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__);
672
673 while(step->next && edf_higher_prio(step->next->owner, tasklet->owner)) {
674 step = step->next;
675 }
676
677 // insert tasklet right before step->next.
678
679 TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1);
680
681 tasklet->next = step->next;
682 step->next = tasklet;
683
684 // patch up the head if needed.
685 if(gsnedf_pending_tasklets.head == step)
686 {
687 TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid);
688 gsnedf_pending_tasklets.head = tasklet;
689 }
690 }
691
692 /*
693 step = gsnedf_pending_tasklets.head;
694 TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__);
695 while(step != NULL){
696 TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
697 step = step->next;
698 }
699 TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(gsnedf_pending_tasklets.tail), (*(gsnedf_pending_tasklets.tail) != NULL) ? (*(gsnedf_pending_tasklets.tail))->owner->pid : -1);
700 TRACE("%s: done.\n", __FUNCTION__);
701 */
702
703 // TODO: Maintain this list in priority order.
704 // tasklet->next = NULL;
705 // *(gsnedf_pending_tasklets.tail) = tasklet;
706 // gsnedf_pending_tasklets.tail = &tasklet->next;
707}
708
709static int enqueue_pai_tasklet(struct tasklet_struct* tasklet)
710{
711 cpu_entry_t *targetCPU = NULL;
712 int thisCPU;
713 int runLocal = 0;
714 int runNow = 0;
715 unsigned long flags;
716
717 if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner)))
718 {
719 TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
720 return 0;
721 }
722
723
724 raw_spin_lock_irqsave(&gsnedf_lock, flags);
725
726 thisCPU = smp_processor_id();
727
728#if 1
729#ifdef CONFIG_SCHED_CPU_AFFINITY
730 {
731 cpu_entry_t* affinity = NULL;
732
733 // use this CPU if it is in our cluster and isn't running any RT work.
734 if(
735#ifdef CONFIG_RELEASE_MASTER
736 (thisCPU != gsnedf.release_master) &&
737#endif
738 (__get_cpu_var(gsnedf_cpu_entries).linked == NULL)) {
739 affinity = &(__get_cpu_var(gsnedf_cpu_entries));
740 }
741 else {
742 // this CPU is busy or shouldn't run tasklet in this cluster.
743 // look for available near by CPUs.
744 // NOTE: Affinity towards owner and not this CPU. Is this right?
745 affinity =
746 gsnedf_get_nearest_available_cpu(
747 &per_cpu(gsnedf_cpu_entries, task_cpu(tasklet->owner)));
748 }
749
750 targetCPU = affinity;
751 }
752#endif
753#endif
754
755 if (targetCPU == NULL) {
756 targetCPU = lowest_prio_cpu();
757 }
758
759 if (edf_higher_prio(tasklet->owner, targetCPU->linked)) {
760 if (thisCPU == targetCPU->cpu) {
761 TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__);
762 runLocal = 1;
763 runNow = 1;
764 }
765 else {
766 TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__);
767 runLocal = 0;
768 runNow = 1;
769 }
770 }
771 else {
772 runLocal = 0;
773 runNow = 0;
774 }
775
776 if(!runLocal) {
777 // enqueue the tasklet
778 __add_pai_tasklet(tasklet);
779 }
780
781 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
782
783
784 if (runLocal /*&& runNow */) { // runNow == 1 is implied
785 TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
786 __do_lit_tasklet(tasklet, 0ul);
787 }
788 else if (runNow /*&& !runLocal */) { // runLocal == 0 is implied
789 TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu);
790 preempt(targetCPU); // need to be protected by cedf_lock?
791 }
792 else {
793 TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__);
794 }
795
796 return(1); // success
797}
798
799
800#endif
801
802
803
804
805
806
807
808
809
810
811
812
813
396/* Getting schedule() right is a bit tricky. schedule() may not make any 814/* Getting schedule() right is a bit tricky. schedule() may not make any
397 * assumptions on the state of the current task since it may be called for a 815 * assumptions on the state of the current task since it may be called for a
398 * number of reasons. The reasons include a scheduler_tick() determined that it 816 * number of reasons. The reasons include a scheduler_tick() determined that it
@@ -592,7 +1010,7 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
592static void gsnedf_task_wake_up(struct task_struct *task) 1010static void gsnedf_task_wake_up(struct task_struct *task)
593{ 1011{
594 unsigned long flags; 1012 unsigned long flags;
595 lt_t now; 1013 //lt_t now;
596 1014
597 TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); 1015 TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
598 1016
@@ -660,6 +1078,10 @@ static void gsnedf_task_exit(struct task_struct * t)
660 } 1078 }
661 raw_spin_unlock_irqrestore(&gsnedf_lock, flags); 1079 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
662 1080
1081#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
1082 flush_tasklets(t);
1083#endif
1084
663 BUG_ON(!is_realtime(t)); 1085 BUG_ON(!is_realtime(t));
664 TRACE_TASK(t, "RIP\n"); 1086 TRACE_TASK(t, "RIP\n");
665} 1087}
@@ -1602,6 +2024,11 @@ static long gsnedf_activate_plugin(void)
1602 } 2024 }
1603#endif 2025#endif
1604 } 2026 }
2027
2028#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
2029 gsnedf_pending_tasklets.head = NULL;
2030 gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head);
2031#endif
1605 2032
1606#ifdef CONFIG_LITMUS_SOFTIRQD 2033#ifdef CONFIG_LITMUS_SOFTIRQD
1607 spawn_klitirqd(NULL); 2034 spawn_klitirqd(NULL);
@@ -1636,7 +2063,10 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
1636 .set_prio_inh_klitirqd = set_priority_inheritance_klitirqd, 2063 .set_prio_inh_klitirqd = set_priority_inheritance_klitirqd,
1637 .clear_prio_inh_klitirqd = clear_priority_inheritance_klitirqd, 2064 .clear_prio_inh_klitirqd = clear_priority_inheritance_klitirqd,
1638#endif 2065#endif
1639 2066#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
2067 .enqueue_pai_tasklet = enqueue_pai_tasklet,
2068 .run_tasklets = run_tasklets,
2069#endif
1640}; 2070};
1641 2071
1642 2072