Diffstat (limited to 'litmus/sched_cfifo.c')
-rw-r--r--	litmus/sched_cfifo.c	450
1 file changed, 448 insertions(+), 2 deletions(-)
diff --git a/litmus/sched_cfifo.c b/litmus/sched_cfifo.c
index f515446f76ed..689b2dbe5fae 100644
--- a/litmus/sched_cfifo.c
+++ b/litmus/sched_cfifo.c
@@ -55,6 +55,10 @@
 #include <litmus/litmus_softirq.h>
 #endif
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+#include <linux/interrupt.h>
+#endif
+
 #ifdef CONFIG_LITMUS_NVIDIA
 #include <litmus/nvidia_info.h>
 #endif
@@ -91,6 +95,15 @@ DEFINE_PER_CPU(cpu_entry_t, cfifo_cpu_entries);
 #define test_will_schedule(cpu) \
 	(atomic_read(&per_cpu(cfifo_cpu_entries, cpu).will_schedule))
 
+
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+struct tasklet_head
+{
+	struct tasklet_struct *head;
+	struct tasklet_struct **tail;
+};
+#endif
+
 /*
  * In C-FIFO there is a cfifo domain _per_ cluster
  * The number of clusters is dynamically determined according to the
@@ -108,6 +121,12 @@ typedef struct clusterdomain {
 	struct bheap		cpu_heap;
 	/* lock for this cluster */
 #define cfifo_lock domain.ready_lock
+
+
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	struct tasklet_head	pending_tasklets;
+#endif
+
 } cfifo_domain_t;
 
 /* a cfifo_domain per cluster; allocation is done at init/activation time */
@@ -251,7 +270,7 @@ static void preempt(cpu_entry_t *entry)
 	preempt_if_preemptable(entry->scheduled, entry->cpu);
 }
 
-/* requeue - Put an unlinked task into gsn-edf domain.
+/* requeue - Put an unlinked task into c-fifo domain.
  * Caller must hold cfifo_lock.
  */
 static noinline void requeue(struct task_struct* task)
@@ -395,6 +414,419 @@ static void cfifo_tick(struct task_struct* t)
 	}
 }
 
+
+
+
+
+
+
+
+
+
+
+
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+
+
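+/* Execute a single LITMUS tasklet on this CPU on behalf of its owner,
+ * emitting sched_trace begin/end events.  'flushed' is nonzero when the
+ * tasklet is being flushed at task exit rather than run for priority
+ * reasons. */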
+static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
+{
+	if (!atomic_read(&tasklet->count)) {
+		sched_trace_tasklet_begin(tasklet->owner);
+
+		if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state)) {
+			BUG();
+		}
+		TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n", __FUNCTION__, tasklet->owner->pid, flushed);
+		tasklet->func(tasklet->data);
+		tasklet_unlock(tasklet);
+
+		sched_trace_tasklet_end(tasklet->owner, flushed);
+	}
+	else {
+		BUG();
+	}
+}
+
+
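+/* Unlink every tasklet owned by 'task' from the cluster's pending queue and
+ * append it, in order, to 'task_tasklets'.  Caller must hold cfifo_lock. */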
+static void __extract_tasklets(cfifo_domain_t* cluster, struct task_struct* task, struct tasklet_head* task_tasklets)
+{
+	struct tasklet_struct* step;
+	struct tasklet_struct* next;
+	struct tasklet_struct* prev;
+
+	task_tasklets->head = NULL;
+	task_tasklets->tail = &(task_tasklets->head);
+
+	prev = NULL;
+	for(step = cluster->pending_tasklets.head; step != NULL; step = next)
+	{
+		// remember the successor now; step->next is rewritten if step is extracted.
+		next = step->next;
+
+		if(step->owner == task)
+		{
+			TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid);
+
+			// unlink step from the pending queue.
+			if(prev) {
+				prev->next = step->next;
+			}
+			else {
+				// we're at the head.
+				cluster->pending_tasklets.head = step->next;
+			}
+
+			// if step was the last element, the tail pointer must be backed up too.
+			if(cluster->pending_tasklets.tail == &(step->next)) {
+				cluster->pending_tasklets.tail =
+					(prev) ? &(prev->next) : &(cluster->pending_tasklets.head);
+			}
+
+			// append step to the list of extracted tasklets.
+			step->next = NULL;
+			*(task_tasklets->tail) = step;
+			task_tasklets->tail = &(step->next);
+		}
+		else {
+			prev = step;
+		}
+	}
+}
+
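+/* Pull all of an exiting task's still-pending tasklets off the cluster queue
+ * and run them to completion; called from cfifo_task_exit() so no tasklet
+ * outlives its owner. */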
+static void flush_tasklets(cfifo_domain_t* cluster, struct task_struct* task)
+{
+	unsigned long flags;
+	struct tasklet_head task_tasklets;
+	struct tasklet_struct* step;
+
+	raw_spin_lock_irqsave(&cluster->cfifo_lock, flags);
+	__extract_tasklets(cluster, task, &task_tasklets);
+	raw_spin_unlock_irqrestore(&cluster->cfifo_lock, flags);
+
+	if(task_tasklets.head != NULL) {
+		TRACE("%s: Flushing tasklets for %d...\n", __FUNCTION__, task->pid);
+	}
+
+	// now execute any flushed tasklets.
+	for(step = task_tasklets.head; step != NULL; /**/)
+	{
+		struct tasklet_struct* temp = step->next;
+
+		step->next = NULL;
+		__do_lit_tasklet(step, 1ul);
+
+		step = temp;
+	}
+}
+
+
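+/* Drain the cluster's pending-tasklet queue: repeatedly pop and run the head
+ * tasklet as long as its owner has higher FIFO priority than sched_task.
+ * The cluster lock is dropped while each tasklet executes. */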
+static void do_lit_tasklets(cfifo_domain_t* cluster, struct task_struct* sched_task)
+{
+	int work_to_do = 1;
+	struct tasklet_struct *tasklet = NULL;
+	//struct tasklet_struct *step;
+	unsigned long flags;
+
+	while(work_to_do) {
+		// remove tasklet at head of list if it has higher priority.
+		raw_spin_lock_irqsave(&cluster->cfifo_lock, flags);
+
+		/*
+		step = cluster->pending_tasklets.head;
+		TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__);
+		while(step != NULL){
+			TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
+			step = step->next;
+		}
+		TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
+		TRACE("%s: done.\n", __FUNCTION__);
+		*/
+
+		if(cluster->pending_tasklets.head != NULL) {
+			// remove tasklet at head.
+			tasklet = cluster->pending_tasklets.head;
+
+			if(fifo_higher_prio(tasklet->owner, sched_task)) {
+
+				if(NULL == tasklet->next) {
+					// tasklet is at the head, list only has one element
+					TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
+					cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head);
+				}
+
+				// remove the tasklet from the queue
+				cluster->pending_tasklets.head = tasklet->next;
+
+				TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
+			}
+			else {
+				TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id());
+				tasklet = NULL;
+			}
+		}
+		else {
+			TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
+		}
+
+		/*
+		step = cluster->pending_tasklets.head;
+		TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__);
+		while(step != NULL){
+			TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
+			step = step->next;
+		}
+		TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
+		TRACE("%s: done.\n", __FUNCTION__);
+		*/
+
+		raw_spin_unlock_irqrestore(&cluster->cfifo_lock, flags);
+
+		if(tasklet) {
+			__do_lit_tasklet(tasklet, 0ul);
+			tasklet = NULL;
+		}
+		else {
+			work_to_do = 0;
+		}
+	}
+
+	//TRACE("%s: exited.\n", __FUNCTION__);
+}
+
+
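+/* Plugin hook: with preemption disabled, process any pending tasklets in the
+ * relevant cluster whose owners out-prioritize the task about to run on this
+ * CPU. */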
+static void run_tasklets(struct task_struct* sched_task)
+{
+	cfifo_domain_t* cluster;
+
+#if 0
+	int task_is_rt = is_realtime(sched_task);
+	cfifo_domain_t* cluster;
+
+	if(is_realtime(sched_task)) {
+		cluster = task_cpu_cluster(sched_task);
+	}
+	else {
+		cluster = remote_cluster(get_cpu());
+	}
+
+	if(cluster && cluster->pending_tasklets.head != NULL) {
+		TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
+
+		do_lit_tasklets(cluster, sched_task);
+	}
+
+	if(!task_is_rt) {
+		put_cpu_no_resched();
+	}
+#else
+
+	preempt_disable();
+
+	cluster = (is_realtime(sched_task)) ?
+		task_cpu_cluster(sched_task) :
+		remote_cluster(smp_processor_id());
+
+	if(cluster && cluster->pending_tasklets.head != NULL) {
+		TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
+		do_lit_tasklets(cluster, sched_task);
+	}
+
+	preempt_enable_no_resched();
+
+#endif
+}
+
+
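+/* Insert a tasklet into the cluster's pending queue, keeping the queue
+ * ordered by the FIFO priority of each tasklet's owner (highest-priority
+ * owner at the head).  Caller must hold cfifo_lock. */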
+static void __add_pai_tasklet(struct tasklet_struct* tasklet, cfifo_domain_t* cluster)
+{
+	struct tasklet_struct* step;
+
+	/*
+	step = cluster->pending_tasklets.head;
+	TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__);
+	while(step != NULL){
+		TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
+		step = step->next;
+	}
+	TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
+	TRACE("%s: done.\n", __FUNCTION__);
+	*/
+
+	tasklet->next = NULL;	// make sure there are no old values floating around
+
+	step = cluster->pending_tasklets.head;
+	if(step == NULL) {
+		TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid);
+		// queue was empty: the tasklet becomes both head and tail.
+		*(cluster->pending_tasklets.tail) = tasklet;
+		cluster->pending_tasklets.tail = &(tasklet->next);
+	}
+	else if((*(cluster->pending_tasklets.tail) != NULL) &&
+			fifo_higher_prio((*(cluster->pending_tasklets.tail))->owner, tasklet->owner)) {
+		// lower priority than everything already queued: insert at tail.
+		TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid);
+
+		*(cluster->pending_tasklets.tail) = tasklet;
+		cluster->pending_tasklets.tail = &(tasklet->next);
+	}
+	else if(fifo_higher_prio(tasklet->owner, step->owner)) {
+		// higher priority than the current head: insert at the head.
+		TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid);
+
+		tasklet->next = step;
+		cluster->pending_tasklets.head = tasklet;
+	}
+	else {
+
+		//WARN_ON(1 == 1);
+
+		// insert the tasklet somewhere in the middle.
+
+		TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__);
+
+		while(step->next && fifo_higher_prio(step->next->owner, tasklet->owner)) {
+			step = step->next;
+		}
+
+		// insert tasklet right before step->next.
+
+		TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1);
+
+		tasklet->next = step->next;
+		step->next = tasklet;
+
+		// if the tasklet ended up last in the queue, the tail pointer must follow it.
+		if(tasklet->next == NULL) {
+			cluster->pending_tasklets.tail = &(tasklet->next);
+		}
+	}
+
+	/*
+	step = cluster->pending_tasklets.head;
+	TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__);
+	while(step != NULL){
+		TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
+		step = step->next;
+	}
+	TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
+	TRACE("%s: done.\n", __FUNCTION__);
+	*/
+}
+
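+/* Plugin hook for PAI tasklet handling: given a tasklet owned by a real-time
+ * task, decide whether it runs immediately on this CPU, triggers a preemption
+ * of another CPU in the owner's cluster, or is deferred onto the cluster's
+ * pending queue. */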
+static int enqueue_pai_tasklet(struct tasklet_struct* tasklet)
+{
+	cfifo_domain_t *cluster = NULL;
+	cpu_entry_t *targetCPU = NULL;
+	int thisCPU;
+	int runLocal = 0;
+	int runNow = 0;
+	unsigned long flags;
+
+	if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner))) {
+		TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
+		return 0;
+	}
+
+	cluster = task_cpu_cluster(tasklet->owner);
+
+	raw_spin_lock_irqsave(&cluster->cfifo_lock, flags);
+
+	thisCPU = smp_processor_id();
+
+#if 1
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+	{
+		cpu_entry_t* affinity = NULL;
+
+		// use this CPU if it is in our cluster and isn't running any RT work.
+		if(cpu_isset(thisCPU, *cluster->cpu_map) && (__get_cpu_var(cfifo_cpu_entries).linked == NULL)) {
+			affinity = &(__get_cpu_var(cfifo_cpu_entries));
+		}
+		else {
+			// this CPU is busy or shouldn't run tasklets in this cluster.
+			// look for available nearby CPUs.
+			// NOTE: Affinity towards owner and not this CPU. Is this right?
+			affinity =
+				cfifo_get_nearest_available_cpu(cluster,
+					&per_cpu(cfifo_cpu_entries, task_cpu(tasklet->owner)));
+		}
+
+		targetCPU = affinity;
+	}
+#endif
+#endif
+
+	if (targetCPU == NULL) {
+		targetCPU = lowest_prio_cpu(cluster);
+	}
+
+	if (fifo_higher_prio(tasklet->owner, targetCPU->linked)) {
+		if (thisCPU == targetCPU->cpu) {
+			TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__);
+			runLocal = 1;
+			runNow = 1;
+		}
+		else {
+			TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__);
+			runLocal = 0;
+			runNow = 1;
+		}
+	}
+	else {
+		runLocal = 0;
+		runNow = 0;
+	}
+
+	if(!runLocal) {
+		// enqueue the tasklet
+		__add_pai_tasklet(tasklet, cluster);
+	}
+
+	raw_spin_unlock_irqrestore(&cluster->cfifo_lock, flags);
+
+
+	if (runLocal /*&& runNow */) {	// runNow == 1 is implied
+		TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
+		__do_lit_tasklet(tasklet, 0ul);
+	}
+	else if (runNow /*&& !runLocal */) {	// runLocal == 0 is implied
+		TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu);
+		preempt(targetCPU);	// need to be protected by cfifo_lock?
+	}
+	else {
+		TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__);
+	}
+
+	return 1;	// success
+}
+
+
+#endif
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
 /* Getting schedule() right is a bit tricky. schedule() may not make any
  * assumptions on the state of the current task since it may be called for a
  * number of reasons. The reasons include a scheduler_tick() determined that it
@@ -544,7 +976,7 @@ static void cfifo_task_new(struct task_struct * t, int on_rq, int running)
 	cpu_entry_t* entry;
 	cfifo_domain_t* cluster;
 
-	TRACE("gsn edf: task new %d\n", t->pid);
+	TRACE("cfifo: task new %d\n", t->pid);
 
 	/* the cluster doesn't change even if t is running */
 	cluster = task_cpu_cluster(t);
@@ -650,6 +1082,10 @@ static void cfifo_task_exit(struct task_struct * t)
 	}
 	raw_spin_unlock_irqrestore(&cluster->cfifo_lock, flags);
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	flush_tasklets(cluster, t);
+#endif
+
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
 }
@@ -1467,6 +1903,12 @@ static long cfifo_activate_plugin(void)
 		bheap_init(&(cfifo[i].cpu_heap));
 		fifo_domain_init(&(cfifo[i].domain), NULL, cfifo_release_jobs);
 
+
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+		cfifo[i].pending_tasklets.head = NULL;
+		cfifo[i].pending_tasklets.tail = &(cfifo[i].pending_tasklets.head);
+#endif
+
 		if(!zalloc_cpumask_var(&cfifo[i].cpu_map, GFP_ATOMIC))
 			return -ENOMEM;
 	}
@@ -1578,6 +2020,10 @@ static struct sched_plugin cfifo_plugin __cacheline_aligned_in_smp = {
 #ifdef CONFIG_LITMUS_SOFTIRQD
 	.set_prio_inh_klitirqd = set_priority_inheritance_klitirqd,
 	.clear_prio_inh_klitirqd = clear_priority_inheritance_klitirqd,
+#endif
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	.enqueue_pai_tasklet = enqueue_pai_tasklet,
+	.run_tasklets = run_tasklets,
 #endif
 };
 