author     Glenn Elliott <gelliott@cs.unc.edu>   2012-01-14 16:56:47 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>   2012-01-14 16:56:47 -0500
commit     1a582a2c5e361e01a4c64f185bb1a23c3f70701a (patch)
tree       31862cf35d16f5c226b46007fd5645ad26440700
parent     53a6dbb9f5337e77fce9c2672488c1c5e0621beb (diff)
Port PAI interrupts to GSN-EDF, C-RM/RM-SRT/FIFO.
-rw-r--r--  litmus/sched_cedf.c      32
-rw-r--r--  litmus/sched_cfifo.c    450
-rw-r--r--  litmus/sched_crm.c      448
-rw-r--r--  litmus/sched_crm_srt.c  445
-rw-r--r--  litmus/sched_gsn_edf.c  434
5 files changed, 1787 insertions(+), 22 deletions(-)
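
Editor's note (illustrative sketch, not part of the patch): each ported plugin keeps its pending PAI tasklets in a per-cluster singly-linked list described by struct tasklet_head, whose "tail" member is a pointer to the last node's next pointer. An empty queue therefore has tail == &head, and appending never needs a head/non-head special case. The stand-alone C sketch below shows only that layout; "struct node" and its "owner_pid" field are hypothetical stand-ins for struct tasklet_struct and tasklet->owner->pid.

	#include <stdio.h>
	#include <stddef.h>

	struct node {
		int owner_pid;              /* stand-in for tasklet->owner->pid */
		struct node *next;
	};

	struct tasklet_head {
		struct node *head;
		struct node **tail;         /* points at the last node's next field */
	};

	/* mirrors the pending_tasklets initialization in *_activate_plugin() */
	static void queue_init(struct tasklet_head *q)
	{
		q->head = NULL;
		q->tail = &q->head;
	}

	/* mirrors the insert-at-tail case of __add_pai_tasklet() */
	static void queue_append(struct tasklet_head *q, struct node *n)
	{
		n->next = NULL;
		*(q->tail) = n;             /* works for empty and non-empty queues alike */
		q->tail = &n->next;
	}

	int main(void)
	{
		struct tasklet_head q;
		struct node a = { 100, NULL }, b = { 200, NULL };
		struct node *step;

		queue_init(&q);
		queue_append(&q, &a);
		queue_append(&q, &b);

		for (step = q.head; step != NULL; step = step->next)
			printf("pending tasklet owned by pid %d\n", step->owner_pid);
		return 0;
	}
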
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 4924da21865e..02106f455c0f 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -124,7 +124,6 @@ typedef struct clusterdomain {
 
 
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-	raw_spinlock_t tasklet_lock;
 	struct tasklet_head pending_tasklets;
 #endif
 
@@ -430,7 +429,7 @@ static void cedf_tick(struct task_struct* t)
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
 
 
-void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
+static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
 {
 	if (!atomic_read(&tasklet->count)) {
 		sched_trace_tasklet_begin(tasklet->owner);
@@ -451,7 +450,7 @@ void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
 }
 
 
-void __extract_tasklets(cedf_domain_t* cluster, struct task_struct* task, struct tasklet_head* task_tasklets)
+static void __extract_tasklets(cedf_domain_t* cluster, struct task_struct* task, struct tasklet_head* task_tasklets)
 {
 	struct tasklet_struct* step;
 	struct tasklet_struct* tasklet;
@@ -497,7 +496,7 @@ void __extract_tasklets(cedf_domain_t* cluster, struct task_struct* task, struct
 	}
 }
 
-void flush_tasklets(cedf_domain_t* cluster, struct task_struct* task)
+static void flush_tasklets(cedf_domain_t* cluster, struct task_struct* task)
 {
 	unsigned long flags;
 	struct tasklet_head task_tasklets;
@@ -524,18 +523,18 @@ void flush_tasklets(cedf_domain_t* cluster, struct task_struct* task)
 }
 
 
-void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_task)
+static void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_task)
 {
 	int work_to_do = 1;
 	struct tasklet_struct *tasklet = NULL;
-	struct tasklet_struct *step;
+	//struct tasklet_struct *step;
 	unsigned long flags;
 
 	while(work_to_do) {
 		// remove tasklet at head of list if it has higher priority.
 		raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
 
-
+		/*
 		step = cluster->pending_tasklets.head;
 		TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__);
 		while(step != NULL){
@@ -544,6 +543,7 @@ void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_task)
 		}
 		TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
 		TRACE("%s: done.\n", __FUNCTION__);
+		*/
 
 
 		if(cluster->pending_tasklets.head != NULL) {
@@ -573,6 +573,7 @@ void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_task)
 		}
 
 
+		/*
 		step = cluster->pending_tasklets.head;
 		TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__);
 		while(step != NULL){
@@ -581,6 +582,7 @@ void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_task)
 		}
 		TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
 		TRACE("%s: done.\n", __FUNCTION__);
+		*/
 
 
 		raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags);
@@ -598,7 +600,7 @@ void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_task)
 }
 
 
-void run_tasklets(struct task_struct* sched_task)
+static void run_tasklets(struct task_struct* sched_task)
 {
 	cedf_domain_t* cluster;
 
@@ -641,10 +643,11 @@ void run_tasklets(struct task_struct* sched_task)
 }
 
 
-void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* cluster)
+static void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* cluster)
 {
 	struct tasklet_struct* step;
 
+	/*
 	step = cluster->pending_tasklets.head;
 	TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__);
 	while(step != NULL){
@@ -653,6 +656,7 @@ void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* cluster)
 	}
 	TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
 	TRACE("%s: done.\n", __FUNCTION__);
+	*/
 
 
 	tasklet->next = NULL; // make sure there are no old values floating around
@@ -674,7 +678,7 @@ void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* cluster)
 	}
 	else {
 
-		WARN_ON(1 == 1);
+		//WARN_ON(1 == 1);
 
 		// insert the tasklet somewhere in the middle.
 
@@ -699,7 +703,7 @@ void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* cluster)
 		}
 	}
 
-
+	/*
 	step = cluster->pending_tasklets.head;
 	TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__);
 	while(step != NULL){
@@ -707,7 +711,8 @@ void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* cluster)
 		step = step->next;
 	}
 	TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
 	TRACE("%s: done.\n", __FUNCTION__);
+	*/
 
 // TODO: Maintain this list in priority order.
 //	tasklet->next = NULL;
@@ -715,7 +720,7 @@ void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* cluster)
 //	cluster->pending_tasklets.tail = &tasklet->next;
 }
 
-int enqueue_pai_tasklet(struct tasklet_struct* tasklet)
+static int enqueue_pai_tasklet(struct tasklet_struct* tasklet)
 {
 	cedf_domain_t *cluster = NULL;
 	cpu_entry_t *targetCPU = NULL;
@@ -1909,7 +1914,6 @@ static long cedf_activate_plugin(void)
 
 
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-		raw_spin_lock_init(&(cedf[i].tasklet_lock));
 		cedf[i].pending_tasklets.head = NULL;
 		cedf[i].pending_tasklets.tail = &(cedf[i].pending_tasklets.head);
 #endif
diff --git a/litmus/sched_cfifo.c b/litmus/sched_cfifo.c
index f515446f76ed..689b2dbe5fae 100644
--- a/litmus/sched_cfifo.c
+++ b/litmus/sched_cfifo.c
@@ -55,6 +55,10 @@
 #include <litmus/litmus_softirq.h>
 #endif
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+#include <linux/interrupt.h>
+#endif
+
 #ifdef CONFIG_LITMUS_NVIDIA
 #include <litmus/nvidia_info.h>
 #endif
@@ -91,6 +95,15 @@ DEFINE_PER_CPU(cpu_entry_t, cfifo_cpu_entries);
 #define test_will_schedule(cpu) \
 	(atomic_read(&per_cpu(cfifo_cpu_entries, cpu).will_schedule))
 
+
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+struct tasklet_head
+{
+	struct tasklet_struct *head;
+	struct tasklet_struct **tail;
+};
+#endif
+
 /*
  * In C-FIFO there is a cfifo domain _per_ cluster
  * The number of clusters is dynamically determined accordingly to the
@@ -108,6 +121,12 @@ typedef struct clusterdomain {
 	struct bheap cpu_heap;
 	/* lock for this cluster */
 #define cfifo_lock domain.ready_lock
+
+
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	struct tasklet_head pending_tasklets;
+#endif
+
 } cfifo_domain_t;
 
 /* a cfifo_domain per cluster; allocation is done at init/activation time */
@@ -251,7 +270,7 @@ static void preempt(cpu_entry_t *entry)
 	preempt_if_preemptable(entry->scheduled, entry->cpu);
 }
 
-/* requeue - Put an unlinked task into gsn-edf domain.
+/* requeue - Put an unlinked task into c-fifo domain.
  * Caller must hold cfifo_lock.
  */
 static noinline void requeue(struct task_struct* task)
@@ -395,6 +414,419 @@ static void cfifo_tick(struct task_struct* t)
 	}
 }
 
417
418
419
420
421
422
423
424
425
426
427
428#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
429
430
431static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
432{
433 if (!atomic_read(&tasklet->count)) {
434 sched_trace_tasklet_begin(tasklet->owner);
435
436 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state))
437 {
438 BUG();
439 }
440 TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n", __FUNCTION__, tasklet->owner->pid, flushed);
441 tasklet->func(tasklet->data);
442 tasklet_unlock(tasklet);
443
444 sched_trace_tasklet_end(tasklet->owner, flushed);
445 }
446 else {
447 BUG();
448 }
449}
450
451
452static void __extract_tasklets(cfifo_domain_t* cluster, struct task_struct* task, struct tasklet_head* task_tasklets)
453{
454 struct tasklet_struct* step;
455 struct tasklet_struct* tasklet;
456 struct tasklet_struct* prev;
457
458 task_tasklets->head = NULL;
459 task_tasklets->tail = &(task_tasklets->head);
460
461 prev = NULL;
462 for(step = cluster->pending_tasklets.head; step != NULL; step = step->next)
463 {
464 if(step->owner == task)
465 {
466 TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid);
467
468 tasklet = step;
469
470 if(prev) {
471 prev->next = tasklet->next;
472 }
473 else if(cluster->pending_tasklets.head == tasklet) {
474 // we're at the head.
475 cluster->pending_tasklets.head = tasklet->next;
476 }
477
478 if(cluster->pending_tasklets.tail == &tasklet) {
479 // we're at the tail
480 if(prev) {
481 cluster->pending_tasklets.tail = &prev;
482 }
483 else {
484 cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head);
485 }
486 }
487
488 tasklet->next = NULL;
489 *(task_tasklets->tail) = tasklet;
490 task_tasklets->tail = &(tasklet->next);
491 }
492 else {
493 prev = step;
494 }
495 }
496}
497
498static void flush_tasklets(cfifo_domain_t* cluster, struct task_struct* task)
499{
500 unsigned long flags;
501 struct tasklet_head task_tasklets;
502 struct tasklet_struct* step;
503
504 raw_spin_lock_irqsave(&cluster->cfifo_lock, flags);
505 __extract_tasklets(cluster, task, &task_tasklets);
506 raw_spin_unlock_irqrestore(&cluster->cfifo_lock, flags);
507
508 if(cluster->pending_tasklets.head != NULL) {
509 TRACE("%s: Flushing tasklets for %d...\n", __FUNCTION__, task->pid);
510 }
511
512 // now execute any flushed tasklets.
513 for(step = cluster->pending_tasklets.head; step != NULL; /**/)
514 {
515 struct tasklet_struct* temp = step->next;
516
517 step->next = NULL;
518 __do_lit_tasklet(step, 1ul);
519
520 step = temp;
521 }
522}
523
524
525static void do_lit_tasklets(cfifo_domain_t* cluster, struct task_struct* sched_task)
526{
527 int work_to_do = 1;
528 struct tasklet_struct *tasklet = NULL;
529 //struct tasklet_struct *step;
530 unsigned long flags;
531
532 while(work_to_do) {
533 // remove tasklet at head of list if it has higher priority.
534 raw_spin_lock_irqsave(&cluster->cfifo_lock, flags);
535
536 /*
537 step = cluster->pending_tasklets.head;
538 TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__);
539 while(step != NULL){
540 TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
541 step = step->next;
542 }
543 TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
544 TRACE("%s: done.\n", __FUNCTION__);
545 */
546
547
548 if(cluster->pending_tasklets.head != NULL) {
549 // remove tasklet at head.
550 tasklet = cluster->pending_tasklets.head;
551
552 if(fifo_higher_prio(tasklet->owner, sched_task)) {
553
554 if(NULL == tasklet->next) {
555 // tasklet is at the head, list only has one element
556 TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
557 cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head);
558 }
559
560 // remove the tasklet from the queue
561 cluster->pending_tasklets.head = tasklet->next;
562
563 TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
564 }
565 else {
566 TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id());
567 tasklet = NULL;
568 }
569 }
570 else {
571 TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
572 }
573
574 /*
575 step = cluster->pending_tasklets.head;
576 TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__);
577 while(step != NULL){
578 TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
579 step = step->next;
580 }
581 TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
582 TRACE("%s: done.\n", __FUNCTION__);
583 */
584
585 raw_spin_unlock_irqrestore(&cluster->cfifo_lock, flags);
586
587 if(tasklet) {
588 __do_lit_tasklet(tasklet, 0ul);
589 tasklet = NULL;
590 }
591 else {
592 work_to_do = 0;
593 }
594 }
595
596 //TRACE("%s: exited.\n", __FUNCTION__);
597}
598
599
600static void run_tasklets(struct task_struct* sched_task)
601{
602 cfifo_domain_t* cluster;
603
604#if 0
605 int task_is_rt = is_realtime(sched_task);
606 cfifo_domain_t* cluster;
607
608 if(is_realtime(sched_task)) {
609 cluster = task_cpu_cluster(sched_task);
610 }
611 else {
612 cluster = remote_cluster(get_cpu());
613 }
614
615 if(cluster && cluster->pending_tasklets.head != NULL) {
616 TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
617
618 do_lit_tasklets(cluster, sched_task);
619 }
620
621 if(!task_is_rt) {
622 put_cpu_no_resched();
623 }
624#else
625
626 preempt_disable();
627
628 cluster = (is_realtime(sched_task)) ?
629 task_cpu_cluster(sched_task) :
630 remote_cluster(smp_processor_id());
631
632 if(cluster && cluster->pending_tasklets.head != NULL) {
633 TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
634 do_lit_tasklets(cluster, sched_task);
635 }
636
637 preempt_enable_no_resched();
638
639#endif
640}
641
642
643static void __add_pai_tasklet(struct tasklet_struct* tasklet, cfifo_domain_t* cluster)
644{
645 struct tasklet_struct* step;
646
647 /*
648 step = cluster->pending_tasklets.head;
649 TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__);
650 while(step != NULL){
651 TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
652 step = step->next;
653 }
654 TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
655 TRACE("%s: done.\n", __FUNCTION__);
656 */
657
658
659 tasklet->next = NULL; // make sure there are no old values floating around
660
661 step = cluster->pending_tasklets.head;
662 if(step == NULL) {
663 TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid);
664 // insert at tail.
665 *(cluster->pending_tasklets.tail) = tasklet;
666 cluster->pending_tasklets.tail = &(tasklet->next);
667 }
668 else if((*(cluster->pending_tasklets.tail) != NULL) &&
669 fifo_higher_prio((*(cluster->pending_tasklets.tail))->owner, tasklet->owner)) {
670 // insert at tail.
671 TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid);
672
673 *(cluster->pending_tasklets.tail) = tasklet;
674 cluster->pending_tasklets.tail = &(tasklet->next);
675 }
676 else {
677
678 //WARN_ON(1 == 1);
679
680 // insert the tasklet somewhere in the middle.
681
682 TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__);
683
684 while(step->next && fifo_higher_prio(step->next->owner, tasklet->owner)) {
685 step = step->next;
686 }
687
688 // insert tasklet right before step->next.
689
690 TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1);
691
692 tasklet->next = step->next;
693 step->next = tasklet;
694
695 // patch up the head if needed.
696 if(cluster->pending_tasklets.head == step)
697 {
698 TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid);
699 cluster->pending_tasklets.head = tasklet;
700 }
701 }
702
703 /*
704 step = cluster->pending_tasklets.head;
705 TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__);
706 while(step != NULL){
707 TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
708 step = step->next;
709 }
710 TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
711 TRACE("%s: done.\n", __FUNCTION__);
712 */
713
714 // TODO: Maintain this list in priority order.
715 // tasklet->next = NULL;
716 // *(cluster->pending_tasklets.tail) = tasklet;
717 // cluster->pending_tasklets.tail = &tasklet->next;
718}
719
720static int enqueue_pai_tasklet(struct tasklet_struct* tasklet)
721{
722 cfifo_domain_t *cluster = NULL;
723 cpu_entry_t *targetCPU = NULL;
724 int thisCPU;
725 int runLocal = 0;
726 int runNow = 0;
727 unsigned long flags;
728
729 if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner)))
730 {
731 TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
732 return 0;
733 }
734
735 cluster = task_cpu_cluster(tasklet->owner);
736
737 raw_spin_lock_irqsave(&cluster->cfifo_lock, flags);
738
739 thisCPU = smp_processor_id();
740
741#if 1
742#ifdef CONFIG_SCHED_CPU_AFFINITY
743 {
744 cpu_entry_t* affinity = NULL;
745
746 // use this CPU if it is in our cluster and isn't running any RT work.
747 if(cpu_isset(thisCPU, *cluster->cpu_map) && (__get_cpu_var(cfifo_cpu_entries).linked == NULL)) {
748 affinity = &(__get_cpu_var(cfifo_cpu_entries));
749 }
750 else {
751 // this CPU is busy or shouldn't run tasklet in this cluster.
752 // look for available near by CPUs.
753 // NOTE: Affinity towards owner and not this CPU. Is this right?
754 affinity =
755 cfifo_get_nearest_available_cpu(cluster,
756 &per_cpu(cfifo_cpu_entries, task_cpu(tasklet->owner)));
757 }
758
759 targetCPU = affinity;
760 }
761#endif
762#endif
763
764 if (targetCPU == NULL) {
765 targetCPU = lowest_prio_cpu(cluster);
766 }
767
768 if (fifo_higher_prio(tasklet->owner, targetCPU->linked)) {
769 if (thisCPU == targetCPU->cpu) {
770 TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__);
771 runLocal = 1;
772 runNow = 1;
773 }
774 else {
775 TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__);
776 runLocal = 0;
777 runNow = 1;
778 }
779 }
780 else {
781 runLocal = 0;
782 runNow = 0;
783 }
784
785 if(!runLocal) {
786 // enqueue the tasklet
787 __add_pai_tasklet(tasklet, cluster);
788 }
789
790 raw_spin_unlock_irqrestore(&cluster->cfifo_lock, flags);
791
792
793 if (runLocal /*&& runNow */) { // runNow == 1 is implied
794 TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
795 __do_lit_tasklet(tasklet, 0ul);
796 }
797 else if (runNow /*&& !runLocal */) { // runLocal == 0 is implied
798 TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu);
799 preempt(targetCPU); // need to be protected by cfifo_lock?
800 }
801 else {
802 TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__);
803 }
804
805 return(1); // success
806}
807
808
809#endif
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
 /* Getting schedule() right is a bit tricky. schedule() may not make any
  * assumptions on the state of the current task since it may be called for a
  * number of reasons. The reasons include a scheduler_tick() determined that it
@@ -544,7 +976,7 @@ static void cfifo_task_new(struct task_struct * t, int on_rq, int running)
 	cpu_entry_t* entry;
 	cfifo_domain_t* cluster;
 
-	TRACE("gsn edf: task new %d\n", t->pid);
+	TRACE("cfifo: task new %d\n", t->pid);
 
 	/* the cluster doesn't change even if t is running */
 	cluster = task_cpu_cluster(t);
@@ -650,6 +1082,10 @@ static void cfifo_task_exit(struct task_struct * t)
 	}
 	raw_spin_unlock_irqrestore(&cluster->cfifo_lock, flags);
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	flush_tasklets(cluster, t);
+#endif
+
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
 }
@@ -1467,6 +1903,12 @@ static long cfifo_activate_plugin(void)
 		bheap_init(&(cfifo[i].cpu_heap));
 		fifo_domain_init(&(cfifo[i].domain), NULL, cfifo_release_jobs);
 
+
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+		cfifo[i].pending_tasklets.head = NULL;
+		cfifo[i].pending_tasklets.tail = &(cfifo[i].pending_tasklets.head);
+#endif
+
 		if(!zalloc_cpumask_var(&cfifo[i].cpu_map, GFP_ATOMIC))
 			return -ENOMEM;
 	}
@@ -1578,6 +2020,10 @@ static struct sched_plugin cfifo_plugin __cacheline_aligned_in_smp = {
 #ifdef CONFIG_LITMUS_SOFTIRQD
 	.set_prio_inh_klitirqd = set_priority_inheritance_klitirqd,
 	.clear_prio_inh_klitirqd = clear_priority_inheritance_klitirqd,
+#endif
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	.enqueue_pai_tasklet = enqueue_pai_tasklet,
+	.run_tasklets = run_tasklets,
 #endif
 };
 
diff --git a/litmus/sched_crm.c b/litmus/sched_crm.c
index 061b29eaff7e..fd7fab982998 100644
--- a/litmus/sched_crm.c
+++ b/litmus/sched_crm.c
@@ -55,6 +55,10 @@
 #include <litmus/litmus_softirq.h>
 #endif
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+#include <linux/interrupt.h>
+#endif
+
 #ifdef CONFIG_LITMUS_NVIDIA
 #include <litmus/nvidia_info.h>
 #endif
@@ -91,6 +95,14 @@ DEFINE_PER_CPU(cpu_entry_t, crm_cpu_entries);
 #define test_will_schedule(cpu) \
 	(atomic_read(&per_cpu(crm_cpu_entries, cpu).will_schedule))
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+struct tasklet_head
+{
+	struct tasklet_struct *head;
+	struct tasklet_struct **tail;
+};
+#endif
+
 /*
  * In C-RM there is a crm domain _per_ cluster
  * The number of clusters is dynamically determined accordingly to the
@@ -108,6 +120,10 @@ typedef struct clusterdomain {
 	struct bheap cpu_heap;
 	/* lock for this cluster */
 #define crm_lock domain.ready_lock
+
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	struct tasklet_head pending_tasklets;
+#endif
 } crm_domain_t;
 
 /* a crm_domain per cluster; allocation is done at init/activation time */
@@ -251,7 +267,7 @@ static void preempt(cpu_entry_t *entry)
 	preempt_if_preemptable(entry->scheduled, entry->cpu);
 }
 
-/* requeue - Put an unlinked task into gsn-edf domain.
+/* requeue - Put an unlinked task into c-rm domain.
  * Caller must hold crm_lock.
 */
 static noinline void requeue(struct task_struct* task)
@@ -395,6 +411,421 @@ static void crm_tick(struct task_struct* t)
 	}
 }
 
414
415
416
417
418
419
420
421
422#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
423
424
425static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
426{
427 if (!atomic_read(&tasklet->count)) {
428 sched_trace_tasklet_begin(tasklet->owner);
429
430 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state))
431 {
432 BUG();
433 }
434 TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n", __FUNCTION__, tasklet->owner->pid, flushed);
435 tasklet->func(tasklet->data);
436 tasklet_unlock(tasklet);
437
438 sched_trace_tasklet_end(tasklet->owner, flushed);
439 }
440 else {
441 BUG();
442 }
443}
444
445
446static void __extract_tasklets(crm_domain_t* cluster, struct task_struct* task, struct tasklet_head* task_tasklets)
447{
448 struct tasklet_struct* step;
449 struct tasklet_struct* tasklet;
450 struct tasklet_struct* prev;
451
452 task_tasklets->head = NULL;
453 task_tasklets->tail = &(task_tasklets->head);
454
455 prev = NULL;
456 for(step = cluster->pending_tasklets.head; step != NULL; step = step->next)
457 {
458 if(step->owner == task)
459 {
460 TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid);
461
462 tasklet = step;
463
464 if(prev) {
465 prev->next = tasklet->next;
466 }
467 else if(cluster->pending_tasklets.head == tasklet) {
468 // we're at the head.
469 cluster->pending_tasklets.head = tasklet->next;
470 }
471
472 if(cluster->pending_tasklets.tail == &tasklet) {
473 // we're at the tail
474 if(prev) {
475 cluster->pending_tasklets.tail = &prev;
476 }
477 else {
478 cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head);
479 }
480 }
481
482 tasklet->next = NULL;
483 *(task_tasklets->tail) = tasklet;
484 task_tasklets->tail = &(tasklet->next);
485 }
486 else {
487 prev = step;
488 }
489 }
490}
491
492static void flush_tasklets(crm_domain_t* cluster, struct task_struct* task)
493{
494 unsigned long flags;
495 struct tasklet_head task_tasklets;
496 struct tasklet_struct* step;
497
498 raw_spin_lock_irqsave(&cluster->crm_lock, flags);
499 __extract_tasklets(cluster, task, &task_tasklets);
500 raw_spin_unlock_irqrestore(&cluster->crm_lock, flags);
501
502 if(cluster->pending_tasklets.head != NULL) {
503 TRACE("%s: Flushing tasklets for %d...\n", __FUNCTION__, task->pid);
504 }
505
506 // now execute any flushed tasklets.
507 for(step = cluster->pending_tasklets.head; step != NULL; /**/)
508 {
509 struct tasklet_struct* temp = step->next;
510
511 step->next = NULL;
512 __do_lit_tasklet(step, 1ul);
513
514 step = temp;
515 }
516}
517
518
519static void do_lit_tasklets(crm_domain_t* cluster, struct task_struct* sched_task)
520{
521 int work_to_do = 1;
522 struct tasklet_struct *tasklet = NULL;
523 //struct tasklet_struct *step;
524 unsigned long flags;
525
526 while(work_to_do) {
527 // remove tasklet at head of list if it has higher priority.
528 raw_spin_lock_irqsave(&cluster->crm_lock, flags);
529
530 /*
531 step = cluster->pending_tasklets.head;
532 TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__);
533 while(step != NULL){
534 TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
535 step = step->next;
536 }
537 TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
538 TRACE("%s: done.\n", __FUNCTION__);
539 */
540
541 if(cluster->pending_tasklets.head != NULL) {
542 // remove tasklet at head.
543 tasklet = cluster->pending_tasklets.head;
544
545 if(rm_higher_prio(tasklet->owner, sched_task)) {
546
547 if(NULL == tasklet->next) {
548 // tasklet is at the head, list only has one element
549 TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
550 cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head);
551 }
552
553 // remove the tasklet from the queue
554 cluster->pending_tasklets.head = tasklet->next;
555
556 TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
557 }
558 else {
559 TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id());
560 tasklet = NULL;
561 }
562 }
563 else {
564 TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
565 }
566
567 /*
568 step = cluster->pending_tasklets.head;
569 TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__);
570 while(step != NULL){
571 TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
572 step = step->next;
573 }
574 TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
575 TRACE("%s: done.\n", __FUNCTION__);
576 */
577
578 raw_spin_unlock_irqrestore(&cluster->crm_lock, flags);
579
580 if(tasklet) {
581 __do_lit_tasklet(tasklet, 0ul);
582 tasklet = NULL;
583 }
584 else {
585 work_to_do = 0;
586 }
587 }
588
589 //TRACE("%s: exited.\n", __FUNCTION__);
590}
591
592
593static void run_tasklets(struct task_struct* sched_task)
594{
595 crm_domain_t* cluster;
596
597#if 0
598 int task_is_rt = is_realtime(sched_task);
599 crm_domain_t* cluster;
600
601 if(is_realtime(sched_task)) {
602 cluster = task_cpu_cluster(sched_task);
603 }
604 else {
605 cluster = remote_cluster(get_cpu());
606 }
607
608 if(cluster && cluster->pending_tasklets.head != NULL) {
609 TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
610
611 do_lit_tasklets(cluster, sched_task);
612 }
613
614 if(!task_is_rt) {
615 put_cpu_no_resched();
616 }
617#else
618
619 preempt_disable();
620
621 cluster = (is_realtime(sched_task)) ?
622 task_cpu_cluster(sched_task) :
623 remote_cluster(smp_processor_id());
624
625 if(cluster && cluster->pending_tasklets.head != NULL) {
626 TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
627 do_lit_tasklets(cluster, sched_task);
628 }
629
630 preempt_enable_no_resched();
631
632#endif
633}
634
635
636static void __add_pai_tasklet(struct tasklet_struct* tasklet, crm_domain_t* cluster)
637{
638 struct tasklet_struct* step;
639
640 /*
641 step = cluster->pending_tasklets.head;
642 TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__);
643 while(step != NULL){
644 TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
645 step = step->next;
646 }
647 TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
648 TRACE("%s: done.\n", __FUNCTION__);
649 */
650
651 tasklet->next = NULL; // make sure there are no old values floating around
652
653 step = cluster->pending_tasklets.head;
654 if(step == NULL) {
655 TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid);
656 // insert at tail.
657 *(cluster->pending_tasklets.tail) = tasklet;
658 cluster->pending_tasklets.tail = &(tasklet->next);
659 }
660 else if((*(cluster->pending_tasklets.tail) != NULL) &&
661 rm_higher_prio((*(cluster->pending_tasklets.tail))->owner, tasklet->owner)) {
662 // insert at tail.
663 TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid);
664
665 *(cluster->pending_tasklets.tail) = tasklet;
666 cluster->pending_tasklets.tail = &(tasklet->next);
667 }
668 else {
669
670 //WARN_ON(1 == 1);
671
672 // insert the tasklet somewhere in the middle.
673
674 TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__);
675
676 while(step->next && rm_higher_prio(step->next->owner, tasklet->owner)) {
677 step = step->next;
678 }
679
680 // insert tasklet right before step->next.
681
682 TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1);
683
684 tasklet->next = step->next;
685 step->next = tasklet;
686
687 // patch up the head if needed.
688 if(cluster->pending_tasklets.head == step)
689 {
690 TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid);
691 cluster->pending_tasklets.head = tasklet;
692 }
693 }
694
695 /*
696 step = cluster->pending_tasklets.head;
697 TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__);
698 while(step != NULL){
699 TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
700 step = step->next;
701 }
702 TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
703 TRACE("%s: done.\n", __FUNCTION__);
704 */
705
706 // TODO: Maintain this list in priority order.
707 // tasklet->next = NULL;
708 // *(cluster->pending_tasklets.tail) = tasklet;
709 // cluster->pending_tasklets.tail = &tasklet->next;
710}
711
712static int enqueue_pai_tasklet(struct tasklet_struct* tasklet)
713{
714 crm_domain_t *cluster = NULL;
715 cpu_entry_t *targetCPU = NULL;
716 int thisCPU;
717 int runLocal = 0;
718 int runNow = 0;
719 unsigned long flags;
720
721 if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner)))
722 {
723 TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
724 return 0;
725 }
726
727 cluster = task_cpu_cluster(tasklet->owner);
728
729 raw_spin_lock_irqsave(&cluster->crm_lock, flags);
730
731 thisCPU = smp_processor_id();
732
733#if 1
734#ifdef CONFIG_SCHED_CPU_AFFINITY
735 {
736 cpu_entry_t* affinity = NULL;
737
738 // use this CPU if it is in our cluster and isn't running any RT work.
739 if(cpu_isset(thisCPU, *cluster->cpu_map) && (__get_cpu_var(crm_cpu_entries).linked == NULL)) {
740 affinity = &(__get_cpu_var(crm_cpu_entries));
741 }
742 else {
743 // this CPU is busy or shouldn't run tasklet in this cluster.
744 // look for available near by CPUs.
745 // NOTE: Affinity towards owner and not this CPU. Is this right?
746 affinity =
747 crm_get_nearest_available_cpu(cluster,
748 &per_cpu(crm_cpu_entries, task_cpu(tasklet->owner)));
749 }
750
751 targetCPU = affinity;
752 }
753#endif
754#endif
755
756 if (targetCPU == NULL) {
757 targetCPU = lowest_prio_cpu(cluster);
758 }
759
760 if (rm_higher_prio(tasklet->owner, targetCPU->linked)) {
761 if (thisCPU == targetCPU->cpu) {
762 TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__);
763 runLocal = 1;
764 runNow = 1;
765 }
766 else {
767 TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__);
768 runLocal = 0;
769 runNow = 1;
770 }
771 }
772 else {
773 runLocal = 0;
774 runNow = 0;
775 }
776
777 if(!runLocal) {
778 // enqueue the tasklet
779 __add_pai_tasklet(tasklet, cluster);
780 }
781
782 raw_spin_unlock_irqrestore(&cluster->crm_lock, flags);
783
784
785 if (runLocal /*&& runNow */) { // runNow == 1 is implied
786 TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
787 __do_lit_tasklet(tasklet, 0ul);
788 }
789 else if (runNow /*&& !runLocal */) { // runLocal == 0 is implied
790 TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu);
791 preempt(targetCPU); // need to be protected by crm_lock?
792 }
793 else {
794 TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__);
795 }
796
797 return(1); // success
798}
799
800
801#endif
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
 /* Getting schedule() right is a bit tricky. schedule() may not make any
  * assumptions on the state of the current task since it may be called for a
  * number of reasons. The reasons include a scheduler_tick() determined that it
@@ -544,7 +975,7 @@ static void crm_task_new(struct task_struct * t, int on_rq, int running)
 	cpu_entry_t* entry;
 	crm_domain_t* cluster;
 
-	TRACE("gsn edf: task new %d\n", t->pid);
+	TRACE("crm: task new %d\n", t->pid);
 
 	/* the cluster doesn't change even if t is running */
 	cluster = task_cpu_cluster(t);
@@ -650,6 +1081,10 @@ static void crm_task_exit(struct task_struct * t)
 	}
 	raw_spin_unlock_irqrestore(&cluster->crm_lock, flags);
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	flush_tasklets(cluster, t);
+#endif
+
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
 }
@@ -1467,6 +1902,11 @@ static long crm_activate_plugin(void)
 		bheap_init(&(crm[i].cpu_heap));
 		rm_domain_init(&(crm[i].domain), NULL, crm_release_jobs);
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+		crm[i].pending_tasklets.head = NULL;
+		crm[i].pending_tasklets.tail = &(crm[i].pending_tasklets.head);
+#endif
+
 		if(!zalloc_cpumask_var(&crm[i].cpu_map, GFP_ATOMIC))
 			return -ENOMEM;
 	}
@@ -1578,6 +2018,10 @@ static struct sched_plugin crm_plugin __cacheline_aligned_in_smp = {
 #ifdef CONFIG_LITMUS_SOFTIRQD
 	.set_prio_inh_klitirqd = set_priority_inheritance_klitirqd,
 	.clear_prio_inh_klitirqd = clear_priority_inheritance_klitirqd,
+#endif
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	.enqueue_pai_tasklet = enqueue_pai_tasklet,
+	.run_tasklets = run_tasklets,
 #endif
 };
 
diff --git a/litmus/sched_crm_srt.c b/litmus/sched_crm_srt.c
index 4473f35e64cd..c0004354573d 100644
--- a/litmus/sched_crm_srt.c
+++ b/litmus/sched_crm_srt.c
@@ -55,6 +55,10 @@
 #include <litmus/litmus_softirq.h>
 #endif
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+#include <linux/interrupt.h>
+#endif
+
 #ifdef CONFIG_LITMUS_NVIDIA
 #include <litmus/nvidia_info.h>
 #endif
@@ -91,6 +95,15 @@ DEFINE_PER_CPU(cpu_entry_t, crm_srt_cpu_entries);
 #define test_will_schedule(cpu) \
 	(atomic_read(&per_cpu(crm_srt_cpu_entries, cpu).will_schedule))
 
+
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+struct tasklet_head
+{
+	struct tasklet_struct *head;
+	struct tasklet_struct **tail;
+};
+#endif
+
 /*
  * In C-RM-SRT there is a crm_srt domain _per_ cluster
  * The number of clusters is dynamically determined accordingly to the
@@ -108,6 +121,12 @@ typedef struct clusterdomain {
 	struct bheap cpu_heap;
 	/* lock for this cluster */
 #define crm_srt_lock domain.ready_lock
+
+
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	struct tasklet_head pending_tasklets;
+#endif
+
 } crm_srt_domain_t;
 
 /* a crm_srt_domain per cluster; allocation is done at init/activation time */
@@ -251,7 +270,7 @@ static void preempt(cpu_entry_t *entry)
 	preempt_if_preemptable(entry->scheduled, entry->cpu);
 }
 
-/* requeue - Put an unlinked task into gsn-edf domain.
+/* requeue - Put an unlinked task into c-rm-srt domain.
  * Caller must hold crm_srt_lock.
 */
 static noinline void requeue(struct task_struct* task)
@@ -395,6 +414,415 @@ static void crm_srt_tick(struct task_struct* t)
 	}
 }
 
417
418
419
420
421
422
423
424
425
426
427#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
428
429
430static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
431{
432 if (!atomic_read(&tasklet->count)) {
433 sched_trace_tasklet_begin(tasklet->owner);
434
435 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state))
436 {
437 BUG();
438 }
439 TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n", __FUNCTION__, tasklet->owner->pid, flushed);
440 tasklet->func(tasklet->data);
441 tasklet_unlock(tasklet);
442
443 sched_trace_tasklet_end(tasklet->owner, flushed);
444 }
445 else {
446 BUG();
447 }
448}
449
450
451static void __extract_tasklets(crm_srt_domain_t* cluster, struct task_struct* task, struct tasklet_head* task_tasklets)
452{
453 struct tasklet_struct* step;
454 struct tasklet_struct* tasklet;
455 struct tasklet_struct* prev;
456
457 task_tasklets->head = NULL;
458 task_tasklets->tail = &(task_tasklets->head);
459
460 prev = NULL;
461 for(step = cluster->pending_tasklets.head; step != NULL; step = step->next)
462 {
463 if(step->owner == task)
464 {
465 TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid);
466
467 tasklet = step;
468
469 if(prev) {
470 prev->next = tasklet->next;
471 }
472 else if(cluster->pending_tasklets.head == tasklet) {
473 // we're at the head.
474 cluster->pending_tasklets.head = tasklet->next;
475 }
476
477 if(cluster->pending_tasklets.tail == &tasklet) {
478 // we're at the tail
479 if(prev) {
480 cluster->pending_tasklets.tail = &prev;
481 }
482 else {
483 cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head);
484 }
485 }
486
487 tasklet->next = NULL;
488 *(task_tasklets->tail) = tasklet;
489 task_tasklets->tail = &(tasklet->next);
490 }
491 else {
492 prev = step;
493 }
494 }
495}
496
497static void flush_tasklets(crm_srt_domain_t* cluster, struct task_struct* task)
498{
499 unsigned long flags;
500 struct tasklet_head task_tasklets;
501 struct tasklet_struct* step;
502
503 raw_spin_lock_irqsave(&cluster->crm_srt_lock, flags);
504 __extract_tasklets(cluster, task, &task_tasklets);
505 raw_spin_unlock_irqrestore(&cluster->crm_srt_lock, flags);
506
507 if(cluster->pending_tasklets.head != NULL) {
508 TRACE("%s: Flushing tasklets for %d...\n", __FUNCTION__, task->pid);
509 }
510
511 // now execute any flushed tasklets.
512 for(step = cluster->pending_tasklets.head; step != NULL; /**/)
513 {
514 struct tasklet_struct* temp = step->next;
515
516 step->next = NULL;
517 __do_lit_tasklet(step, 1ul);
518
519 step = temp;
520 }
521}
522
523
524static void do_lit_tasklets(crm_srt_domain_t* cluster, struct task_struct* sched_task)
525{
526 int work_to_do = 1;
527 struct tasklet_struct *tasklet = NULL;
528 //struct tasklet_struct *step;
529 unsigned long flags;
530
531 while(work_to_do) {
532 // remove tasklet at head of list if it has higher priority.
533 raw_spin_lock_irqsave(&cluster->crm_srt_lock, flags);
534
535 /*
536 step = cluster->pending_tasklets.head;
537 TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__);
538 while(step != NULL){
539 TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
540 step = step->next;
541 }
542 TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
543 TRACE("%s: done.\n", __FUNCTION__);
544 */
545
546 if(cluster->pending_tasklets.head != NULL) {
547 // remove tasklet at head.
548 tasklet = cluster->pending_tasklets.head;
549
550 if(rm_srt_higher_prio(tasklet->owner, sched_task)) {
551
552 if(NULL == tasklet->next) {
553 // tasklet is at the head, list only has one element
554 TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
555 cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head);
556 }
557
558 // remove the tasklet from the queue
559 cluster->pending_tasklets.head = tasklet->next;
560
561 TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
562 }
563 else {
564 TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id());
565 tasklet = NULL;
566 }
567 }
568 else {
569 TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
570 }
571
572 /*
573 step = cluster->pending_tasklets.head;
574 TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__);
575 while(step != NULL){
576 TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
577 step = step->next;
578 }
579 TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
580 TRACE("%s: done.\n", __FUNCTION__);
581 */
582
583 raw_spin_unlock_irqrestore(&cluster->crm_srt_lock, flags);
584
585 if(tasklet) {
586 __do_lit_tasklet(tasklet, 0ul);
587 tasklet = NULL;
588 }
589 else {
590 work_to_do = 0;
591 }
592 }
593
594 //TRACE("%s: exited.\n", __FUNCTION__);
595}
596
597
598static void run_tasklets(struct task_struct* sched_task)
599{
600 crm_srt_domain_t* cluster;
601
602#if 0
603 int task_is_rt = is_realtime(sched_task);
604 crm_srt_domain_t* cluster;
605
606 if(is_realtime(sched_task)) {
607 cluster = task_cpu_cluster(sched_task);
608 }
609 else {
610 cluster = remote_cluster(get_cpu());
611 }
612
613 if(cluster && cluster->pending_tasklets.head != NULL) {
614 TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
615
616 do_lit_tasklets(cluster, sched_task);
617 }
618
619 if(!task_is_rt) {
620 put_cpu_no_resched();
621 }
622#else
623
624 preempt_disable();
625
626 cluster = (is_realtime(sched_task)) ?
627 task_cpu_cluster(sched_task) :
628 remote_cluster(smp_processor_id());
629
630 if(cluster && cluster->pending_tasklets.head != NULL) {
631 TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
632 do_lit_tasklets(cluster, sched_task);
633 }
634
635 preempt_enable_no_resched();
636
637#endif
638}
639
640
641static void __add_pai_tasklet(struct tasklet_struct* tasklet, crm_srt_domain_t* cluster)
642{
643 struct tasklet_struct* step;
644
645 /*
646 step = cluster->pending_tasklets.head;
647 TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__);
648 while(step != NULL){
649 TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
650 step = step->next;
651 }
652 TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
653 TRACE("%s: done.\n", __FUNCTION__);
654 */
655
656 tasklet->next = NULL; // make sure there are no old values floating around
657
658 step = cluster->pending_tasklets.head;
659 if(step == NULL) {
660 TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid);
661 // insert at tail.
662 *(cluster->pending_tasklets.tail) = tasklet;
663 cluster->pending_tasklets.tail = &(tasklet->next);
664 }
665 else if((*(cluster->pending_tasklets.tail) != NULL) &&
666 rm_srt_higher_prio((*(cluster->pending_tasklets.tail))->owner, tasklet->owner)) {
667 // insert at tail.
668 TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid);
669
670 *(cluster->pending_tasklets.tail) = tasklet;
671 cluster->pending_tasklets.tail = &(tasklet->next);
672 }
673 else {
674
675 //WARN_ON(1 == 1);
676
677 // insert the tasklet somewhere in the middle.
678
679 TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__);
680
681 while(step->next && rm_srt_higher_prio(step->next->owner, tasklet->owner)) {
682 step = step->next;
683 }
684
685 // insert tasklet right before step->next.
686
687 TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1);
688
689 tasklet->next = step->next;
690 step->next = tasklet;
691
692 // patch up the head if needed.
693 if(cluster->pending_tasklets.head == step)
694 {
695 TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid);
696 cluster->pending_tasklets.head = tasklet;
697 }
698 }
699
700 /*
701 step = cluster->pending_tasklets.head;
702 TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__);
703 while(step != NULL){
704 TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
705 step = step->next;
706 }
707 TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
708 TRACE("%s: done.\n", __FUNCTION__);
709 */
710
711 // TODO: Maintain this list in priority order.
712 // tasklet->next = NULL;
713 // *(cluster->pending_tasklets.tail) = tasklet;
714 // cluster->pending_tasklets.tail = &tasklet->next;
715}
716
717static int enqueue_pai_tasklet(struct tasklet_struct* tasklet)
718{
719 crm_srt_domain_t *cluster = NULL;
720 cpu_entry_t *targetCPU = NULL;
721 int thisCPU;
722 int runLocal = 0;
723 int runNow = 0;
724 unsigned long flags;
725
726 if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner)))
727 {
728 TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
729 return 0;
730 }
731
732 cluster = task_cpu_cluster(tasklet->owner);
733
734 raw_spin_lock_irqsave(&cluster->crm_srt_lock, flags);
735
736 thisCPU = smp_processor_id();
737
738#if 1
739#ifdef CONFIG_SCHED_CPU_AFFINITY
740 {
741 cpu_entry_t* affinity = NULL;
742
743 // use this CPU if it is in our cluster and isn't running any RT work.
744 if(cpu_isset(thisCPU, *cluster->cpu_map) && (__get_cpu_var(crm_srt_cpu_entries).linked == NULL)) {
745 affinity = &(__get_cpu_var(crm_srt_cpu_entries));
746 }
747 else {
748 // this CPU is busy or shouldn't run tasklet in this cluster.
749 // look for available near by CPUs.
750 // NOTE: Affinity towards owner and not this CPU. Is this right?
751 affinity =
752 crm_srt_get_nearest_available_cpu(cluster,
753 &per_cpu(crm_srt_cpu_entries, task_cpu(tasklet->owner)));
754 }
755
756 targetCPU = affinity;
757 }
758#endif
759#endif
760
761 if (targetCPU == NULL) {
762 targetCPU = lowest_prio_cpu(cluster);
763 }
764
765 if (rm_srt_higher_prio(tasklet->owner, targetCPU->linked)) {
766 if (thisCPU == targetCPU->cpu) {
767 TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__);
768 runLocal = 1;
769 runNow = 1;
770 }
771 else {
772 TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__);
773 runLocal = 0;
774 runNow = 1;
775 }
776 }
777 else {
778 runLocal = 0;
779 runNow = 0;
780 }
781
782 if(!runLocal) {
783 // enqueue the tasklet
784 __add_pai_tasklet(tasklet, cluster);
785 }
786
787 raw_spin_unlock_irqrestore(&cluster->crm_srt_lock, flags);
788
789
790 if (runLocal /*&& runNow */) { // runNow == 1 is implied
791 TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
792 __do_lit_tasklet(tasklet, 0ul);
793 }
794 else if (runNow /*&& !runLocal */) { // runLocal == 0 is implied
795 TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu);
796 preempt(targetCPU); // need to be protected by crm_srt_lock?
797 }
798 else {
799 TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__);
800 }
801
802 return(1); // success
803}
804
805
806#endif
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
 /* Getting schedule() right is a bit tricky. schedule() may not make any
  * assumptions on the state of the current task since it may be called for a
  * number of reasons. The reasons include a scheduler_tick() determined that it
@@ -544,7 +972,7 @@ static void crm_srt_task_new(struct task_struct * t, int on_rq, int running)
 	cpu_entry_t* entry;
 	crm_srt_domain_t* cluster;
 
-	TRACE("gsn edf: task new %d\n", t->pid);
+	TRACE("crm srt: task new %d\n", t->pid);
 
 	/* the cluster doesn't change even if t is running */
 	cluster = task_cpu_cluster(t);
@@ -650,6 +1078,10 @@ static void crm_srt_task_exit(struct task_struct * t)
 	}
 	raw_spin_unlock_irqrestore(&cluster->crm_srt_lock, flags);
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	flush_tasklets(cluster, t);
+#endif
+
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
 }
@@ -1467,6 +1899,11 @@ static long crm_srt_activate_plugin(void)
 		bheap_init(&(crm_srt[i].cpu_heap));
 		rm_srt_domain_init(&(crm_srt[i].domain), NULL, crm_srt_release_jobs);
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+		crm_srt[i].pending_tasklets.head = NULL;
+		crm_srt[i].pending_tasklets.tail = &(crm_srt[i].pending_tasklets.head);
+#endif
+
 		if(!zalloc_cpumask_var(&crm_srt[i].cpu_map, GFP_ATOMIC))
 			return -ENOMEM;
 	}
@@ -1578,6 +2015,10 @@ static struct sched_plugin crm_srt_plugin __cacheline_aligned_in_smp = {
 #ifdef CONFIG_LITMUS_SOFTIRQD
 	.set_prio_inh_klitirqd = set_priority_inheritance_klitirqd,
 	.clear_prio_inh_klitirqd = clear_priority_inheritance_klitirqd,
+#endif
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	.enqueue_pai_tasklet = enqueue_pai_tasklet,
+	.run_tasklets = run_tasklets,
 #endif
 };
 
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index ac7685fe69f0..b40ff7ba4f0e 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -35,6 +35,10 @@
35#include <litmus/litmus_softirq.h> 35#include <litmus/litmus_softirq.h>
36#endif 36#endif
37 37
38#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
39#include <linux/interrupt.h>
40#endif
41
38#ifdef CONFIG_LITMUS_NVIDIA 42#ifdef CONFIG_LITMUS_NVIDIA
39#include <litmus/nvidia_info.h> 43#include <litmus/nvidia_info.h>
40#endif 44#endif
@@ -126,6 +130,16 @@ static struct bheap gsnedf_cpu_heap;
126static rt_domain_t gsnedf; 130static rt_domain_t gsnedf;
127#define gsnedf_lock (gsnedf.ready_lock) 131#define gsnedf_lock (gsnedf.ready_lock)
128 132
133#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
134struct tasklet_head
135{
136 struct tasklet_struct *head;
137 struct tasklet_struct **tail;
138};
139
140struct tasklet_head gsnedf_pending_tasklets;
141#endif
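The pending-tasklet queue declared above is a singly linked list addressed through a head pointer and a tail pointer-to-pointer, the same shape as the mainline struct tasklet_head: an empty queue has head == NULL and tail == &head, and an append always writes through *tail. Below is a minimal user-space sketch of that invariant; the node/queue names and the whole program are illustrative only, not part of this patch.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	int id;
	struct node *next;
};

struct queue {
	struct node *head;   /* first element, or NULL when empty     */
	struct node **tail;  /* &head when empty, else &last->next    */
};

static void queue_init(struct queue *q)
{
	q->head = NULL;
	q->tail = &q->head;
}

static void queue_append(struct queue *q, struct node *n)
{
	n->next = NULL;
	*(q->tail) = n;        /* link in after the current last element */
	q->tail = &(n->next);  /* tail now refers to the new last 'next' */
}

int main(void)
{
	struct queue q;
	struct node a = { .id = 1 }, b = { .id = 2 };

	queue_init(&q);
	assert(q.tail == &q.head);      /* empty-queue invariant        */

	queue_append(&q, &a);
	queue_append(&q, &b);
	assert(q.tail == &(b.next));    /* tail tracks the last element */

	for (struct node *s = q.head; s; s = s->next)
		printf("node %d\n", s->id);
	return 0;
}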
142
129 143
130/* Uncomment this if you want to see all scheduling decisions in the 144/* Uncomment this if you want to see all scheduling decisions in the
131 * TRACE() log. 145 * TRACE() log.
@@ -393,6 +407,410 @@ static void gsnedf_tick(struct task_struct* t)
393 } 407 }
394} 408}
395 409
410
411
412
413
414
415
416
417
418
419
420
421#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
422
423
424static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
425{
426 if (!atomic_read(&tasklet->count)) {
427 sched_trace_tasklet_begin(tasklet->owner);
428
429 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state))
430 {
431 BUG();
432 }
433 TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n", __FUNCTION__, tasklet->owner->pid, flushed);
434 tasklet->func(tasklet->data);
435 tasklet_unlock(tasklet);
436
437 sched_trace_tasklet_end(tasklet->owner, flushed);
438 }
439 else {
440 BUG();
441 }
442}
443
444
445static void __extract_tasklets(struct task_struct* task, struct tasklet_head* task_tasklets)
446{
447 struct tasklet_struct* step;
448 struct tasklet_struct* tasklet;
449 struct tasklet_struct* prev;
450
451 task_tasklets->head = NULL;
452 task_tasklets->tail = &(task_tasklets->head);
453
454 prev = NULL;
455 for(step = gsnedf_pending_tasklets.head; step != NULL; step = step->next)
456 {
457 if(step->owner == task)
458 {
459 TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid);
460
461 tasklet = step;
462
463 if(prev) {
464 prev->next = tasklet->next;
465 }
466 else if(gsnedf_pending_tasklets.head == tasklet) {
467 // we're at the head.
468 gsnedf_pending_tasklets.head = tasklet->next;
469 }
470
471			if(gsnedf_pending_tasklets.tail == &(tasklet->next)) {
472 // we're at the tail
473 if(prev) {
474					gsnedf_pending_tasklets.tail = &(prev->next);
475 }
476 else {
477 gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head);
478 }
479 }
480
481 tasklet->next = NULL;
482 *(task_tasklets->tail) = tasklet;
483 task_tasklets->tail = &(tasklet->next);
484 }
485 else {
486 prev = step;
487 }
488 }
489}
490
491static void flush_tasklets(struct task_struct* task)
492{
493 unsigned long flags;
494 struct tasklet_head task_tasklets;
495 struct tasklet_struct* step;
496
497 raw_spin_lock_irqsave(&gsnedf_lock, flags);
498 __extract_tasklets(task, &task_tasklets);
499 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
500
501	if(task_tasklets.head != NULL) {
502 TRACE("%s: Flushing tasklets for %d...\n", __FUNCTION__, task->pid);
503 }
504
505 // now execute any flushed tasklets.
506	for(step = task_tasklets.head; step != NULL; /**/)
507 {
508 struct tasklet_struct* temp = step->next;
509
510 step->next = NULL;
511 __do_lit_tasklet(step, 1ul);
512
513 step = temp;
514 }
515}
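__extract_tasklets() and flush_tasklets() above unlink a task's tasklets from the shared queue under the lock and then run them with the flushed flag set. The delicate part is keeping the tail pointer valid when the removed element happens to be the last one: it must be repointed at the predecessor's next field, or back at the head for a now-empty queue. A self-contained user-space sketch of that unlink-with-tail-fixup step, using illustrative node/queue types rather than the patch's own:

#include <assert.h>
#include <stddef.h>

struct node {
	int owner;
	struct node *next;
};

struct queue {
	struct node *head;
	struct node **tail;   /* &head when empty, else &last->next */
};

/* Unlink every node whose owner matches 'owner', keeping 'tail' valid,
 * and collect the removed nodes in 'out' in their original order. */
static void extract_owner(struct queue *q, int owner, struct queue *out)
{
	struct node *step, *prev = NULL;

	out->head = NULL;
	out->tail = &out->head;

	for (step = q->head; step != NULL; ) {
		struct node *next = step->next;

		if (step->owner == owner) {
			/* unlink from the source queue */
			if (prev)
				prev->next = next;
			else
				q->head = next;

			/* if we removed the last node, retarget the tail */
			if (q->tail == &(step->next))
				q->tail = prev ? &(prev->next) : &q->head;

			/* append to the output queue */
			step->next = NULL;
			*(out->tail) = step;
			out->tail = &(step->next);
		} else {
			prev = step;
		}
		step = next;
	}
}

int main(void)
{
	struct node a = { .owner = 1 }, b = { .owner = 2 }, c = { .owner = 1 };
	struct queue q = { .head = &a, .tail = &(c.next) };
	struct queue flushed;

	a.next = &b; b.next = &c; c.next = NULL;

	extract_owner(&q, 1, &flushed);

	assert(q.head == &b && q.tail == &(b.next));              /* b remains  */
	assert(flushed.head == &a && flushed.tail == &(c.next));  /* a, c moved */
	return 0;
}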
516
517
518static void do_lit_tasklets(struct task_struct* sched_task)
519{
520 int work_to_do = 1;
521 struct tasklet_struct *tasklet = NULL;
522 //struct tasklet_struct *step;
523 unsigned long flags;
524
525 while(work_to_do) {
526 // remove tasklet at head of list if it has higher priority.
527 raw_spin_lock_irqsave(&gsnedf_lock, flags);
528
529 /*
530 step = gsnedf_pending_tasklets.head;
531 TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__);
532 while(step != NULL){
533 TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
534 step = step->next;
535 }
536 TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(gsnedf_pending_tasklets.tail), (*(gsnedf_pending_tasklets.tail) != NULL) ? (*(gsnedf_pending_tasklets.tail))->owner->pid : -1);
537 TRACE("%s: done.\n", __FUNCTION__);
538 */
539
540
541 if(gsnedf_pending_tasklets.head != NULL) {
542 // remove tasklet at head.
543 tasklet = gsnedf_pending_tasklets.head;
544
545 if(edf_higher_prio(tasklet->owner, sched_task)) {
546
547 if(NULL == tasklet->next) {
548 // tasklet is at the head, list only has one element
549 TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
550 gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head);
551 }
552
553 // remove the tasklet from the queue
554 gsnedf_pending_tasklets.head = tasklet->next;
555
556 TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
557 }
558 else {
559 TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id());
560 tasklet = NULL;
561 }
562 }
563 else {
564 TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
565 }
566
567
568 /*
569 step = gsnedf_pending_tasklets.head;
570 TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__);
571 while(step != NULL){
572 TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
573 step = step->next;
574 }
575 TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(gsnedf_pending_tasklets.tail), (*(gsnedf_pending_tasklets.tail) != NULL) ? (*(gsnedf_pending_tasklets.tail))->owner->pid : -1);
576 TRACE("%s: done.\n", __FUNCTION__);
577 */
578
579
580 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
581
582 if(tasklet) {
583 __do_lit_tasklet(tasklet, 0ul);
584 tasklet = NULL;
585 }
586 else {
587 work_to_do = 0;
588 }
589 }
590
591 //TRACE("%s: exited.\n", __FUNCTION__);
592}
593
594
595static void run_tasklets(struct task_struct* sched_task)
596{
597#if 0
598 int task_is_rt = is_realtime(sched_task);
599 cedf_domain_t* cluster;
600
601 if(is_realtime(sched_task)) {
602 cluster = task_cpu_cluster(sched_task);
603 }
604 else {
605 cluster = remote_cluster(get_cpu());
606 }
607
608 if(cluster && gsnedf_pending_tasklets.head != NULL) {
609 TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
610
611 do_lit_tasklets(cluster, sched_task);
612 }
613
614 if(!task_is_rt) {
615 put_cpu_no_resched();
616 }
617#else
618
619 preempt_disable();
620
621 if(gsnedf_pending_tasklets.head != NULL) {
622 TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
623 do_lit_tasklets(sched_task);
624 }
625
626 preempt_enable_no_resched();
627
628#endif
629}
630
631
632static void __add_pai_tasklet(struct tasklet_struct* tasklet)
633{
634 struct tasklet_struct* step;
635
636 /*
637 step = gsnedf_pending_tasklets.head;
638 TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__);
639 while(step != NULL){
640 TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
641 step = step->next;
642 }
643 TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(gsnedf_pending_tasklets.tail), (*(gsnedf_pending_tasklets.tail) != NULL) ? (*(gsnedf_pending_tasklets.tail))->owner->pid : -1);
644 TRACE("%s: done.\n", __FUNCTION__);
645 */
646
647
648 tasklet->next = NULL; // make sure there are no old values floating around
649
650 step = gsnedf_pending_tasklets.head;
651 if(step == NULL) {
652 TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid);
653 // insert at tail.
654 *(gsnedf_pending_tasklets.tail) = tasklet;
655 gsnedf_pending_tasklets.tail = &(tasklet->next);
656 }
657 else if((*(gsnedf_pending_tasklets.tail) != NULL) &&
658 edf_higher_prio((*(gsnedf_pending_tasklets.tail))->owner, tasklet->owner)) {
659 // insert at tail.
660 TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid);
661
662 *(gsnedf_pending_tasklets.tail) = tasklet;
663 gsnedf_pending_tasklets.tail = &(tasklet->next);
664 }
665	else {
666		// insert the tasklet somewhere in the middle, keeping the
667		// queue sorted by decreasing priority.
668		TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__);
669 
670		if(edf_higher_prio(tasklet->owner, gsnedf_pending_tasklets.head->owner)) {
671			// new highest-priority tasklet: it becomes the new head.
672			TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid);
673			tasklet->next = gsnedf_pending_tasklets.head;
674			gsnedf_pending_tasklets.head = tasklet;
675		}
676		else {
677			while(step->next && edf_higher_prio(step->next->owner, tasklet->owner)) {
678				step = step->next;
679			}
680 
681			// insert tasklet right after step.
682			TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1);
683			tasklet->next = step->next;
684			step->next = tasklet;
685 
686			// patch up the tail if the tasklet became the last element.
687			if(tasklet->next == NULL) {
688				gsnedf_pending_tasklets.tail = &(tasklet->next);
689			}
690		}
691	}
691
692 /*
693 step = gsnedf_pending_tasklets.head;
694 TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__);
695 while(step != NULL){
696 TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
697 step = step->next;
698 }
699 TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(gsnedf_pending_tasklets.tail), (*(gsnedf_pending_tasklets.tail) != NULL) ? (*(gsnedf_pending_tasklets.tail))->owner->pid : -1);
700 TRACE("%s: done.\n", __FUNCTION__);
701 */
702
703 // TODO: Maintain this list in priority order.
704 // tasklet->next = NULL;
705 // *(gsnedf_pending_tasklets.tail) = tasklet;
706 // gsnedf_pending_tasklets.tail = &tasklet->next;
707}
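__add_pai_tasklet() keeps the pending queue sorted by priority so that do_lit_tasklets() only ever needs to examine the head. The same insert-in-descending-order walk, reduced to a user-space sketch in which a plain integer comparison stands in for edf_higher_prio(); all names here are illustrative and not part of the patch:

#include <assert.h>
#include <stddef.h>

struct node {
	int prio;            /* larger value == higher priority */
	struct node *next;
};

struct queue {
	struct node *head;
	struct node **tail;  /* &head when empty, else &last->next */
};

/* Insert 'n' so the queue stays sorted in descending priority. */
static void insert_by_prio(struct queue *q, struct node *n)
{
	struct node *step = q->head;

	if (step == NULL || n->prio > step->prio) {
		/* empty queue, or new highest priority: 'n' becomes the head */
		n->next = q->head;
		q->head = n;
		if (n->next == NULL)
			q->tail = &(n->next);
	} else {
		/* walk past every node that still outranks 'n' */
		while (step->next && step->next->prio > n->prio)
			step = step->next;

		n->next = step->next;
		step->next = n;
		if (n->next == NULL)          /* appended at the very end */
			q->tail = &(n->next);
	}
}

int main(void)
{
	struct node a = { .prio = 5 }, b = { .prio = 9 }, c = { .prio = 7 };
	struct queue q = { .head = NULL, .tail = &q.head };

	insert_by_prio(&q, &a);   /* 5       */
	insert_by_prio(&q, &b);   /* 9, 5    */
	insert_by_prio(&q, &c);   /* 9, 7, 5 */

	assert(q.head == &b && b.next == &c && c.next == &a);
	assert(q.tail == &(a.next));
	return 0;
}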
708
709static int enqueue_pai_tasklet(struct tasklet_struct* tasklet)
710{
711 cpu_entry_t *targetCPU = NULL;
712 int thisCPU;
713 int runLocal = 0;
714 int runNow = 0;
715 unsigned long flags;
716
717 if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner)))
718 {
719		TRACE("%s: No real-time owner associated with this tasklet!\n", __FUNCTION__);
720 return 0;
721 }
722
723
724 raw_spin_lock_irqsave(&gsnedf_lock, flags);
725
726 thisCPU = smp_processor_id();
727
728#if 1
729#ifdef CONFIG_SCHED_CPU_AFFINITY
730 {
731 cpu_entry_t* affinity = NULL;
732
733		// use this CPU if it isn't the release master and isn't running any RT work.
734 if(
735#ifdef CONFIG_RELEASE_MASTER
736 (thisCPU != gsnedf.release_master) &&
737#endif
738 (__get_cpu_var(gsnedf_cpu_entries).linked == NULL)) {
739 affinity = &(__get_cpu_var(gsnedf_cpu_entries));
740 }
741 else {
742			// this CPU is busy or is the release master, so it shouldn't run the tasklet.
743			// look for an available nearby CPU.
744			// NOTE: affinity is computed with respect to the owner's CPU, not this CPU. Is this right?
745 affinity =
746 gsnedf_get_nearest_available_cpu(
747 &per_cpu(gsnedf_cpu_entries, task_cpu(tasklet->owner)));
748 }
749
750 targetCPU = affinity;
751 }
752#endif
753#endif
754
755 if (targetCPU == NULL) {
756 targetCPU = lowest_prio_cpu();
757 }
758
759 if (edf_higher_prio(tasklet->owner, targetCPU->linked)) {
760 if (thisCPU == targetCPU->cpu) {
761 TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__);
762 runLocal = 1;
763 runNow = 1;
764 }
765 else {
766 TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__);
767 runLocal = 0;
768 runNow = 1;
769 }
770 }
771 else {
772 runLocal = 0;
773 runNow = 0;
774 }
775
776 if(!runLocal) {
777 // enqueue the tasklet
778 __add_pai_tasklet(tasklet);
779 }
780
781 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
782
783
784 if (runLocal /*&& runNow */) { // runNow == 1 is implied
785 TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
786 __do_lit_tasklet(tasklet, 0ul);
787 }
788 else if (runNow /*&& !runLocal */) { // runLocal == 0 is implied
789 TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu);
790		preempt(targetCPU);  // does this need to be protected by gsnedf_lock?
791 }
792 else {
793 TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__);
794 }
795
796 return(1); // success
797}
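enqueue_pai_tasklet() boils down to a three-way decision: run the tasklet immediately on this CPU, queue it and preempt the chosen remote CPU, or queue it and defer until priorities allow. A compact restatement of that policy as a pure function; the names below are hypothetical, and higher_prio() merely models edf_higher_prio():

#include <assert.h>
#include <stdbool.h>

enum pai_action {
	PAI_RUN_LOCAL,   /* execute the tasklet right here, right now      */
	PAI_RUN_REMOTE,  /* queue it and kick the target CPU via preempt() */
	PAI_DEFER,       /* queue it; it runs when priorities allow        */
};

/* Model of the priority test; in the plugin this is edf_higher_prio(). */
static bool higher_prio(int owner_prio, int linked_prio)
{
	return owner_prio > linked_prio;
}

static enum pai_action classify(int owner_prio, int target_linked_prio,
				int this_cpu, int target_cpu)
{
	if (!higher_prio(owner_prio, target_linked_prio))
		return PAI_DEFER;                 /* owner doesn't outrank the target */
	return (this_cpu == target_cpu) ? PAI_RUN_LOCAL : PAI_RUN_REMOTE;
}

int main(void)
{
	/* owner outranks the task linked to this CPU -> run locally */
	assert(classify(10, 3, 0, 0) == PAI_RUN_LOCAL);
	/* owner outranks a remote CPU's linked task -> preempt that CPU */
	assert(classify(10, 3, 0, 2) == PAI_RUN_REMOTE);
	/* owner does not outrank anything -> stay queued */
	assert(classify(2, 8, 0, 2) == PAI_DEFER);
	return 0;
}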
798
799
800#endif
801
802
803
804
805
806
807
808
809
810
811
812
813
396/* Getting schedule() right is a bit tricky. schedule() may not make any 814/* Getting schedule() right is a bit tricky. schedule() may not make any
397 * assumptions on the state of the current task since it may be called for a 815 * assumptions on the state of the current task since it may be called for a
398 * number of reasons. The reasons include a scheduler_tick() determined that it 816 * number of reasons. The reasons include a scheduler_tick() determined that it
@@ -592,7 +1010,7 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
592static void gsnedf_task_wake_up(struct task_struct *task) 1010static void gsnedf_task_wake_up(struct task_struct *task)
593{ 1011{
594 unsigned long flags; 1012 unsigned long flags;
595 lt_t now; 1013 //lt_t now;
596 1014
597 TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); 1015 TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
598 1016
@@ -660,6 +1078,10 @@ static void gsnedf_task_exit(struct task_struct * t)
660 } 1078 }
661 raw_spin_unlock_irqrestore(&gsnedf_lock, flags); 1079 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
662 1080
1081#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
1082 flush_tasklets(t);
1083#endif
1084
663 BUG_ON(!is_realtime(t)); 1085 BUG_ON(!is_realtime(t));
664 TRACE_TASK(t, "RIP\n"); 1086 TRACE_TASK(t, "RIP\n");
665} 1087}
@@ -1602,6 +2024,11 @@ static long gsnedf_activate_plugin(void)
1602 } 2024 }
1603#endif 2025#endif
1604 } 2026 }
2027
2028#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
2029 gsnedf_pending_tasklets.head = NULL;
2030 gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head);
2031#endif
1605 2032
1606#ifdef CONFIG_LITMUS_SOFTIRQD 2033#ifdef CONFIG_LITMUS_SOFTIRQD
1607 spawn_klitirqd(NULL); 2034 spawn_klitirqd(NULL);
@@ -1636,7 +2063,10 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
1636 .set_prio_inh_klitirqd = set_priority_inheritance_klitirqd, 2063 .set_prio_inh_klitirqd = set_priority_inheritance_klitirqd,
1637 .clear_prio_inh_klitirqd = clear_priority_inheritance_klitirqd, 2064 .clear_prio_inh_klitirqd = clear_priority_inheritance_klitirqd,
1638#endif 2065#endif
1639 2066#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
2067 .enqueue_pai_tasklet = enqueue_pai_tasklet,
2068 .run_tasklets = run_tasklets,
2069#endif
1640}; 2070};
1641 2071
1642 2072