-rw-r--r--  include/litmus/sched_plugin.h  |  2
-rw-r--r--  include/litmus/trace.h         |  6
-rw-r--r--  litmus/sched_cedf.c            | 71
-rw-r--r--  litmus/sched_cfifo.c           | 14
-rw-r--r--  litmus/sched_crm.c             | 72
-rw-r--r--  litmus/sched_crm_srt.c         | 14
-rw-r--r--  litmus/sched_gsn_edf.c         | 15
7 files changed, 151 insertions(+), 43 deletions(-)
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index 3fc64f832fef..8fdf05dd7cd3 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -11,9 +11,7 @@
 #include <litmus/locking.h>
 #endif
 
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
 #include <linux/interrupt.h>
-#endif
 
 /************************ setup/tear down ********************/
 
diff --git a/include/litmus/trace.h b/include/litmus/trace.h
index aa3ee4a6757b..09d409b60268 100644
--- a/include/litmus/trace.h
+++ b/include/litmus/trace.h
@@ -114,4 +114,10 @@ feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu)
 
 #endif
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+#define TS_NV_SCHED_BOTISR_START TIMESTAMP(206)
+#define TS_NV_SCHED_BOTISR_END   TIMESTAMP(207)
+#endif
+
+
 #endif /* !_SYS_TRACE_H_ */
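
The two Feather-Trace events added above (IDs 206 and 207) are consumed by the scheduler plugins below, where they bracket one pass of PAI bottom-half tasklet processing. A minimal usage sketch, assuming only the macros defined in this header (the function name is illustrative, not part of the patch):

	#include <litmus/trace.h>

	/* hypothetical helper: mirrors the pattern added to do_lit_tasklets() below */
	static void pai_botisr_pass_sketch(void)
	{
		TS_NV_SCHED_BOTISR_START;	/* event 206: begin one bottom-half ISR pass */

		/* ... dequeue and run one pending PAI tasklet ... */

		TS_NV_SCHED_BOTISR_END;		/* event 207: end of the pass */
	}
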
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 02106f455c0f..9aa5822c3834 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -57,6 +57,7 @@
 
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
 #include <linux/interrupt.h>
+#include <litmus/trace.h>
 #endif
 
 #ifdef CONFIG_LITMUS_NVIDIA
@@ -432,17 +433,24 @@ static void cedf_tick(struct task_struct* t)
 static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
 {
 	if (!atomic_read(&tasklet->count)) {
-		sched_trace_tasklet_begin(tasklet->owner);
+		if(tasklet->owner) {
+			sched_trace_tasklet_begin(tasklet->owner);
+		}
 
 		if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state))
 		{
 			BUG();
 		}
-		TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n", __FUNCTION__, tasklet->owner->pid, flushed);
+		TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n",
+			  __FUNCTION__,
+			  (tasklet->owner) ? tasklet->owner->pid : -1,
+			  (tasklet->owner) ? 0 : 1);
 		tasklet->func(tasklet->data);
 		tasklet_unlock(tasklet);
 
-		sched_trace_tasklet_end(tasklet->owner, flushed);
+		if(tasklet->owner) {
+			sched_trace_tasklet_end(tasklet->owner, flushed);
+		}
 	}
 	else {
 		BUG();
@@ -498,6 +506,7 @@ static void __extract_tasklets(cedf_domain_t* cluster, struct task_struct* task,
 
 static void flush_tasklets(cedf_domain_t* cluster, struct task_struct* task)
 {
+#if 0
 	unsigned long flags;
 	struct tasklet_head task_tasklets;
 	struct tasklet_struct* step;
@@ -520,6 +529,27 @@ static void flush_tasklets(cedf_domain_t* cluster, struct task_struct* task)
 
 		step = temp;
 	}
+#endif
+
+	// lazy flushing.
+	// just change ownership to NULL and let an idle processor
+	// take care of it. :P
+
+	struct tasklet_struct* step;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
+
+	for(step = cluster->pending_tasklets.head; step != NULL; step = step->next)
+	{
+		if(step->owner == task)
+		{
+			TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid);
+			step->owner = NULL;
+		}
+	}
+
+	raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags);
 }
 
 
@@ -531,6 +561,9 @@ static void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_ta
 	unsigned long flags;
 
 	while(work_to_do) {
+
+		TS_NV_SCHED_BOTISR_START;
+
 		// remove tasklet at head of list if it has higher priority.
 		raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
 
@@ -544,7 +577,6 @@ static void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_ta
 	TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
 	TRACE("%s: done.\n", __FUNCTION__);
 	*/
-
 
 		if(cluster->pending_tasklets.head != NULL) {
 			// remove tasklet at head.
@@ -554,17 +586,17 @@ static void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_ta
 
 			if(NULL == tasklet->next) {
 				// tasklet is at the head, list only has one element
-				TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
+				TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, (tasklet->owner) ? tasklet->owner->pid : -1);
 				cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head);
 			}
 
 			// remove the tasklet from the queue
 			cluster->pending_tasklets.head = tasklet->next;
 
-			TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
+			TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, (tasklet->owner) ? tasklet->owner->pid : -1);
 		}
 		else {
-			TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id());
+			TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, (tasklet->owner) ? tasklet->owner->pid : -1, smp_processor_id());
 			tasklet = NULL;
 		}
 	}
@@ -584,9 +616,11 @@ static void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_ta
 	TRACE("%s: done.\n", __FUNCTION__);
 	*/
 
-
 		raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags);
 
+
+		TS_NV_SCHED_BOTISR_END;
+
 		if(tasklet) {
 			__do_lit_tasklet(tasklet, 0ul);
 			tasklet = NULL;
@@ -690,7 +724,16 @@ static void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* clu
 
 	// insert tasklet right before step->next.
 
-	TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1);
+	TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__,
+		  tasklet->owner->pid,
+		  (step->owner) ?
+		  step->owner->pid :
+		  -1,
+		  (step->next) ?
+		  ((step->next->owner) ?
+		  step->next->owner->pid :
+		  -1) :
+		  -1);
 
 	tasklet->next = step->next;
 	step->next = tasklet;
@@ -1080,6 +1123,10 @@ static void cedf_task_exit(struct task_struct * t)
 	unsigned long flags;
 	cedf_domain_t *cluster = task_cpu_cluster(t);
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	flush_tasklets(cluster, t);
+#endif
+
 	/* unlink if necessary */
 	raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
 	unlink(t);
@@ -1091,10 +1138,6 @@ static void cedf_task_exit(struct task_struct * t)
 	}
 	raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags);
 
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-	flush_tasklets(cluster, t);
-#endif
-
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
 }
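
The sched_cedf.c changes above replace eager tasklet extraction on task exit with lazy flushing: pending PAI tasklets merely lose their owner, and the execution and trace paths now tolerate owner == NULL. A condensed sketch of that pattern, using simplified illustrative types rather than the plugin's actual structures:

	#include <linux/sched.h>	/* struct task_struct */

	struct pai_tasklet_sketch {		/* illustrative stand-in */
		struct pai_tasklet_sketch *next;
		struct task_struct *owner;	/* becomes NULL once the owner exits */
	};

	/* caller holds the cluster lock, as flush_tasklets() does above */
	static void lazy_flush_sketch(struct pai_tasklet_sketch *head,
				      struct task_struct *exiting)
	{
		struct pai_tasklet_sketch *step;

		for (step = head; step != NULL; step = step->next)
			if (step->owner == exiting)
				step->owner = NULL;	/* orphan it; any CPU may still run it */
	}
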
diff --git a/litmus/sched_cfifo.c b/litmus/sched_cfifo.c
index 689b2dbe5fae..7fbdec3f1d15 100644
--- a/litmus/sched_cfifo.c
+++ b/litmus/sched_cfifo.c
@@ -57,6 +57,7 @@
 
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
 #include <linux/interrupt.h>
+#include <litmus/trace.h>
 #endif
 
 #ifdef CONFIG_LITMUS_NVIDIA
@@ -530,6 +531,9 @@ static void do_lit_tasklets(cfifo_domain_t* cluster, struct task_struct* sched_t
 	unsigned long flags;
 
 	while(work_to_do) {
+
+		TS_NV_SCHED_BOTISR_START;
+
 		// remove tasklet at head of list if it has higher priority.
 		raw_spin_lock_irqsave(&cluster->cfifo_lock, flags);
 
@@ -584,6 +588,8 @@ static void do_lit_tasklets(cfifo_domain_t* cluster, struct task_struct* sched_t
 
 		raw_spin_unlock_irqrestore(&cluster->cfifo_lock, flags);
 
+		TS_NV_SCHED_BOTISR_END;
+
 		if(tasklet) {
 			__do_lit_tasklet(tasklet, 0ul);
 			tasklet = NULL;
@@ -1071,6 +1077,10 @@ static void cfifo_task_exit(struct task_struct * t)
 	unsigned long flags;
 	cfifo_domain_t *cluster = task_cpu_cluster(t);
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	flush_tasklets(cluster, t);
+#endif
+
 	/* unlink if necessary */
 	raw_spin_lock_irqsave(&cluster->cfifo_lock, flags);
 	unlink(t);
@@ -1081,10 +1091,6 @@ static void cfifo_task_exit(struct task_struct * t)
 		tsk_rt(t)->scheduled_on = NO_CPU;
 	}
 	raw_spin_unlock_irqrestore(&cluster->cfifo_lock, flags);
-
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-	flush_tasklets(cluster, t);
-#endif
 
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
diff --git a/litmus/sched_crm.c b/litmus/sched_crm.c
index fd7fab982998..e51de10557f9 100644
--- a/litmus/sched_crm.c
+++ b/litmus/sched_crm.c
@@ -57,6 +57,7 @@
 
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
 #include <linux/interrupt.h>
+#include <litmus/trace.h>
 #endif
 
 #ifdef CONFIG_LITMUS_NVIDIA
@@ -425,17 +426,24 @@ static void crm_tick(struct task_struct* t)
 static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
 {
 	if (!atomic_read(&tasklet->count)) {
-		sched_trace_tasklet_begin(tasklet->owner);
+		if(tasklet->owner) {
+			sched_trace_tasklet_begin(tasklet->owner);
+		}
 
 		if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state))
 		{
 			BUG();
 		}
-		TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n", __FUNCTION__, tasklet->owner->pid, flushed);
+		TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n",
+			  __FUNCTION__,
+			  (tasklet->owner) ? tasklet->owner->pid : -1,
+			  (tasklet->owner) ? 0 : 1);
 		tasklet->func(tasklet->data);
 		tasklet_unlock(tasklet);
 
-		sched_trace_tasklet_end(tasklet->owner, flushed);
+		if(tasklet->owner) {
+			sched_trace_tasklet_end(tasklet->owner, flushed);
+		}
 	}
 	else {
 		BUG();
@@ -491,6 +499,7 @@ static void __extract_tasklets(crm_domain_t* cluster, struct task_struct* task,
 
 static void flush_tasklets(crm_domain_t* cluster, struct task_struct* task)
 {
+#if 0
 	unsigned long flags;
 	struct tasklet_head task_tasklets;
 	struct tasklet_struct* step;
@@ -513,6 +522,27 @@ static void flush_tasklets(crm_domain_t* cluster, struct task_struct* task)
 
 		step = temp;
 	}
+#endif
+
+	// lazy flushing.
+	// just change ownership to NULL and let an idle processor
+	// take care of it. :P
+
+	struct tasklet_struct* step;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&cluster->crm_lock, flags);
+
+	for(step = cluster->pending_tasklets.head; step != NULL; step = step->next)
+	{
+		if(step->owner == task)
+		{
+			TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid);
+			step->owner = NULL;
+		}
+	}
+
+	raw_spin_unlock_irqrestore(&cluster->crm_lock, flags);
 }
 
 
@@ -524,6 +554,9 @@ static void do_lit_tasklets(crm_domain_t* cluster, struct task_struct* sched_tas
 	unsigned long flags;
 
 	while(work_to_do) {
+
+		TS_NV_SCHED_BOTISR_START;
+
 		// remove tasklet at head of list if it has higher priority.
 		raw_spin_lock_irqsave(&cluster->crm_lock, flags);
 
@@ -546,17 +579,17 @@ static void do_lit_tasklets(crm_domain_t* cluster, struct task_struct* sched_tas
 
 			if(NULL == tasklet->next) {
 				// tasklet is at the head, list only has one element
-				TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
+				TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, (tasklet->owner) ? tasklet->owner->pid : -1);
 				cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head);
 			}
 
 			// remove the tasklet from the queue
 			cluster->pending_tasklets.head = tasklet->next;
 
-			TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
+			TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, (tasklet->owner) ? tasklet->owner->pid : -1);
 		}
 		else {
-			TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id());
+			TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, (tasklet->owner) ? tasklet->owner->pid : -1, smp_processor_id());
 			tasklet = NULL;
 		}
 	}
@@ -577,6 +610,8 @@ static void do_lit_tasklets(crm_domain_t* cluster, struct task_struct* sched_tas
 
 		raw_spin_unlock_irqrestore(&cluster->crm_lock, flags);
 
+		TS_NV_SCHED_BOTISR_END;
+
 		if(tasklet) {
 			__do_lit_tasklet(tasklet, 0ul);
 			tasklet = NULL;
@@ -619,8 +654,8 @@ static void run_tasklets(struct task_struct* sched_task)
 	preempt_disable();
 
 	cluster = (is_realtime(sched_task)) ?
-			task_cpu_cluster(sched_task) :
-			remote_cluster(smp_processor_id());
+		task_cpu_cluster(sched_task) :
+		remote_cluster(smp_processor_id());
 
 	if(cluster && cluster->pending_tasklets.head != NULL) {
 		TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
@@ -679,8 +714,17 @@ static void __add_pai_tasklet(struct tasklet_struct* tasklet, crm_domain_t* clus
 
 	// insert tasklet right before step->next.
 
-	TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1);
-
+	TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__,
+		  tasklet->owner->pid,
+		  (step->owner) ?
+		  step->owner->pid :
+		  -1,
+		  (step->next) ?
+		  ((step->next->owner) ?
+		  step->next->owner->pid :
+		  -1) :
+		  -1);
+
 	tasklet->next = step->next;
 	step->next = tasklet;
 
@@ -1070,6 +1114,10 @@ static void crm_task_exit(struct task_struct * t)
 	unsigned long flags;
 	crm_domain_t *cluster = task_cpu_cluster(t);
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	flush_tasklets(cluster, t);
+#endif
+
 	/* unlink if necessary */
 	raw_spin_lock_irqsave(&cluster->crm_lock, flags);
 	unlink(t);
@@ -1080,10 +1128,6 @@ static void crm_task_exit(struct task_struct * t)
 		tsk_rt(t)->scheduled_on = NO_CPU;
 	}
 	raw_spin_unlock_irqrestore(&cluster->crm_lock, flags);
-
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-	flush_tasklets(cluster, t);
-#endif
 
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
diff --git a/litmus/sched_crm_srt.c b/litmus/sched_crm_srt.c
index c0004354573d..f0064d486953 100644
--- a/litmus/sched_crm_srt.c
+++ b/litmus/sched_crm_srt.c
@@ -57,6 +57,7 @@
 
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
 #include <linux/interrupt.h>
+#include <litmus/trace.h>
 #endif
 
 #ifdef CONFIG_LITMUS_NVIDIA
@@ -529,6 +530,9 @@ static void do_lit_tasklets(crm_srt_domain_t* cluster, struct task_struct* sched
 	unsigned long flags;
 
 	while(work_to_do) {
+
+		TS_NV_SCHED_BOTISR_START;
+
 		// remove tasklet at head of list if it has higher priority.
 		raw_spin_lock_irqsave(&cluster->crm_srt_lock, flags);
 
@@ -582,6 +586,8 @@ static void do_lit_tasklets(crm_srt_domain_t* cluster, struct task_struct* sched
 
 		raw_spin_unlock_irqrestore(&cluster->crm_srt_lock, flags);
 
+		TS_NV_SCHED_BOTISR_END;
+
 		if(tasklet) {
 			__do_lit_tasklet(tasklet, 0ul);
 			tasklet = NULL;
@@ -1067,6 +1073,10 @@ static void crm_srt_task_exit(struct task_struct * t)
 	unsigned long flags;
 	crm_srt_domain_t *cluster = task_cpu_cluster(t);
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	flush_tasklets(cluster, t);
+#endif
+
 	/* unlink if necessary */
 	raw_spin_lock_irqsave(&cluster->crm_srt_lock, flags);
 	unlink(t);
@@ -1077,10 +1087,6 @@ static void crm_srt_task_exit(struct task_struct * t)
 		tsk_rt(t)->scheduled_on = NO_CPU;
 	}
 	raw_spin_unlock_irqrestore(&cluster->crm_srt_lock, flags);
-
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-	flush_tasklets(cluster, t);
-#endif
 
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index b40ff7ba4f0e..30c745fe33a7 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -37,6 +37,7 @@
 
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
 #include <linux/interrupt.h>
+#include <litmus/trace.h>
 #endif
 
 #ifdef CONFIG_LITMUS_NVIDIA
@@ -523,6 +524,9 @@ static void do_lit_tasklets(struct task_struct* sched_task)
 	unsigned long flags;
 
 	while(work_to_do) {
+
+		TS_NV_SCHED_BOTISR_START;
+
 		// remove tasklet at head of list if it has higher priority.
 		raw_spin_lock_irqsave(&gsnedf_lock, flags);
 
@@ -576,9 +580,10 @@ static void do_lit_tasklets(struct task_struct* sched_task)
 	TRACE("%s: done.\n", __FUNCTION__);
 	*/
 
-
 		raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 
+		TS_NV_SCHED_BOTISR_END;
+
 		if(tasklet) {
 			__do_lit_tasklet(tasklet, 0ul);
 			tasklet = NULL;
@@ -1069,6 +1074,10 @@ static void gsnedf_task_exit(struct task_struct * t)
 {
 	unsigned long flags;
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	flush_tasklets(t);
+#endif
+
 	/* unlink if necessary */
 	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 	unlink(t);
@@ -1077,10 +1086,6 @@ static void gsnedf_task_exit(struct task_struct * t)
 		tsk_rt(t)->scheduled_on = NO_CPU;
 	}
 	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
-
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-	flush_tasklets(t);
-#endif
 
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
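
Across all five plugins, flush_tasklets() also moves from the end of the task-exit path to the beginning, so pending tasklets are disowned before the task is unlinked from its domain. A hedged sketch of the resulting ordering (cluster, domain_lock, and the helper calls here are placeholders, not the plugins' exact symbols):

	static void task_exit_sketch(struct task_struct *t)
	{
		unsigned long flags;

	#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
		/* orphan pending tasklets first; takes and releases the domain lock itself */
		flush_tasklets(cluster, t);
	#endif

		raw_spin_lock_irqsave(&domain_lock, flags);
		/* unlink t, clear tsk_rt(t)->scheduled_on, ... */
		raw_spin_unlock_irqrestore(&domain_lock, flags);

		BUG_ON(!is_realtime(t));
		TRACE_TASK(t, "RIP\n");
	}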