author    Glenn Elliott <gelliott@cs.unc.edu>    2012-01-14 14:20:07 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>    2012-01-14 14:20:07 -0500
commit    53a6dbb9f5337e77fce9c2672488c1c5e0621beb (patch)
tree      83ffcf95fe422c592187ab17be7d25e374e19718
parent    5d7dcfa10ea0dd283773a301e3ce610a7797d582 (diff)

Completed PAI for C-EDF.
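
In outline, the patch adds PAI tasklet handling to the C-EDF plugin: each cluster keeps an EDF-ordered pending_tasklets queue; enqueue_pai_tasklet() decides whether a newly raised tasklet should run immediately on the local CPU, trigger a preemption of a remote CPU, or be deferred to the queue; the new run_tasklets() plugin hook, called from schedule(), drains eligible tasklets; and flush_tasklets() executes a dying task's leftover tasklets on exit. The user-space C sketch below models only the three-way dispatch decision; fake_cpu, integer priorities, and pai_dispatch() are illustrative stand-ins (not part of the patch) for cpu_entry_t, edf_higher_prio(), and the runLocal/runNow logic in enqueue_pai_tasklet().

/* Hypothetical user-space model of the PAI dispatch decision; NOT the
 * kernel code from this patch. Lower prio value = higher EDF priority.
 * In the real patch, remote and deferred tasklets are also linked into
 * the cluster's pending_tasklets queue under cedf_lock. */
#include <stdio.h>

enum pai_action { PAI_RUN_LOCAL, PAI_RUN_REMOTE, PAI_DEFER };

struct fake_cpu {
	int id;          /* CPU number */
	int linked_prio; /* priority of the task linked to this CPU */
};

/* Mirrors the runLocal/runNow flags: run the tasklet immediately (here,
 * or by preempting the target CPU) only if its owner out-prioritizes the
 * target CPU's linked task; otherwise leave it queued for run_tasklets(). */
static enum pai_action pai_dispatch(int owner_prio, int this_cpu,
                                    const struct fake_cpu* target)
{
	if (owner_prio < target->linked_prio) /* stands in for edf_higher_prio() */
		return (this_cpu == target->id) ? PAI_RUN_LOCAL : PAI_RUN_REMOTE;
	return PAI_DEFER;
}

int main(void)
{
	struct fake_cpu lowest = { .id = 2, .linked_prio = 10 };
	printf("%d\n", pai_dispatch(5, 2, &lowest));  /* 0: run locally, now */
	printf("%d\n", pai_dispatch(5, 0, &lowest));  /* 1: preempt CPU 2, run there */
	printf("%d\n", pai_dispatch(20, 0, &lowest)); /* 2: defer to cluster queue */
	return 0;
}
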
-rw-r--r--	include/litmus/sched_plugin.h	2
-rw-r--r--	include/litmus/sched_trace_external.h	20
-rw-r--r--	kernel/sched.c	5
-rw-r--r--	kernel/softirq.c	3
-rw-r--r--	litmus/litmus_softirq.c	5
-rw-r--r--	litmus/sched_cedf.c	319
-rw-r--r--	litmus/sched_plugin.c	8
-rw-r--r--	litmus/sched_trace_external.c	8
8 files changed, 316 insertions, 54 deletions
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index 12a9ab65a673..3fc64f832fef 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -75,6 +75,7 @@ typedef void (*clear_prio_inh_klitirqd_t)(struct task_struct* klitirqd,
 
 
 typedef int (*enqueue_pai_tasklet_t)(struct tasklet_struct* tasklet);
+typedef void (*run_tasklets_t)(struct task_struct* next);
 
 /********************* sys call backends ********************/
 /* This function causes the caller to sleep until the next release */
@@ -125,6 +126,7 @@ struct sched_plugin {
 
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
 	enqueue_pai_tasklet_t enqueue_pai_tasklet;
+	run_tasklets_t run_tasklets;
 #endif
 } __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
 
diff --git a/include/litmus/sched_trace_external.h b/include/litmus/sched_trace_external.h
index 90424d5c564c..e70e45e4cf51 100644
--- a/include/litmus/sched_trace_external.h
+++ b/include/litmus/sched_trace_external.h
@@ -4,6 +4,8 @@
 #ifndef _LINUX_SCHED_TRACE_EXTERNAL_H_
 #define _LINUX_SCHED_TRACE_EXTERNAL_H_
 
+
+#ifdef CONFIG_SCHED_TASK_TRACE
 extern void __sched_trace_tasklet_begin_external(struct task_struct* t);
 static inline void sched_trace_tasklet_begin_external(struct task_struct* t)
 {
@@ -28,6 +30,7 @@ static inline void sched_trace_work_end_external(struct task_struct* t, struct t
 	__sched_trace_work_end_external(t, e, f);
 }
 
+#ifdef CONFIG_LITMUS_NVIDIA
 extern void __sched_trace_nv_interrupt_begin_external(u32 device);
 static inline void sched_trace_nv_interrupt_begin_external(u32 device)
 {
@@ -39,6 +42,23 @@ static inline void sched_trace_nv_interrupt_end_external(u32 device)
 {
 	__sched_trace_nv_interrupt_end_external(device);
 }
+#endif
+
+#else
+
+// no tracing.
+static inline void sched_trace_tasklet_begin_external(struct task_struct* t){}
+static inline void sched_trace_tasklet_end_external(struct task_struct* t, unsigned long flushed){}
+static inline void sched_trace_work_begin_external(struct task_struct* t, struct task_struct* e){}
+static inline void sched_trace_work_end_external(struct task_struct* t, struct task_struct* e, unsigned long f){}
+
+#ifdef CONFIG_LITMUS_NVIDIA
+static inline void sched_trace_nv_interrupt_begin_external(u32 device){}
+static inline void sched_trace_nv_interrupt_end_external(u32 device){}
+#endif
+
+#endif
+
 
 #ifdef CONFIG_LITMUS_NVIDIA
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 3aa2be09122b..08b725cd9182 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2883,6 +2883,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	struct mm_struct *mm, *oldmm;
 
 	prepare_task_switch(rq, prev, next);
+
 	trace_sched_switch(prev, next);
 	mm = next->mm;
 	oldmm = prev->active_mm;
@@ -3901,6 +3902,10 @@ need_resched_nonpreemptible:
 	reacquire_klitirqd_lock(prev);
 #endif
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	litmus->run_tasklets(prev);
+#endif
+
 	srp_ceiling_block();
 }
 EXPORT_SYMBOL(schedule);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index ae77c5c1d17e..d3217c54d2bf 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -442,9 +442,6 @@ void __tasklet_schedule(struct tasklet_struct *t)
 		if(likely(_litmus_tasklet_schedule(t,nvidia_device)))
 		{
 			unlock_nv_registry(nvidia_device, &flags);
-
-			TS_NV_RELEASE_BOTISR_END;
-
 			return;
 		}
 		else
diff --git a/litmus/litmus_softirq.c b/litmus/litmus_softirq.c
index f5cca964b6c6..c49676c6d3a7 100644
--- a/litmus/litmus_softirq.c
+++ b/litmus/litmus_softirq.c
@@ -470,6 +470,9 @@ static void do_lit_tasklet(struct klitirqd_info* which,
 	/* execute tasklet if it has my priority and is free */
 	if ((t->owner == which->current_owner) && tasklet_trylock(t)) {
 		if (!atomic_read(&t->count)) {
+
+			sched_trace_tasklet_begin(t->owner);
+
 			if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
 			{
 				BUG();
@@ -480,6 +483,8 @@ static void do_lit_tasklet(struct klitirqd_info* which,
 
 			atomic_dec(count);
 
+			sched_trace_tasklet_end(t->owner, 0ul);
+
 			continue; /* process more tasklets */
 		}
 		tasklet_unlock(t);
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index f0356de60b2f..4924da21865e 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -124,6 +124,7 @@ typedef struct clusterdomain {
 
 
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	raw_spinlock_t tasklet_lock;
 	struct tasklet_head pending_tasklets;
 #endif
 
@@ -429,36 +430,137 @@ static void cedf_tick(struct task_struct* t)
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
 
 
-void __do_lit_tasklet(struct tasklet_struct* tasklet)
+void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
 {
-	if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state))
-	{
+	if (!atomic_read(&tasklet->count)) {
+		sched_trace_tasklet_begin(tasklet->owner);
+
+		if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state))
+		{
+			BUG();
+		}
+		TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n", __FUNCTION__, tasklet->owner->pid, flushed);
+		tasklet->func(tasklet->data);
+		tasklet_unlock(tasklet);
+
+		sched_trace_tasklet_end(tasklet->owner, flushed);
+	}
+	else {
 		BUG();
 	}
-	TRACE("%s: Invoking tasklet with owner pid = %d.\n", __FUNCTION__, tasklet->owner->pid);
-	tasklet->func(tasklet->data);
-	tasklet_unlock(tasklet);
+}
+
+
+void __extract_tasklets(cedf_domain_t* cluster, struct task_struct* task, struct tasklet_head* task_tasklets)
+{
+	struct tasklet_struct* step;
+	struct tasklet_struct* tasklet;
+	struct tasklet_struct* prev;
+
+	task_tasklets->head = NULL;
+	task_tasklets->tail = &(task_tasklets->head);
+
+	prev = NULL;
+	for(step = cluster->pending_tasklets.head; step != NULL; step = step->next)
+	{
+		if(step->owner == task)
+		{
+			TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid);
+
+			tasklet = step;
+
+			if(prev) {
+				prev->next = tasklet->next;
+			}
+			else if(cluster->pending_tasklets.head == tasklet) {
+				// we're at the head.
+				cluster->pending_tasklets.head = tasklet->next;
+			}
+
+			if(cluster->pending_tasklets.tail == &tasklet) {
+				// we're at the tail
+				if(prev) {
+					cluster->pending_tasklets.tail = &prev;
+				}
+				else {
+					cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head);
+				}
+			}
+
+			tasklet->next = NULL;
+			*(task_tasklets->tail) = tasklet;
+			task_tasklets->tail = &(tasklet->next);
+		}
+		else {
+			prev = step;
+		}
+	}
+}
+
+void flush_tasklets(cedf_domain_t* cluster, struct task_struct* task)
+{
+	unsigned long flags;
+	struct tasklet_head task_tasklets;
+	struct tasklet_struct* step;
+
+	raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
+	__extract_tasklets(cluster, task, &task_tasklets);
+	raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags);
+
+	if(cluster->pending_tasklets.head != NULL) {
+		TRACE("%s: Flushing tasklets for %d...\n", __FUNCTION__, task->pid);
+	}
+
+	// now execute any flushed tasklets.
+	for(step = cluster->pending_tasklets.head; step != NULL; /**/)
+	{
+		struct tasklet_struct* temp = step->next;
+
+		step->next = NULL;
+		__do_lit_tasklet(step, 1ul);
 
+		step = temp;
+	}
 }
 
-void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* next)
+
+void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_task)
 {
 	int work_to_do = 1;
 	struct tasklet_struct *tasklet = NULL;
-
-	TRACE("%s: entered.\n", __FUNCTION__);
+	struct tasklet_struct *step;
+	unsigned long flags;
 
 	while(work_to_do) {
 		// remove tasklet at head of list if it has higher priority.
-		raw_spin_lock(&cluster->cedf_lock);
-		// remove tasklet at head.
+		raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
+
+
+		step = cluster->pending_tasklets.head;
+		TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__);
+		while(step != NULL){
+			TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
+			step = step->next;
+		}
+		TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
+		TRACE("%s: done.\n", __FUNCTION__);
+
+
 		if(cluster->pending_tasklets.head != NULL) {
+			// remove tasklet at head.
 			tasklet = cluster->pending_tasklets.head;
 
-			if(edf_higher_prio(tasklet->owner, next)) {
+			if(edf_higher_prio(tasklet->owner, sched_task)) {
+
+				if(NULL == tasklet->next) {
+					// tasklet is at the head, list only has one element
+					TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
+					cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head);
+				}
+
 				// remove the tasklet from the queue
 				cluster->pending_tasklets.head = tasklet->next;
 
 				TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
 			}
 			else {
@@ -467,12 +569,24 @@ void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* next)
 			}
 		}
 		else {
-			//TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
+			TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
 		}
-		raw_spin_unlock(&cluster->cedf_lock);
+
+
+		step = cluster->pending_tasklets.head;
+		TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__);
+		while(step != NULL){
+			TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
+			step = step->next;
+		}
+		TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
+		TRACE("%s: done.\n", __FUNCTION__);
+
+
+		raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags);
 
 		if(tasklet) {
-			__do_lit_tasklet(tasklet);
+			__do_lit_tasklet(tasklet, 0ul);
 			tasklet = NULL;
 		}
 		else {
@@ -480,7 +594,50 @@ void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* next)
 		}
 	}
 
-	TRACE("%s: exited.\n", __FUNCTION__);
+	//TRACE("%s: exited.\n", __FUNCTION__);
+}
+
+
+void run_tasklets(struct task_struct* sched_task)
+{
+	cedf_domain_t* cluster;
+
+#if 0
+	int task_is_rt = is_realtime(sched_task);
+	cedf_domain_t* cluster;
+
+	if(is_realtime(sched_task)) {
+		cluster = task_cpu_cluster(sched_task);
+	}
+	else {
+		cluster = remote_cluster(get_cpu());
+	}
+
+	if(cluster && cluster->pending_tasklets.head != NULL) {
+		TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
+
+		do_lit_tasklets(cluster, sched_task);
+	}
+
+	if(!task_is_rt) {
+		put_cpu_no_resched();
+	}
+#else
+
+	preempt_disable();
+
+	cluster = (is_realtime(sched_task)) ?
+		task_cpu_cluster(sched_task) :
+		remote_cluster(smp_processor_id());
+
+	if(cluster && cluster->pending_tasklets.head != NULL) {
+		TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
+		do_lit_tasklets(cluster, sched_task);
+	}
+
+	preempt_enable_no_resched();
+
+#endif
 }
 
 
@@ -489,41 +646,47 @@ void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* cluster)
 	struct tasklet_struct* step;
 
 	step = cluster->pending_tasklets.head;
-	TRACE("%s: (BEFORE) dumping tasklet queue...\n");
+	TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__);
 	while(step != NULL){
-		TRACE("%s: %d\n", __FUNCTION__, step->owner);
+		TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
 		step = step->next;
 	}
+	TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
 	TRACE("%s: done.\n", __FUNCTION__);
 
 
+	tasklet->next = NULL;	// make sure there are no old values floating around
+
 	step = cluster->pending_tasklets.head;
 	if(step == NULL) {
 		TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid);
 		// insert at tail.
-		tasklet->next = NULL;
 		*(cluster->pending_tasklets.tail) = tasklet;
-		cluster->pending_tasklets.tail = &tasklet->next;
+		cluster->pending_tasklets.tail = &(tasklet->next);
 	}
-	else if((*cluster->pending_tasklets.tail != NULL) &&
-			edf_higher_prio((*cluster->pending_tasklets.tail)->owner, tasklet->owner)) {
+	else if((*(cluster->pending_tasklets.tail) != NULL) &&
+			edf_higher_prio((*(cluster->pending_tasklets.tail))->owner, tasklet->owner)) {
 		// insert at tail.
 		TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid);
 
-		tasklet->next = NULL;
 		*(cluster->pending_tasklets.tail) = tasklet;
-		cluster->pending_tasklets.tail = &tasklet->next;
+		cluster->pending_tasklets.tail = &(tasklet->next);
 	}
 	else {
+
+		WARN_ON(1 == 1);
+
 		// insert the tasklet somewhere in the middle.
 
+		TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__);
+
 		while(step->next && edf_higher_prio(step->next->owner, tasklet->owner)) {
 			step = step->next;
 		}
 
 		// insert tasklet right before step->next.
 
-		TRACE("%s: tasklet belongs at end. inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1);
+		TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1);
 
 		tasklet->next = step->next;
 		step->next = tasklet;
@@ -540,9 +703,10 @@ void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* cluster)
 	step = cluster->pending_tasklets.head;
 	TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__);
 	while(step != NULL){
-		TRACE("%s: %d\n", __FUNCTION__, step->owner);
+		TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid);
 		step = step->next;
 	}
+	TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
 	TRACE("%s: done.\n", __FUNCTION__);
 
 // TODO: Maintain this list in priority order.
@@ -553,37 +717,89 @@ void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* cluster)
 
 int enqueue_pai_tasklet(struct tasklet_struct* tasklet)
 {
-	cedf_domain_t* cluster = task_cpu_cluster(tasklet->owner);
-	cpu_entry_t *lowest;
+	cedf_domain_t *cluster = NULL;
+	cpu_entry_t *targetCPU = NULL;
+	int thisCPU;
+	int runLocal = 0;
+	int runNow = 0;
 	unsigned long flags;
 
 	if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner)))
 	{
 		TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
-		BUG();
+		return 0;
 	}
 
+	cluster = task_cpu_cluster(tasklet->owner);
+
 	raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
 
-	lowest = lowest_prio_cpu(cluster);
-	if (edf_higher_prio(tasklet->owner, lowest->linked)) {
-		if (smp_processor_id() == lowest->cpu) {
-			TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
-			// execute the tasklet now.
-			__do_lit_tasklet(tasklet);
+	thisCPU = smp_processor_id();
+
+#if 1
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+	{
+		cpu_entry_t* affinity = NULL;
+
+		// use this CPU if it is in our cluster and isn't running any RT work.
+		if(cpu_isset(thisCPU, *cluster->cpu_map) && (__get_cpu_var(cedf_cpu_entries).linked == NULL)) {
+			affinity = &(__get_cpu_var(cedf_cpu_entries));
 		}
 		else {
-			// preempt the lowest CPU
-			__add_pai_tasklet(tasklet, cluster);
-
-			TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, lowest->cpu);
-
-			preempt(lowest);
+			// this CPU is busy or shouldn't run tasklet in this cluster.
+			// look for available near by CPUs.
+			// NOTE: Affinity towards owner and not this CPU. Is this right?
+			affinity =
+				cedf_get_nearest_available_cpu(cluster,
+					&per_cpu(cedf_cpu_entries, task_cpu(tasklet->owner)));
 		}
+
+		targetCPU = affinity;
+	}
+#endif
+#endif
+
+	if (targetCPU == NULL) {
+		targetCPU = lowest_prio_cpu(cluster);
+	}
+
+	if (edf_higher_prio(tasklet->owner, targetCPU->linked)) {
+		if (thisCPU == targetCPU->cpu) {
+			TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__);
+			runLocal = 1;
+			runNow = 1;
+		}
+		else {
+			TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__);
+			runLocal = 0;
+			runNow = 1;
+		}
+	}
+	else {
+		runLocal = 0;
+		runNow = 0;
+	}
+
+	if(!runLocal) {
+		// enqueue the tasklet
+		__add_pai_tasklet(tasklet, cluster);
 	}
 
 	raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags);
 
+
+	if (runLocal /*&& runNow */) {  // runNow == 1 is implied
+		TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
+		__do_lit_tasklet(tasklet, 0ul);
+	}
+	else if (runNow /*&& !runLocal */) {  // runLocal == 0 is implied
+		TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu);
+		preempt(targetCPU);  // need to be protected by cedf_lock?
+	}
+	else {
+		TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__);
+	}
+
 	return(1); // success
 }
 
@@ -721,9 +937,14 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 
 	raw_spin_unlock(&cluster->cedf_lock);
 
+/*
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-	do_lit_tasklets(cluster, next);
-#endif
+	if(cluster->pending_tasklets.head != NULL)  // peak at data. normally locked with cluster->cedf_lock
+	{
+		do_lit_tasklets(cluster, next);
+	}
+#endif
+*/
 
 #ifdef WANT_ALL_SCHED_EVENTS
 	TRACE("cedf_lock released, next=0x%p\n", next);
@@ -865,6 +1086,10 @@ static void cedf_task_exit(struct task_struct * t)
 	}
 	raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags);
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	flush_tasklets(cluster, t);
+#endif
+
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
 }
@@ -1684,8 +1909,9 @@ static long cedf_activate_plugin(void)
 
 
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+		raw_spin_lock_init(&(cedf[i].tasklet_lock));
 		cedf[i].pending_tasklets.head = NULL;
-		cedf[i].pending_tasklets.tail = &cedf[i].pending_tasklets.head;
+		cedf[i].pending_tasklets.tail = &(cedf[i].pending_tasklets.head);
 #endif
 
 
@@ -1803,6 +2029,7 @@ static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = {
 #endif
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
 	.enqueue_pai_tasklet = enqueue_pai_tasklet,
+	.run_tasklets = run_tasklets,
 #endif
 };
 
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
index e393d749baf5..d977e80aa32f 100644
--- a/litmus/sched_plugin.c
+++ b/litmus/sched_plugin.c
@@ -155,9 +155,14 @@ static void litmus_dummy_clear_prio_inh_klitirqd(struct task_struct* klitirqd,
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
 static int litmus_dummy_enqueue_pai_tasklet(struct tasklet_struct* t)
 {
-	TRACE("PAI Tasklet unsupported in this plugin!!!!!!\n");
+	TRACE("%s: PAI Tasklet unsupported in this plugin!!!!!!\n", __FUNCTION__);
 	return(0); // failure.
 }
+
+static void litmus_dummy_run_tasklets(struct task_struct* t)
+{
+	//TRACE("%s: PAI Tasklet unsupported in this plugin!!!!!!\n", __FUNCTION__);
+}
 #endif
 
 
@@ -187,6 +192,7 @@ struct sched_plugin linux_sched_plugin = {
 #endif
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
 	.enqueue_pai_tasklet = litmus_dummy_enqueue_pai_tasklet,
+	.run_tasklets = litmus_dummy_run_tasklets,
 #endif
 	.admit_task = litmus_dummy_admit_task
 };
diff --git a/litmus/sched_trace_external.c b/litmus/sched_trace_external.c
index 5b7e6152416a..cf8e1d78aa77 100644
--- a/litmus/sched_trace_external.c
+++ b/litmus/sched_trace_external.c
@@ -34,15 +34,15 @@ EXPORT_SYMBOL(__sched_trace_work_end_external);
 
 void __sched_trace_nv_interrupt_begin_external(u32 device)
 {
-	unsigned long _device = device;
-	sched_trace_nv_interrupt_begin(_device);
+	//unsigned long _device = device;
+	sched_trace_nv_interrupt_begin((unsigned long)device);
 }
 EXPORT_SYMBOL(__sched_trace_nv_interrupt_begin_external);
 
 void __sched_trace_nv_interrupt_end_external(u32 device)
 {
-	unsigned long _device = device;
-	sched_trace_nv_interrupt_end(_device);
+	//unsigned long _device = device;
+	sched_trace_nv_interrupt_end((unsigned long)device);
 }
 EXPORT_SYMBOL(__sched_trace_nv_interrupt_end_external);
 