Diffstat (limited to 'litmus/sched_crm_srt.c')
 -rw-r--r--  litmus/sched_crm_srt.c | 445
 1 file changed, 443 insertions(+), 2 deletions(-)
diff --git a/litmus/sched_crm_srt.c b/litmus/sched_crm_srt.c
index 4473f35e64cd..c0004354573d 100644
--- a/litmus/sched_crm_srt.c
+++ b/litmus/sched_crm_srt.c
@@ -55,6 +55,10 @@
 #include <litmus/litmus_softirq.h>
 #endif
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+#include <linux/interrupt.h>
+#endif
+
 #ifdef CONFIG_LITMUS_NVIDIA
 #include <litmus/nvidia_info.h>
 #endif
@@ -91,6 +95,15 @@ DEFINE_PER_CPU(cpu_entry_t, crm_srt_cpu_entries);
 #define test_will_schedule(cpu) \
 	(atomic_read(&per_cpu(crm_srt_cpu_entries, cpu).will_schedule))
 
+
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
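+/* Singly-linked list of pending PAI tasklets. 'tail' points at the last
+ * node's ->next field (or at 'head' while the list is empty), so appends
+ * are O(1): *tail = t; tail = &t->next. */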
+struct tasklet_head
+{
+	struct tasklet_struct *head;
+	struct tasklet_struct **tail;
+};
+#endif
+
 /*
  * In C-RM-SRT there is a crm_srt domain _per_ cluster
  * The number of clusters is dynamically determined accordingly to the
@@ -108,6 +121,12 @@ typedef struct clusterdomain {
 	struct bheap cpu_heap;
 	/* lock for this cluster */
 #define crm_srt_lock domain.ready_lock
+
+
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
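+	/* tasklets queued for this cluster by enqueue_pai_tasklet() */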
+	struct tasklet_head pending_tasklets;
+#endif
+
 } crm_srt_domain_t;
 
 /* a crm_srt_domain per cluster; allocation is done at init/activation time */
@@ -251,7 +270,7 @@ static void preempt(cpu_entry_t *entry)
 	preempt_if_preemptable(entry->scheduled, entry->cpu);
 }
 
-/* requeue - Put an unlinked task into gsn-edf domain.
+/* requeue - Put an unlinked task into c-rm-srt domain.
  * Caller must hold crm_srt_lock.
  */
 static noinline void requeue(struct task_struct* task)
@@ -395,6 +414,415 @@ static void crm_srt_tick(struct task_struct* t)
 	}
 }
 
+
+
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+
+
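+/* Runs a single LITMUS tasklet: the SCHED bit must be set and the tasklet
+ * must not be disabled, otherwise we BUG. Called without the cluster lock
+ * held, since the handler may run for a while. */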
+static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
+{
+	BUG_ON(atomic_read(&tasklet->count) != 0);
+
+	sched_trace_tasklet_begin(tasklet->owner);
+
+	if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state))
+	{
+		BUG();
+	}
+	TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %lu).\n",
+	      __FUNCTION__, tasklet->owner->pid, flushed);
+	tasklet->func(tasklet->data);
+	tasklet_unlock(tasklet);
+
+	sched_trace_tasklet_end(tasklet->owner, flushed);
+}
+
+
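+/* Removes every tasklet owned by 'task' from the cluster's pending list and
+ * chains it onto 'task_tasklets'. Caller must hold crm_srt_lock. */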
+static void __extract_tasklets(crm_srt_domain_t* cluster, struct task_struct* task, struct tasklet_head* task_tasklets)
+{
+	struct tasklet_struct* step = cluster->pending_tasklets.head;
+	struct tasklet_struct* prev = NULL;
+
+	task_tasklets->head = NULL;
+	task_tasklets->tail = &(task_tasklets->head);
+
+	while(step != NULL)
+	{
+		struct tasklet_struct* next = step->next;
+
+		if(step->owner == task)
+		{
+			TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid);
+
+			// unlink from the cluster's pending list.
+			if(prev) {
+				prev->next = next;
+			}
+			else {
+				// we're at the head.
+				cluster->pending_tasklets.head = next;
+			}
+
+			if(cluster->pending_tasklets.tail == &(step->next)) {
+				// we removed the last element; back the tail up.
+				cluster->pending_tasklets.tail = prev ?
+					&(prev->next) :
+					&(cluster->pending_tasklets.head);
+			}
+
+			// append to the flush list.
+			step->next = NULL;
+			*(task_tasklets->tail) = step;
+			task_tasklets->tail = &(step->next);
+		}
+		else {
+			prev = step;
+		}
+
+		step = next;
+	}
+}
+
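+/* Called on task exit: pulls the dying task's remaining tasklets out of the
+ * cluster queue (under the cluster lock) and runs them to completion with
+ * flushed = 1. */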
+static void flush_tasklets(crm_srt_domain_t* cluster, struct task_struct* task)
+{
+	unsigned long flags;
+	struct tasklet_head task_tasklets;
+	struct tasklet_struct* step;
+
+	raw_spin_lock_irqsave(&cluster->crm_srt_lock, flags);
+	__extract_tasklets(cluster, task, &task_tasklets);
+	raw_spin_unlock_irqrestore(&cluster->crm_srt_lock, flags);
+
+	if(task_tasklets.head != NULL) {
+		TRACE("%s: Flushing tasklets for %d...\n", __FUNCTION__, task->pid);
+	}
+
+	// now execute the flushed tasklets.
+	for(step = task_tasklets.head; step != NULL; /**/)
+	{
+		struct tasklet_struct* temp = step->next;
+
+		step->next = NULL;
+		__do_lit_tasklet(step, 1ul);
+
+		step = temp;
+	}
+}
+
+
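+/* Pops pending tasklets off the head of the cluster queue and runs each one,
+ * so long as the tasklet's owner has higher rm_srt priority than
+ * 'sched_task'. The cluster lock is dropped while a handler runs. */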
+static void do_lit_tasklets(crm_srt_domain_t* cluster, struct task_struct* sched_task)
+{
+	int work_to_do = 1;
+	struct tasklet_struct *tasklet = NULL;
+	unsigned long flags;
+
+	while(work_to_do) {
+		// remove the tasklet at the head of the list if it has higher priority.
+		raw_spin_lock_irqsave(&cluster->crm_srt_lock, flags);
+
545 | |||
546 | if(cluster->pending_tasklets.head != NULL) { | ||
547 | // remove tasklet at head. | ||
548 | tasklet = cluster->pending_tasklets.head; | ||
549 | |||
550 | if(rm_srt_higher_prio(tasklet->owner, sched_task)) { | ||
551 | |||
552 | if(NULL == tasklet->next) { | ||
553 | // tasklet is at the head, list only has one element | ||
554 | TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); | ||
555 | cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head); | ||
556 | } | ||
557 | |||
558 | // remove the tasklet from the queue | ||
559 | cluster->pending_tasklets.head = tasklet->next; | ||
560 | |||
561 | TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); | ||
562 | } | ||
563 | else { | ||
564 | TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id()); | ||
565 | tasklet = NULL; | ||
566 | } | ||
567 | } | ||
568 | else { | ||
569 | TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__); | ||
570 | } | ||
571 | |||
582 | |||
583 | raw_spin_unlock_irqrestore(&cluster->crm_srt_lock, flags); | ||
584 | |||
585 | if(tasklet) { | ||
586 | __do_lit_tasklet(tasklet, 0ul); | ||
587 | tasklet = NULL; | ||
588 | } | ||
589 | else { | ||
590 | work_to_do = 0; | ||
591 | } | ||
592 | } | ||
593 | |||
594 | //TRACE("%s: exited.\n", __FUNCTION__); | ||
595 | } | ||
596 | |||
597 | |||
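+/* Plugin hook called from the scheduler path: drain eligible PAI tasklets on
+ * the cluster 'sched_task' belongs to (or on this CPU's cluster when
+ * 'sched_task' is not a real-time task). */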
+static void run_tasklets(struct task_struct* sched_task)
+{
+	crm_srt_domain_t* cluster;
+
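+	/* smp_processor_id() is only stable with preemption off; hold it off
+	 * across the cluster lookup and the drain. */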
+	preempt_disable();
+
+	cluster = (is_realtime(sched_task)) ?
+		task_cpu_cluster(sched_task) :
+		remote_cluster(smp_processor_id());
+
+	if(cluster && cluster->pending_tasklets.head != NULL) {
+		TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
+		do_lit_tasklets(cluster, sched_task);
+	}
+
+	preempt_enable_no_resched();
+}
+
+
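+/* Inserts 'tasklet' into the cluster's pending list, which is kept sorted by
+ * the rm_srt priority of each tasklet's owner. Caller must hold
+ * crm_srt_lock. */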
+static void __add_pai_tasklet(struct tasklet_struct* tasklet, crm_srt_domain_t* cluster)
+{
+	struct tasklet_struct* step;
+
+	tasklet->next = NULL;	// make sure there are no old values floating around
+
+	step = cluster->pending_tasklets.head;
+	if(step == NULL) {
+		TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid);
+		// queue is empty: the new tasklet becomes both head and tail.
+		*(cluster->pending_tasklets.tail) = tasklet;
+		cluster->pending_tasklets.tail = &(tasklet->next);
+	}
+	else if(rm_srt_higher_prio(tasklet->owner, step->owner)) {
+		// highest priority so far: the new tasklet becomes the head.
+		TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid);
+		tasklet->next = step;
+		cluster->pending_tasklets.head = tasklet;
+	}
+	else {
+		// insert the tasklet somewhere in the middle (or at the tail).
+		TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__);
+
+		while(step->next && rm_srt_higher_prio(step->next->owner, tasklet->owner)) {
+			step = step->next;
+		}
+
+		// insert tasklet right after step.
+		TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1);
+
+		tasklet->next = step->next;
+		step->next = tasklet;
+
+		// patch up the tail if we appended at the end.
+		if(tasklet->next == NULL) {
+			cluster->pending_tasklets.tail = &(tasklet->next);
+		}
+	}
+
+}
+
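+/* Plugin entry point for PAI tasklet hand-off: decide whether the tasklet
+ * runs immediately on this CPU, preempts a remote CPU, or is deferred to the
+ * cluster's pending queue. Returns 1 on success, 0 if the tasklet has no
+ * real-time owner. */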
+static int enqueue_pai_tasklet(struct tasklet_struct* tasklet)
+{
+	crm_srt_domain_t *cluster = NULL;
+	cpu_entry_t *targetCPU = NULL;
+	int thisCPU;
+	int runLocal = 0;
+	int runNow = 0;
+	unsigned long flags;
+
+	if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner)))
+	{
+		TRACE("%s: No real-time owner associated with this tasklet!\n", __FUNCTION__);
+		return 0;
+	}
+
+	cluster = task_cpu_cluster(tasklet->owner);
+
+	raw_spin_lock_irqsave(&cluster->crm_srt_lock, flags);
+
+	thisCPU = smp_processor_id();
+
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+	{
+		cpu_entry_t* affinity = NULL;
+
+		// use this CPU if it is in our cluster and isn't running any RT work.
+		if(cpu_isset(thisCPU, *cluster->cpu_map) && (__get_cpu_var(crm_srt_cpu_entries).linked == NULL)) {
+			affinity = &(__get_cpu_var(crm_srt_cpu_entries));
+		}
+		else {
+			// this CPU is busy or shouldn't run tasklets in this cluster;
+			// look for an available nearby CPU.
+			// NOTE: affinity is towards the owner, not this CPU. Is this right?
+			affinity =
+				crm_srt_get_nearest_available_cpu(cluster,
+					&per_cpu(crm_srt_cpu_entries, task_cpu(tasklet->owner)));
+		}
+
+		targetCPU = affinity;
+	}
+#endif
+
760 | |||
761 | if (targetCPU == NULL) { | ||
762 | targetCPU = lowest_prio_cpu(cluster); | ||
763 | } | ||
764 | |||
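+	/* Decision matrix: run here and now if this CPU is the target and the
+	 * tasklet's owner outranks what the target is running; run remotely
+	 * (via preemption) if another CPU is the target; otherwise defer. */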
+	if (rm_srt_higher_prio(tasklet->owner, targetCPU->linked)) {
+		if (thisCPU == targetCPU->cpu) {
+			TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__);
+			runLocal = 1;
+			runNow = 1;
+		}
+		else {
+			TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__);
+			runLocal = 0;
+			runNow = 1;
+		}
+	}
+	else {
+		runLocal = 0;
+		runNow = 0;
+	}
+
+	if(!runLocal) {
+		// enqueue the tasklet for deferred (or immediate remote) execution.
+		__add_pai_tasklet(tasklet, cluster);
+	}
+
+	raw_spin_unlock_irqrestore(&cluster->crm_srt_lock, flags);
+
+	if (runLocal /*&& runNow */) {	// runNow == 1 is implied
+		TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
+		__do_lit_tasklet(tasklet, 0ul);
+	}
+	else if (runNow /*&& !runLocal */) {	// runLocal == 0 is implied
+		TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu);
+		preempt(targetCPU);	// TODO: does this need to be protected by crm_srt_lock?
+	}
+	else {
+		TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__);
+	}
+
+	return 1;	// success
+}
+
+
+#endif /* CONFIG_LITMUS_PAI_SOFTIRQD */
+
+
 /* Getting schedule() right is a bit tricky. schedule() may not make any
  * assumptions on the state of the current task since it may be called for a
  * number of reasons. The reasons include a scheduler_tick() determined that it
@@ -544,7 +972,7 @@ static void crm_srt_task_new(struct task_struct * t, int on_rq, int running)
 	cpu_entry_t* entry;
 	crm_srt_domain_t* cluster;
 
-	TRACE("gsn edf: task new %d\n", t->pid);
+	TRACE("crm srt: task new %d\n", t->pid);
 
 	/* the cluster doesn't change even if t is running */
 	cluster = task_cpu_cluster(t);
@@ -650,6 +1078,10 @@ static void crm_srt_task_exit(struct task_struct * t)
 	}
 	raw_spin_unlock_irqrestore(&cluster->crm_srt_lock, flags);
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
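+	/* run (and thereby flush) any tasklets still owned by the dying task. */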
+	flush_tasklets(cluster, t);
+#endif
+
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
 }
@@ -1467,6 +1899,11 @@ static long crm_srt_activate_plugin(void)
 		bheap_init(&(crm_srt[i].cpu_heap));
 		rm_srt_domain_init(&(crm_srt[i].domain), NULL, crm_srt_release_jobs);
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
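+		/* empty pending queue: tail points back at head. */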
+		crm_srt[i].pending_tasklets.head = NULL;
+		crm_srt[i].pending_tasklets.tail = &(crm_srt[i].pending_tasklets.head);
+#endif
+
 		if(!zalloc_cpumask_var(&crm_srt[i].cpu_map, GFP_ATOMIC))
 			return -ENOMEM;
 	}
@@ -1578,6 +2015,10 @@ static struct sched_plugin crm_srt_plugin __cacheline_aligned_in_smp = {
 #ifdef CONFIG_LITMUS_SOFTIRQD
 	.set_prio_inh_klitirqd = set_priority_inheritance_klitirqd,
 	.clear_prio_inh_klitirqd = clear_priority_inheritance_klitirqd,
+#endif
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	.enqueue_pai_tasklet = enqueue_pai_tasklet,
+	.run_tasklets = run_tasklets,
 #endif
 };
 