Diffstat (limited to 'litmus/sched_crm.c')
-rw-r--r--   litmus/sched_crm.c | 448
1 files changed, 446 insertions(+), 2 deletions(-)
diff --git a/litmus/sched_crm.c b/litmus/sched_crm.c
index 061b29eaff7e..fd7fab982998 100644
--- a/litmus/sched_crm.c
+++ b/litmus/sched_crm.c
@@ -55,6 +55,10 @@
 #include <litmus/litmus_softirq.h>
 #endif
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+#include <linux/interrupt.h>
+#endif
+
 #ifdef CONFIG_LITMUS_NVIDIA
 #include <litmus/nvidia_info.h>
 #endif
@@ -91,6 +95,14 @@ DEFINE_PER_CPU(cpu_entry_t, crm_cpu_entries);
 #define test_will_schedule(cpu) \
         (atomic_read(&per_cpu(crm_cpu_entries, cpu).will_schedule))
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+struct tasklet_head
+{
+        struct tasklet_struct *head;
+        struct tasklet_struct **tail;
+};
+#endif
+
 /*
  * In C-RM there is a crm domain _per_ cluster
  * The number of clusters is dynamically determined according to the
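
Note: this head/tail pair is the same representation the stock kernel uses for its per-CPU softirq tasklet queues in kernel/softirq.c: `tail` points at the `next` field of the last element, or at `head` itself when the queue is empty, so an append never needs an empty-list special case. A minimal sketch of the invariant, reusing the types above (the helper names are illustrative, not part of this patch):

    /* empty queue: head == NULL, tail == &head */
    static void tasklet_head_init(struct tasklet_head *q)
    {
            q->head = NULL;
            q->tail = &q->head;
    }

    /* O(1) append, no branch for the empty case */
    static void tasklet_head_append(struct tasklet_head *q, struct tasklet_struct *t)
    {
            t->next = NULL;
            *q->tail = t;         /* link after the last element (or at head) */
            q->tail = &t->next;   /* tail now points at the new last link */
    }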
@@ -108,6 +120,10 @@ typedef struct clusterdomain {
         struct bheap cpu_heap;
         /* lock for this cluster */
 #define crm_lock domain.ready_lock
+
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+        struct tasklet_head pending_tasklets;
+#endif
 } crm_domain_t;
 
 /* a crm_domain per cluster; allocation is done at init/activation time */
@@ -251,7 +267,7 @@ static void preempt(cpu_entry_t *entry)
         preempt_if_preemptable(entry->scheduled, entry->cpu);
 }
 
-/* requeue - Put an unlinked task into gsn-edf domain.
+/* requeue - Put an unlinked task into c-rm domain.
  * Caller must hold crm_lock.
  */
 static noinline void requeue(struct task_struct* task)
@@ -395,6 +411,421 @@ static void crm_tick(struct task_struct* t)
         }
 }
 
+
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+
+static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
+{
+        if (!atomic_read(&tasklet->count)) {
+                sched_trace_tasklet_begin(tasklet->owner);
+
+                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state)) {
+                        BUG();
+                }
+                TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %lu).\n",
+                      __FUNCTION__, tasklet->owner->pid, flushed);
+                tasklet->func(tasklet->data);
+                tasklet_unlock(tasklet);
+
+                sched_trace_tasklet_end(tasklet->owner, flushed);
+        }
+        else {
+                BUG();
+        }
+}
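
Note: this mirrors the invocation step of the stock tasklet_action() in kernel/softirq.c of this kernel generation, which runs roughly:

    if (tasklet_trylock(t)) {
            if (!atomic_read(&t->count)) {
                    if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                            BUG();
                    t->func(t->data);
                    tasklet_unlock(t);
            }
            ...
    }

The notable difference is that a disabled tasklet (count != 0) is a hard BUG() here rather than being re-queued, so PAI dispatch appears to assume owners never disable a tasklet while it is pending.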
+
+static void __extract_tasklets(crm_domain_t* cluster, struct task_struct* task,
+                               struct tasklet_head* task_tasklets)
+{
+        struct tasklet_struct* step;
+        struct tasklet_struct* prev;
+
+        task_tasklets->head = NULL;
+        task_tasklets->tail = &(task_tasklets->head);
+
+        prev = NULL;
+        step = cluster->pending_tasklets.head;
+        while (step != NULL) {
+                // grab ->next up front; moving step to the flush list overwrites it.
+                struct tasklet_struct* next = step->next;
+
+                if (step->owner == task) {
+                        TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid);
+
+                        if (prev) {
+                                prev->next = step->next;
+                        }
+                        else {
+                                // we're at the head.
+                                cluster->pending_tasklets.head = step->next;
+                        }
+
+                        if (cluster->pending_tasklets.tail == &(step->next)) {
+                                // we're at the tail.
+                                cluster->pending_tasklets.tail = (prev) ?
+                                        &(prev->next) :
+                                        &(cluster->pending_tasklets.head);
+                        }
+
+                        step->next = NULL;
+                        *(task_tasklets->tail) = step;
+                        task_tasklets->tail = &(step->next);
+                }
+                else {
+                        prev = step;
+                }
+
+                step = next;
+        }
+}
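
The same extraction can be written with a pointer-to-pointer cursor, which makes the head, middle, and tail cases collapse into one path; a sketch under the same types (hypothetical helper, not part of this patch):

    static void extract_for_task(struct tasklet_head *q, struct task_struct *task,
                                 struct tasklet_head *out)
    {
            struct tasklet_struct **link = &q->head;

            out->head = NULL;
            out->tail = &out->head;

            while (*link) {
                    struct tasklet_struct *t = *link;
                    if (t->owner == task) {
                            *link = t->next;      /* unlink; also correct at the head */
                            t->next = NULL;
                            *out->tail = t;       /* append to the flush list */
                            out->tail = &t->next;
                    } else {
                            link = &t->next;      /* advance the cursor */
                    }
            }
            q->tail = link;                       /* cursor ends at the last link */
    }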
+
+static void flush_tasklets(crm_domain_t* cluster, struct task_struct* task)
+{
+        unsigned long flags;
+        struct tasklet_head task_tasklets;
+        struct tasklet_struct* step;
+
+        raw_spin_lock_irqsave(&cluster->crm_lock, flags);
+        __extract_tasklets(cluster, task, &task_tasklets);
+        raw_spin_unlock_irqrestore(&cluster->crm_lock, flags);
+
+        if (task_tasklets.head != NULL) {
+                TRACE("%s: Flushing tasklets for %d...\n", __FUNCTION__, task->pid);
+        }
+
+        // now execute the flushed tasklets (no lock held; the list is private to us).
+        for (step = task_tasklets.head; step != NULL; /**/) {
+                struct tasklet_struct* temp = step->next;
+
+                step->next = NULL;
+                __do_lit_tasklet(step, 1ul);
+
+                step = temp;
+        }
+}
+
+
+static void do_lit_tasklets(crm_domain_t* cluster, struct task_struct* sched_task)
+{
+        int work_to_do = 1;
+        struct tasklet_struct *tasklet = NULL;
+        unsigned long flags;
+
+        while (work_to_do) {
+                // remove the tasklet at the head of the list if it has higher priority.
+                raw_spin_lock_irqsave(&cluster->crm_lock, flags);
+
+                if (cluster->pending_tasklets.head != NULL) {
+                        tasklet = cluster->pending_tasklets.head;
+
+                        if (rm_higher_prio(tasklet->owner, sched_task)) {
+                                if (NULL == tasklet->next) {
+                                        // tasklet is at the head; the list has only one element.
+                                        TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n",
+                                              __FUNCTION__, tasklet->owner->pid);
+                                        cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head);
+                                }
+
+                                // remove the tasklet from the queue.
+                                cluster->pending_tasklets.head = tasklet->next;
+
+                                TRACE("%s: Removed tasklet for %d from tasklet queue.\n",
+                                      __FUNCTION__, tasklet->owner->pid);
+                        }
+                        else {
+                                TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n",
+                                      __FUNCTION__, tasklet->owner->pid, smp_processor_id());
+                                tasklet = NULL;
+                        }
+                }
+                else {
+                        TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
+                }
+
+                raw_spin_unlock_irqrestore(&cluster->crm_lock, flags);
+
+                if (tasklet) {
+                        __do_lit_tasklet(tasklet, 0ul);
+                        tasklet = NULL;
+                }
+                else {
+                        work_to_do = 0;
+                }
+        }
+}
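
Note: crm_lock is deliberately dropped before tasklet->func() is invoked: the handler is arbitrary driver code that may run long or take other locks, so it must not execute under an IRQ-safe raw spinlock. The shape is the usual "pop one under the lock, run it unlocked, re-check" loop; a generic sketch (pop_head_if_eligible is hypothetical):

    for (;;) {
            struct tasklet_struct *t;

            raw_spin_lock_irqsave(&lock, flags);
            t = pop_head_if_eligible(&queue);  /* head, if it out-prioritizes us */
            raw_spin_unlock_irqrestore(&lock, flags);

            if (!t)
                    break;
            t->func(t->data);                  /* runs with no spinlock held */
    }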
+
+
+static void run_tasklets(struct task_struct* sched_task)
+{
+        crm_domain_t* cluster;
+
+        preempt_disable();
+
+        cluster = (is_realtime(sched_task)) ?
+                task_cpu_cluster(sched_task) :
+                remote_cluster(smp_processor_id());
+
+        if (cluster && cluster->pending_tasklets.head != NULL) {
+                TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
+                do_lit_tasklets(cluster, sched_task);
+        }
+
+        preempt_enable_no_resched();
+}
+
+
+static void __add_pai_tasklet(struct tasklet_struct* tasklet, crm_domain_t* cluster)
+{
+        struct tasklet_struct** link;
+
+        tasklet->next = NULL;  // make sure there are no old values floating around
+
+        // keep the list sorted, highest-priority owner first: advance past
+        // every entry whose owner out-prioritizes the new tasklet's owner.
+        link = &(cluster->pending_tasklets.head);
+        while (*link != NULL && rm_higher_prio((*link)->owner, tasklet->owner)) {
+                link = &((*link)->next);
+        }
+
+        TRACE("%s: inserting tasklet for %d before %d.\n", __FUNCTION__,
+              tasklet->owner->pid, (*link) ? (*link)->owner->pid : -1);
+
+        tasklet->next = *link;
+        *link = tasklet;
+
+        // patch up the tail if we inserted at the end (covers the empty queue too).
+        if (tasklet->next == NULL) {
+                cluster->pending_tasklets.tail = &(tasklet->next);
+        }
+}
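
When debugging list corruption here, a walker that asserts the two queue invariants (sorted by decreasing RM priority; tail points at the last link) is more useful than a TRACE dump of the queue; a sketch (hypothetical helper, to be called with crm_lock held):

    static void check_pending_tasklets(crm_domain_t *cluster)
    {
            struct tasklet_struct **link = &cluster->pending_tasklets.head;

            while (*link) {
                    /* no successor may out-prioritize its predecessor */
                    if ((*link)->next)
                            BUG_ON(rm_higher_prio((*link)->next->owner, (*link)->owner));
                    link = &(*link)->next;
            }
            /* tail always points at the last link (== &head when empty) */
            BUG_ON(cluster->pending_tasklets.tail != link);
    }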
+
+static int enqueue_pai_tasklet(struct tasklet_struct* tasklet)
+{
+        crm_domain_t *cluster = NULL;
+        cpu_entry_t *targetCPU = NULL;
+        int thisCPU;
+        int runLocal = 0;
+        int runNow = 0;
+        unsigned long flags;
+
+        if (unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner))) {
+                TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
+                return 0;
+        }
+
+        cluster = task_cpu_cluster(tasklet->owner);
+
+        raw_spin_lock_irqsave(&cluster->crm_lock, flags);
+
+        thisCPU = smp_processor_id();
+
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+        {
+                cpu_entry_t* affinity = NULL;
+
+                // use this CPU if it is in our cluster and isn't running any RT work.
+                if (cpu_isset(thisCPU, *cluster->cpu_map) &&
+                    (__get_cpu_var(crm_cpu_entries).linked == NULL)) {
+                        affinity = &(__get_cpu_var(crm_cpu_entries));
+                }
+                else {
+                        // this CPU is busy or shouldn't run tasklets in this cluster;
+                        // look for available nearby CPUs.
+                        // NOTE: affinity is towards the owner and not this CPU. Is this right?
+                        affinity = crm_get_nearest_available_cpu(cluster,
+                                &per_cpu(crm_cpu_entries, task_cpu(tasklet->owner)));
+                }
+
+                targetCPU = affinity;
+        }
+#endif
+
+        if (targetCPU == NULL) {
+                targetCPU = lowest_prio_cpu(cluster);
+        }
+
+        if (rm_higher_prio(tasklet->owner, targetCPU->linked)) {
+                if (thisCPU == targetCPU->cpu) {
+                        TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__);
+                        runLocal = 1;
+                        runNow = 1;
+                }
+                else {
+                        TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__);
+                        runLocal = 0;
+                        runNow = 1;
+                }
+        }
+        else {
+                runLocal = 0;
+                runNow = 0;
+        }
+
+        if (!runLocal) {
+                // enqueue the tasklet.
+                __add_pai_tasklet(tasklet, cluster);
+        }
+
+        raw_spin_unlock_irqrestore(&cluster->crm_lock, flags);
+
+        if (runLocal /* && runNow */) {  // runNow == 1 is implied
+                TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
+                __do_lit_tasklet(tasklet, 0ul);
+        }
+        else if (runNow /* && !runLocal */) {  // runLocal == 0 is implied
+                TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu);
+                preempt(targetCPU);  // need to be protected by crm_lock?
+        }
+        else {
+                TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__);
+        }
+
+        return 1;  // success
+}
+
+#endif  // CONFIG_LITMUS_PAI_SOFTIRQD
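
In summary, enqueue_pai_tasklet() resolves to one of three outcomes, keyed on whether the tasklet's owner out-prioritizes the target CPU's linked task and whether the target is the local CPU:

    owner beats target's linked task | target == this CPU | action
    ---------------------------------+--------------------+-------------------------------------------
    yes                              | yes                | run inline via __do_lit_tasklet()
    yes                              | no                 | queue it, then preempt(targetCPU)
    no                               | (either)           | queue it; it runs later from run_tasklets()

In the deferred case, the tasklet executes the next time run_tasklets() finds it at the head of the queue with sufficient priority for the local CPU.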
+
+
 /* Getting schedule() right is a bit tricky. schedule() may not make any
  * assumptions on the state of the current task since it may be called for a
  * number of reasons. The reasons include a scheduler_tick() determined that it
@@ -544,7 +975,7 @@ static void crm_task_new(struct task_struct * t, int on_rq, int running)
         cpu_entry_t* entry;
         crm_domain_t* cluster;
 
-        TRACE("gsn edf: task new %d\n", t->pid);
+        TRACE("crm: task new %d\n", t->pid);
 
         /* the cluster doesn't change even if t is running */
         cluster = task_cpu_cluster(t);
@@ -650,6 +1081,10 @@ static void crm_task_exit(struct task_struct * t)
         }
         raw_spin_unlock_irqrestore(&cluster->crm_lock, flags);
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+        flush_tasklets(cluster, t);
+#endif
+
         BUG_ON(!is_realtime(t));
         TRACE_TASK(t, "RIP\n");
 }
@@ -1467,6 +1902,11 @@ static long crm_activate_plugin(void)
                 bheap_init(&(crm[i].cpu_heap));
                 rm_domain_init(&(crm[i].domain), NULL, crm_release_jobs);
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+                crm[i].pending_tasklets.head = NULL;
+                crm[i].pending_tasklets.tail = &(crm[i].pending_tasklets.head);
+#endif
+
                 if(!zalloc_cpumask_var(&crm[i].cpu_map, GFP_ATOMIC))
                         return -ENOMEM;
         }
@@ -1578,6 +2018,10 @@ static struct sched_plugin crm_plugin __cacheline_aligned_in_smp = {
 #ifdef CONFIG_LITMUS_SOFTIRQD
         .set_prio_inh_klitirqd = set_priority_inheritance_klitirqd,
         .clear_prio_inh_klitirqd = clear_priority_inheritance_klitirqd,
+#endif
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+        .enqueue_pai_tasklet = enqueue_pai_tasklet,
+        .run_tasklets = run_tasklets,
 #endif
 };
 