Diffstat (limited to 'litmus/sched_cedf.c')
-rw-r--r--  litmus/sched_cedf.c  852
1 files changed, 833 insertions, 19 deletions
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 73fe1c442a0d..9b0a8d3b624d 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -29,6 +29,7 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/uaccess.h>
 
 #include <linux/module.h>
 
@@ -45,7 +46,18 @@
 
 /* to configure the cluster size */
 #include <litmus/litmus_proc.h>
-#include <linux/uaccess.h>
+
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+#include <litmus/affinity.h>
+#endif
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+#include <litmus/litmus_softirq.h>
+#endif
+
+#ifdef CONFIG_LITMUS_NVIDIA
+#include <litmus/nvidia_info.h>
+#endif
 
 /* Reference configuration variable. Determines which cache level is used to
  * group CPUs into clusters. GLOBAL_CLUSTER, which is the default, means that
@@ -95,7 +107,7 @@ typedef struct clusterdomain {
     struct bheap_node *heap_node;
     struct bheap cpu_heap;
     /* lock for this cluster */
-#define lock domain.ready_lock
+#define cedf_lock domain.ready_lock
 } cedf_domain_t;
 
 /* a cedf_domain per cluster; allocation is done at init/activation time */
@@ -257,21 +269,50 @@ static noinline void requeue(struct task_struct* task)
     }
 }
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+static cpu_entry_t* cedf_get_nearest_available_cpu(
+    cedf_domain_t *cluster, cpu_entry_t* start)
+{
+    cpu_entry_t* affinity;
+
+    get_nearest_available_cpu(affinity, start, cedf_cpu_entries, -1);
+
+    /* make sure CPU is in our cluster */
+    if(affinity && cpu_isset(affinity->cpu, *cluster->cpu_map))
+        return(affinity);
+    else
+        return(NULL);
+}
+#endif
+
+
 /* check for any necessary preemptions */
 static void check_for_preemptions(cedf_domain_t *cluster)
 {
     struct task_struct *task;
-    cpu_entry_t* last;
+    cpu_entry_t *last;
 
     for(last = lowest_prio_cpu(cluster);
         edf_preemption_needed(&cluster->domain, last->linked);
         last = lowest_prio_cpu(cluster)) {
         /* preemption necessary */
         task = __take_ready(&cluster->domain);
-        TRACE("check_for_preemptions: attempting to link task %d to %d\n",
-              task->pid, last->cpu);
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+        {
+            cpu_entry_t* affinity =
+                cedf_get_nearest_available_cpu(cluster,
+                    &per_cpu(cedf_cpu_entries, task_cpu(task)));
+            if(affinity)
+                last = affinity;
+            else if(last->linked)
+                requeue(last->linked);
+        }
+#else
         if (last->linked)
             requeue(last->linked);
+#endif
+        TRACE("check_for_preemptions: attempting to link task %d to %d\n",
+              task->pid, last->cpu);
         link_task_to_cpu(task, last);
         preempt(last);
     }
@@ -292,12 +333,12 @@ static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
     cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain);
     unsigned long flags;
 
-    raw_spin_lock_irqsave(&cluster->lock, flags);
+    raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
 
     __merge_ready(&cluster->domain, tasks);
     check_for_preemptions(cluster);
 
-    raw_spin_unlock_irqrestore(&cluster->lock, flags);
+    raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags);
 }
 
 /* caller holds cedf_lock */
@@ -307,6 +348,10 @@ static noinline void job_completion(struct task_struct *t, int forced)
 
     sched_trace_task_completion(t, forced);
 
+#ifdef CONFIG_LITMUS_NVIDIA
+    atomic_set(&tsk_rt(t)->nv_int_count, 0);
+#endif
+
     TRACE_TASK(t, "job_completion().\n");
 
     /* set flags */
@@ -378,7 +423,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
     int out_of_time, sleep, preempt, np, exists, blocks;
     struct task_struct* next = NULL;
 
-    raw_spin_lock(&cluster->lock);
+    raw_spin_lock(&cluster->cedf_lock);
     clear_will_schedule();
 
     /* sanity checking */
@@ -462,7 +507,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
         next = prev;
 
     sched_state_task_picked();
-    raw_spin_unlock(&cluster->lock);
+    raw_spin_unlock(&cluster->cedf_lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
     TRACE("cedf_lock released, next=0x%p\n", next);
@@ -504,7 +549,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
     /* the cluster doesn't change even if t is running */
     cluster = task_cpu_cluster(t);
 
-    raw_spin_lock_irqsave(&cluster->domain.ready_lock, flags);
+    raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
 
     /* setup job params */
     release_at(t, litmus_clock());
@@ -521,20 +566,22 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
     t->rt_param.linked_on = NO_CPU;
 
     cedf_job_arrival(t);
-    raw_spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags);
+    raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags);
 }
 
 static void cedf_task_wake_up(struct task_struct *task)
 {
     unsigned long flags;
-    lt_t now;
+    //lt_t now;
     cedf_domain_t *cluster;
 
     TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
 
     cluster = task_cpu_cluster(task);
 
-    raw_spin_lock_irqsave(&cluster->lock, flags);
+    raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
+
+#if 0 // sporadic task model
     /* We need to take suspensions because of semaphores into
      * account! If a job resumes after being suspended due to acquiring
      * a semaphore, it should never be treated as a new job release.
@@ -556,8 +603,17 @@ static void cedf_task_wake_up(struct task_struct *task)
             }
         }
     }
-    cedf_job_arrival(task);
-    raw_spin_unlock_irqrestore(&cluster->lock, flags);
+#endif
+
+    //BUG_ON(tsk_rt(task)->linked_on != NO_CPU);
+    set_rt_flags(task, RT_F_RUNNING); // periodic model
+
+    if(tsk_rt(task)->linked_on == NO_CPU)
+        cedf_job_arrival(task);
+    else
+        TRACE("WTF, mate?!\n");
+
+    raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags);
 }
 
 static void cedf_task_block(struct task_struct *t)
@@ -570,9 +626,9 @@ static void cedf_task_block(struct task_struct *t)
     cluster = task_cpu_cluster(t);
 
     /* unlink if necessary */
-    raw_spin_lock_irqsave(&cluster->lock, flags);
+    raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
     unlink(t);
-    raw_spin_unlock_irqrestore(&cluster->lock, flags);
+    raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags);
 
     BUG_ON(!is_realtime(t));
 }
@@ -584,7 +640,7 @@ static void cedf_task_exit(struct task_struct * t)
     cedf_domain_t *cluster = task_cpu_cluster(t);
 
     /* unlink if necessary */
-    raw_spin_lock_irqsave(&cluster->lock, flags);
+    raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
     unlink(t);
     if (tsk_rt(t)->scheduled_on != NO_CPU) {
         cpu_entry_t *cpu;
@@ -592,7 +648,7 @@ static void cedf_task_exit(struct task_struct * t)
         cpu->scheduled = NULL;
         tsk_rt(t)->scheduled_on = NO_CPU;
     }
-    raw_spin_unlock_irqrestore(&cluster->lock, flags);
+    raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags);
 
     BUG_ON(!is_realtime(t));
     TRACE_TASK(t, "RIP\n");
@@ -603,6 +659,721 @@ static long cedf_admit_task(struct task_struct* tsk)
     return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL;
 }
 
+
+
+
+
+
+
+
+
+
+
+
+
+#ifdef CONFIG_LITMUS_LOCKING
+
+#include <litmus/fdso.h>
+
+
+static void __set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
+{
+    int linked_on;
+    int check_preempt = 0;
+
+    cedf_domain_t* cluster = task_cpu_cluster(t);
+
+    if(prio_inh != NULL)
+        TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
+    else
+        TRACE_TASK(t, "inherits priority from %p\n", prio_inh);
+
+    sched_trace_eff_prio_change(t, prio_inh);
+
+    tsk_rt(t)->inh_task = prio_inh;
+
+    linked_on = tsk_rt(t)->linked_on;
+
+    /* If it is scheduled, then we need to reorder the CPU heap. */
+    if (linked_on != NO_CPU) {
+        TRACE_TASK(t, "%s: linked on %d\n",
+                   __FUNCTION__, linked_on);
+        /* Holder is scheduled; need to re-order CPUs.
+         * We can't use heap_decrease() here since
+         * the cpu_heap is ordered in reverse direction, so
+         * it is actually an increase. */
+        bheap_delete(cpu_lower_prio, &cluster->cpu_heap,
+                     per_cpu(cedf_cpu_entries, linked_on).hn);
+        bheap_insert(cpu_lower_prio, &cluster->cpu_heap,
+                     per_cpu(cedf_cpu_entries, linked_on).hn);
+    } else {
+        /* holder may be queued: first stop queue changes */
+        raw_spin_lock(&cluster->domain.release_lock);
+        if (is_queued(t)) {
+            TRACE_TASK(t, "%s: is queued\n", __FUNCTION__);
+
+            /* We need to update the position of holder in some
+             * heap. Note that this could be a release heap if we
+             * budget enforcement is used and this job overran. */
+            check_preempt = !bheap_decrease(edf_ready_order, tsk_rt(t)->heap_node);
+
+        } else {
+            /* Nothing to do: if it is not queued and not linked
+             * then it is either sleeping or currently being moved
+             * by other code (e.g., a timer interrupt handler) that
+             * will use the correct priority when enqueuing the
+             * task. */
+            TRACE_TASK(t, "%s: is NOT queued => Done.\n", __FUNCTION__);
+        }
+        raw_spin_unlock(&cluster->domain.release_lock);
+
+        /* If holder was enqueued in a release heap, then the following
+         * preemption check is pointless, but we can't easily detect
+         * that case. If you want to fix this, then consider that
+         * simply adding a state flag requires O(n) time to update when
+         * releasing n tasks, which conflicts with the goal to have
+         * O(log n) merges. */
+        if (check_preempt) {
+            /* heap_decrease() hit the top level of the heap: make
+             * sure preemption checks get the right task, not the
+             * potentially stale cache. */
+            bheap_uncache_min(edf_ready_order, &cluster->domain.ready_queue);
+            check_for_preemptions(cluster);
+        }
+    }
+}
+
+/* called with IRQs off */
+static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
+{
+    cedf_domain_t* cluster = task_cpu_cluster(t);
+
+    raw_spin_lock(&cluster->cedf_lock);
+
+    __set_priority_inheritance(t, prio_inh);
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+    if(tsk_rt(t)->cur_klitirqd != NULL)
+    {
+        TRACE_TASK(t, "%s/%d inherits a new priority!\n",
+                   tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid);
+
+        __set_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh);
+    }
+#endif
+
+    raw_spin_unlock(&cluster->cedf_lock);
+}
+
+
+/* called with IRQs off */
+static void __clear_priority_inheritance(struct task_struct* t)
+{
+    TRACE_TASK(t, "priority restored\n");
+
+    if(tsk_rt(t)->scheduled_on != NO_CPU)
+    {
+        sched_trace_eff_prio_change(t, NULL);
+
+        tsk_rt(t)->inh_task = NULL;
+
+        /* Check if rescheduling is necessary. We can't use heap_decrease()
+         * since the priority was effectively lowered. */
+        unlink(t);
+        cedf_job_arrival(t);
+    }
+    else
+    {
+        __set_priority_inheritance(t, NULL);
+    }
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+    if(tsk_rt(t)->cur_klitirqd != NULL)
+    {
+        TRACE_TASK(t, "%s/%d inheritance set back to owner.\n",
+                   tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid);
+
+        if(tsk_rt(tsk_rt(t)->cur_klitirqd)->scheduled_on != NO_CPU)
+        {
+            sched_trace_eff_prio_change(tsk_rt(t)->cur_klitirqd, t);
+
+            tsk_rt(tsk_rt(t)->cur_klitirqd)->inh_task = t;
+
+            /* Check if rescheduling is necessary. We can't use heap_decrease()
+             * since the priority was effectively lowered. */
+            unlink(tsk_rt(t)->cur_klitirqd);
+            cedf_job_arrival(tsk_rt(t)->cur_klitirqd);
+        }
+        else
+        {
+            __set_priority_inheritance(tsk_rt(t)->cur_klitirqd, t);
+        }
+    }
+#endif
+}
+
+/* called with IRQs off */
+static void clear_priority_inheritance(struct task_struct* t)
+{
+    cedf_domain_t* cluster = task_cpu_cluster(t);
+
+    raw_spin_lock(&cluster->cedf_lock);
+    __clear_priority_inheritance(t);
+    raw_spin_unlock(&cluster->cedf_lock);
+}
+
+
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+/* called with IRQs off */
+static void set_priority_inheritance_klitirqd(struct task_struct* klitirqd,
+                                              struct task_struct* old_owner,
+                                              struct task_struct* new_owner)
+{
+    cedf_domain_t* cluster = task_cpu_cluster(klitirqd);
+
+    BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread));
+
+    raw_spin_lock(&cluster->cedf_lock);
+
+    if(old_owner != new_owner)
+    {
+        if(old_owner)
+        {
+            // unreachable?
+            tsk_rt(old_owner)->cur_klitirqd = NULL;
+        }
+
+        TRACE_TASK(klitirqd, "giving ownership to %s/%d.\n",
+                   new_owner->comm, new_owner->pid);
+
+        tsk_rt(new_owner)->cur_klitirqd = klitirqd;
+    }
+
+    __set_priority_inheritance(klitirqd,
+                               (tsk_rt(new_owner)->inh_task == NULL) ?
+                                   new_owner :
+                                   tsk_rt(new_owner)->inh_task);
+
+    raw_spin_unlock(&cluster->cedf_lock);
+}
+
+/* called with IRQs off */
+static void clear_priority_inheritance_klitirqd(struct task_struct* klitirqd,
+                                                struct task_struct* old_owner)
+{
+    cedf_domain_t* cluster = task_cpu_cluster(klitirqd);
+
+    BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread));
+
+    raw_spin_lock(&cluster->cedf_lock);
+
+    TRACE_TASK(klitirqd, "priority restored\n");
+
+    if(tsk_rt(klitirqd)->scheduled_on != NO_CPU)
+    {
+        tsk_rt(klitirqd)->inh_task = NULL;
+
+        /* Check if rescheduling is necessary. We can't use heap_decrease()
+         * since the priority was effectively lowered. */
+        unlink(klitirqd);
+        cedf_job_arrival(klitirqd);
+    }
+    else
+    {
+        __set_priority_inheritance(klitirqd, NULL);
+    }
+
+    tsk_rt(old_owner)->cur_klitirqd = NULL;
+
+    raw_spin_unlock(&cluster->cedf_lock);
+}
+#endif // CONFIG_LITMUS_SOFTIRQD
+
+
+/* ******************** KFMLP support ********************** */
+
+/* struct for semaphore with priority inheritance */
+struct kfmlp_queue
+{
+    wait_queue_head_t wait;
+    struct task_struct* owner;
+    struct task_struct* hp_waiter;
+    int count; /* number of waiters + holder */
+};
+
+struct kfmlp_semaphore
+{
+    struct litmus_lock litmus_lock;
+
+    spinlock_t lock;
+
+    int num_resources; /* aka k */
+    struct kfmlp_queue *queues; /* array */
+    struct kfmlp_queue *shortest_queue; /* pointer to shortest queue */
+};
+
+static inline struct kfmlp_semaphore* kfmlp_from_lock(struct litmus_lock* lock)
+{
+    return container_of(lock, struct kfmlp_semaphore, litmus_lock);
+}
+
+static inline int kfmlp_get_idx(struct kfmlp_semaphore* sem,
+                                struct kfmlp_queue* queue)
+{
+    return (queue - &sem->queues[0]);
+}
+
+static inline struct kfmlp_queue* kfmlp_get_queue(struct kfmlp_semaphore* sem,
+                                                  struct task_struct* holder)
+{
+    int i;
+    for(i = 0; i < sem->num_resources; ++i)
+        if(sem->queues[i].owner == holder)
+            return(&sem->queues[i]);
+    return(NULL);
+}
+
+/* caller is responsible for locking */
+static struct task_struct* kfmlp_find_hp_waiter(struct kfmlp_queue *kqueue,
+                                                struct task_struct *skip)
+{
+    struct list_head *pos;
+    struct task_struct *queued, *found = NULL;
+
+    list_for_each(pos, &kqueue->wait.task_list) {
+        queued = (struct task_struct*) list_entry(pos, wait_queue_t,
+                                                  task_list)->private;
+
+        /* Compare task prios, find high prio task. */
+        if (queued != skip && edf_higher_prio(queued, found))
+            found = queued;
+    }
+    return found;
+}
+
+static inline struct kfmlp_queue* kfmlp_find_shortest(
+    struct kfmlp_semaphore* sem,
+    struct kfmlp_queue* search_start)
+{
+    // we start our search at search_start instead of at the beginning of the
+    // queue list to load-balance across all resources.
+    struct kfmlp_queue* step = search_start;
+    struct kfmlp_queue* shortest = sem->shortest_queue;
+
+    do
+    {
+        step = (step+1 != &sem->queues[sem->num_resources]) ?
+            step+1 : &sem->queues[0];
+        if(step->count < shortest->count)
+        {
+            shortest = step;
+            if(step->count == 0)
+                break; /* can't get any shorter */
+        }
+    }while(step != search_start);
+
+    return(shortest);
+}
+
+static struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem)
+{
+    /* must hold sem->lock */
+
+    struct kfmlp_queue *my_queue = NULL;
+    struct task_struct *max_hp = NULL;
+
+
+    struct list_head *pos;
+    struct task_struct *queued;
+    int i;
+
+    for(i = 0; i < sem->num_resources; ++i)
+    {
+        if( (sem->queues[i].count > 1) &&
+            ((my_queue == NULL) ||
+             (edf_higher_prio(sem->queues[i].hp_waiter, my_queue->hp_waiter))) )
+        {
+            my_queue = &sem->queues[i];
+        }
+    }
+
+    if(my_queue)
+    {
+        cedf_domain_t* cluster;
+
+        max_hp = my_queue->hp_waiter;
+        BUG_ON(!max_hp);
+
+        TRACE_CUR("queue %d: stealing %s/%d from queue %d\n",
+                  kfmlp_get_idx(sem, my_queue),
+                  max_hp->comm, max_hp->pid,
+                  kfmlp_get_idx(sem, my_queue));
+
+        my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, max_hp);
+
+        /*
+        if(my_queue->hp_waiter)
+            TRACE_CUR("queue %d: new hp_waiter is %s/%d\n",
+                      kfmlp_get_idx(sem, my_queue),
+                      my_queue->hp_waiter->comm,
+                      my_queue->hp_waiter->pid);
+        else
+            TRACE_CUR("queue %d: new hp_waiter is %p\n",
+                      kfmlp_get_idx(sem, my_queue), NULL);
+        */
+
+        cluster = task_cpu_cluster(max_hp);
+
+        raw_spin_lock(&cluster->cedf_lock);
+
+        /*
+        if(my_queue->owner)
+            TRACE_CUR("queue %d: owner is %s/%d\n",
+                      kfmlp_get_idx(sem, my_queue),
+                      my_queue->owner->comm,
+                      my_queue->owner->pid);
+        else
+            TRACE_CUR("queue %d: owner is %p\n",
+                      kfmlp_get_idx(sem, my_queue),
+                      NULL);
+        */
+
+        if(tsk_rt(my_queue->owner)->inh_task == max_hp)
+        {
+            __clear_priority_inheritance(my_queue->owner);
+            if(my_queue->hp_waiter != NULL)
+            {
+                __set_priority_inheritance(my_queue->owner, my_queue->hp_waiter);
+            }
+        }
+        raw_spin_unlock(&cluster->cedf_lock);
+
+        list_for_each(pos, &my_queue->wait.task_list)
+        {
+            queued = (struct task_struct*) list_entry(pos, wait_queue_t,
+                                                      task_list)->private;
+            /* Compare task prios, find high prio task. */
+            if (queued == max_hp)
+            {
+                /*
+                TRACE_CUR("queue %d: found entry in wait queue. REMOVING!\n",
+                          kfmlp_get_idx(sem, my_queue));
+                */
+                __remove_wait_queue(&my_queue->wait,
+                                    list_entry(pos, wait_queue_t, task_list));
+                break;
+            }
+        }
+        --(my_queue->count);
+    }
+
+    return(max_hp);
+}
+
+int cedf_kfmlp_lock(struct litmus_lock* l)
+{
+    struct task_struct* t = current;
+    struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
+    struct kfmlp_queue* my_queue;
+    wait_queue_t wait;
+    unsigned long flags;
+
+    if (!is_realtime(t))
+        return -EPERM;
+
+    spin_lock_irqsave(&sem->lock, flags);
+
+    my_queue = sem->shortest_queue;
+
+    if (my_queue->owner) {
+        /* resource is not free => must suspend and wait */
+        TRACE_CUR("queue %d: Resource is not free => must suspend and wait.\n",
+                  kfmlp_get_idx(sem, my_queue));
+
+        init_waitqueue_entry(&wait, t);
+
+        /* FIXME: interruptible would be nice some day */
+        set_task_state(t, TASK_UNINTERRUPTIBLE);
+
+        __add_wait_queue_tail_exclusive(&my_queue->wait, &wait);
+
+        /* check if we need to activate priority inheritance */
+        if (edf_higher_prio(t, my_queue->hp_waiter))
+        {
+            my_queue->hp_waiter = t;
+            if (edf_higher_prio(t, my_queue->owner))
+            {
+                set_priority_inheritance(my_queue->owner, my_queue->hp_waiter);
+            }
+        }
+
+        ++(my_queue->count);
+        sem->shortest_queue = kfmlp_find_shortest(sem, my_queue);
+
+        /* release lock before sleeping */
+        spin_unlock_irqrestore(&sem->lock, flags);
+
+        /* We depend on the FIFO order. Thus, we don't need to recheck
+         * when we wake up; we are guaranteed to have the lock since
+         * there is only one wake up per release (or steal).
+         */
+        schedule();
+
+
+        if(my_queue->owner == t)
+        {
+            TRACE_CUR("queue %d: acquired through waiting\n",
+                      kfmlp_get_idx(sem, my_queue));
+        }
+        else
+        {
+            /* this case may happen if our wait entry was stolen
+               between queues. record where we went.*/
+            my_queue = kfmlp_get_queue(sem, t);
+            BUG_ON(!my_queue);
+            TRACE_CUR("queue %d: acquired through stealing\n",
+                      kfmlp_get_idx(sem, my_queue));
+        }
+    }
+    else
+    {
+        TRACE_CUR("queue %d: acquired immediately\n",
+                  kfmlp_get_idx(sem, my_queue));
+
+        my_queue->owner = t;
+
+        ++(my_queue->count);
+        sem->shortest_queue = kfmlp_find_shortest(sem, my_queue);
+
+        spin_unlock_irqrestore(&sem->lock, flags);
+    }
+
+    return kfmlp_get_idx(sem, my_queue);
+}
+
+int cedf_kfmlp_unlock(struct litmus_lock* l)
+{
+    struct task_struct *t = current, *next;
+    struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
+    struct kfmlp_queue *my_queue;
+    unsigned long flags;
+    int err = 0;
+
+    spin_lock_irqsave(&sem->lock, flags);
+
+    my_queue = kfmlp_get_queue(sem, t);
+
+    if (!my_queue) {
+        err = -EINVAL;
+        goto out;
+    }
+
+    /* check if there are jobs waiting for this resource */
+    next = __waitqueue_remove_first(&my_queue->wait);
+    if (next) {
+        /*
+        TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n",
+                  kfmlp_get_idx(sem, my_queue),
+                  next->comm, next->pid);
+        */
+        /* next becomes the resouce holder */
+        my_queue->owner = next;
+
+        --(my_queue->count);
+        if(my_queue->count < sem->shortest_queue->count)
+        {
+            sem->shortest_queue = my_queue;
+        }
+
+        TRACE_CUR("queue %d: lock ownership passed to %s/%d\n",
+                  kfmlp_get_idx(sem, my_queue), next->comm, next->pid);
+
+        /* determine new hp_waiter if necessary */
+        if (next == my_queue->hp_waiter) {
+            TRACE_TASK(next, "was highest-prio waiter\n");
+            /* next has the highest priority --- it doesn't need to
+             * inherit. However, we need to make sure that the
+             * next-highest priority in the queue is reflected in
+             * hp_waiter. */
+            my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, next);
+            if (my_queue->hp_waiter)
+                TRACE_TASK(my_queue->hp_waiter, "queue %d: is new highest-prio waiter\n", kfmlp_get_idx(sem, my_queue));
+            else
+                TRACE("queue %d: no further waiters\n", kfmlp_get_idx(sem, my_queue));
+        } else {
+            /* Well, if next is not the highest-priority waiter,
+             * then it ought to inherit the highest-priority
+             * waiter's priority. */
+            set_priority_inheritance(next, my_queue->hp_waiter);
+        }
+
+        /* wake up next */
+        wake_up_process(next);
+    }
+    else
+    {
+        TRACE_CUR("queue %d: looking to steal someone...\n", kfmlp_get_idx(sem, my_queue));
+
+        next = kfmlp_remove_hp_waiter(sem); /* returns NULL if nothing to steal */
+
+        /*
+        if(next)
+            TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - steal\n",
+                      kfmlp_get_idx(sem, my_queue),
+                      next->comm, next->pid);
+        */
+
+        my_queue->owner = next;
+
+        if(next)
+        {
+            TRACE_CUR("queue %d: lock ownership passed to %s/%d (which was stolen)\n",
+                      kfmlp_get_idx(sem, my_queue),
+                      next->comm, next->pid);
+
+            /* wake up next */
+            wake_up_process(next);
+        }
+        else
+        {
+            TRACE_CUR("queue %d: no one to steal.\n", kfmlp_get_idx(sem, my_queue));
+
+            --(my_queue->count);
+            if(my_queue->count < sem->shortest_queue->count)
+            {
+                sem->shortest_queue = my_queue;
+            }
+        }
+    }
+
+    /* we lose the benefit of priority inheritance (if any) */
+    if (tsk_rt(t)->inh_task)
+        clear_priority_inheritance(t);
+
+out:
+    spin_unlock_irqrestore(&sem->lock, flags);
+
+    return err;
+}
+
+int cedf_kfmlp_close(struct litmus_lock* l)
+{
+    struct task_struct *t = current;
+    struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
+    struct kfmlp_queue *my_queue;
+    unsigned long flags;
+
+    int owner;
+
+    spin_lock_irqsave(&sem->lock, flags);
+
+    my_queue = kfmlp_get_queue(sem, t);
+    owner = (my_queue) ? (my_queue->owner == t) : 0;
+
+    spin_unlock_irqrestore(&sem->lock, flags);
+
+    if (owner)
+        cedf_kfmlp_unlock(l);
+
+    return 0;
+}
+
+void cedf_kfmlp_free(struct litmus_lock* l)
+{
+    struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
+    kfree(sem->queues);
+    kfree(sem);
+}
+
+static struct litmus_lock_ops cedf_kfmlp_lock_ops = {
+    .close = cedf_kfmlp_close,
+    .lock = cedf_kfmlp_lock,
+    .unlock = cedf_kfmlp_unlock,
+    .deallocate = cedf_kfmlp_free,
+};
+
+static struct litmus_lock* cedf_new_kfmlp(void* __user arg, int* ret_code)
+{
+    struct kfmlp_semaphore* sem;
+    int num_resources = 0;
+    int i;
+
+    if(!access_ok(VERIFY_READ, arg, sizeof(num_resources)))
+    {
+        *ret_code = -EINVAL;
+        return(NULL);
+    }
+    if(__copy_from_user(&num_resources, arg, sizeof(num_resources)))
+    {
+        *ret_code = -EINVAL;
+        return(NULL);
+    }
+    if(num_resources < 1)
+    {
+        *ret_code = -EINVAL;
+        return(NULL);
+    }
+
+    sem = kmalloc(sizeof(*sem), GFP_KERNEL);
+    if(!sem)
+    {
+        *ret_code = -ENOMEM;
+        return NULL;
+    }
+
+    sem->queues = kmalloc(sizeof(struct kfmlp_queue)*num_resources, GFP_KERNEL);
+    if(!sem->queues)
+    {
+        kfree(sem);
+        *ret_code = -ENOMEM;
+        return NULL;
+    }
+
+    sem->litmus_lock.ops = &cedf_kfmlp_lock_ops;
+    spin_lock_init(&sem->lock);
+    sem->num_resources = num_resources;
+
+    for(i = 0; i < num_resources; ++i)
+    {
+        sem->queues[i].owner = NULL;
+        sem->queues[i].hp_waiter = NULL;
+        init_waitqueue_head(&sem->queues[i].wait);
+        sem->queues[i].count = 0;
+    }
+
+    sem->shortest_queue = &sem->queues[0];
+
+    *ret_code = 0;
+    return &sem->litmus_lock;
+}
+
+
+/* **** lock constructor **** */
+
+static long cedf_allocate_lock(struct litmus_lock **lock, int type,
+                               void* __user arg)
+{
+    int err = -ENXIO;
+
+    /* C-EDF currently only supports the FMLP for global resources
+       WITHIN a given cluster. DO NOT USE CROSS-CLUSTER! */
+    switch (type) {
+    case KFMLP_SEM:
+        *lock = cedf_new_kfmlp(arg, &err);
+        break;
+    };
+
+    return err;
+}
+
+#endif // CONFIG_LITMUS_LOCKING
+
+
+
+
+
+
 /* total number of cluster */
 static int num_clusters;
 /* we do not support cluster of different sizes */
@@ -746,6 +1517,40 @@ static long cedf_activate_plugin(void)
             break;
         }
     }
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+    {
+        /* distribute the daemons evenly across the clusters. */
+        int* affinity = kmalloc(NR_LITMUS_SOFTIRQD * sizeof(int), GFP_ATOMIC);
+        int num_daemons_per_cluster = NR_LITMUS_SOFTIRQD / num_clusters;
+        int left_over = NR_LITMUS_SOFTIRQD % num_clusters;
+
+        int daemon = 0;
+        for(i = 0; i < num_clusters; ++i)
+        {
+            int num_on_this_cluster = num_daemons_per_cluster;
+            if(left_over)
+            {
+                ++num_on_this_cluster;
+                --left_over;
+            }
+
+            for(j = 0; j < num_on_this_cluster; ++j)
+            {
+                // first CPU of this cluster
+                affinity[daemon++] = i*cluster_size;
+            }
+        }
+
+        spawn_klitirqd(affinity);
+
+        kfree(affinity);
+    }
+#endif
+
+#ifdef CONFIG_LITMUS_NVIDIA
+    init_nvidia_info();
+#endif
 
     free_cpumask_var(mask);
     clusters_allocated = 1;
@@ -765,6 +1570,15 @@ static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = {
     .task_block = cedf_task_block,
     .admit_task = cedf_admit_task,
     .activate_plugin = cedf_activate_plugin,
+#ifdef CONFIG_LITMUS_LOCKING
+    .allocate_lock = cedf_allocate_lock,
+    .set_prio_inh = set_priority_inheritance,
+    .clear_prio_inh = clear_priority_inheritance,
+#endif
+#ifdef CONFIG_LITMUS_SOFTIRQD
+    .set_prio_inh_klitirqd = set_priority_inheritance_klitirqd,
+    .clear_prio_inh_klitirqd = clear_priority_inheritance_klitirqd,
+#endif
 };
 
 static struct proc_dir_entry *cluster_file = NULL, *cedf_dir = NULL;
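
For readers unfamiliar with the KFMLP queue selection in this patch: the sketch below is a minimal, standalone user-space illustration (not the kernel code) of the circular shortest-queue scan that kfmlp_find_shortest() performs. The types and names here (demo_queue, find_shortest) are hypothetical stand-ins; the in-kernel version operates on struct kfmlp_queue, seeds the running minimum from sem->shortest_queue, and runs under sem->lock.

    /* Standalone sketch of the circular shortest-queue scan, assuming an
     * array of k queues and a starting point just after the queue that was
     * last modified. Compile with any C compiler; no kernel headers needed. */
    #include <stdio.h>

    struct demo_queue {
        int count; /* waiters + holder, as in struct kfmlp_queue */
    };

    static struct demo_queue* find_shortest(struct demo_queue *queues, int k,
                                            struct demo_queue *search_start)
    {
        struct demo_queue *step = search_start;
        struct demo_queue *shortest = search_start;

        do {
            /* advance one slot, wrapping around at the end of the array */
            step = (step + 1 != &queues[k]) ? step + 1 : &queues[0];
            if (step->count < shortest->count) {
                shortest = step;
                if (step->count == 0)
                    break; /* cannot get any shorter */
            }
        } while (step != search_start);

        return shortest;
    }

    int main(void)
    {
        struct demo_queue q[4] = { {2}, {3}, {1}, {2} };

        /* start the scan after q[1], as the lock path does after an enqueue */
        printf("shortest queue index: %ld\n",
               (long)(find_shortest(q, 4, &q[1]) - q));
        return 0;
    }

Starting the scan at the queue just past search_start, rather than always at index 0, spreads contending tasks across the k replicas instead of piling them onto the low-numbered queues.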