author    Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:39:40 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:39:40 -0500
commit    ea53c912f8a86a8567697115b6a0d8152beee5c8
tree      dd5ebfd653be7ad4d31e4226c8ffbeabcfaa33a6
parent    53c37bbcc07707f88312efca136aa239f25d775c
Prepare k-FMLP for integration with Linux 3.0
Also added k-fmlp support to CEDF.
 litmus/sched_cedf.c    | 603
 litmus/sched_gsn_edf.c |  36
 2 files changed, 590 insertions, 49 deletions
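The k-FMLP is a k-exclusion variant of the FMLP: one lock manages k replicas of a resource, each replica has its own FIFO wait queue, requests are steered to the shortest queue, and a replica holder inherits the priority of its highest-priority waiter. With this patch, C-EDF accepts KFMLP_SEM in its allocate_lock() callback and reads the replica count k from the user-supplied configuration word (see cedf_new_kfmlp() below). The sketch that follows shows how a real-time task might use such a lock from user space; it is illustrative only, and the liblitmus names (od_openx(), litmus_lock(), litmus_unlock(), od_close()) and the exported KFMLP_SEM constant are assumptions, not part of this patch.

    /* Hypothetical user-space sketch (not part of this commit).
     * Assumes liblitmus exposes od_openx(), litmus_lock(), litmus_unlock(),
     * od_close(), and a KFMLP_SEM object type matching the kernel constant. */
    #include <stdio.h>
    #include <litmus.h>            /* assumed liblitmus header */

    int use_kfmlp_lock(int namespace_fd)
    {
            int k = 4;             /* replica count; cedf_new_kfmlp() copies this int */
            int od, replica;

            od = od_openx(namespace_fd, KFMLP_SEM, 0, &k);  /* assumed signature */
            if (od < 0)
                    return od;

            replica = litmus_lock(od);   /* cedf_kfmlp_lock() returns the replica index */
            printf("holding replica %d of %d\n", replica, k);
            /* ... access the replica ... */
            litmus_unlock(od);

            return od_close(od);
    }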
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 73fe1c442a0d..5e977dd2fef0 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -29,6 +29,7 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/uaccess.h>
 
 #include <linux/module.h>
 
@@ -45,7 +46,6 @@
 
 /* to configure the cluster size */
 #include <litmus/litmus_proc.h>
-#include <linux/uaccess.h>
 
 /* Reference configuration variable. Determines which cache level is used to
  * group CPUs into clusters. GLOBAL_CLUSTER, which is the default, means that
@@ -95,7 +95,7 @@ typedef struct clusterdomain {
         struct bheap_node *heap_node;
         struct bheap cpu_heap;
         /* lock for this cluster */
-#define lock domain.ready_lock
+#define cedf_lock domain.ready_lock
 } cedf_domain_t;
 
 /* a cedf_domain per cluster; allocation is done at init/activation time */
@@ -292,12 +292,12 @@ static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
         cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain);
         unsigned long flags;
 
-        raw_spin_lock_irqsave(&cluster->lock, flags);
+        raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
 
         __merge_ready(&cluster->domain, tasks);
         check_for_preemptions(cluster);
 
-        raw_spin_unlock_irqrestore(&cluster->lock, flags);
+        raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags);
 }
 
 /* caller holds cedf_lock */
@@ -378,7 +378,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
         int out_of_time, sleep, preempt, np, exists, blocks;
         struct task_struct* next = NULL;
 
-        raw_spin_lock(&cluster->lock);
+        raw_spin_lock(&cluster->cedf_lock);
         clear_will_schedule();
 
         /* sanity checking */
@@ -462,7 +462,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
                 next = prev;
 
         sched_state_task_picked();
-        raw_spin_unlock(&cluster->lock);
+        raw_spin_unlock(&cluster->cedf_lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
         TRACE("cedf_lock released, next=0x%p\n", next);
@@ -504,7 +504,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
         /* the cluster doesn't change even if t is running */
         cluster = task_cpu_cluster(t);
 
-        raw_spin_lock_irqsave(&cluster->domain.ready_lock, flags);
+        raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
 
         /* setup job params */
         release_at(t, litmus_clock());
@@ -521,7 +521,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
         t->rt_param.linked_on = NO_CPU;
 
         cedf_job_arrival(t);
-        raw_spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags);
+        raw_spin_unlock_irqrestore(&(cluster->cedf_lock), flags);
 }
 
 static void cedf_task_wake_up(struct task_struct *task)
@@ -534,7 +534,7 @@ static void cedf_task_wake_up(struct task_struct *task)
 
         cluster = task_cpu_cluster(task);
 
-        raw_spin_lock_irqsave(&cluster->lock, flags);
+        raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
         /* We need to take suspensions because of semaphores into
          * account! If a job resumes after being suspended due to acquiring
          * a semaphore, it should never be treated as a new job release.
@@ -557,7 +557,7 @@ static void cedf_task_wake_up(struct task_struct *task)
                 }
         }
         cedf_job_arrival(task);
-        raw_spin_unlock_irqrestore(&cluster->lock, flags);
+        raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags);
 }
 
 static void cedf_task_block(struct task_struct *t)
@@ -570,9 +570,9 @@ static void cedf_task_block(struct task_struct *t)
         cluster = task_cpu_cluster(t);
 
         /* unlink if necessary */
-        raw_spin_lock_irqsave(&cluster->lock, flags);
+        raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
         unlink(t);
-        raw_spin_unlock_irqrestore(&cluster->lock, flags);
+        raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags);
 
         BUG_ON(!is_realtime(t));
 }
@@ -584,7 +584,7 @@ static void cedf_task_exit(struct task_struct * t)
         cedf_domain_t *cluster = task_cpu_cluster(t);
 
         /* unlink if necessary */
-        raw_spin_lock_irqsave(&cluster->lock, flags);
+        raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
         unlink(t);
         if (tsk_rt(t)->scheduled_on != NO_CPU) {
                 cpu_entry_t *cpu;
@@ -592,7 +592,7 @@ static void cedf_task_exit(struct task_struct * t)
                 cpu->scheduled = NULL;
                 tsk_rt(t)->scheduled_on = NO_CPU;
         }
-        raw_spin_unlock_irqrestore(&cluster->lock, flags);
+        raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags);
 
         BUG_ON(!is_realtime(t));
         TRACE_TASK(t, "RIP\n");
@@ -603,6 +603,578 @@ static long cedf_admit_task(struct task_struct* tsk)
         return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL;
 }
 
+
+
+
+
+
+
+
+
+
+
+
+
+#ifdef CONFIG_LITMUS_LOCKING
+
+#include <litmus/fdso.h>
+
+
+static void __set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
+{
+        int linked_on;
+        int check_preempt = 0;
+
+        cedf_domain_t* cluster = task_cpu_cluster(t);
+
+        if(prio_inh != NULL)
+                TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
+        else
+                TRACE_TASK(t, "inherits priority from %p\n", prio_inh);
+
+        //sched_trace_eff_prio_change(t, prio_inh);
+
+        tsk_rt(t)->inh_task = prio_inh;
+
+        linked_on = tsk_rt(t)->linked_on;
+
+        /* If it is scheduled, then we need to reorder the CPU heap. */
+        if (linked_on != NO_CPU) {
+                TRACE_TASK(t, "%s: linked on %d\n",
+                           __FUNCTION__, linked_on);
+                /* Holder is scheduled; need to re-order CPUs.
+                 * We can't use heap_decrease() here since
+                 * the cpu_heap is ordered in reverse direction, so
+                 * it is actually an increase. */
+                bheap_delete(cpu_lower_prio, &cluster->cpu_heap,
+                             per_cpu(cedf_cpu_entries, linked_on).hn);
+                bheap_insert(cpu_lower_prio, &cluster->cpu_heap,
+                             per_cpu(cedf_cpu_entries, linked_on).hn);
+        } else {
+                /* holder may be queued: first stop queue changes */
+                raw_spin_lock(&cluster->domain.release_lock);
+                if (is_queued(t)) {
+                        TRACE_TASK(t, "%s: is queued\n", __FUNCTION__);
+
+                        /* We need to update the position of holder in some
+                         * heap. Note that this could be a release heap if
+                         * budget enforcement is used and this job overran. */
+                        check_preempt = !bheap_decrease(edf_ready_order, tsk_rt(t)->heap_node);
+
+                } else {
+                        /* Nothing to do: if it is not queued and not linked
+                         * then it is either sleeping or currently being moved
+                         * by other code (e.g., a timer interrupt handler) that
+                         * will use the correct priority when enqueuing the
+                         * task. */
+                        TRACE_TASK(t, "%s: is NOT queued => Done.\n", __FUNCTION__);
+                }
+                raw_spin_unlock(&cluster->domain.release_lock);
+
+                /* If holder was enqueued in a release heap, then the following
+                 * preemption check is pointless, but we can't easily detect
+                 * that case. If you want to fix this, then consider that
+                 * simply adding a state flag requires O(n) time to update when
+                 * releasing n tasks, which conflicts with the goal to have
+                 * O(log n) merges. */
+                if (check_preempt) {
+                        /* heap_decrease() hit the top level of the heap: make
+                         * sure preemption checks get the right task, not the
+                         * potentially stale cache. */
+                        bheap_uncache_min(edf_ready_order, &cluster->domain.ready_queue);
+                        check_for_preemptions(cluster);
+                }
+        }
+}
+
+/* called with IRQs off */
+static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
+{
+        cedf_domain_t* cluster = task_cpu_cluster(t);
+
+        raw_spin_lock(&cluster->cedf_lock);
+
+        __set_priority_inheritance(t, prio_inh);
+
+        raw_spin_unlock(&cluster->cedf_lock);
+}
+
+
+/* called with IRQs off */
+static void __clear_priority_inheritance(struct task_struct* t)
+{
+        TRACE_TASK(t, "priority restored\n");
+
+        if(tsk_rt(t)->scheduled_on != NO_CPU)
+        {
+                //sched_trace_eff_prio_change(t, NULL);
+
+                tsk_rt(t)->inh_task = NULL;
+
+                /* Check if rescheduling is necessary. We can't use heap_decrease()
+                 * since the priority was effectively lowered. */
+                unlink(t);
+                cedf_job_arrival(t);
+        }
+        else
+        {
+                __set_priority_inheritance(t, NULL);
+        }
+}
+
+/* called with IRQs off */
+static void clear_priority_inheritance(struct task_struct* t)
+{
+        cedf_domain_t* cluster = task_cpu_cluster(t);
+
+        raw_spin_lock(&cluster->cedf_lock);
+        __clear_priority_inheritance(t);
+        raw_spin_unlock(&cluster->cedf_lock);
+}
+
+
+
+/* ******************** KFMLP support ********************** */
+
+/* struct for semaphore with priority inheritance */
+struct kfmlp_queue
+{
+        wait_queue_head_t wait;
+        struct task_struct* owner;
+        struct task_struct* hp_waiter;
+        int count; /* number of waiters + holder */
+};
+
+struct kfmlp_semaphore
+{
+        struct litmus_lock litmus_lock;
+
+        spinlock_t lock;
+
+        int num_resources; /* aka k */
+        struct kfmlp_queue *queues; /* array */
+        struct kfmlp_queue *shortest_queue; /* pointer to shortest queue */
+};
+
+static inline struct kfmlp_semaphore* kfmlp_from_lock(struct litmus_lock* lock)
+{
+        return container_of(lock, struct kfmlp_semaphore, litmus_lock);
+}
+
+static inline int kfmlp_get_idx(struct kfmlp_semaphore* sem,
+                                struct kfmlp_queue* queue)
+{
+        return (queue - &sem->queues[0]);
+}
+
+static inline struct kfmlp_queue* kfmlp_get_queue(struct kfmlp_semaphore* sem,
+                                                  struct task_struct* holder)
+{
+        int i;
+        for(i = 0; i < sem->num_resources; ++i)
+                if(sem->queues[i].owner == holder)
+                        return(&sem->queues[i]);
+        return(NULL);
+}
+
+/* caller is responsible for locking */
+static struct task_struct* kfmlp_find_hp_waiter(struct kfmlp_queue *kqueue,
+                                                struct task_struct *skip)
+{
+        struct list_head *pos;
+        struct task_struct *queued, *found = NULL;
+
+        list_for_each(pos, &kqueue->wait.task_list) {
+                queued = (struct task_struct*) list_entry(pos, wait_queue_t,
+                                                          task_list)->private;
+
+                /* Compare task prios, find high prio task. */
+                if (queued != skip && edf_higher_prio(queued, found))
+                        found = queued;
+        }
+        return found;
+}
+
+static inline struct kfmlp_queue* kfmlp_find_shortest(
+        struct kfmlp_semaphore* sem,
+        struct kfmlp_queue* search_start)
+{
+        // we start our search at search_start instead of at the beginning of the
+        // queue list to load-balance across all resources.
+        struct kfmlp_queue* step = search_start;
+        struct kfmlp_queue* shortest = sem->shortest_queue;
+
+        do
+        {
+                step = (step+1 != &sem->queues[sem->num_resources]) ?
+                        step+1 : &sem->queues[0];
+                if(step->count < shortest->count)
+                {
+                        shortest = step;
+                        if(step->count == 0)
+                                break; /* can't get any shorter */
+                }
+        }while(step != search_start);
+
+        return(shortest);
+}
+
+static struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem)
+{
+        /* must hold sem->lock */
+
+        struct kfmlp_queue *my_queue = NULL;
+        struct task_struct *max_hp = NULL;
+
+
+        struct list_head *pos;
+        struct task_struct *queued;
+        int i;
+
+        for(i = 0; i < sem->num_resources; ++i)
+        {
+                if( (sem->queues[i].count > 1) &&
+                    ((my_queue == NULL) ||
+                     (edf_higher_prio(sem->queues[i].hp_waiter, my_queue->hp_waiter))) )
+                {
+                        my_queue = &sem->queues[i];
+                }
+        }
+
+        if(my_queue)
+        {
+                cedf_domain_t* cluster;
+
+                max_hp = my_queue->hp_waiter;
+                BUG_ON(!max_hp);
+
+                TRACE_CUR("queue %d: stealing %s/%d from queue %d\n",
+                          kfmlp_get_idx(sem, my_queue),
+                          max_hp->comm, max_hp->pid,
+                          kfmlp_get_idx(sem, my_queue));
+
+                my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, max_hp);
+
+                cluster = task_cpu_cluster(max_hp);
+
+                raw_spin_lock(&cluster->cedf_lock);
+
+                if(tsk_rt(my_queue->owner)->inh_task == max_hp)
+                {
+                        __clear_priority_inheritance(my_queue->owner);
+                        if(my_queue->hp_waiter != NULL)
+                        {
+                                __set_priority_inheritance(my_queue->owner, my_queue->hp_waiter);
+                        }
+                }
+                raw_spin_unlock(&cluster->cedf_lock);
+
+                list_for_each(pos, &my_queue->wait.task_list)
+                {
+                        queued = (struct task_struct*) list_entry(pos, wait_queue_t,
+                                                                  task_list)->private;
+                        /* Compare task prios, find high prio task. */
+                        if (queued == max_hp)
+                        {
+                                __remove_wait_queue(&my_queue->wait,
+                                                    list_entry(pos, wait_queue_t, task_list));
+                                break;
+                        }
+                }
+                --(my_queue->count);
+        }
+
+        return(max_hp);
+}
+
+int cedf_kfmlp_lock(struct litmus_lock* l)
+{
+        struct task_struct* t = current;
+        struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
+        struct kfmlp_queue* my_queue;
+        wait_queue_t wait;
+        unsigned long flags;
+
+        if (!is_realtime(t))
+                return -EPERM;
+
+        spin_lock_irqsave(&sem->lock, flags);
+
+        my_queue = sem->shortest_queue;
+
+        if (my_queue->owner) {
+                /* resource is not free => must suspend and wait */
+                TRACE_CUR("queue %d: Resource is not free => must suspend and wait.\n",
+                          kfmlp_get_idx(sem, my_queue));
+
+                init_waitqueue_entry(&wait, t);
+
+                /* FIXME: interruptible would be nice some day */
+                set_task_state(t, TASK_UNINTERRUPTIBLE);
+
+                __add_wait_queue_tail_exclusive(&my_queue->wait, &wait);
+
+                /* check if we need to activate priority inheritance */
+                if (edf_higher_prio(t, my_queue->hp_waiter))
+                {
+                        my_queue->hp_waiter = t;
+                        if (edf_higher_prio(t, my_queue->owner))
+                        {
+                                set_priority_inheritance(my_queue->owner, my_queue->hp_waiter);
+                        }
+                }
+
+                ++(my_queue->count);
+                sem->shortest_queue = kfmlp_find_shortest(sem, my_queue);
+
+                /* release lock before sleeping */
+                spin_unlock_irqrestore(&sem->lock, flags);
+
+                /* We depend on the FIFO order. Thus, we don't need to recheck
+                 * when we wake up; we are guaranteed to have the lock since
+                 * there is only one wake up per release (or steal).
+                 */
+                schedule();
+
+
+                if(my_queue->owner == t)
+                {
+                        TRACE_CUR("queue %d: acquired through waiting\n",
+                                  kfmlp_get_idx(sem, my_queue));
+                }
+                else
+                {
+                        /* this case may happen if our wait entry was stolen
+                         * between queues. Record where we went. */
+                        my_queue = kfmlp_get_queue(sem, t);
+                        BUG_ON(!my_queue);
+                        TRACE_CUR("queue %d: acquired through stealing\n",
+                                  kfmlp_get_idx(sem, my_queue));
+                }
+        }
+        else
+        {
+                TRACE_CUR("queue %d: acquired immediately\n",
+                          kfmlp_get_idx(sem, my_queue));
+
+                my_queue->owner = t;
+
+                ++(my_queue->count);
+                sem->shortest_queue = kfmlp_find_shortest(sem, my_queue);
+
+                spin_unlock_irqrestore(&sem->lock, flags);
+        }
+
+        return kfmlp_get_idx(sem, my_queue);
+}
+
+int cedf_kfmlp_unlock(struct litmus_lock* l)
+{
+        struct task_struct *t = current, *next;
+        struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
+        struct kfmlp_queue *my_queue;
+        unsigned long flags;
+        int err = 0;
+
+        spin_lock_irqsave(&sem->lock, flags);
+
+        my_queue = kfmlp_get_queue(sem, t);
+
+        if (!my_queue) {
+                err = -EINVAL;
+                goto out;
+        }
+
+        /* check if there are jobs waiting for this resource */
+        next = __waitqueue_remove_first(&my_queue->wait);
+        if (next) {
+                /* next becomes the resource holder */
+                my_queue->owner = next;
+
+                --(my_queue->count);
+                if(my_queue->count < sem->shortest_queue->count)
+                {
+                        sem->shortest_queue = my_queue;
+                }
+
+                TRACE_CUR("queue %d: lock ownership passed to %s/%d\n",
+                          kfmlp_get_idx(sem, my_queue), next->comm, next->pid);
+
+                /* determine new hp_waiter if necessary */
+                if (next == my_queue->hp_waiter) {
+                        TRACE_TASK(next, "was highest-prio waiter\n");
+                        /* next has the highest priority --- it doesn't need to
+                         * inherit. However, we need to make sure that the
+                         * next-highest priority in the queue is reflected in
+                         * hp_waiter. */
+                        my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, next);
+                        if (my_queue->hp_waiter)
+                                TRACE_TASK(my_queue->hp_waiter, "queue %d: is new highest-prio waiter\n", kfmlp_get_idx(sem, my_queue));
+                        else
+                                TRACE("queue %d: no further waiters\n", kfmlp_get_idx(sem, my_queue));
+                } else {
+                        /* Well, if next is not the highest-priority waiter,
+                         * then it ought to inherit the highest-priority
+                         * waiter's priority. */
+                        set_priority_inheritance(next, my_queue->hp_waiter);
+                }
+
+                /* wake up next */
+                wake_up_process(next);
+        }
+        else
+        {
+                TRACE_CUR("queue %d: looking to steal someone...\n", kfmlp_get_idx(sem, my_queue));
+
+                next = kfmlp_remove_hp_waiter(sem); /* returns NULL if nothing to steal */
+
+                my_queue->owner = next;
+
+                if(next)
+                {
+                        TRACE_CUR("queue %d: lock ownership passed to %s/%d (which was stolen)\n",
+                                  kfmlp_get_idx(sem, my_queue),
+                                  next->comm, next->pid);
+
+                        /* wake up next */
+                        wake_up_process(next);
+                }
+                else
+                {
+                        TRACE_CUR("queue %d: no one to steal.\n", kfmlp_get_idx(sem, my_queue));
+
+                        --(my_queue->count);
+                        if(my_queue->count < sem->shortest_queue->count)
+                        {
+                                sem->shortest_queue = my_queue;
+                        }
+                }
+        }
+
+        /* we lose the benefit of priority inheritance (if any) */
+        if (tsk_rt(t)->inh_task)
+                clear_priority_inheritance(t);
+
+out:
+        spin_unlock_irqrestore(&sem->lock, flags);
+
+        return err;
+}
+
+int cedf_kfmlp_close(struct litmus_lock* l)
+{
+        struct task_struct *t = current;
+        struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
+        struct kfmlp_queue *my_queue;
+        unsigned long flags;
+
+        int owner;
+
+        spin_lock_irqsave(&sem->lock, flags);
+
+        my_queue = kfmlp_get_queue(sem, t);
+        owner = (my_queue) ? (my_queue->owner == t) : 0;
+
+        spin_unlock_irqrestore(&sem->lock, flags);
+
+        if (owner)
+                cedf_kfmlp_unlock(l);
+
+        return 0;
+}
+
+void cedf_kfmlp_free(struct litmus_lock* l)
+{
+        struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
+        kfree(sem->queues);
+        kfree(sem);
+}
+
+static struct litmus_lock_ops cedf_kfmlp_lock_ops = {
+        .close = cedf_kfmlp_close,
+        .lock = cedf_kfmlp_lock,
+        .unlock = cedf_kfmlp_unlock,
+        .deallocate = cedf_kfmlp_free,
+};
+
+static struct litmus_lock* cedf_new_kfmlp(void* __user arg, int* ret_code)
+{
+        struct kfmlp_semaphore* sem;
+        int num_resources = 0;
+        int i;
+
+        if(!access_ok(VERIFY_READ, arg, sizeof(num_resources)))
+        {
+                *ret_code = -EINVAL;
+                return(NULL);
+        }
+        if(__copy_from_user(&num_resources, arg, sizeof(num_resources)))
+        {
+                *ret_code = -EINVAL;
+                return(NULL);
+        }
+        if(num_resources < 1)
+        {
+                *ret_code = -EINVAL;
+                return(NULL);
+        }
+
+        sem = kmalloc(sizeof(*sem), GFP_KERNEL);
+        if(!sem)
+        {
+                *ret_code = -ENOMEM;
+                return NULL;
+        }
+
+        sem->queues = kmalloc(sizeof(struct kfmlp_queue)*num_resources, GFP_KERNEL);
+        if(!sem->queues)
+        {
+                kfree(sem);
+                *ret_code = -ENOMEM;
+                return NULL;
+        }
+
+        sem->litmus_lock.ops = &cedf_kfmlp_lock_ops;
+        spin_lock_init(&sem->lock);
+        sem->num_resources = num_resources;
+
+        for(i = 0; i < num_resources; ++i)
+        {
+                sem->queues[i].owner = NULL;
+                sem->queues[i].hp_waiter = NULL;
+                init_waitqueue_head(&sem->queues[i].wait);
+                sem->queues[i].count = 0;
+        }
+
+        sem->shortest_queue = &sem->queues[0];
+
+        *ret_code = 0;
+        return &sem->litmus_lock;
+}
+
+
+/* **** lock constructor **** */
+
+static long cedf_allocate_lock(struct litmus_lock **lock, int type,
+                               void* __user arg)
+{
+        int err = -ENXIO;
+
+        /* C-EDF currently only supports the FMLP for global resources
+         * WITHIN a given cluster. DO NOT USE CROSS-CLUSTER! */
+        switch (type) {
+        case KFMLP_SEM:
+                *lock = cedf_new_kfmlp(arg, &err);
+                break;
+        };
+
+        return err;
+}
+
+#endif // CONFIG_LITMUS_LOCKING
+
+
+
 /* total number of cluster */
 static int num_clusters;
 /* we do not support cluster of different sizes */
@@ -765,6 +1337,9 @@ static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = {
         .task_block = cedf_task_block,
         .admit_task = cedf_admit_task,
         .activate_plugin = cedf_activate_plugin,
+#ifdef CONFIG_LITMUS_LOCKING
+        .allocate_lock = cedf_allocate_lock,
+#endif
 };
 
 static struct proc_dir_entry *cluster_file = NULL, *cedf_dir = NULL;
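Before the sched_gsn_edf.c cleanup below, here is a standalone restatement of the queue-selection loop added above. kfmlp_find_shortest() begins its scan at the queue that was just modified rather than at index 0, so ties do not always favor the low-numbered replicas, and it stops early as soon as it sees an empty queue. The program below mirrors that loop in user space with made-up queue counts; it is a sketch, not code from the patch.

    /* User-space illustration of the circular shortest-queue search. */
    #include <stdio.h>

    struct queue { int count; };   /* count = waiters + holder, as in struct kfmlp_queue */

    static struct queue* find_shortest(struct queue* queues, int k,
                                       struct queue* search_start,
                                       struct queue* cached_shortest)
    {
            struct queue* step = search_start;
            struct queue* shortest = cached_shortest;

            do {
                    /* advance circularly over the k queues */
                    step = (step + 1 != &queues[k]) ? step + 1 : &queues[0];
                    if (step->count < shortest->count) {
                            shortest = step;
                            if (step->count == 0)
                                    break;  /* can't get any shorter */
                    }
            } while (step != search_start);

            return shortest;
    }

    int main(void)
    {
            struct queue q[4] = { {2}, {1}, {3}, {1} };     /* invented counts */
            struct queue* s = find_shortest(q, 4, &q[2], &q[0]);
            printf("shortest queue: index %d, count %d\n", (int)(s - q), s->count);
            return 0;
    }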
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index bf8d989f1c4a..b87524cf1802 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -935,7 +935,6 @@ struct kfmlp_semaphore
         spinlock_t lock;
 
         int num_resources; /* aka k */
-
         struct kfmlp_queue *queues; /* array */
         struct kfmlp_queue *shortest_queue; /* pointer to shortest queue */
 };
@@ -992,6 +991,7 @@ static inline struct kfmlp_queue* kfmlp_find_shortest(
         {
                 step = (step+1 != &sem->queues[sem->num_resources]) ?
                         step+1 : &sem->queues[0];
+
                 if(step->count < shortest->count)
                 {
                         shortest = step;
@@ -1038,31 +1038,8 @@ struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem)
 
                 my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, max_hp);
 
-                /*
-                if(my_queue->hp_waiter)
-                        TRACE_CUR("queue %d: new hp_waiter is %s/%d\n",
-                                  kfmlp_get_idx(sem, my_queue),
-                                  my_queue->hp_waiter->comm,
-                                  my_queue->hp_waiter->pid);
-                else
-                        TRACE_CUR("queue %d: new hp_waiter is %p\n",
-                                  kfmlp_get_idx(sem, my_queue), NULL);
-                */
-
                 raw_spin_lock(&gsnedf_lock);
 
-                /*
-                if(my_queue->owner)
-                        TRACE_CUR("queue %d: owner is %s/%d\n",
-                                  kfmlp_get_idx(sem, my_queue),
-                                  my_queue->owner->comm,
-                                  my_queue->owner->pid);
-                else
-                        TRACE_CUR("queue %d: owner is %p\n",
-                                  kfmlp_get_idx(sem, my_queue),
-                                  NULL);
-                */
-
                 if(tsk_rt(my_queue->owner)->inh_task == max_hp)
                 {
                         __clear_priority_inheritance(my_queue->owner);
@@ -1080,10 +1057,6 @@ struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem)
                         /* Compare task prios, find high prio task. */
                         if (queued == max_hp)
                         {
-                                /*
-                                TRACE_CUR("queue %d: found entry in wait queue. REMOVING!\n",
-                                          kfmlp_get_idx(sem, my_queue));
-                                */
                                 __remove_wait_queue(&my_queue->wait,
                                                     list_entry(pos, wait_queue_t, task_list));
                                 break;
@@ -1240,13 +1213,6 @@ int gsnedf_kfmlp_unlock(struct litmus_lock* l)
 
                 next = kfmlp_remove_hp_waiter(sem); /* returns NULL if nothing to steal */
 
-                /*
-                if(next)
-                        TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - steal\n",
-                                  kfmlp_get_idx(sem, my_queue),
-                                  next->comm, next->pid);
-                */
-
                 my_queue->owner = next;
 
                 if(next)
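One more sketch, covering the steal path that both files share: when cedf_kfmlp_unlock() or gsnedf_kfmlp_unlock() finds its own queue empty, kfmlp_remove_hp_waiter() scans all replicas that still have waiters (count > 1) and steals the single highest-priority waiter among them, fixing up priority inheritance on the victim queue's holder. The snippet below restates only the victim-selection step, with plain integer deadlines standing in for edf_higher_prio(); it is illustrative and not part of the patch.

    /* User-space illustration of the victim-selection step of the steal path. */
    #include <stdio.h>

    struct kqueue {
            int count;        /* waiters + holder */
            int hp_deadline;  /* deadline of highest-priority waiter; 0 = none */
    };

    /* smaller nonzero deadline = higher priority (EDF stand-in) */
    static int higher_prio(int a, int b)
    {
            if (a == 0)
                    return 0;
            if (b == 0)
                    return 1;
            return a < b;
    }

    static struct kqueue* pick_victim(struct kqueue* q, int k)
    {
            struct kqueue* victim = NULL;
            int i;

            for (i = 0; i < k; ++i)
                    if (q[i].count > 1 &&
                        (victim == NULL ||
                         higher_prio(q[i].hp_deadline, victim->hp_deadline)))
                            victim = &q[i];

            return victim;  /* NULL: nothing to steal, as in kfmlp_remove_hp_waiter() */
    }

    int main(void)
    {
            struct kqueue q[3] = { {1, 0}, {3, 40}, {2, 25} };      /* invented state */
            struct kqueue* v = pick_victim(q, 3);

            if (v)
                    printf("steal from queue %d (waiter deadline %d)\n",
                           (int)(v - q), v->hp_deadline);
            else
                    printf("nothing to steal\n");
            return 0;
    }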