 include/litmus/fdso.h   |   6
 include/litmus/litmus.h |   1
 litmus/fdso.c           |   1
 litmus/locking.c        |   1
 litmus/sched_cedf.c     | 578
 litmus/sched_gsn_edf.c  | 522
 litmus/sched_litmus.c   |   2
 7 files changed, 1069 insertions(+), 42 deletions(-)
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
index caf2a1e6918c..c740e8fc3e88 100644
--- a/include/litmus/fdso.h
+++ b/include/litmus/fdso.h
@@ -18,9 +18,10 @@ typedef enum {
18 | MIN_OBJ_TYPE = 0, | 18 | MIN_OBJ_TYPE = 0, |
19 | 19 | ||
20 | FMLP_SEM = 0, | 20 | FMLP_SEM = 0, |
21 | SRP_SEM = 1, | 21 | KFMLP_SEM = 1, |
22 | SRP_SEM = 2, | ||
22 | 23 | ||
23 | MAX_OBJ_TYPE = 1 | 24 | MAX_OBJ_TYPE = SRP_SEM |
24 | } obj_type_t; | 25 | } obj_type_t; |
25 | 26 | ||
26 | struct inode_obj_id { | 27 | struct inode_obj_id { |
@@ -64,6 +65,7 @@ static inline void* od_lookup(int od, obj_type_t type) | |||
64 | } | 65 | } |
65 | 66 | ||
66 | #define lookup_fmlp_sem(od)((struct pi_semaphore*) od_lookup(od, FMLP_SEM)) | 67 | #define lookup_fmlp_sem(od)((struct pi_semaphore*) od_lookup(od, FMLP_SEM)) |
68 | #define lookup_kfmlp_sem(od)((struct pi_semaphore*) od_lookup(od, KFMLP_SEM)) | ||
67 | #define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM)) | 69 | #define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM)) |
68 | #define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID)) | 70 | #define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID)) |
69 | 71 | ||
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 0b071fd359f9..95d0805519de 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -26,6 +26,7 @@ static inline int in_list(struct list_head* list)
26 | ); | 26 | ); |
27 | } | 27 | } |
28 | 28 | ||
29 | |||
29 | struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq); | 30 | struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq); |
30 | 31 | ||
31 | #define NO_CPU 0xffffffff | 32 | #define NO_CPU 0xffffffff |
diff --git a/litmus/fdso.c b/litmus/fdso.c
index aa7b384264e3..2b7f9ba85857 100644
--- a/litmus/fdso.c
+++ b/litmus/fdso.c
@@ -22,6 +22,7 @@ extern struct fdso_ops generic_lock_ops;
22 | 22 | ||
23 | static const struct fdso_ops* fdso_ops[] = { | 23 | static const struct fdso_ops* fdso_ops[] = { |
24 | &generic_lock_ops, /* FMLP_SEM */ | 24 | &generic_lock_ops, /* FMLP_SEM */ |
25 | &generic_lock_ops, /* KFMLP_SEM */ | ||
25 | &generic_lock_ops, /* SRP_SEM */ | 26 | &generic_lock_ops, /* SRP_SEM */ |
26 | }; | 27 | }; |
27 | 28 | ||
diff --git a/litmus/locking.c b/litmus/locking.c
index 0c1aa6aa40b7..b3279c1930b7 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -121,7 +121,6 @@ struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq)
121 | return(t); | 121 | return(t); |
122 | } | 122 | } |
123 | 123 | ||
124 | |||
125 | #else | 124 | #else |
126 | 125 | ||
127 | struct fdso_ops generic_lock_ops = {}; | 126 | struct fdso_ops generic_lock_ops = {}; |
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 480c62bc895b..87f8bc9bb50b 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -29,6 +29,7 @@
29 | #include <linux/percpu.h> | 29 | #include <linux/percpu.h> |
30 | #include <linux/sched.h> | 30 | #include <linux/sched.h> |
31 | #include <linux/slab.h> | 31 | #include <linux/slab.h> |
32 | #include <linux/uaccess.h> | ||
32 | 33 | ||
33 | #include <linux/module.h> | 34 | #include <linux/module.h> |
34 | 35 | ||
@@ -49,7 +50,6 @@ | |||
49 | 50 | ||
50 | /* to configure the cluster size */ | 51 | /* to configure the cluster size */ |
51 | #include <litmus/litmus_proc.h> | 52 | #include <litmus/litmus_proc.h> |
52 | #include <linux/uaccess.h> | ||
53 | 53 | ||
54 | /* Reference configuration variable. Determines which cache level is used to | 54 | /* Reference configuration variable. Determines which cache level is used to |
55 | * group CPUs into clusters. GLOBAL_CLUSTER, which is the default, means that | 55 | * group CPUs into clusters. GLOBAL_CLUSTER, which is the default, means that |
@@ -208,7 +208,7 @@ static noinline void link_task_to_cpu(struct task_struct* linked, | |||
208 | } | 208 | } |
209 | 209 | ||
210 | /* unlink - Make sure a task is not linked any longer to an entry | 210 | /* unlink - Make sure a task is not linked any longer to an entry |
211 | * where it was linked before. Must hold cedf_lock. | 211 | * where it was linked before. Must hold cluster_lock. |
212 | */ | 212 | */ |
213 | static noinline void unlink(struct task_struct* t) | 213 | static noinline void unlink(struct task_struct* t) |
214 | { | 214 | { |
@@ -244,7 +244,7 @@ static void preempt(cpu_entry_t *entry) | |||
244 | } | 244 | } |
245 | 245 | ||
246 | /* requeue - Put an unlinked task into gsn-edf domain. | 246 | /* requeue - Put an unlinked task into gsn-edf domain. |
247 | * Caller must hold cedf_lock. | 247 | * Caller must hold cluster_lock. |
248 | */ | 248 | */ |
249 | static noinline void requeue(struct task_struct* task) | 249 | static noinline void requeue(struct task_struct* task) |
250 | { | 250 | { |
@@ -339,7 +339,7 @@ static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) | |||
339 | raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); | 339 | raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); |
340 | } | 340 | } |
341 | 341 | ||
342 | /* caller holds cedf_lock */ | 342 | /* caller holds cluster_lock */ |
343 | static noinline void job_completion(struct task_struct *t, int forced) | 343 | static noinline void job_completion(struct task_struct *t, int forced) |
344 | { | 344 | { |
345 | BUG_ON(!t); | 345 | BUG_ON(!t); |
@@ -428,6 +428,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) | |||
428 | #endif | 428 | #endif |
429 | 429 | ||
430 | raw_spin_lock(&cluster->cluster_lock); | 430 | raw_spin_lock(&cluster->cluster_lock); |
431 | |||
431 | clear_will_schedule(); | 432 | clear_will_schedule(); |
432 | 433 | ||
433 | /* sanity checking */ | 434 | /* sanity checking */ |
@@ -514,7 +515,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) | |||
514 | raw_spin_unlock(&cluster->cluster_lock); | 515 | raw_spin_unlock(&cluster->cluster_lock); |
515 | 516 | ||
516 | #ifdef WANT_ALL_SCHED_EVENTS | 517 | #ifdef WANT_ALL_SCHED_EVENTS |
517 | TRACE("cedf_lock released, next=0x%p\n", next); | 518 | TRACE("cluster_lock released, next=0x%p\n", next); |
518 | 519 | ||
519 | if (next) | 520 | if (next) |
520 | TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); | 521 | TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); |
@@ -594,6 +595,7 @@ static void cedf_task_wake_up(struct task_struct *task) | |||
594 | cluster = task_cpu_cluster(task); | 595 | cluster = task_cpu_cluster(task); |
595 | 596 | ||
596 | raw_spin_lock_irqsave(&cluster->cluster_lock, flags); | 597 | raw_spin_lock_irqsave(&cluster->cluster_lock, flags); |
598 | |||
597 | /* We need to take suspensions because of semaphores into | 599 | /* We need to take suspensions because of semaphores into |
598 | * account! If a job resumes after being suspended due to acquiring | 600 | * account! If a job resumes after being suspended due to acquiring |
599 | * a semaphore, it should never be treated as a new job release. | 601 | * a semaphore, it should never be treated as a new job release. |
@@ -616,6 +618,7 @@ static void cedf_task_wake_up(struct task_struct *task) | |||
616 | } | 618 | } |
617 | } | 619 | } |
618 | cedf_job_arrival(task); | 620 | cedf_job_arrival(task); |
621 | |||
619 | raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); | 622 | raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); |
620 | } | 623 | } |
621 | 624 | ||
@@ -662,6 +665,568 @@ static long cedf_admit_task(struct task_struct* tsk) | |||
662 | return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL; | 665 | return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL; |
663 | } | 666 | } |
664 | 667 | ||
668 | |||
669 | |||
670 | #ifdef CONFIG_LITMUS_LOCKING | ||
671 | |||
672 | #include <litmus/fdso.h> | ||
673 | |||
674 | |||
675 | static void __set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | ||
676 | { | ||
677 | int linked_on; | ||
678 | int check_preempt = 0; | ||
679 | |||
680 | cedf_domain_t* cluster = task_cpu_cluster(t); | ||
681 | |||
682 | if(prio_inh != NULL) | ||
683 | TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); | ||
684 | else | ||
685 | TRACE_TASK(t, "inherits priority from %p\n", prio_inh); | ||
686 | |||
687 | //sched_trace_eff_prio_change(t, prio_inh); | ||
688 | |||
689 | tsk_rt(t)->inh_task = prio_inh; | ||
690 | |||
691 | linked_on = tsk_rt(t)->linked_on; | ||
692 | |||
693 | /* If it is scheduled, then we need to reorder the CPU heap. */ | ||
694 | if (linked_on != NO_CPU) { | ||
695 | TRACE_TASK(t, "%s: linked on %d\n", | ||
696 | __FUNCTION__, linked_on); | ||
697 | /* Holder is scheduled; need to re-order CPUs. | ||
698 | * We can't use heap_decrease() here since | ||
699 | * the cpu_heap is ordered in reverse direction, so | ||
700 | * it is actually an increase. */ | ||
701 | bheap_delete(cpu_lower_prio, &cluster->cpu_heap, | ||
702 | per_cpu(cedf_cpu_entries, linked_on).hn); | ||
703 | bheap_insert(cpu_lower_prio, &cluster->cpu_heap, | ||
704 | per_cpu(cedf_cpu_entries, linked_on).hn); | ||
705 | } else { | ||
706 | /* holder may be queued: first stop queue changes */ | ||
707 | raw_spin_lock(&cluster->domain.release_lock); | ||
708 | if (is_queued(t)) { | ||
709 | TRACE_TASK(t, "%s: is queued\n", __FUNCTION__); | ||
710 | |||
711 | /* We need to update the position of holder in some | ||
712 | * heap. Note that this could be a release heap if | ||
713 | budget enforcement is used and this job overran. */ | ||
714 | check_preempt = !bheap_decrease(edf_ready_order, tsk_rt(t)->heap_node); | ||
715 | |||
716 | } else { | ||
717 | /* Nothing to do: if it is not queued and not linked | ||
718 | * then it is either sleeping or currently being moved | ||
719 | * by other code (e.g., a timer interrupt handler) that | ||
720 | * will use the correct priority when enqueuing the | ||
721 | * task. */ | ||
722 | TRACE_TASK(t, "%s: is NOT queued => Done.\n", __FUNCTION__); | ||
723 | } | ||
724 | raw_spin_unlock(&cluster->domain.release_lock); | ||
725 | |||
726 | /* If holder was enqueued in a release heap, then the following | ||
727 | * preemption check is pointless, but we can't easily detect | ||
728 | * that case. If you want to fix this, then consider that | ||
729 | * simply adding a state flag requires O(n) time to update when | ||
730 | * releasing n tasks, which conflicts with the goal to have | ||
731 | * O(log n) merges. */ | ||
732 | if (check_preempt) { | ||
733 | /* heap_decrease() hit the top level of the heap: make | ||
734 | * sure preemption checks get the right task, not the | ||
735 | * potentially stale cache. */ | ||
736 | bheap_uncache_min(edf_ready_order, &cluster->domain.ready_queue); | ||
737 | check_for_preemptions(cluster); | ||
738 | } | ||
739 | } | ||
740 | } | ||
741 | |||
742 | /* called with IRQs off */ | ||
743 | static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | ||
744 | { | ||
745 | cedf_domain_t* cluster = task_cpu_cluster(t); | ||
746 | |||
747 | raw_spin_lock(&cluster->cluster_lock); | ||
748 | |||
749 | __set_priority_inheritance(t, prio_inh); | ||
750 | |||
751 | raw_spin_unlock(&cluster->cluster_lock); | ||
752 | } | ||
753 | |||
754 | |||
755 | /* called with IRQs off */ | ||
756 | static void __clear_priority_inheritance(struct task_struct* t) | ||
757 | { | ||
758 | TRACE_TASK(t, "priority restored\n"); | ||
759 | |||
760 | if(tsk_rt(t)->scheduled_on != NO_CPU) | ||
761 | { | ||
762 | //sched_trace_eff_prio_change(t, NULL); | ||
763 | |||
764 | tsk_rt(t)->inh_task = NULL; | ||
765 | |||
766 | /* Check if rescheduling is necessary. We can't use heap_decrease() | ||
767 | * since the priority was effectively lowered. */ | ||
768 | unlink(t); | ||
769 | cedf_job_arrival(t); | ||
770 | } | ||
771 | else | ||
772 | { | ||
773 | __set_priority_inheritance(t, NULL); | ||
774 | } | ||
775 | } | ||
776 | |||
777 | /* called with IRQs off */ | ||
778 | static void clear_priority_inheritance(struct task_struct* t) | ||
779 | { | ||
780 | cedf_domain_t* cluster = task_cpu_cluster(t); | ||
781 | |||
782 | raw_spin_lock(&cluster->cluster_lock); | ||
783 | __clear_priority_inheritance(t); | ||
784 | raw_spin_unlock(&cluster->cluster_lock); | ||
785 | } | ||
786 | |||
787 | |||
788 | |||
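Both helpers above only record the donor task in tsk_rt(t)->inh_task and then re-position the holder in the CPU heap or ready queue; the actual priority boost happens implicitly because the EDF comparison resolves inherited parameters. Roughly, and treating this as a sketch of the assumed behavior since the real comparison lives in litmus/edf_common.c and is not part of this diff:

	/* Sketch of the assumed effective-priority resolution. */
	static inline struct task_struct* effective_task(struct task_struct* t)
	{
		return tsk_rt(t)->inh_task ? tsk_rt(t)->inh_task : t;
	}

	/* edf_higher_prio(a, b) then behaves approximately like comparing
	 * get_deadline(effective_task(a)) against
	 * get_deadline(effective_task(b)), with PID as the tie-breaker. */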
789 | /* ******************** KFMLP support ********************** */ | ||
790 | |||
791 | /* struct for semaphore with priority inheritance */ | ||
792 | struct kfmlp_queue | ||
793 | { | ||
794 | wait_queue_head_t wait; | ||
795 | struct task_struct* owner; | ||
796 | struct task_struct* hp_waiter; | ||
797 | int count; /* number of waiters + holder */ | ||
798 | }; | ||
799 | |||
800 | struct kfmlp_semaphore | ||
801 | { | ||
802 | struct litmus_lock litmus_lock; | ||
803 | |||
804 | spinlock_t lock; | ||
805 | |||
806 | int num_resources; /* aka k */ | ||
807 | struct kfmlp_queue *queues; /* array */ | ||
808 | struct kfmlp_queue *shortest_queue; /* pointer to shortest queue */ | ||
809 | }; | ||
810 | |||
811 | static inline struct kfmlp_semaphore* kfmlp_from_lock(struct litmus_lock* lock) | ||
812 | { | ||
813 | return container_of(lock, struct kfmlp_semaphore, litmus_lock); | ||
814 | } | ||
815 | |||
816 | static inline int kfmlp_get_idx(struct kfmlp_semaphore* sem, | ||
817 | struct kfmlp_queue* queue) | ||
818 | { | ||
819 | return (queue - &sem->queues[0]); | ||
820 | } | ||
821 | |||
822 | static inline struct kfmlp_queue* kfmlp_get_queue(struct kfmlp_semaphore* sem, | ||
823 | struct task_struct* holder) | ||
824 | { | ||
825 | int i; | ||
826 | for(i = 0; i < sem->num_resources; ++i) | ||
827 | if(sem->queues[i].owner == holder) | ||
828 | return(&sem->queues[i]); | ||
829 | return(NULL); | ||
830 | } | ||
831 | |||
832 | /* caller is responsible for locking */ | ||
833 | static struct task_struct* kfmlp_find_hp_waiter(struct kfmlp_queue *kqueue, | ||
834 | struct task_struct *skip) | ||
835 | { | ||
836 | struct list_head *pos; | ||
837 | struct task_struct *queued, *found = NULL; | ||
838 | |||
839 | list_for_each(pos, &kqueue->wait.task_list) { | ||
840 | queued = (struct task_struct*) list_entry(pos, wait_queue_t, | ||
841 | task_list)->private; | ||
842 | |||
843 | /* Compare task prios, find high prio task. */ | ||
844 | if (queued != skip && edf_higher_prio(queued, found)) | ||
845 | found = queued; | ||
846 | } | ||
847 | return found; | ||
848 | } | ||
849 | |||
850 | static inline struct kfmlp_queue* kfmlp_find_shortest( | ||
851 | struct kfmlp_semaphore* sem, | ||
852 | struct kfmlp_queue* search_start) | ||
853 | { | ||
854 | // we start our search at search_start instead of at the beginning of the | ||
855 | // queue list to load-balance across all resources. | ||
856 | struct kfmlp_queue* step = search_start; | ||
857 | struct kfmlp_queue* shortest = sem->shortest_queue; | ||
858 | |||
859 | do | ||
860 | { | ||
861 | step = (step+1 != &sem->queues[sem->num_resources]) ? | ||
862 | step+1 : &sem->queues[0]; | ||
863 | if(step->count < shortest->count) | ||
864 | { | ||
865 | shortest = step; | ||
866 | if(step->count == 0) | ||
867 | break; /* can't get any shorter */ | ||
868 | } | ||
869 | }while(step != search_start); | ||
870 | |||
871 | return(shortest); | ||
872 | } | ||
873 | |||
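kfmlp_find_shortest() starts its scan one slot past search_start rather than at queue 0, so that back-to-back requests spread across the k replicas instead of piling onto the first short queue; sem->shortest_queue is only a cached hint that seeds the comparison. The same scan, pulled out as a self-contained user-space model (hypothetical code, for illustration only):

	/* count[] holds the per-queue "waiters + holder" counts, k is the number
	 * of replicas, start mirrors search_start, hint mirrors sem->shortest_queue. */
	static int model_find_shortest(const int count[], int k, int start, int hint)
	{
		int step = start;
		int shortest = hint;

		do {
			step = (step + 1 == k) ? 0 : step + 1;	/* wrap around */
			if (count[step] < count[shortest]) {
				shortest = step;
				if (count[step] == 0)
					break;		/* can't get any shorter */
			}
		} while (step != start);

		return shortest;
	}

For example, model_find_shortest((int[]){2, 1, 0, 3}, 4, 0, 0) returns 2, the index of the empty queue.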
874 | static struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem) | ||
875 | { | ||
876 | /* must hold sem->lock */ | ||
877 | |||
878 | struct kfmlp_queue *my_queue = NULL; | ||
879 | struct task_struct *max_hp = NULL; | ||
880 | |||
881 | |||
882 | struct list_head *pos; | ||
883 | struct task_struct *queued; | ||
884 | int i; | ||
885 | |||
886 | for(i = 0; i < sem->num_resources; ++i) | ||
887 | { | ||
888 | if( (sem->queues[i].count > 1) && | ||
889 | ((my_queue == NULL) || | ||
890 | (edf_higher_prio(sem->queues[i].hp_waiter, my_queue->hp_waiter))) ) | ||
891 | { | ||
892 | my_queue = &sem->queues[i]; | ||
893 | } | ||
894 | } | ||
895 | |||
896 | if(my_queue) | ||
897 | { | ||
898 | cedf_domain_t* cluster; | ||
899 | |||
900 | max_hp = my_queue->hp_waiter; | ||
901 | BUG_ON(!max_hp); | ||
902 | |||
903 | TRACE_CUR("queue %d: stealing %s/%d from queue %d\n", | ||
904 | kfmlp_get_idx(sem, my_queue), | ||
905 | max_hp->comm, max_hp->pid, | ||
906 | kfmlp_get_idx(sem, my_queue)); | ||
907 | |||
908 | my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, max_hp); | ||
909 | |||
910 | cluster = task_cpu_cluster(max_hp); | ||
911 | |||
912 | raw_spin_lock(&cluster->cluster_lock); | ||
913 | |||
914 | if(tsk_rt(my_queue->owner)->inh_task == max_hp) | ||
915 | { | ||
916 | __clear_priority_inheritance(my_queue->owner); | ||
917 | if(my_queue->hp_waiter != NULL) | ||
918 | { | ||
919 | __set_priority_inheritance(my_queue->owner, my_queue->hp_waiter); | ||
920 | } | ||
921 | } | ||
922 | raw_spin_unlock(&cluster->cluster_lock); | ||
923 | |||
924 | list_for_each(pos, &my_queue->wait.task_list) | ||
925 | { | ||
926 | queued = (struct task_struct*) list_entry(pos, wait_queue_t, | ||
927 | task_list)->private; | ||
928 | /* Compare task prios, find high prio task. */ | ||
929 | if (queued == max_hp) | ||
930 | { | ||
931 | __remove_wait_queue(&my_queue->wait, | ||
932 | list_entry(pos, wait_queue_t, task_list)); | ||
933 | break; | ||
934 | } | ||
935 | } | ||
936 | --(my_queue->count); | ||
937 | } | ||
938 | |||
939 | return(max_hp); | ||
940 | } | ||
941 | |||
942 | int cedf_kfmlp_lock(struct litmus_lock* l) | ||
943 | { | ||
944 | struct task_struct* t = current; | ||
945 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
946 | struct kfmlp_queue* my_queue; | ||
947 | wait_queue_t wait; | ||
948 | unsigned long flags; | ||
949 | |||
950 | if (!is_realtime(t)) | ||
951 | return -EPERM; | ||
952 | |||
953 | spin_lock_irqsave(&sem->lock, flags); | ||
954 | |||
955 | my_queue = sem->shortest_queue; | ||
956 | |||
957 | if (my_queue->owner) { | ||
958 | /* resource is not free => must suspend and wait */ | ||
959 | TRACE_CUR("queue %d: Resource is not free => must suspend and wait.\n", | ||
960 | kfmlp_get_idx(sem, my_queue)); | ||
961 | |||
962 | init_waitqueue_entry(&wait, t); | ||
963 | |||
964 | /* FIXME: interruptible would be nice some day */ | ||
965 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
966 | |||
967 | __add_wait_queue_tail_exclusive(&my_queue->wait, &wait); | ||
968 | |||
969 | /* check if we need to activate priority inheritance */ | ||
970 | if (edf_higher_prio(t, my_queue->hp_waiter)) | ||
971 | { | ||
972 | my_queue->hp_waiter = t; | ||
973 | if (edf_higher_prio(t, my_queue->owner)) | ||
974 | { | ||
975 | set_priority_inheritance(my_queue->owner, my_queue->hp_waiter); | ||
976 | } | ||
977 | } | ||
978 | |||
979 | ++(my_queue->count); | ||
980 | sem->shortest_queue = kfmlp_find_shortest(sem, my_queue); | ||
981 | |||
982 | /* release lock before sleeping */ | ||
983 | spin_unlock_irqrestore(&sem->lock, flags); | ||
984 | |||
985 | /* We depend on the FIFO order. Thus, we don't need to recheck | ||
986 | * when we wake up; we are guaranteed to have the lock since | ||
987 | * there is only one wake up per release (or steal). | ||
988 | */ | ||
989 | schedule(); | ||
990 | |||
991 | |||
992 | if(my_queue->owner == t) | ||
993 | { | ||
994 | TRACE_CUR("queue %d: acquired through waiting\n", | ||
995 | kfmlp_get_idx(sem, my_queue)); | ||
996 | } | ||
997 | else | ||
998 | { | ||
999 | /* this case may happen if our wait entry was stolen | ||
1000 | between queues. Record where we went. */ | ||
1001 | my_queue = kfmlp_get_queue(sem, t); | ||
1002 | BUG_ON(!my_queue); | ||
1003 | TRACE_CUR("queue %d: acquired through stealing\n", | ||
1004 | kfmlp_get_idx(sem, my_queue)); | ||
1005 | } | ||
1006 | } | ||
1007 | else | ||
1008 | { | ||
1009 | TRACE_CUR("queue %d: acquired immediately\n", | ||
1010 | kfmlp_get_idx(sem, my_queue)); | ||
1011 | |||
1012 | my_queue->owner = t; | ||
1013 | |||
1014 | ++(my_queue->count); | ||
1015 | sem->shortest_queue = kfmlp_find_shortest(sem, my_queue); | ||
1016 | |||
1017 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1018 | } | ||
1019 | |||
1020 | return kfmlp_get_idx(sem, my_queue); | ||
1021 | } | ||
1022 | |||
1023 | int cedf_kfmlp_unlock(struct litmus_lock* l) | ||
1024 | { | ||
1025 | struct task_struct *t = current, *next; | ||
1026 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1027 | struct kfmlp_queue *my_queue; | ||
1028 | unsigned long flags; | ||
1029 | int err = 0; | ||
1030 | |||
1031 | spin_lock_irqsave(&sem->lock, flags); | ||
1032 | |||
1033 | my_queue = kfmlp_get_queue(sem, t); | ||
1034 | |||
1035 | if (!my_queue) { | ||
1036 | err = -EINVAL; | ||
1037 | goto out; | ||
1038 | } | ||
1039 | |||
1040 | /* check if there are jobs waiting for this resource */ | ||
1041 | next = __waitqueue_remove_first(&my_queue->wait); | ||
1042 | if (next) { | ||
1043 | /* next becomes the resource holder */ | ||
1044 | my_queue->owner = next; | ||
1045 | |||
1046 | --(my_queue->count); | ||
1047 | if(my_queue->count < sem->shortest_queue->count) | ||
1048 | { | ||
1049 | sem->shortest_queue = my_queue; | ||
1050 | } | ||
1051 | |||
1052 | TRACE_CUR("queue %d: lock ownership passed to %s/%d\n", | ||
1053 | kfmlp_get_idx(sem, my_queue), next->comm, next->pid); | ||
1054 | |||
1055 | /* determine new hp_waiter if necessary */ | ||
1056 | if (next == my_queue->hp_waiter) { | ||
1057 | TRACE_TASK(next, "was highest-prio waiter\n"); | ||
1058 | /* next has the highest priority --- it doesn't need to | ||
1059 | * inherit. However, we need to make sure that the | ||
1060 | * next-highest priority in the queue is reflected in | ||
1061 | * hp_waiter. */ | ||
1062 | my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, next); | ||
1063 | if (my_queue->hp_waiter) | ||
1064 | TRACE_TASK(my_queue->hp_waiter, "queue %d: is new highest-prio waiter\n", kfmlp_get_idx(sem, my_queue)); | ||
1065 | else | ||
1066 | TRACE("queue %d: no further waiters\n", kfmlp_get_idx(sem, my_queue)); | ||
1067 | } else { | ||
1068 | /* Well, if next is not the highest-priority waiter, | ||
1069 | * then it ought to inherit the highest-priority | ||
1070 | * waiter's priority. */ | ||
1071 | set_priority_inheritance(next, my_queue->hp_waiter); | ||
1072 | } | ||
1073 | |||
1074 | /* wake up next */ | ||
1075 | wake_up_process(next); | ||
1076 | } | ||
1077 | else | ||
1078 | { | ||
1079 | TRACE_CUR("queue %d: looking to steal someone...\n", kfmlp_get_idx(sem, my_queue)); | ||
1080 | |||
1081 | next = kfmlp_remove_hp_waiter(sem); /* returns NULL if nothing to steal */ | ||
1082 | |||
1083 | my_queue->owner = next; | ||
1084 | |||
1085 | if(next) | ||
1086 | { | ||
1087 | TRACE_CUR("queue %d: lock ownership passed to %s/%d (which was stolen)\n", | ||
1088 | kfmlp_get_idx(sem, my_queue), | ||
1089 | next->comm, next->pid); | ||
1090 | |||
1091 | /* wake up next */ | ||
1092 | wake_up_process(next); | ||
1093 | } | ||
1094 | else | ||
1095 | { | ||
1096 | TRACE_CUR("queue %d: no one to steal.\n", kfmlp_get_idx(sem, my_queue)); | ||
1097 | |||
1098 | --(my_queue->count); | ||
1099 | if(my_queue->count < sem->shortest_queue->count) | ||
1100 | { | ||
1101 | sem->shortest_queue = my_queue; | ||
1102 | } | ||
1103 | } | ||
1104 | } | ||
1105 | |||
1106 | /* we lose the benefit of priority inheritance (if any) */ | ||
1107 | if (tsk_rt(t)->inh_task) | ||
1108 | clear_priority_inheritance(t); | ||
1109 | |||
1110 | out: | ||
1111 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1112 | |||
1113 | return err; | ||
1114 | } | ||
1115 | |||
1116 | int cedf_kfmlp_close(struct litmus_lock* l) | ||
1117 | { | ||
1118 | struct task_struct *t = current; | ||
1119 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1120 | struct kfmlp_queue *my_queue; | ||
1121 | unsigned long flags; | ||
1122 | |||
1123 | int owner; | ||
1124 | |||
1125 | spin_lock_irqsave(&sem->lock, flags); | ||
1126 | |||
1127 | my_queue = kfmlp_get_queue(sem, t); | ||
1128 | owner = (my_queue) ? (my_queue->owner == t) : 0; | ||
1129 | |||
1130 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1131 | |||
1132 | if (owner) | ||
1133 | cedf_kfmlp_unlock(l); | ||
1134 | |||
1135 | return 0; | ||
1136 | } | ||
1137 | |||
1138 | void cedf_kfmlp_free(struct litmus_lock* l) | ||
1139 | { | ||
1140 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1141 | kfree(sem->queues); | ||
1142 | kfree(sem); | ||
1143 | } | ||
1144 | |||
1145 | static struct litmus_lock_ops cedf_kfmlp_lock_ops = { | ||
1146 | .close = cedf_kfmlp_close, | ||
1147 | .lock = cedf_kfmlp_lock, | ||
1148 | .unlock = cedf_kfmlp_unlock, | ||
1149 | .deallocate = cedf_kfmlp_free, | ||
1150 | }; | ||
1151 | |||
1152 | static struct litmus_lock* cedf_new_kfmlp(void* __user arg, int* ret_code) | ||
1153 | { | ||
1154 | struct kfmlp_semaphore* sem; | ||
1155 | int num_resources = 0; | ||
1156 | int i; | ||
1157 | |||
1158 | if(!access_ok(VERIFY_READ, arg, sizeof(num_resources))) | ||
1159 | { | ||
1160 | *ret_code = -EINVAL; | ||
1161 | return(NULL); | ||
1162 | } | ||
1163 | if(__copy_from_user(&num_resources, arg, sizeof(num_resources))) | ||
1164 | { | ||
1165 | *ret_code = -EINVAL; | ||
1166 | return(NULL); | ||
1167 | } | ||
1168 | if(num_resources < 1) | ||
1169 | { | ||
1170 | *ret_code = -EINVAL; | ||
1171 | return(NULL); | ||
1172 | } | ||
1173 | |||
1174 | sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
1175 | if(!sem) | ||
1176 | { | ||
1177 | *ret_code = -ENOMEM; | ||
1178 | return NULL; | ||
1179 | } | ||
1180 | |||
1181 | sem->queues = kmalloc(sizeof(struct kfmlp_queue)*num_resources, GFP_KERNEL); | ||
1182 | if(!sem->queues) | ||
1183 | { | ||
1184 | kfree(sem); | ||
1185 | *ret_code = -ENOMEM; | ||
1186 | return NULL; | ||
1187 | } | ||
1188 | |||
1189 | sem->litmus_lock.ops = &cedf_kfmlp_lock_ops; | ||
1190 | spin_lock_init(&sem->lock); | ||
1191 | sem->num_resources = num_resources; | ||
1192 | |||
1193 | for(i = 0; i < num_resources; ++i) | ||
1194 | { | ||
1195 | sem->queues[i].owner = NULL; | ||
1196 | sem->queues[i].hp_waiter = NULL; | ||
1197 | init_waitqueue_head(&sem->queues[i].wait); | ||
1198 | sem->queues[i].count = 0; | ||
1199 | } | ||
1200 | |||
1201 | sem->shortest_queue = &sem->queues[0]; | ||
1202 | |||
1203 | *ret_code = 0; | ||
1204 | return &sem->litmus_lock; | ||
1205 | } | ||
1206 | |||
1207 | |||
1208 | /* **** lock constructor **** */ | ||
1209 | |||
1210 | static long cedf_allocate_lock(struct litmus_lock **lock, int type, | ||
1211 | void* __user arg) | ||
1212 | { | ||
1213 | int err = -ENXIO; | ||
1214 | |||
1215 | /* C-EDF currently only supports the KFMLP for global resources | ||
1216 | WITHIN a given cluster. DO NOT USE CROSS-CLUSTER! */ | ||
1217 | switch (type) { | ||
1218 | case KFMLP_SEM: | ||
1219 | *lock = cedf_new_kfmlp(arg, &err); | ||
1220 | break; | ||
1221 | }; | ||
1222 | |||
1223 | return err; | ||
1224 | } | ||
1225 | |||
1226 | #endif // CONFIG_LITMUS_LOCKING | ||
1227 | |||
1228 | |||
1229 | |||
665 | /* total number of cluster */ | 1230 | /* total number of cluster */ |
666 | static int num_clusters; | 1231 | static int num_clusters; |
667 | /* we do not support cluster of different sizes */ | 1232 | /* we do not support cluster of different sizes */ |
@@ -831,6 +1396,9 @@ static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = { | |||
831 | .task_block = cedf_task_block, | 1396 | .task_block = cedf_task_block, |
832 | .admit_task = cedf_admit_task, | 1397 | .admit_task = cedf_admit_task, |
833 | .activate_plugin = cedf_activate_plugin, | 1398 | .activate_plugin = cedf_activate_plugin, |
1399 | #ifdef CONFIG_LITMUS_LOCKING | ||
1400 | .allocate_lock = cedf_allocate_lock, | ||
1401 | #endif | ||
834 | }; | 1402 | }; |
835 | 1403 | ||
836 | static struct proc_dir_entry *cluster_file = NULL, *cedf_dir = NULL; | 1404 | static struct proc_dir_entry *cluster_file = NULL, *cedf_dir = NULL; |
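A point worth noting about the C-EDF changes as a whole: the KFMLP paths nest locks in a fixed order, taking the semaphore's own IRQ-disabling spinlock (sem->lock) first and the per-cluster cluster_lock second, which is why the priority-inheritance helpers come in two flavors. set_priority_inheritance()/clear_priority_inheritance() acquire cluster_lock themselves, while the double-underscore variants assume the caller already holds it, as kfmlp_remove_hp_waiter() does when it locks the victim owner's cluster directly. The idiom in isolation, as a small user-space sketch (hypothetical, pthread-based):

	#include <pthread.h>

	static pthread_mutex_t cluster_lock = PTHREAD_MUTEX_INITIALIZER;
	static int inherited_prio;	/* stands in for tsk_rt(t)->inh_task */

	/* caller must already hold cluster_lock
	 * (mirrors __set_priority_inheritance()) */
	static void __set_inherit(int prio)
	{
		inherited_prio = prio;
	}

	/* takes cluster_lock itself (mirrors set_priority_inheritance());
	 * must not be called with cluster_lock held, or it would deadlock */
	static void set_inherit(int prio)
	{
		pthread_mutex_lock(&cluster_lock);
		__set_inherit(prio);
		pthread_mutex_unlock(&cluster_lock);
	}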
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 6ed504f4750e..d5bb326ebc9b 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -12,6 +12,8 @@
12 | #include <linux/percpu.h> | 12 | #include <linux/percpu.h> |
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/uaccess.h> | ||
16 | |||
15 | 17 | ||
16 | #include <litmus/litmus.h> | 18 | #include <litmus/litmus.h> |
17 | #include <litmus/jobs.h> | 19 | #include <litmus/jobs.h> |
@@ -437,6 +439,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
437 | TRACE_TASK(prev, "invoked gsnedf_schedule.\n"); | 439 | TRACE_TASK(prev, "invoked gsnedf_schedule.\n"); |
438 | #endif | 440 | #endif |
439 | 441 | ||
442 | /* | ||
440 | if (exists) | 443 | if (exists) |
441 | TRACE_TASK(prev, | 444 | TRACE_TASK(prev, |
442 | "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " | 445 | "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " |
@@ -446,7 +449,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
446 | if (entry->linked && preempt) | 449 | if (entry->linked && preempt) |
447 | TRACE_TASK(prev, "will be preempted by %s/%d\n", | 450 | TRACE_TASK(prev, "will be preempted by %s/%d\n", |
448 | entry->linked->comm, entry->linked->pid); | 451 | entry->linked->comm, entry->linked->pid); |
449 | 452 | */ | |
450 | 453 | ||
451 | /* If a task blocks we have no choice but to reschedule. | 454 | /* If a task blocks we have no choice but to reschedule. |
452 | */ | 455 | */ |
@@ -644,43 +647,44 @@ static long gsnedf_admit_task(struct task_struct* tsk) | |||
644 | 647 | ||
645 | #include <litmus/fdso.h> | 648 | #include <litmus/fdso.h> |
646 | 649 | ||
647 | /* called with IRQs off */ | 650 | static void __set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) |
648 | static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | ||
649 | { | 651 | { |
650 | int linked_on; | 652 | int linked_on; |
651 | int check_preempt = 0; | 653 | int check_preempt = 0; |
652 | 654 | ||
653 | raw_spin_lock(&gsnedf_lock); | 655 | if(prio_inh != NULL) |
654 | 656 | TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); | |
655 | TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); | 657 | else |
658 | TRACE_TASK(t, "inherits priority from %p\n", prio_inh); | ||
659 | |||
656 | tsk_rt(t)->inh_task = prio_inh; | 660 | tsk_rt(t)->inh_task = prio_inh; |
657 | 661 | ||
658 | linked_on = tsk_rt(t)->linked_on; | 662 | linked_on = tsk_rt(t)->linked_on; |
659 | 663 | ||
660 | /* If it is scheduled, then we need to reorder the CPU heap. */ | 664 | /* If it is scheduled, then we need to reorder the CPU heap. */ |
661 | if (linked_on != NO_CPU) { | 665 | if (linked_on != NO_CPU) { |
662 | TRACE_TASK(t, "%s: linked on %d\n", | 666 | TRACE_TASK(t, "%s: linked on %d\n", |
663 | __FUNCTION__, linked_on); | 667 | __FUNCTION__, linked_on); |
664 | /* Holder is scheduled; need to re-order CPUs. | 668 | /* Holder is scheduled; need to re-order CPUs. |
665 | * We can't use heap_decrease() here since | 669 | * We can't use heap_decrease() here since |
666 | * the cpu_heap is ordered in reverse direction, so | 670 | * the cpu_heap is ordered in reverse direction, so |
667 | * it is actually an increase. */ | 671 | * it is actually an increase. */ |
668 | bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, | 672 | bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, |
669 | gsnedf_cpus[linked_on]->hn); | 673 | gsnedf_cpus[linked_on]->hn); |
670 | bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, | 674 | bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, |
671 | gsnedf_cpus[linked_on]->hn); | 675 | gsnedf_cpus[linked_on]->hn); |
672 | } else { | 676 | } else { |
673 | /* holder may be queued: first stop queue changes */ | 677 | /* holder may be queued: first stop queue changes */ |
674 | raw_spin_lock(&gsnedf.release_lock); | 678 | raw_spin_lock(&gsnedf.release_lock); |
675 | if (is_queued(t)) { | 679 | if (is_queued(t)) { |
676 | TRACE_TASK(t, "%s: is queued\n", | 680 | TRACE_TASK(t, "%s: is queued\n", |
677 | __FUNCTION__); | 681 | __FUNCTION__); |
678 | /* We need to update the position of holder in some | 682 | /* We need to update the position of holder in some |
679 | * heap. Note that this could be a release heap if we | 683 | * heap. Note that this could be a release heap if we |
680 | * budget enforcement is used and this job overran. */ | 684 | * budget enforcement is used and this job overran. */ |
681 | check_preempt = | 685 | check_preempt = |
682 | !bheap_decrease(edf_ready_order, | 686 | !bheap_decrease(edf_ready_order, |
683 | tsk_rt(t)->heap_node); | 687 | tsk_rt(t)->heap_node); |
684 | } else { | 688 | } else { |
685 | /* Nothing to do: if it is not queued and not linked | 689 | /* Nothing to do: if it is not queued and not linked |
686 | * then it is either sleeping or currently being moved | 690 | * then it is either sleeping or currently being moved |
@@ -688,10 +692,10 @@ static void set_priority_inheritance(struct task_struct* t, struct task_struct* | |||
688 | * will use the correct priority when enqueuing the | 692 | * will use the correct priority when enqueuing the |
689 | * task. */ | 693 | * task. */ |
690 | TRACE_TASK(t, "%s: is NOT queued => Done.\n", | 694 | TRACE_TASK(t, "%s: is NOT queued => Done.\n", |
691 | __FUNCTION__); | 695 | __FUNCTION__); |
692 | } | 696 | } |
693 | raw_spin_unlock(&gsnedf.release_lock); | 697 | raw_spin_unlock(&gsnedf.release_lock); |
694 | 698 | ||
695 | /* If holder was enqueued in a release heap, then the following | 699 | /* If holder was enqueued in a release heap, then the following |
696 | * preemption check is pointless, but we can't easily detect | 700 | * preemption check is pointless, but we can't easily detect |
697 | * that case. If you want to fix this, then consider that | 701 | * that case. If you want to fix this, then consider that |
@@ -703,31 +707,48 @@ static void set_priority_inheritance(struct task_struct* t, struct task_struct* | |||
703 | * sure preemption checks get the right task, not the | 707 | * sure preemption checks get the right task, not the |
704 | * potentially stale cache. */ | 708 | * potentially stale cache. */ |
705 | bheap_uncache_min(edf_ready_order, | 709 | bheap_uncache_min(edf_ready_order, |
706 | &gsnedf.ready_queue); | 710 | &gsnedf.ready_queue); |
707 | check_for_preemptions(); | 711 | check_for_preemptions(); |
708 | } | 712 | } |
709 | } | 713 | } |
710 | |||
711 | raw_spin_unlock(&gsnedf_lock); | ||
712 | } | 714 | } |
713 | 715 | ||
714 | /* called with IRQs off */ | 716 | /* called with IRQs off */ |
715 | static void clear_priority_inheritance(struct task_struct* t) | 717 | static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) |
716 | { | 718 | { |
717 | raw_spin_lock(&gsnedf_lock); | 719 | raw_spin_lock(&gsnedf_lock); |
720 | __set_priority_inheritance(t, prio_inh); | ||
721 | raw_spin_unlock(&gsnedf_lock); | ||
722 | } | ||
718 | 723 | ||
719 | /* A job only stops inheriting a priority when it releases a | 724 | static void __clear_priority_inheritance(struct task_struct* t) |
720 | * resource. Thus we can make the following assumption.*/ | 725 | { |
721 | BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU); | ||
722 | |||
723 | TRACE_TASK(t, "priority restored\n"); | 726 | TRACE_TASK(t, "priority restored\n"); |
724 | tsk_rt(t)->inh_task = NULL; | 727 | |
725 | 728 | if(tsk_rt(t)->scheduled_on != NO_CPU) | |
726 | /* Check if rescheduling is necessary. We can't use heap_decrease() | 729 | { |
727 | * since the priority was effectively lowered. */ | 730 | TRACE_TASK(t, "priority restored: t IS currently scheduled.\n"); |
728 | unlink(t); | 731 | |
729 | gsnedf_job_arrival(t); | 732 | tsk_rt(t)->inh_task = NULL; |
733 | |||
734 | /* Check if rescheduling is necessary. We can't use heap_decrease() | ||
735 | * since the priority was effectively lowered. */ | ||
736 | unlink(t); | ||
737 | gsnedf_job_arrival(t); | ||
738 | } | ||
739 | else | ||
740 | { | ||
741 | TRACE_TASK(t, "priority restored: t is NOT currently scheduled.\n"); |
742 | |||
743 | __set_priority_inheritance(t, NULL); | ||
744 | } | ||
745 | } | ||
730 | 746 | ||
747 | /* called with IRQs off */ | ||
748 | static void clear_priority_inheritance(struct task_struct* t) | ||
749 | { | ||
750 | raw_spin_lock(&gsnedf_lock); | ||
751 | __clear_priority_inheritance(t); | ||
731 | raw_spin_unlock(&gsnedf_lock); | 752 | raw_spin_unlock(&gsnedf_lock); |
732 | } | 753 | } |
733 | 754 | ||
@@ -932,11 +953,441 @@ static struct litmus_lock* gsnedf_new_fmlp(void) | |||
932 | return &sem->litmus_lock; | 953 | return &sem->litmus_lock; |
933 | } | 954 | } |
934 | 955 | ||
956 | |||
957 | |||
958 | |||
959 | |||
960 | |||
961 | |||
962 | /* ******************** KFMLP support ********************** */ | ||
963 | |||
964 | /* struct for semaphore with priority inheritance */ | ||
965 | struct kfmlp_queue | ||
966 | { | ||
967 | wait_queue_head_t wait; | ||
968 | struct task_struct* owner; | ||
969 | struct task_struct* hp_waiter; | ||
970 | int count; /* number of waiters + holder */ | ||
971 | }; | ||
972 | |||
973 | struct kfmlp_semaphore | ||
974 | { | ||
975 | struct litmus_lock litmus_lock; | ||
976 | |||
977 | spinlock_t lock; | ||
978 | |||
979 | int num_resources; /* aka k */ | ||
980 | struct kfmlp_queue *queues; /* array */ | ||
981 | struct kfmlp_queue *shortest_queue; /* pointer to shortest queue */ | ||
982 | }; | ||
983 | |||
984 | static inline struct kfmlp_semaphore* kfmlp_from_lock(struct litmus_lock* lock) | ||
985 | { | ||
986 | return container_of(lock, struct kfmlp_semaphore, litmus_lock); | ||
987 | } | ||
988 | |||
989 | static inline int kfmlp_get_idx(struct kfmlp_semaphore* sem, | ||
990 | struct kfmlp_queue* queue) | ||
991 | { | ||
992 | return (queue - &sem->queues[0]); | ||
993 | } | ||
994 | |||
995 | static inline struct kfmlp_queue* kfmlp_get_queue(struct kfmlp_semaphore* sem, | ||
996 | struct task_struct* holder) | ||
997 | { | ||
998 | int i; | ||
999 | for(i = 0; i < sem->num_resources; ++i) | ||
1000 | if(sem->queues[i].owner == holder) | ||
1001 | return(&sem->queues[i]); | ||
1002 | return(NULL); | ||
1003 | } | ||
1004 | |||
1005 | /* caller is responsible for locking */ | ||
1006 | struct task_struct* kfmlp_find_hp_waiter(struct kfmlp_queue *kqueue, | ||
1007 | struct task_struct *skip) | ||
1008 | { | ||
1009 | struct list_head *pos; | ||
1010 | struct task_struct *queued, *found = NULL; | ||
1011 | |||
1012 | list_for_each(pos, &kqueue->wait.task_list) { | ||
1013 | queued = (struct task_struct*) list_entry(pos, wait_queue_t, | ||
1014 | task_list)->private; | ||
1015 | |||
1016 | /* Compare task prios, find high prio task. */ | ||
1017 | if (queued != skip && edf_higher_prio(queued, found)) | ||
1018 | found = queued; | ||
1019 | } | ||
1020 | return found; | ||
1021 | } | ||
1022 | |||
1023 | static inline struct kfmlp_queue* kfmlp_find_shortest( | ||
1024 | struct kfmlp_semaphore* sem, | ||
1025 | struct kfmlp_queue* search_start) | ||
1026 | { | ||
1027 | // we start our search at search_start instead of at the beginning of the | ||
1028 | // queue list to load-balance across all resources. | ||
1029 | struct kfmlp_queue* step = search_start; | ||
1030 | struct kfmlp_queue* shortest = sem->shortest_queue; | ||
1031 | |||
1032 | do | ||
1033 | { | ||
1034 | step = (step+1 != &sem->queues[sem->num_resources]) ? | ||
1035 | step+1 : &sem->queues[0]; | ||
1036 | |||
1037 | if(step->count < shortest->count) | ||
1038 | { | ||
1039 | shortest = step; | ||
1040 | if(step->count == 0) | ||
1041 | break; /* can't get any shorter */ | ||
1042 | } | ||
1043 | }while(step != search_start); | ||
1044 | |||
1045 | return(shortest); | ||
1046 | } | ||
1047 | |||
1048 | struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem) | ||
1049 | { | ||
1050 | /* must hold sem->lock */ | ||
1051 | |||
1052 | struct kfmlp_queue *my_queue = NULL; | ||
1053 | struct task_struct *max_hp = NULL; | ||
1054 | |||
1055 | |||
1056 | struct list_head *pos; | ||
1057 | struct task_struct *queued; | ||
1058 | int i; | ||
1059 | |||
1060 | for(i = 0; i < sem->num_resources; ++i) | ||
1061 | { | ||
1062 | if( (sem->queues[i].count > 1) && | ||
1063 | ((my_queue == NULL) || | ||
1064 | (edf_higher_prio(sem->queues[i].hp_waiter, my_queue->hp_waiter))) ) | ||
1065 | { | ||
1066 | my_queue = &sem->queues[i]; | ||
1067 | } | ||
1068 | } | ||
1069 | |||
1070 | if(my_queue) | ||
1071 | { | ||
1072 | max_hp = my_queue->hp_waiter; | ||
1073 | |||
1074 | BUG_ON(!max_hp); | ||
1075 | |||
1076 | TRACE_CUR("queue %d: stealing %s/%d from queue %d\n", | ||
1077 | kfmlp_get_idx(sem, my_queue), | ||
1078 | max_hp->comm, max_hp->pid, | ||
1079 | kfmlp_get_idx(sem, my_queue)); | ||
1080 | |||
1081 | my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, max_hp); | ||
1082 | |||
1083 | raw_spin_lock(&gsnedf_lock); | ||
1084 | |||
1085 | if(tsk_rt(my_queue->owner)->inh_task == max_hp) | ||
1086 | { | ||
1087 | __clear_priority_inheritance(my_queue->owner); | ||
1088 | if(my_queue->hp_waiter != NULL) | ||
1089 | { | ||
1090 | __set_priority_inheritance(my_queue->owner, my_queue->hp_waiter); | ||
1091 | } | ||
1092 | } | ||
1093 | raw_spin_unlock(&gsnedf_lock); | ||
1094 | |||
1095 | list_for_each(pos, &my_queue->wait.task_list) | ||
1096 | { | ||
1097 | queued = (struct task_struct*) list_entry(pos, wait_queue_t, | ||
1098 | task_list)->private; | ||
1099 | /* Compare task prios, find high prio task. */ | ||
1100 | if (queued == max_hp) | ||
1101 | { | ||
1102 | __remove_wait_queue(&my_queue->wait, | ||
1103 | list_entry(pos, wait_queue_t, task_list)); | ||
1104 | break; | ||
1105 | } | ||
1106 | } | ||
1107 | --(my_queue->count); | ||
1108 | } | ||
1109 | |||
1110 | return(max_hp); | ||
1111 | } | ||
1112 | |||
1113 | int gsnedf_kfmlp_lock(struct litmus_lock* l) | ||
1114 | { | ||
1115 | struct task_struct* t = current; | ||
1116 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1117 | struct kfmlp_queue* my_queue; | ||
1118 | wait_queue_t wait; | ||
1119 | unsigned long flags; | ||
1120 | |||
1121 | if (!is_realtime(t)) | ||
1122 | return -EPERM; | ||
1123 | |||
1124 | spin_lock_irqsave(&sem->lock, flags); | ||
1125 | |||
1126 | my_queue = sem->shortest_queue; | ||
1127 | |||
1128 | if (my_queue->owner) { | ||
1129 | /* resource is not free => must suspend and wait */ | ||
1130 | TRACE_CUR("queue %d: Resource is not free => must suspend and wait.\n", | ||
1131 | kfmlp_get_idx(sem, my_queue)); | ||
1132 | |||
1133 | init_waitqueue_entry(&wait, t); | ||
1134 | |||
1135 | /* FIXME: interruptible would be nice some day */ | ||
1136 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
1137 | |||
1138 | __add_wait_queue_tail_exclusive(&my_queue->wait, &wait); | ||
1139 | |||
1140 | /* check if we need to activate priority inheritance */ | ||
1141 | if (edf_higher_prio(t, my_queue->hp_waiter)) | ||
1142 | { | ||
1143 | my_queue->hp_waiter = t; | ||
1144 | if (edf_higher_prio(t, my_queue->owner)) | ||
1145 | { | ||
1146 | set_priority_inheritance(my_queue->owner, my_queue->hp_waiter); | ||
1147 | } | ||
1148 | } | ||
1149 | |||
1150 | ++(my_queue->count); | ||
1151 | sem->shortest_queue = kfmlp_find_shortest(sem, my_queue); | ||
1152 | |||
1153 | /* release lock before sleeping */ | ||
1154 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1155 | |||
1156 | /* We depend on the FIFO order. Thus, we don't need to recheck | ||
1157 | * when we wake up; we are guaranteed to have the lock since | ||
1158 | * there is only one wake up per release (or steal). | ||
1159 | */ | ||
1160 | schedule(); | ||
1161 | |||
1162 | |||
1163 | if(my_queue->owner == t) | ||
1164 | { | ||
1165 | TRACE_CUR("queue %d: acquired through waiting\n", | ||
1166 | kfmlp_get_idx(sem, my_queue)); | ||
1167 | } | ||
1168 | else | ||
1169 | { | ||
1170 | /* this case may happen if our wait entry was stolen | ||
1171 | between queues. */ | ||
1172 | BUG_ON(!kfmlp_get_queue(sem, t)); | ||
1173 | TRACE_CUR("queue %d: acquired through stealing\n", | ||
1174 | kfmlp_get_idx(sem, kfmlp_get_queue(sem, t))); | ||
1175 | } | ||
1176 | } | ||
1177 | else | ||
1178 | { | ||
1179 | TRACE_CUR("queue %d: acquired immediately\n", | ||
1180 | kfmlp_get_idx(sem, my_queue)); | ||
1181 | |||
1182 | my_queue->owner = t; | ||
1183 | |||
1184 | ++(my_queue->count); | ||
1185 | sem->shortest_queue = kfmlp_find_shortest(sem, my_queue); | ||
1186 | |||
1187 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1188 | } | ||
1189 | |||
1190 | return kfmlp_get_idx(sem, my_queue); | ||
1191 | } | ||
1192 | |||
1193 | int gsnedf_kfmlp_unlock(struct litmus_lock* l) | ||
1194 | { | ||
1195 | struct task_struct *t = current, *next; | ||
1196 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1197 | struct kfmlp_queue *my_queue; | ||
1198 | unsigned long flags; | ||
1199 | int err = 0; | ||
1200 | |||
1201 | spin_lock_irqsave(&sem->lock, flags); | ||
1202 | |||
1203 | my_queue = kfmlp_get_queue(sem, t); | ||
1204 | |||
1205 | if (!my_queue) { | ||
1206 | err = -EINVAL; | ||
1207 | goto out; | ||
1208 | } | ||
1209 | |||
1210 | /* check if there are jobs waiting for this resource */ | ||
1211 | next = __waitqueue_remove_first(&my_queue->wait); | ||
1212 | if (next) { | ||
1213 | /* | ||
1214 | TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n", | ||
1215 | kfmlp_get_idx(sem, my_queue), | ||
1216 | next->comm, next->pid); | ||
1217 | */ | ||
1218 | /* next becomes the resource holder */ | ||
1219 | my_queue->owner = next; | ||
1220 | |||
1221 | --(my_queue->count); | ||
1222 | if(my_queue->count < sem->shortest_queue->count) | ||
1223 | { | ||
1224 | sem->shortest_queue = my_queue; | ||
1225 | } | ||
1226 | |||
1227 | TRACE_CUR("queue %d: lock ownership passed to %s/%d\n", | ||
1228 | kfmlp_get_idx(sem, my_queue), next->comm, next->pid); | ||
1229 | |||
1230 | /* determine new hp_waiter if necessary */ | ||
1231 | if (next == my_queue->hp_waiter) { | ||
1232 | TRACE_TASK(next, "was highest-prio waiter\n"); | ||
1233 | /* next has the highest priority --- it doesn't need to | ||
1234 | * inherit. However, we need to make sure that the | ||
1235 | * next-highest priority in the queue is reflected in | ||
1236 | * hp_waiter. */ | ||
1237 | my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, next); | ||
1238 | if (my_queue->hp_waiter) | ||
1239 | TRACE_TASK(my_queue->hp_waiter, "queue %d: is new highest-prio waiter\n", kfmlp_get_idx(sem, my_queue)); | ||
1240 | else | ||
1241 | TRACE("queue %d: no further waiters\n", kfmlp_get_idx(sem, my_queue)); | ||
1242 | } else { | ||
1243 | /* Well, if next is not the highest-priority waiter, | ||
1244 | * then it ought to inherit the highest-priority | ||
1245 | * waiter's priority. */ | ||
1246 | set_priority_inheritance(next, my_queue->hp_waiter); | ||
1247 | } | ||
1248 | |||
1249 | /* wake up next */ | ||
1250 | wake_up_process(next); | ||
1251 | } | ||
1252 | else | ||
1253 | { | ||
1254 | TRACE_CUR("queue %d: looking to steal someone...\n", kfmlp_get_idx(sem, my_queue)); | ||
1255 | |||
1256 | next = kfmlp_remove_hp_waiter(sem); /* returns NULL if nothing to steal */ | ||
1257 | |||
1258 | my_queue->owner = next; | ||
1259 | |||
1260 | if(next) | ||
1261 | { | ||
1262 | TRACE_CUR("queue %d: lock ownership passed to %s/%d (which was stolen)\n", | ||
1263 | kfmlp_get_idx(sem, my_queue), | ||
1264 | next->comm, next->pid); | ||
1265 | |||
1266 | /* wake up next */ | ||
1267 | wake_up_process(next); | ||
1268 | } | ||
1269 | else | ||
1270 | { | ||
1271 | TRACE_CUR("queue %d: no one to steal.\n", kfmlp_get_idx(sem, my_queue)); | ||
1272 | |||
1273 | --(my_queue->count); | ||
1274 | if(my_queue->count < sem->shortest_queue->count) | ||
1275 | { | ||
1276 | sem->shortest_queue = my_queue; | ||
1277 | } | ||
1278 | } | ||
1279 | } | ||
1280 | |||
1281 | /* we lose the benefit of priority inheritance (if any) */ | ||
1282 | if (tsk_rt(t)->inh_task) | ||
1283 | clear_priority_inheritance(t); | ||
1284 | |||
1285 | out: | ||
1286 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1287 | |||
1288 | return err; | ||
1289 | } | ||
1290 | |||
1291 | int gsnedf_kfmlp_close(struct litmus_lock* l) | ||
1292 | { | ||
1293 | struct task_struct *t = current; | ||
1294 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1295 | struct kfmlp_queue *my_queue; | ||
1296 | unsigned long flags; | ||
1297 | |||
1298 | int owner; | ||
1299 | |||
1300 | spin_lock_irqsave(&sem->lock, flags); | ||
1301 | |||
1302 | my_queue = kfmlp_get_queue(sem, t); | ||
1303 | owner = (my_queue) ? (my_queue->owner == t) : 0; | ||
1304 | |||
1305 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1306 | |||
1307 | if (owner) | ||
1308 | gsnedf_kfmlp_unlock(l); | ||
1309 | |||
1310 | return 0; | ||
1311 | } | ||
1312 | |||
1313 | void gsnedf_kfmlp_free(struct litmus_lock* l) | ||
1314 | { | ||
1315 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1316 | kfree(sem->queues); | ||
1317 | kfree(sem); | ||
1318 | } | ||
1319 | |||
1320 | static struct litmus_lock_ops gsnedf_kfmlp_lock_ops = { | ||
1321 | .close = gsnedf_kfmlp_close, | ||
1322 | .lock = gsnedf_kfmlp_lock, | ||
1323 | .unlock = gsnedf_kfmlp_unlock, | ||
1324 | .deallocate = gsnedf_kfmlp_free, | ||
1325 | }; | ||
1326 | |||
1327 | static struct litmus_lock* gsnedf_new_kfmlp(void* __user arg, int* ret_code) | ||
1328 | { | ||
1329 | struct kfmlp_semaphore* sem; | ||
1330 | int num_resources = 0; | ||
1331 | int i; | ||
1332 | |||
1333 | if(!access_ok(VERIFY_READ, arg, sizeof(num_resources))) | ||
1334 | { | ||
1335 | *ret_code = -EINVAL; | ||
1336 | return(NULL); | ||
1337 | } | ||
1338 | if(__copy_from_user(&num_resources, arg, sizeof(num_resources))) | ||
1339 | { | ||
1340 | *ret_code = -EINVAL; | ||
1341 | return(NULL); | ||
1342 | } | ||
1343 | if(num_resources < 1) | ||
1344 | { | ||
1345 | *ret_code = -EINVAL; | ||
1346 | return(NULL); | ||
1347 | } | ||
1348 | |||
1349 | sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
1350 | if(!sem) | ||
1351 | { | ||
1352 | *ret_code = -ENOMEM; | ||
1353 | return NULL; | ||
1354 | } | ||
1355 | |||
1356 | sem->queues = kmalloc(sizeof(struct kfmlp_queue)*num_resources, GFP_KERNEL); | ||
1357 | if(!sem->queues) | ||
1358 | { | ||
1359 | kfree(sem); | ||
1360 | *ret_code = -ENOMEM; | ||
1361 | return NULL; | ||
1362 | } | ||
1363 | |||
1364 | sem->litmus_lock.ops = &gsnedf_kfmlp_lock_ops; | ||
1365 | spin_lock_init(&sem->lock); | ||
1366 | sem->num_resources = num_resources; | ||
1367 | |||
1368 | for(i = 0; i < num_resources; ++i) | ||
1369 | { | ||
1370 | sem->queues[i].owner = NULL; | ||
1371 | sem->queues[i].hp_waiter = NULL; | ||
1372 | init_waitqueue_head(&sem->queues[i].wait); | ||
1373 | sem->queues[i].count = 0; | ||
1374 | } | ||
1375 | |||
1376 | sem->shortest_queue = &sem->queues[0]; | ||
1377 | |||
1378 | *ret_code = 0; | ||
1379 | return &sem->litmus_lock; | ||
1380 | } | ||
1381 | |||
1382 | |||
1383 | |||
1384 | |||
1385 | |||
935 | /* **** lock constructor **** */ | 1386 | /* **** lock constructor **** */ |
936 | 1387 | ||
937 | 1388 | ||
938 | static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, | 1389 | static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, |
939 | void* __user unused) | 1390 | void* __user arg) |
940 | { | 1391 | { |
941 | int err = -ENXIO; | 1392 | int err = -ENXIO; |
942 | 1393 | ||
@@ -951,7 +1402,10 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, | |||
951 | else | 1402 | else |
952 | err = -ENOMEM; | 1403 | err = -ENOMEM; |
953 | break; | 1404 | break; |
954 | 1405 | ||
1406 | case KFMLP_SEM: | ||
1407 | *lock = gsnedf_new_kfmlp(arg, &err); | ||
1408 | break; | ||
955 | }; | 1409 | }; |
956 | 1410 | ||
957 | return err; | 1411 | return err; |
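With both plugins now dispatching KFMLP_SEM in their allocate_lock() callbacks, and the constructors reading a single int (the replica count k) through the user-supplied pointer, a real-time task is expected to create and use the lock roughly as below. The wrapper names (od_openx(), litmus_lock(), litmus_unlock(), od_close()) follow liblitmus conventions and are assumptions here, not something this patch defines; the one detail the patch does pin down is that the lock path returns kfmlp_get_idx(), i.e. the index of the replica that was acquired, assuming the generic locking syscall passes the plugin's return value through.

	/* Hypothetical user-space sketch; liblitmus wrapper names and signatures
	 * are assumptions, not defined by this patch. */
	#include <litmus.h>
	#include <fcntl.h>
	#include <sys/stat.h>
	#include <unistd.h>

	int use_kfmlp(const char *ns_file)
	{
		int k = 3;	/* number of replicas protected by the k-FMLP */
		int fd, od, replica;

		fd = open(ns_file, O_RDONLY | O_CREAT, S_IRUSR | S_IWUSR);
		if (fd < 0)
			return fd;

		/* The config pointer is what the kernel-side constructor reads via
		 * __copy_from_user(&num_resources, arg, sizeof(num_resources)). */
		od = od_openx(fd, KFMLP_SEM, 0, &k);
		if (od < 0)
			return od;

		replica = litmus_lock(od);	/* blocks; returns the replica index */
		/* ... exclusive use of replica 'replica' ... */
		litmus_unlock(od);

		od_close(od);
		close(fd);
		return 0;
	}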
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c
index 5a15ce938984..9a6fe487718e 100644
--- a/litmus/sched_litmus.c
+++ b/litmus/sched_litmus.c
@@ -103,7 +103,9 @@ litmus_schedule(struct rq *rq, struct task_struct *prev)
103 | } | 103 | } |
104 | #ifdef __ARCH_WANT_UNLOCKED_CTXSW | 104 | #ifdef __ARCH_WANT_UNLOCKED_CTXSW |
105 | if (next->oncpu) | 105 | if (next->oncpu) |
106 | { | ||
106 | TRACE_TASK(next, "waiting for !oncpu"); | 107 | TRACE_TASK(next, "waiting for !oncpu"); |
108 | } | ||
107 | while (next->oncpu) { | 109 | while (next->oncpu) { |
108 | cpu_relax(); | 110 | cpu_relax(); |
109 | mb(); | 111 | mb(); |