author    Bjoern B. Brandenburg <bbb@cs.unc.edu>    2011-07-26 21:12:16 -0400
committer Bjoern B. Brandenburg <bbb@cs.unc.edu>    2011-07-26 21:12:16 -0400
commit    a8defdf2a0a970dbf6a3facee6e5fcf468649195 (patch)
tree      f5c06cc9062a3a835300910db8e04ad94fb6ef2f /litmus
parent    c06966f73c0ce3bfff605e4bbc15ab6453176c8d (diff)
GSN-EDF: add global OMLP support

The implementation is similar to that of the FMLP; the only difference
is that the OMLP uses a hybrid FIFO/priority queue.
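For orientation before reading the patch: a global OMLP semaphore admits at
most m jobs, the lock holder plus m - 1 waiters, to a FIFO segment; any
further waiters sit in a priority queue ordered by deadline and are promoted
to the FIFO tail as slots open up. The stand-alone sketch below simulates
that rule; it is an editorial illustration with toy names (toy_omlp,
enqueue, release), not code from the patch.

/* Editorial sketch of the OMLP hybrid queue (hypothetical toy code, not
 * part of the patch): at most M jobs, holder included, occupy the FIFO
 * segment; later arrivals wait in deadline order and are promoted to the
 * FIFO tail when a slot frees up. */
#include <stdio.h>
#include <string.h>

#define M 4                            /* assume m = 4 processors */

struct toy_omlp {
	int fifo[M];                   /* job deadlines; fifo[0] is the holder */
	int fifo_len;
	int prio[16];                  /* deadline-ordered overflow queue */
	int prio_len;
};

static void enqueue(struct toy_omlp *s, int deadline)
{
	if (s->fifo_len < M) {
		s->fifo[s->fifo_len++] = deadline;  /* free FIFO slot */
	} else {
		/* insertion sort keeps the overflow queue deadline-ordered */
		int i = s->prio_len++;
		while (i > 0 && s->prio[i - 1] > deadline) {
			s->prio[i] = s->prio[i - 1];
			i--;
		}
		s->prio[i] = deadline;
	}
}

static int release(struct toy_omlp *s)
{
	if (!s->fifo_len)
		return -1;
	/* holder leaves; the next FIFO entry becomes the holder */
	memmove(s->fifo, s->fifo + 1, sizeof(int) * --s->fifo_len);
	if (s->prio_len) {
		/* promote the earliest-deadline waiter to the FIFO tail */
		s->fifo[s->fifo_len++] = s->prio[0];
		memmove(s->prio, s->prio + 1, sizeof(int) * --s->prio_len);
	}
	return s->fifo_len ? s->fifo[0] : -1;
}

int main(void)
{
	struct toy_omlp s = { {0}, 0, {0}, 0 };
	int deadlines[] = { 50, 90, 70, 80, 20, 60 };  /* arrival order */
	int i, holder;

	for (i = 0; i < 6; i++)
		enqueue(&s, deadlines[i]);
	/* prints 50 90 70 80 20 60: the first m arrivals are served in
	 * FIFO order, the overflow (20, 60) in deadline order */
	printf("holder: %d\n", s.fifo[0]);
	while ((holder = release(&s)) >= 0)
		printf("holder: %d\n", holder);
	return 0;
}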
Diffstat (limited to 'litmus')
-rw-r--r--	litmus/sched_gsn_edf.c	316
1 file changed, 308 insertions(+), 8 deletions(-)
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index a0f3ba54ed8f..9e3eb22eed74 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -610,14 +610,14 @@ static long gsnedf_admit_task(struct task_struct* tsk)
 
 #include <litmus/fdso.h>
 
+
+
 /* called with IRQs off */
-static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
+static void __set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
 {
 	int linked_on;
 	int check_preempt = 0;
 
-	raw_spin_lock(&gsnedf_lock);
-
 	TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
 	tsk_rt(t)->inh_task = prio_inh;
 
@@ -642,7 +642,7 @@ static void set_priority_inheritance(struct task_struct*
 		TRACE_TASK(t, "%s: is queued\n",
 			   __FUNCTION__);
 		/* We need to update the position of holder in some
-		 * heap. Note that this could be a release heap if we
+		 * heap. Note that this could be a release heap if
 		 * budget enforcement is used and this job overran. */
 		check_preempt =
 			!bheap_decrease(edf_ready_order,
@@ -673,15 +673,17 @@ static void set_priority_inheritance(struct task_struct*
 			check_for_preemptions();
 		}
 	}
-
-	raw_spin_unlock(&gsnedf_lock);
 }
 
-/* called with IRQs off */
-static void clear_priority_inheritance(struct task_struct* t)
+static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
 {
 	raw_spin_lock(&gsnedf_lock);
+	__set_priority_inheritance(t, prio_inh);
+	raw_spin_unlock(&gsnedf_lock);
+}
 
+static void __clear_priority_inheritance(struct task_struct* t)
+{
 	/* A job only stops inheriting a priority when it releases a
 	 * resource. Thus we can make the following assumption.*/
 	BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU);
@@ -693,6 +695,35 @@ static void clear_priority_inheritance(struct task_struct* t)
 	 * since the priority was effectively lowered. */
 	unlink(t);
 	gsnedf_job_arrival(t);
+}
+
+/* called with IRQs off */
+static void clear_priority_inheritance(struct task_struct* t)
+{
+	raw_spin_lock(&gsnedf_lock);
+	__clear_priority_inheritance(t);
+	raw_spin_unlock(&gsnedf_lock);
+}
+
+/* set and clear at the same time to avoid having to
+ * acquire the runqueue lock twice */
+static void update_priority_inheritance(
+	struct task_struct* deprived,
+	struct task_struct* blocker,
+	struct task_struct* blocked)
+{
+	/* things to do:
+	 * 1) deprived no longer inherits anything.
+	 * 2) blocker gets blocked's priority.
+	 */
+
+	raw_spin_lock(&gsnedf_lock);
+
+	if (tsk_rt(deprived)->inh_task)
+		__clear_priority_inheritance(deprived);
+
+	if (blocked)
+		__set_priority_inheritance(blocker, blocked);
 
 	raw_spin_unlock(&gsnedf_lock);
 }
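The combined update above is purely a locking optimization: on the unlock
path both a clear (for the old holder) and a set (for the new holder) may be
needed, and doing them under a single gsnedf_lock acquisition avoids taking
the run-queue lock twice. For contrast, a hypothetical naive variant (an
editorial sketch built from the patch's own wrappers, not code from the
patch) would look like this:

/* Hypothetical naive equivalent (not in the patch): correct, but takes
 * and releases gsnedf_lock once per call instead of once overall. */
static void update_priority_inheritance_naive(
	struct task_struct* deprived,
	struct task_struct* blocker,
	struct task_struct* blocked)
{
	if (tsk_rt(deprived)->inh_task)
		clear_priority_inheritance(deprived);       /* lock/unlock #1 */
	if (blocked)
		set_priority_inheritance(blocker, blocked); /* lock/unlock #2 */
}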
@@ -898,6 +929,266 @@ static struct litmus_lock* gsnedf_new_fmlp(void)
 	return &sem->litmus_lock;
 }
 
+
+/* ******************** OMLP support ********************** */
+
+/* struct for semaphore with priority inheritance */
+struct omlp_semaphore {
+	struct litmus_lock litmus_lock;
+
+	/* current resource holder */
+	struct task_struct *owner;
+
+	/* highest-priority waiter */
+	struct task_struct *hp_waiter;
+
+	/* FIFO queue of waiting tasks */
+	wait_queue_head_t fifo_wait;
+	/* priority queue of waiting tasks */
+	wait_queue_head_t prio_wait;
+
+	/* How many slots remain in the FIFO queue? */
+	unsigned int num_free;
+};
+
+static inline struct omlp_semaphore* omlp_from_lock(struct litmus_lock* lock)
+{
+	return container_of(lock, struct omlp_semaphore, litmus_lock);
+}
+
+/* already locked */
+static void omlp_enqueue(struct omlp_semaphore *sem, prio_wait_queue_t* wait)
+{
+	if (sem->num_free) {
+		/* there is space in the FIFO queue */
+		sem->num_free--;
+		__add_wait_queue_tail_exclusive(&sem->fifo_wait, &wait->wq);
+	} else {
+		/* nope, gotta go to the priority queue */
+		__add_wait_queue_prio_exclusive(&sem->prio_wait, wait);
+	}
+}
+
+/* already locked */
+static int omlp_move(struct omlp_semaphore *sem)
+{
+	struct list_head* first;
+
+	if (waitqueue_active(&sem->prio_wait)) {
+		first = sem->prio_wait.task_list.next;
+		list_move_tail(first, &sem->fifo_wait.task_list);
+		return 1;
+	}
+	else
+		return 0;
+}
+
+static struct task_struct* omlp_dequeue(struct omlp_semaphore *sem)
+{
+	struct task_struct* first = __waitqueue_remove_first(&sem->fifo_wait);
+
+	if (first && !omlp_move(sem))
+		sem->num_free++;
+
+	return first;
+}
+
+/* caller is responsible for locking */
+static struct task_struct* omlp_find_hp_waiter(struct omlp_semaphore *sem,
+					       struct task_struct* skip)
+{
+	struct list_head *pos;
+	struct task_struct *queued, *found = NULL;
+
+	/* check FIFO queue first */
+	list_for_each(pos, &sem->fifo_wait.task_list) {
+		queued = (struct task_struct*) list_entry(pos, wait_queue_t,
+							  task_list)->private;
+
+		/* Compare task prios, find high prio task. */
+		if (queued != skip && edf_higher_prio(queued, found))
+			found = queued;
+	}
+
+	/* check priority queue next */
+	if (waitqueue_active(&sem->prio_wait)) {
+		/* first has highest priority */
+		pos = sem->prio_wait.task_list.next;
+		queued = (struct task_struct*) list_entry(pos, wait_queue_t,
+							  task_list)->private;
+		if (edf_higher_prio(queued, found))
+			found = queued;
+	}
+
+	return found;
+}
+
+int gsnedf_omlp_lock(struct litmus_lock* l)
+{
+	struct task_struct* t = current;
+	struct omlp_semaphore *sem = omlp_from_lock(l);
+	prio_wait_queue_t wait;
+	unsigned long flags;
+
+	if (!is_realtime(t))
+		return -EPERM;
+
+	spin_lock_irqsave(&sem->fifo_wait.lock, flags);
+
+	if (sem->owner) {
+		/* resource is not free => must suspend and wait */
+
+		init_prio_waitqueue_entry(&wait, t, get_deadline(t));
+
+		set_task_state(t, TASK_UNINTERRUPTIBLE);
+
+		omlp_enqueue(sem, &wait);
+
+		/* check if we need to activate priority inheritance */
+		if (edf_higher_prio(t, sem->hp_waiter)) {
+			sem->hp_waiter = t;
+			if (edf_higher_prio(t, sem->owner))
+				set_priority_inheritance(sem->owner, sem->hp_waiter);
+		}
+
+		TS_LOCK_SUSPEND;
+
+		/* release lock before sleeping */
+		spin_unlock_irqrestore(&sem->fifo_wait.lock, flags);
+
+		schedule();
+
+		TS_LOCK_RESUME;
+
+		/* Since we hold the lock, no other task will change
+		 * ->owner. We can thus check it without acquiring the spin
+		 * lock. */
+		BUG_ON(sem->owner != t);
+	} else {
+		/* it's ours now */
+		sem->owner = t;
+
+		spin_unlock_irqrestore(&sem->fifo_wait.lock, flags);
+	}
+
+	return 0;
+}
+
+static int gsnedf_omlp_unlock(struct litmus_lock* l)
+{
+	struct task_struct *t = current, *next, *blocked = NULL;
+	struct omlp_semaphore *sem = omlp_from_lock(l);
+	unsigned long flags;
+	int err = 0;
+
+	spin_lock_irqsave(&sem->fifo_wait.lock, flags);
+
+	if (sem->owner != t) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	/* check if there are jobs waiting for this resource */
+	next = omlp_dequeue(sem);
+	if (next) {
+		/* next becomes the resource holder */
+		sem->owner = next;
+		TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid);
+
+		/* determine new hp_waiter if necessary */
+		if (next == sem->hp_waiter) {
+			TRACE_TASK(next, "was highest-prio waiter\n");
+			/* next has the highest priority --- it doesn't need to
+			 * inherit. However, we need to make sure that the
+			 * next-highest priority in the queue is reflected in
+			 * hp_waiter. */
+			sem->hp_waiter = omlp_find_hp_waiter(sem, next);
+			if (sem->hp_waiter)
+				TRACE_TASK(sem->hp_waiter, "is new highest-prio waiter\n");
+			else
+				TRACE("no further waiters\n");
+		} else {
+			/* Well, if next is not the highest-priority waiter,
+			 * then it ought to inherit the highest-priority
+			 * waiter's priority. */
+			blocked = sem->hp_waiter;
+		}
+
+		/* wake up next */
+		wake_up_process(next);
+	} else
+		/* becomes available */
+		sem->owner = NULL;
+
+	/* we lose the benefit of priority inheritance (if any) */
+	if (tsk_rt(t)->inh_task || blocked)
+		update_priority_inheritance(t, next, blocked);
+
+out:
+	spin_unlock_irqrestore(&sem->fifo_wait.lock, flags);
+
+	return err;
+}
+
+static int gsnedf_omlp_close(struct litmus_lock* l)
+{
+	struct task_struct *t = current;
+	struct omlp_semaphore *sem = omlp_from_lock(l);
+	unsigned long flags;
+
+	int owner;
+
+	spin_lock_irqsave(&sem->fifo_wait.lock, flags);
+
+	owner = sem->owner == t;
+
+	spin_unlock_irqrestore(&sem->fifo_wait.lock, flags);
+
+	if (owner)
+		gsnedf_omlp_unlock(l);
+
+	return 0;
+}
+
+static void gsnedf_omlp_free(struct litmus_lock* lock)
+{
+	kfree(omlp_from_lock(lock));
+}
+
+static struct litmus_lock_ops gsnedf_omlp_lock_ops = {
+	.close      = gsnedf_omlp_close,
+	.lock       = gsnedf_omlp_lock,
+	.unlock     = gsnedf_omlp_unlock,
+	.deallocate = gsnedf_omlp_free,
+};
+
+static struct litmus_lock* gsnedf_new_omlp(void)
+{
+	struct omlp_semaphore* sem;
+
+	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
+	if (!sem)
+		return NULL;
+
+	sem->owner = NULL;
+	sem->hp_waiter = NULL;
+	init_waitqueue_head(&sem->fifo_wait);
+	init_waitqueue_head(&sem->prio_wait);
+	sem->litmus_lock.ops = &gsnedf_omlp_lock_ops;
+	/* free = m - 1, since ->owner occupies the FIFO's head slot and is counted, too */
+	sem->num_free = num_online_cpus() - 1;
+
+#ifdef CONFIG_RELEASE_MASTER
+	/* If we use dedicated interrupt handling, then there are actually
+	 * only m - 1 CPUs around. */
+	if (gsnedf.release_master != NO_CPU)
+		sem->num_free -= 1;
+#endif
+
+	return &sem->litmus_lock;
+}
+
+
 /* **** lock constructor **** */
 
 
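As a worked example of the num_free initialization in gsnedf_new_omlp()
above (editorial note, values assumed): with num_online_cpus() == 4 and no
dedicated release master, num_free starts at 3, so the FIFO holds the owner
plus three waiters, m = 4 jobs in total; with a release master, one CPU
never runs real-time jobs, so num_free drops to 2 and the FIFO holds
m - 1 = 3 jobs. The helper below merely restates that arithmetic
(omlp_initial_num_free is an illustrative name, not in the patch):

/* Editorial sketch: how many FIFO slots remain for waiters behind the
 * owner. Hypothetical helper, shown only to restate the arithmetic. */
static unsigned int omlp_initial_num_free(unsigned int online_cpus,
					  int dedicated_release_master)
{
	unsigned int m = online_cpus;

	if (dedicated_release_master)
		m -= 1;		/* that CPU never schedules real-time jobs */

	return m - 1;		/* the owner occupies one of the m slots */
}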
@@ -918,6 +1209,15 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
 		err = -ENOMEM;
 		break;
 
+	case OMLP_SEM:
+		/* O(m) Multiprocessor Locking Protocol */
+		*lock = gsnedf_new_omlp();
+		if (*lock)
+			err = 0;
+		else
+			err = -ENOMEM;
+		break;
+
 	};
 
 	return err;
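From user space, the new lock type would be requested through the usual
LITMUS^RT object-descriptor path, which ends up in gsnedf_allocate_lock()
with type == OMLP_SEM. The sketch below follows the usual liblitmus
conventions; the header name and the exact od_open()/litmus_lock()/
litmus_unlock()/od_close() signatures are assumptions, not verified against
this tree:

/* Editorial sketch: acquiring the new OMLP semaphore from a real-time
 * task via liblitmus (names/signatures assumed, not part of the patch). */
#include <fcntl.h>
#include <litmus.h>	/* assumed liblitmus header */

int with_omlp_cs(const char* ns_file, int resource_id)
{
	int fd, od, err;

	fd = open(ns_file, O_RDONLY | O_CREAT, 0600); /* shared namespace */
	if (fd < 0)
		return -1;

	od = od_open(fd, OMLP_SEM, resource_id);      /* type added above */
	if (od < 0)
		return -1;

	err = litmus_lock(od);	/* may suspend in the FIFO or prio queue */
	if (!err) {
		/* ... critical section ... */
		err = litmus_unlock(od);
	}

	od_close(od);
	return err;
}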