-rw-r--r--   include/litmus/fdso.h    |   6
-rw-r--r--   include/litmus/litmus.h  |   1
-rw-r--r--   litmus/fdso.c            |   1
-rw-r--r--   litmus/locking.c         |  14
-rw-r--r--   litmus/sched_gsn_edf.c   | 569
-rw-r--r--   litmus/sched_litmus.c    |   2
6 files changed, 557 insertions, 36 deletions
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
index caf2a1e6918c..c740e8fc3e88 100644
--- a/include/litmus/fdso.h
+++ b/include/litmus/fdso.h
@@ -18,9 +18,10 @@ typedef enum {
 	MIN_OBJ_TYPE = 0,
 
 	FMLP_SEM	= 0,
-	SRP_SEM		= 1,
+	KFMLP_SEM	= 1,
+	SRP_SEM		= 2,
 
-	MAX_OBJ_TYPE	= 1
+	MAX_OBJ_TYPE	= SRP_SEM
 } obj_type_t;
 
 struct inode_obj_id {
@@ -64,6 +65,7 @@ static inline void* od_lookup(int od, obj_type_t type)
 }
 
 #define lookup_fmlp_sem(od)((struct pi_semaphore*) od_lookup(od, FMLP_SEM))
+#define lookup_kfmlp_sem(od)((struct pi_semaphore*) od_lookup(od, KFMLP_SEM))
 #define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM))
 #define lookup_ics(od)     ((struct ics*) od_lookup(od, ICS_ID))
 
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 94086e2b38db..befdf6381693 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -27,6 +27,7 @@ static inline int in_list(struct list_head* list)
 }
 
 struct task_struct* waitqueue_first(wait_queue_head_t *wq);
+struct task_struct* waitqueue_first_and_remove(wait_queue_head_t *wq);
 
 #define NO_CPU 0xffffffff
 
diff --git a/litmus/fdso.c b/litmus/fdso.c
index aa7b384264e3..2b7f9ba85857 100644
--- a/litmus/fdso.c
+++ b/litmus/fdso.c
@@ -22,6 +22,7 @@ extern struct fdso_ops generic_lock_ops;
 
 static const struct fdso_ops* fdso_ops[] = {
 	&generic_lock_ops, /* FMLP_SEM */
+	&generic_lock_ops, /* KFMLP_SEM */
 	&generic_lock_ops, /* SRP_SEM */
 };
 
diff --git a/litmus/locking.c b/litmus/locking.c
index 728b56835cf7..e9e682adc2c9 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -119,6 +119,20 @@ struct task_struct* waitqueue_first(wait_queue_head_t *wq)
 	return NULL;
 }
 
+struct task_struct* waitqueue_first_and_remove(wait_queue_head_t *wq)
+{
+	wait_queue_t *q;
+	struct task_struct* t = NULL;
+
+	if(waitqueue_active(wq))
+	{
+		q = list_entry(wq->task_list.next,
+			       wait_queue_t, task_list);
+		t = (struct task_struct*) q->private;
+		__remove_wait_queue(wq, q);
+	}
+	return(t);
+}
 
 #else
 
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index c5c9600c33d8..ec641a094a08 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -12,6 +12,8 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/uaccess.h>
+
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
@@ -399,6 +401,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 	TRACE_TASK(prev, "invoked gsnedf_schedule.\n");
 #endif
 
+	/*
 	if (exists)
 		TRACE_TASK(prev,
 			   "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d "
@@ -408,7 +411,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 	if (entry->linked && preempt)
 		TRACE_TASK(prev, "will be preempted by %s/%d\n",
 			   entry->linked->comm, entry->linked->pid);
-
+	*/
 
 	/* If a task blocks we have no choice but to reschedule.
 	 */
@@ -606,43 +609,44 @@ static long gsnedf_admit_task(struct task_struct* tsk)
 
 #include <litmus/fdso.h>
 
-/* called with IRQs off */
-static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
+static void __set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
 {
 	int linked_on;
 	int check_preempt = 0;
 
-	raw_spin_lock(&gsnedf_lock);
-
-	TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
+	if(prio_inh != NULL)
+		TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
+	else
+		TRACE_TASK(t, "inherits priority from %p\n", prio_inh);
+
 	tsk_rt(t)->inh_task = prio_inh;
 
 	linked_on = tsk_rt(t)->linked_on;
 
 	/* If it is scheduled, then we need to reorder the CPU heap. */
 	if (linked_on != NO_CPU) {
 		TRACE_TASK(t, "%s: linked on %d\n",
 			   __FUNCTION__, linked_on);
 		/* Holder is scheduled; need to re-order CPUs.
 		 * We can't use heap_decrease() here since
 		 * the cpu_heap is ordered in reverse direction, so
 		 * it is actually an increase. */
 		bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap,
 			     gsnedf_cpus[linked_on]->hn);
 		bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap,
 			     gsnedf_cpus[linked_on]->hn);
 	} else {
 		/* holder may be queued: first stop queue changes */
 		raw_spin_lock(&gsnedf.release_lock);
 		if (is_queued(t)) {
 			TRACE_TASK(t, "%s: is queued\n",
 				   __FUNCTION__);
 			/* We need to update the position of holder in some
 			 * heap. Note that this could be a release heap if we
 			 * budget enforcement is used and this job overran. */
 			check_preempt =
 				!bheap_decrease(edf_ready_order,
 						tsk_rt(t)->heap_node);
 		} else {
 			/* Nothing to do: if it is not queued and not linked
 			 * then it is either sleeping or currently being moved
@@ -650,10 +654,10 @@ static void set_priority_inheritance(struct task_struct* t, struct task_struct*
 			 * will use the correct priority when enqueuing the
 			 * task. */
 			TRACE_TASK(t, "%s: is NOT queued => Done.\n",
 				   __FUNCTION__);
 		}
 		raw_spin_unlock(&gsnedf.release_lock);
 
 		/* If holder was enqueued in a release heap, then the following
 		 * preemption check is pointless, but we can't easily detect
 		 * that case. If you want to fix this, then consider that
@@ -665,31 +669,48 @@ static void set_priority_inheritance(struct task_struct* t, struct task_struct*
 		 * sure preemption checks get the right task, not the
 		 * potentially stale cache. */
 		bheap_uncache_min(edf_ready_order,
 				  &gsnedf.ready_queue);
 		check_for_preemptions();
 		}
 	}
-
-	raw_spin_unlock(&gsnedf_lock);
 }
 
 /* called with IRQs off */
-static void clear_priority_inheritance(struct task_struct* t)
+static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
 {
 	raw_spin_lock(&gsnedf_lock);
+	__set_priority_inheritance(t, prio_inh);
+	raw_spin_unlock(&gsnedf_lock);
+}
 
-	/* A job only stops inheriting a priority when it releases a
-	 * resource. Thus we can make the following assumption.*/
-	BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU);
-
+static void __clear_priority_inheritance(struct task_struct* t)
+{
 	TRACE_TASK(t, "priority restored\n");
-	tsk_rt(t)->inh_task = NULL;
-
-	/* Check if rescheduling is necessary. We can't use heap_decrease()
-	 * since the priority was effectively lowered. */
-	unlink(t);
-	gsnedf_job_arrival(t);
+
+	if(tsk_rt(t)->scheduled_on != NO_CPU)
+	{
+		TRACE_TASK(t, "priority restored: t is not currently scheduled.\n");
+
+		tsk_rt(t)->inh_task = NULL;
+
+		/* Check if rescheduling is necessary. We can't use heap_decrease()
+		 * since the priority was effectively lowered. */
+		unlink(t);
+		gsnedf_job_arrival(t);
+	}
+	else
+	{
+		TRACE_TASK(t, "priority restored: t IS currently scheduled.\n");
+
+		__set_priority_inheritance(t, NULL);
+	}
+}
 
+/* called with IRQs off */
+static void clear_priority_inheritance(struct task_struct* t)
+{
+	raw_spin_lock(&gsnedf_lock);
+	__clear_priority_inheritance(t);
 	raw_spin_unlock(&gsnedf_lock);
 }
 
@@ -892,11 +913,488 @@ static struct litmus_lock* gsnedf_new_fmlp(void)
 	return &sem->litmus_lock;
 }
 
+
+
+
+
+
+
+/* ******************** KFMLP support ********************** */
+
+/* struct for semaphore with priority inheritance */
+struct kfmlp_queue
+{
+	wait_queue_head_t wait;
+	struct task_struct* owner;
+	struct task_struct* hp_waiter;
+	int count; /* number of waiters + holder */
+};
+
+struct kfmlp_semaphore
+{
+	struct litmus_lock litmus_lock;
+
+	spinlock_t lock;
+
+	int num_resources; /* aka k */
+
+	struct kfmlp_queue *queues; /* array */
+	struct kfmlp_queue *shortest_queue; /* pointer to shortest queue */
+};
+
+static inline struct kfmlp_semaphore* kfmlp_from_lock(struct litmus_lock* lock)
+{
+	return container_of(lock, struct kfmlp_semaphore, litmus_lock);
+}
+
+static inline int kfmlp_get_idx(struct kfmlp_semaphore* sem,
+				struct kfmlp_queue* queue)
+{
+	return (queue - &sem->queues[0]);
+}
+
+static inline struct kfmlp_queue* kfmlp_get_queue(struct kfmlp_semaphore* sem,
+						  struct task_struct* holder)
+{
+	int i;
+	for(i = 0; i < sem->num_resources; ++i)
+		if(sem->queues[i].owner == holder)
+			return(&sem->queues[i]);
+	return(NULL);
+}
+
+/* caller is responsible for locking */
+struct task_struct* kfmlp_find_hp_waiter(struct kfmlp_queue *kqueue,
+					 struct task_struct *skip)
+{
+	struct list_head *pos;
+	struct task_struct *queued, *found = NULL;
+
+	list_for_each(pos, &kqueue->wait.task_list) {
+		queued = (struct task_struct*) list_entry(pos, wait_queue_t,
+							  task_list)->private;
+
+		/* Compare task prios, find high prio task. */
+		if (queued != skip && edf_higher_prio(queued, found))
+			found = queued;
+	}
+	return found;
+}
+
+static inline struct kfmlp_queue* kfmlp_find_shortest(
+	struct kfmlp_semaphore* sem,
+	struct kfmlp_queue* search_start)
+{
+	// we start our search at search_start instead of at the beginning of the
+	// queue list to load-balance across all resources.
+	struct kfmlp_queue* step = search_start;
+	struct kfmlp_queue* shortest = sem->shortest_queue;
+
+	do
+	{
+		step = (step+1 != &sem->queues[sem->num_resources]) ?
+			step+1 : &sem->queues[0];
+		if(step->count < shortest->count)
+		{
+			shortest = step;
+			if(step->count == 0)
+				break; /* can't get any shorter */
+		}
+	}while(step != search_start);
+
+	return(shortest);
+}
+
+struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem)
+{
+	/* must hold sem->lock */
+
+	struct kfmlp_queue *my_queue = NULL;
+	struct task_struct *max_hp = NULL;
+
+
+	struct list_head *pos;
+	struct task_struct *queued;
+	int i;
+
+	for(i = 0; i < sem->num_resources; ++i)
+	{
+		if( (sem->queues[i].count > 1) &&
+			((my_queue == NULL) ||
+			 (edf_higher_prio(sem->queues[i].hp_waiter, my_queue->hp_waiter))) )
+		{
+			my_queue = &sem->queues[i];
+		}
+	}
+
+	if(my_queue)
+	{
+		max_hp = my_queue->hp_waiter;
+
+		BUG_ON(!max_hp);
+
+		TRACE_CUR("queue %d: stealing %s/%d from queue %d\n",
+			  kfmlp_get_idx(sem, my_queue),
+			  max_hp->comm, max_hp->pid,
+			  kfmlp_get_idx(sem, my_queue));
+
+		my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, max_hp);
+
+		if(my_queue->hp_waiter)
+			TRACE_CUR("queue %d: new hp_waiter is %s/%d\n",
+				  kfmlp_get_idx(sem, my_queue),
+				  my_queue->hp_waiter->comm,
+				  my_queue->hp_waiter->pid);
+		else
+			TRACE_CUR("queue %d: new hp_waiter is %p\n",
+				  kfmlp_get_idx(sem, my_queue), NULL);
+
+		raw_spin_lock(&gsnedf_lock);
+
+		if(my_queue->owner)
+			TRACE_CUR("queue %d: owner is %s/%d\n",
+				  kfmlp_get_idx(sem, my_queue),
+				  my_queue->owner->comm,
+				  my_queue->owner->pid);
+		else
+			TRACE_CUR("queue %d: owner is %p\n",
+				  kfmlp_get_idx(sem, my_queue),
+				  NULL);
+
+		if(tsk_rt(my_queue->owner)->inh_task == max_hp)
+		{
+			TRACE_CUR("queue %d: CRAZY: clearing the inheritance of %s/%d\n",
+				  kfmlp_get_idx(sem, my_queue), my_queue->owner->comm, my_queue->owner->pid);
+
+			__clear_priority_inheritance(my_queue->owner);
+			if(my_queue->hp_waiter != NULL)
+			{
+				TRACE_CUR("queue %d: CRAZY: setting the inheritance of %s/%d to %s/%d\n",
+					  kfmlp_get_idx(sem, my_queue),
+					  my_queue->owner->comm, my_queue->owner->pid,
+					  my_queue->hp_waiter->comm, my_queue->hp_waiter->pid);
+				__set_priority_inheritance(my_queue->owner, my_queue->hp_waiter);
+			}
+		}
+		raw_spin_unlock(&gsnedf_lock);
+
+		list_for_each(pos, &my_queue->wait.task_list)
+		{
+			queued = (struct task_struct*) list_entry(pos, wait_queue_t,
+								  task_list)->private;
+			/* Compare task prios, find high prio task. */
+			if (queued == max_hp)
+			{
+				TRACE_CUR("queue %d: found entry in wait queue. REMOVING!\n",
+					  kfmlp_get_idx(sem, my_queue));
+
+				__remove_wait_queue(&my_queue->wait,
+						    list_entry(pos, wait_queue_t, task_list));
+				break;
+			}
+		}
+		--(my_queue->count);
+	}
+
+	return(max_hp);
+}
+
+int gsnedf_kfmlp_lock(struct litmus_lock* l)
+{
+	struct task_struct* t = current;
+	struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
+	struct kfmlp_queue* my_queue;
+	wait_queue_t wait;
+	unsigned long flags;
+
+	if (!is_realtime(t))
+		return -EPERM;
+
+	spin_lock_irqsave(&sem->lock, flags);
+
+	my_queue = sem->shortest_queue;
+
+	if (my_queue->owner) {
+		/* resource is not free => must suspend and wait */
+		TRACE_CUR("queue %d: Resource is not free => must suspend and wait.\n",
+			  kfmlp_get_idx(sem, my_queue));
+
+		init_waitqueue_entry(&wait, t);
+
+		/* FIXME: interruptible would be nice some day */
+		set_task_state(t, TASK_UNINTERRUPTIBLE);
+
+		__add_wait_queue_tail_exclusive(&my_queue->wait, &wait);
+
+		/* check if we need to activate priority inheritance */
+		if (edf_higher_prio(t, my_queue->hp_waiter))
+		{
+			my_queue->hp_waiter = t;
+			if (edf_higher_prio(t, my_queue->owner))
+			{
+				set_priority_inheritance(my_queue->owner, my_queue->hp_waiter);
+			}
+		}
+
+		++(my_queue->count);
+		sem->shortest_queue = kfmlp_find_shortest(sem, my_queue);
+
+		/* release lock before sleeping */
+		spin_unlock_irqrestore(&sem->lock, flags);
+
+		/* We depend on the FIFO order. Thus, we don't need to recheck
+		 * when we wake up; we are guaranteed to have the lock since
+		 * there is only one wake up per release.
+		 */
+
+		schedule();
+
+		spin_lock_irqsave(&sem->lock, flags);
+		if(my_queue->owner == t)
+		{
+			TRACE_CUR("queue %d: acquired through waiting\n",
+				  kfmlp_get_idx(sem, my_queue));
+			//__remove_wait_queue(&my_queue->wait, &wait);
+		}
+		else
+		{
+			/* this case may happen if our wait entry was stolen
+			   between queues. */
+
+			struct kfmlp_queue* my_new_queue;
+
+			my_new_queue = kfmlp_get_queue(sem, t);
+			BUG_ON(!my_new_queue);
+			TRACE_CUR("queue %d: acquired through stealing\n",
+				  kfmlp_get_idx(sem, my_new_queue));
+		}
+		spin_unlock_irqrestore(&sem->lock, flags);
+	}
+	else
+	{
+		TRACE_CUR("queue %d: acquired immediatly\n",
+			  kfmlp_get_idx(sem, my_queue));
+
+		/* it's ours now */
+		TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - immediate\n",
+			  kfmlp_get_idx(sem, my_queue),
+			  t->comm, t->pid);
+
+		my_queue->owner = t;
+
+		++(my_queue->count);
+		sem->shortest_queue = kfmlp_find_shortest(sem, my_queue);
+
+		spin_unlock_irqrestore(&sem->lock, flags);
+	}
+
+	return kfmlp_get_idx(sem, my_queue);
+}
+
+int gsnedf_kfmlp_unlock(struct litmus_lock* l)
+{
+	struct task_struct *t = current, *next;
+	struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
+	struct kfmlp_queue *my_queue;
+	unsigned long flags;
+	int err = 0;
+
+	spin_lock_irqsave(&sem->lock, flags);
+
+	my_queue = kfmlp_get_queue(sem, t);
+
+	if (!my_queue) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	/* check if there are jobs waiting for this resource */
+	//next = waitqueue_first(&my_queue->wait);
+	next = waitqueue_first_and_remove(&my_queue->wait);
+	if (next) {
+
+		TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n",
+			  kfmlp_get_idx(sem, my_queue),
+			  next->comm, next->pid);
+
+		/* next becomes the resouce holder */
+		my_queue->owner = next;
+
+		--(my_queue->count);
+		if(my_queue->count < sem->shortest_queue->count)
+		{
+			sem->shortest_queue = my_queue;
+		}
+
+		TRACE_CUR("queue %d: lock ownership passed to %s/%d\n",
+			  kfmlp_get_idx(sem, my_queue), next->comm, next->pid);
+
+		/* determine new hp_waiter if necessary */
+		if (next == my_queue->hp_waiter) {
+			TRACE_TASK(next, "was highest-prio waiter\n");
+			/* next has the highest priority --- it doesn't need to
+			 * inherit. However, we need to make sure that the
+			 * next-highest priority in the queue is reflected in
+			 * hp_waiter. */
+			my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, next);
+			if (my_queue->hp_waiter)
+				TRACE_TASK(my_queue->hp_waiter, "queue %d: is new highest-prio waiter\n", kfmlp_get_idx(sem, my_queue));
+			else
+				TRACE("queue %d: no further waiters\n", kfmlp_get_idx(sem, my_queue));
+		} else {
+			/* Well, if next is not the highest-priority waiter,
+			 * then it ought to inherit the highest-priority
+			 * waiter's priority. */
+			set_priority_inheritance(next, my_queue->hp_waiter);
+		}
+
+		/* wake up next */
+		wake_up_process(next);
+	}
+	else
+	{
+		TRACE_CUR("queue %d: looking to steal someone...\n", kfmlp_get_idx(sem, my_queue));
+
+		next = kfmlp_remove_hp_waiter(sem); /* returns NULL if nothing to steal */
+
+		if(next)
+			TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - steal\n",
+				  kfmlp_get_idx(sem, my_queue),
+				  next->comm, next->pid);
+
+		my_queue->owner = next;
+
+		if(next)
+		{
+			TRACE_CUR("queue %d: lock ownership passed to %s/%d\n",
+				  kfmlp_get_idx(sem, my_queue),
+				  next->comm, next->pid);
+
+			/* wake up next */
+			wake_up_process(next);
+		}
+		else
+		{
+			TRACE_CUR("queue %d: lock ownership passed to %p\n", kfmlp_get_idx(sem, my_queue), NULL);
+
+			--(my_queue->count);
+			if(my_queue->count < sem->shortest_queue->count)
+			{
+				sem->shortest_queue = my_queue;
+			}
+		}
+	}
+
+	/* we lose the benefit of priority inheritance (if any) */
+	if (tsk_rt(t)->inh_task)
+		clear_priority_inheritance(t);
+
+out:
+	spin_unlock_irqrestore(&sem->lock, flags);
+
+	return err;
+}
+
+int gsnedf_kfmlp_close(struct litmus_lock* l)
+{
+	struct task_struct *t = current;
+	struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
+	struct kfmlp_queue *my_queue;
+	unsigned long flags;
+
+	int owner;
+
+	spin_lock_irqsave(&sem->lock, flags);
+
+	my_queue = kfmlp_get_queue(sem, t);
+	owner = (my_queue) ? (my_queue->owner == t) : 0;
+
+	spin_unlock_irqrestore(&sem->lock, flags);
+
+	if (owner)
+		gsnedf_kfmlp_unlock(l);
+
+	return 0;
+}
+
+void gsnedf_kfmlp_free(struct litmus_lock* l)
+{
+	struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
+	kfree(sem->queues);
+	kfree(sem);
+}
+
+static struct litmus_lock_ops gsnedf_kfmlp_lock_ops = {
+	.close      = gsnedf_kfmlp_close,
+	.lock       = gsnedf_kfmlp_lock,
+	.unlock     = gsnedf_kfmlp_unlock,
+	.deallocate = gsnedf_kfmlp_free,
+};
+
+static struct litmus_lock* gsnedf_new_kfmlp(void* __user arg, int* ret_code)
+{
+	struct kfmlp_semaphore* sem;
+	int num_resources = 0;
+	int i;
+
+	if(!access_ok(VERIFY_READ, arg, sizeof(num_resources)))
+	{
+		*ret_code = -EINVAL;
+		return(NULL);
+	}
+	if(__copy_from_user(&num_resources, arg, sizeof(num_resources)))
+	{
+		*ret_code = -EINVAL;
+		return(NULL);
+	}
+	if(num_resources < 1)
+	{
+		*ret_code = -EINVAL;
+		return(NULL);
+	}
+
+	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
+	if(!sem)
+	{
+		*ret_code = -ENOMEM;
+		return NULL;
+	}
+
+	sem->queues = kmalloc(sizeof(struct kfmlp_queue)*num_resources, GFP_KERNEL);
+	if(!sem->queues)
+	{
+		kfree(sem);
+		*ret_code = -ENOMEM;
+		return NULL;
+	}
+
+	sem->litmus_lock.ops = &gsnedf_kfmlp_lock_ops;
+	spin_lock_init(&sem->lock);
+	sem->num_resources = num_resources;
+
+	for(i = 0; i < num_resources; ++i)
+	{
+		sem->queues[i].owner = NULL;
+		sem->queues[i].hp_waiter = NULL;
+		init_waitqueue_head(&sem->queues[i].wait);
+		sem->queues[i].count = 0;
+	}
+
+	sem->shortest_queue = &sem->queues[0];
+
+	*ret_code = 0;
+	return &sem->litmus_lock;
+}
+
+
+
+
+
 /* **** lock constructor **** */
 
 
 static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
-				 void* __user unused)
+				 void* __user arg)
 {
 	int err = -ENXIO;
 
@@ -911,7 +1409,10 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
 		else
 			err = -ENOMEM;
 		break;
 
+	case KFMLP_SEM:
+		*lock = gsnedf_new_kfmlp(arg, &err);
+		break;
 	};
 
 	return err;
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c
index e6952896dc4b..1bca2e1a33cd 100644
--- a/litmus/sched_litmus.c
+++ b/litmus/sched_litmus.c
@@ -103,7 +103,9 @@ litmus_schedule(struct rq *rq, struct task_struct *prev)
 	}
 #ifdef __ARCH_WANT_UNLOCKED_CTXSW
 	if (next->oncpu)
+	{
 		TRACE_TASK(next, "waiting for !oncpu");
+	}
 	while (next->oncpu) {
 		cpu_relax();
 		mb();