author    Bjoern B. Brandenburg <bbb@cs.unc.edu>  2011-07-21 21:42:12 -0400
committer Bjoern B. Brandenburg <bbb@cs.unc.edu>  2011-07-21 21:42:12 -0400
commit    9a478c57909aee8cb68b235ee711af19bc1d930f (patch)
tree      5bc8657a86f90988f01ba0e1f5c5ee6ed65f9b86
parent    cd7dbaa46c74c85ff16046541da71e933bf571f8 (diff)
Add internal support for the PCP in preparation for the DPCP.
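
The core of the PCP logic added below is the per-processor system ceiling:
a job may acquire a PCP semaphore only if its effective priority exceeds the
highest priority ceiling among the PCP semaphores currently held on its
processor, or if it itself owns the ceiling-defining semaphore (see
pcp_exceeds_ceiling() and pcp_raise_ceiling() in litmus/sched_pfp.c). The
following standalone userspace sketch only illustrates that admission rule;
the toy_* names and values are made up for illustration and are not part of
the kernel code.

    /* Illustration only: the PCP admission rule.  A job may lock a
     * semaphore iff it beats the current system ceiling or already owns
     * the ceiling-defining resource.  As in the P-FP plugin, a lower
     * numeric value means a higher priority. */
    #include <stdio.h>
    #include <stddef.h>

    struct toy_ceiling {
            int prio_ceiling;  /* priority ceiling of the held semaphore */
            int owner;         /* task id of the current holder */
    };

    static int toy_exceeds_ceiling(const struct toy_ceiling *ceiling,
                                   int task_id, int effective_prio)
    {
            return ceiling == NULL ||                       /* nothing held */
                   ceiling->prio_ceiling > effective_prio ||/* higher prio  */
                   ceiling->owner == task_id;               /* owns ceiling */
    }

    int main(void)
    {
            struct toy_ceiling held = { .prio_ceiling = 2, .owner = 7 };

            printf("%d\n", toy_exceeds_ceiling(&held, 5, 1)); /* 1: higher prio  */
            printf("%d\n", toy_exceeds_ceiling(&held, 5, 3)); /* 0: must block   */
            printf("%d\n", toy_exceeds_ceiling(&held, 7, 3)); /* 1: owns ceiling */
            printf("%d\n", toy_exceeds_ceiling(NULL, 5, 9));  /* 1: no ceiling   */
            return 0;
    }
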
-rw-r--r--  include/litmus/fp_common.h  |   8
-rw-r--r--  include/litmus/wait.h       |  38
-rw-r--r--  litmus/fp_common.c          |   2
-rw-r--r--  litmus/locking.c            |  31
-rw-r--r--  litmus/sched_pfp.c          | 311
5 files changed, 357 insertions, 33 deletions
diff --git a/include/litmus/fp_common.h b/include/litmus/fp_common.h
index 9171040e65f8..dd1f7bf1e347 100644
--- a/include/litmus/fp_common.h
+++ b/include/litmus/fp_common.h
@@ -64,6 +64,14 @@ static inline void fp_prio_add(struct fp_prio_queue* q, struct task_struct* t, u
 	bheap_insert(fp_ready_order, &q->queue[index], tsk_rt(t)->heap_node);
 }
 
+static inline void fp_prio_remove(struct fp_prio_queue* q, struct task_struct* t, unsigned int index)
+{
+	BUG_ON(!is_queued(t));
+
+	bheap_delete(fp_ready_order, &q->queue[index], tsk_rt(t)->heap_node);
+	if (likely(bheap_empty(&q->queue[index])))
+		fpq_clear(q, index);
+}
 
 static inline struct task_struct* fp_prio_peek(struct fp_prio_queue* q)
 {
diff --git a/include/litmus/wait.h b/include/litmus/wait.h
index 62884f5eb9c8..ab3770904b1f 100644
--- a/include/litmus/wait.h
+++ b/include/litmus/wait.h
@@ -21,34 +21,24 @@ static inline void init_prio_waitqueue_entry(prio_wait_queue_t *pwq,
 	pwq->priority = priority;
 }
 
-static inline unsigned int __add_wait_queue_prio_exclusive(
+unsigned int __add_wait_queue_prio_exclusive(
+	wait_queue_head_t* head,
+	prio_wait_queue_t *new);
+
+static inline unsigned int add_wait_queue_prio_exclusive(
 	wait_queue_head_t* head,
 	prio_wait_queue_t *new)
 {
-	struct list_head *pos;
-	unsigned int passed = 0;
+	unsigned long flags;
+	unsigned int passed;
 
-	new->wq.flags |= WQ_FLAG_EXCLUSIVE;
-
-	/* find a spot where the new entry is less than the next */
-	list_for_each(pos, &head->task_list) {
-		prio_wait_queue_t* queued = list_entry(pos, prio_wait_queue_t,
-						       wq.task_list);
-
-		if (unlikely(lt_before(new->priority, queued->priority))) {
-			/* pos is not less than new, thus insert here */
-			__list_add(&new->wq.task_list, pos->prev, pos);
-			goto out;
-		}
-		passed++;
-	}
-
-	/* if we get to this point either the list is empty or every entry
-	 * queued element is less than new.
-	 * Let's add new to the end. */
-	list_add_tail(&new->wq.task_list, &head->task_list);
-out:
+	spin_lock_irqsave(&head->lock, flags);
+	passed = __add_wait_queue_prio_exclusive(head, new);
+
+	spin_unlock_irqrestore(&head->lock, flags);
+
 	return passed;
 }
 
+
 #endif
diff --git a/litmus/fp_common.c b/litmus/fp_common.c
index 8536da9ca98a..31fc2db20adf 100644
--- a/litmus/fp_common.c
+++ b/litmus/fp_common.c
@@ -26,7 +26,7 @@ int fp_higher_prio(struct task_struct* first,
 	struct task_struct *second_task = second;
 
 	/* There is no point in comparing a task to itself. */
-	if (first && first == second) {
+	if (unlikely(first && first == second)) {
 		TRACE_TASK(first,
 			   "WARNING: pointless FP priority comparison.\n");
 		return 0;
diff --git a/litmus/locking.c b/litmus/locking.c
index 2693f1aca859..cea3191f6ed4 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -4,6 +4,7 @@
 
 #include <litmus/sched_plugin.h>
 #include <litmus/trace.h>
+#include <litmus/wait.h>
 
 static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg);
 static int open_generic_lock(struct od_table_entry* entry, void* __user arg);
@@ -121,6 +122,36 @@ struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq)
 	return(t);
 }
 
+unsigned int __add_wait_queue_prio_exclusive(
+	wait_queue_head_t* head,
+	prio_wait_queue_t *new)
+{
+	struct list_head *pos;
+	unsigned int passed = 0;
+
+	new->wq.flags |= WQ_FLAG_EXCLUSIVE;
+
+	/* find a spot where the new entry is less than the next */
+	list_for_each(pos, &head->task_list) {
+		prio_wait_queue_t* queued = list_entry(pos, prio_wait_queue_t,
+						       wq.task_list);
+
+		if (unlikely(lt_before(new->priority, queued->priority))) {
+			/* pos is not less than new, thus insert here */
+			__list_add(&new->wq.task_list, pos->prev, pos);
+			goto out;
+		}
+		passed++;
+	}
+
+	/* if we get to this point either the list is empty or every entry
+	 * queued element is less than new.
+	 * Let's add new to the end. */
+	list_add_tail(&new->wq.task_list, &head->task_list);
+out:
+	return passed;
+}
+
 
 #else
 
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
index 2b755ecab76e..38fd3c7f5c7f 100644
--- a/litmus/sched_pfp.c
+++ b/litmus/sched_pfp.c
@@ -50,6 +50,10 @@ static void preempt(pfp_domain_t *pfp)
 static unsigned int priority_index(struct task_struct* t)
 {
 #ifdef CONFIG_LOCKING
+	if (unlikely(t->rt_param.inh_task))
+		/* use effective priority */
+		t = t->rt_param.inh_task;
+
 	if (is_priority_boosted(t)) {
 		/* zero is reserved for priority-boosted tasks */
 		return 0;
@@ -308,16 +312,17 @@ static void pfp_task_exit(struct task_struct * t)
 
 	raw_spin_lock_irqsave(&pfp->slock, flags);
 	if (is_queued(t)) {
+		BUG(); /* This currently doesn't work. */
 		/* dequeue */
 		dom = task_dom(t);
 		remove(dom, t);
 	}
-	if (pfp->scheduled == t)
+	if (pfp->scheduled == t) {
 		pfp->scheduled = NULL;
-
+		preempt(pfp);
+	}
 	TRACE_TASK(t, "RIP, now reschedule\n");
 
-	preempt(pfp);
 	raw_spin_unlock_irqrestore(&pfp->slock, flags);
 }
 
@@ -326,6 +331,52 @@ static void pfp_task_exit(struct task_struct * t)
 #include <litmus/fdso.h>
 #include <litmus/srp.h>
 
+static void fp_dequeue(pfp_domain_t* pfp, struct task_struct* t)
+{
+	BUG_ON(pfp->scheduled == t && is_queued(t));
+	if (is_queued(t))
+		fp_prio_remove(&pfp->ready_queue, t, priority_index(t));
+}
+
+static void fp_set_prio_inh(pfp_domain_t* pfp, struct task_struct* t,
+			    struct task_struct* prio_inh)
+{
+	int requeue;
+
+	if (!t || t->rt_param.inh_task == prio_inh)
+		/* no update required */
+		return;
+
+	requeue = is_queued(t);
+
+	if (requeue)
+		/* first remove */
+		fp_dequeue(pfp, t);
+
+	t->rt_param.inh_task = prio_inh;
+
+	if (requeue)
+		/* add again to the right queue */
+		fp_prio_add(&pfp->ready_queue, t, priority_index(t));
+}
+
+static int effective_agent_prio(int prio)
+{
+	/* make sure agents have higher priority */
+	return prio - LITMUS_MAX_PRIORITY;
+}
+
+static lt_t prio_point(int eprio)
+{
+	/* make sure we have non-negative prio points */
+	return eprio + LITMUS_MAX_PRIORITY;
+}
+
+static int prio_from_point(lt_t prio_point)
+{
+	return ((int) prio_point) - LITMUS_MAX_PRIORITY;
+}
+
 static void boost_priority(struct task_struct* t, lt_t priority_point)
 {
 	unsigned long flags;
@@ -391,7 +442,6 @@ static unsigned int pfp_get_srp_prio(struct task_struct* t)
 
 /* ******************** FMLP support ********************** */
 
-/* struct for semaphore with priority inheritance */
 struct fmlp_semaphore {
 	struct litmus_lock litmus_lock;
 
@@ -548,7 +598,6 @@ static struct litmus_lock* pfp_new_fmlp(void)
 
 /* ******************** MPCP support ********************** */
 
-/* struct for semaphore with priority inheritance */
 struct mpcp_semaphore {
 	struct litmus_lock litmus_lock;
 
@@ -588,7 +637,7 @@ static void mpcp_vspin_enter(void)
 	unsigned long flags;
 
 	/* ordered by regular priority */
-	init_prio_waitqueue_entry(&wait, t, get_priority(t));
+	init_prio_waitqueue_entry(&wait, t, prio_point(get_priority(t)));
 
 	spin_lock_irqsave(&vspin->lock, flags);
 
@@ -638,6 +687,7 @@ static inline struct mpcp_semaphore* mpcp_from_lock(struct litmus_lock* lock)
 {
 	return container_of(lock, struct mpcp_semaphore, litmus_lock);
 }
+
 int pfp_mpcp_lock(struct litmus_lock* l)
 {
 	struct task_struct* t = current;
@@ -666,7 +716,7 @@ int pfp_mpcp_lock(struct litmus_lock* l)
 	/* resource is not free => must suspend and wait */
 
 	/* ordered by regular priority */
-	init_prio_waitqueue_entry(&wait, t, get_priority(t));
+	init_prio_waitqueue_entry(&wait, t, prio_point(get_priority(t)));
 
 	/* FIXME: interruptible would be nice some day */
 	set_task_state(t, TASK_UNINTERRUPTIBLE);
@@ -734,7 +784,7 @@ int pfp_mpcp_unlock(struct litmus_lock* l)
 out:
 	spin_unlock_irqrestore(&sem->wait.lock, flags);
 
-	if (sem->vspin) {
+	if (sem->vspin && err == 0) {
 		preempt_disable();
 		mpcp_vspin_exit();
 		preempt_enable();
@@ -827,6 +877,249 @@ static struct litmus_lock* pfp_new_mpcp(int vspin)
 	return &sem->litmus_lock;
 }
 
+
+/* ******************** PCP support ********************** */
+
+
+struct pcp_semaphore {
+	struct list_head ceiling;
+
+	/* current resource holder */
+	struct task_struct *owner;
+
+	/* priority ceiling --- can be negative due to DPCP support */
+	int prio_ceiling;
+
+	/* on which processor is this PCP semaphore allocated? */
+	int on_cpu;
+};
+
+struct pcp_state {
+	struct list_head system_ceiling;
+
+	/* highest-priority waiting task */
+	struct task_struct* hp_waiter;
+
+	/* list of jobs waiting to get past the system ceiling */
+	wait_queue_head_t ceiling_blocked;
+};
+
+static void pcp_init_state(struct pcp_state* s)
+{
+	INIT_LIST_HEAD(&s->system_ceiling);
+	s->hp_waiter = NULL;
+	init_waitqueue_head(&s->ceiling_blocked);
+}
+
+static DEFINE_PER_CPU(struct pcp_state, pcp_state);
+
+/* assumes preemptions are off */
+static struct pcp_semaphore* pcp_get_ceiling(void)
+{
+	struct list_head* top = __get_cpu_var(pcp_state).system_ceiling.next;
+
+	if (top)
+		return list_entry(top, struct pcp_semaphore, ceiling);
+	else
+		return NULL;
+}
+
+/* assumes preempt off */
+static void pcp_add_ceiling(struct pcp_semaphore* sem)
+{
+	struct list_head *pos;
+	struct list_head *in_use = &__get_cpu_var(pcp_state).system_ceiling;
+	struct pcp_semaphore* held;
+
+	BUG_ON(sem->on_cpu != smp_processor_id());
+	BUG_ON(in_list(&sem->ceiling));
+
+	list_for_each(pos, in_use) {
+		held = list_entry(pos, struct pcp_semaphore, ceiling);
+		if (held->prio_ceiling >= sem->prio_ceiling) {
+			__list_add(&sem->ceiling, pos->prev, pos);
+			return;
+		}
+	}
+
+	/* we hit the end of the list */
+
+	list_add_tail(&sem->ceiling, in_use);
+}
+
+/* assumes preempt off */
+static int pcp_exceeds_ceiling(struct pcp_semaphore* ceiling,
+			       struct task_struct* task,
+			       int effective_prio)
+{
+	return ceiling == NULL ||
+		ceiling->prio_ceiling > effective_prio ||
+		ceiling->owner == task;
+}
+
+/* assumes preempt off */
+static void pcp_priority_inheritance(void)
+{
+	unsigned long flags;
+	pfp_domain_t* pfp = local_pfp;
+
+	struct pcp_semaphore* ceiling = pcp_get_ceiling();
+	struct task_struct *blocker, *blocked;
+
+	blocker = ceiling ? ceiling->owner : NULL;
+	blocked = __get_cpu_var(pcp_state).hp_waiter;
+
+	raw_spin_lock_irqsave(&pfp->slock, flags);
+
+	/* Current is no longer inheriting anything by default. This should be
+	 * the currently scheduled job, and hence not currently queued. */
+	BUG_ON(current != pfp->scheduled);
+
+	fp_set_prio_inh(pfp, current, NULL);
+	fp_set_prio_inh(pfp, blocked, NULL);
+	fp_set_prio_inh(pfp, blocker, NULL);
+
+
+	/* Let blocking job inherit priority of blocked job, if required. */
+	if (blocker && blocked &&
+	    fp_higher_prio(blocked, blocker))
+		fp_set_prio_inh(pfp, blocker, blocked);
+
+	/* check if anything changed */
+	if (fp_higher_prio(fp_prio_peek(&pfp->ready_queue), pfp->scheduled))
+		preempt(pfp);
+
+	raw_spin_unlock_irqrestore(&pfp->slock, flags);
+}
+
+static void pcp_raise_ceiling(struct pcp_semaphore* sem,
+			      int effective_prio)
+{
+	struct task_struct* t = current;
+	struct pcp_semaphore* ceiling;
+	prio_wait_queue_t wait;
+	unsigned int waiting_higher_prio;
+
+	preempt_disable();
+
+	do {
+		ceiling = pcp_get_ceiling();
+		if (pcp_exceeds_ceiling(ceiling, t, effective_prio))
+			break;
+
+		/* we need to wait until the ceiling is lowered */
+
+		/* enqueue in priority order */
+		init_prio_waitqueue_entry(&wait, t, prio_point(effective_prio));
+		set_task_state(t, TASK_UNINTERRUPTIBLE);
+		waiting_higher_prio = add_wait_queue_prio_exclusive(
+			&__get_cpu_var(pcp_state).ceiling_blocked, &wait);
+
+		if (waiting_higher_prio == 0) {
+			/* we are the new highest-priority waiting job
+			 * => update inheritance */
+			__get_cpu_var(pcp_state).hp_waiter = t;
+			pcp_priority_inheritance();
+		}
+
+		TS_LOCK_SUSPEND;
+
+		preempt_enable_no_resched();
+		schedule();
+		preempt_disable();
+
+		/* pcp_resume_unblocked() removed us from wait queue */
+
+		TS_LOCK_RESUME;
+	} while(1);
+
+	/* We are good to go. The semaphore should be available. */
+	BUG_ON(sem->owner != NULL);
+
+	sem->owner = t;
+
+	pcp_add_ceiling(sem);
+
+	preempt_enable();
+}
+
+static void pcp_resume_unblocked(void)
+{
+	wait_queue_head_t *blocked = &__get_cpu_var(pcp_state).ceiling_blocked;
+	unsigned long flags;
+	prio_wait_queue_t* q;
+	struct task_struct* t = NULL;
+
+	struct pcp_semaphore* ceiling = pcp_get_ceiling();
+
+	spin_lock_irqsave(&blocked->lock, flags);
+
+	while (waitqueue_active(blocked)) {
+		/* check first == highest-priority waiting job */
+		q = list_entry(blocked->task_list.next,
+			       prio_wait_queue_t, wq.task_list);
+		t = (struct task_struct*) q->wq.private;
+
+		/* can it proceed now? => let it go */
+		if (pcp_exceeds_ceiling(ceiling, t,
+					prio_from_point(q->priority))) {
+			__remove_wait_queue(blocked, &q->wq);
+			wake_up_process(t);
+		} else {
+			/* We are done. Update highest-priority waiter. */
+			__get_cpu_var(pcp_state).hp_waiter = t;
+			goto out;
+		}
+	}
+	/* If we get here, then there are no more waiting
+	 * jobs. */
+	__get_cpu_var(pcp_state).hp_waiter = NULL;
+out:
+	spin_unlock_irqrestore(&blocked->lock, flags);
+}
+
+/* assumes preempt off */
+static void pcp_lower_ceiling(struct pcp_semaphore* sem)
+{
+	BUG_ON(!in_list(&sem->ceiling));
+	BUG_ON(sem->owner != current);
+	BUG_ON(sem->on_cpu != smp_processor_id());
+
+	/* remove from ceiling list */
+	list_del(&sem->ceiling);
+
+	/* release */
+	sem->owner = NULL;
+
+	/* Wake up all ceiling-blocked jobs that now pass the ceiling. */
+	pcp_resume_unblocked();
+
+	pcp_priority_inheritance();
+}
+
+static void pcp_update_prio_ceiling(struct pcp_semaphore* sem,
+				    int effective_prio)
+{
+	sem->prio_ceiling = min(sem->prio_ceiling, effective_prio);
+}
+
+static struct pcp_semaphore* pfp_new_pcp(int cpu)
+{
+	struct pcp_semaphore* sem;
+
+	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
+	if (!sem)
+		return NULL;
+
+	sem->owner = NULL;
+	INIT_LIST_HEAD(&sem->ceiling);
+	sem->prio_ceiling = INT_MAX;
+	sem->on_cpu = cpu;
+
+	return sem;
+}
+
+
 /* **** lock constructor **** */
 
 
@@ -911,6 +1204,8 @@ static long pfp_activate_plugin(void)
 	for_each_online_cpu(cpu) {
 		init_waitqueue_head(&per_cpu(mpcpvs_vspin_wait, cpu));
 		per_cpu(mpcpvs_vspin, cpu) = NULL;
+
+		pcp_init_state(&per_cpu(pcp_state, cpu));
 	}
 
 #endif