Diffstat (limited to 'litmus/locking.c')
 litmus/locking.c | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+), 0 deletions(-)
diff --git a/litmus/locking.c b/litmus/locking.c
index 4881ca119acf..1d32dcd8e726 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -6,6 +6,7 @@
 
 #include <litmus/sched_plugin.h>
 #include <litmus/trace.h>
+#include <litmus/wait.h>
 
 static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg);
 static int open_generic_lock(struct od_table_entry* entry, void* __user arg);
@@ -127,6 +128,37 @@ struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq)
 	return(t);
 }
 
+unsigned int __add_wait_queue_prio_exclusive(
+	wait_queue_head_t* head,
+	prio_wait_queue_t *new)
+{
+	struct list_head *pos;
+	unsigned int passed = 0;
+
+	new->wq.flags |= WQ_FLAG_EXCLUSIVE;
+
+	/* find a spot where the new entry is less than the next */
+	list_for_each(pos, &head->task_list) {
+		prio_wait_queue_t* queued = list_entry(pos, prio_wait_queue_t,
+						       wq.task_list);
+
+		if (unlikely(lt_before(new->priority, queued->priority) ||
+			     (new->priority == queued->priority &&
+			      new->tie_breaker < queued->tie_breaker))) {
+			/* pos is not less than new, thus insert here */
+			__list_add(&new->wq.task_list, pos->prev, pos);
+			goto out;
+		}
+		passed++;
+	}
+
+	/* if we get to this point, either the list is empty or every
+	 * queued element is less than new.
+	 * Let's add new to the end. */
+	list_add_tail(&new->wq.task_list, &head->task_list);
+out:
+	return passed;
+}
 
 #else
 
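For context, the new __add_wait_queue_prio_exclusive() inserts an exclusive waiter into a wait queue kept sorted by priority: lt_before() orders earlier (higher-priority) values first, tie_breaker resolves equal priorities, and the return value is the number of higher-priority waiters already queued ahead of the new entry. Below is a minimal usage sketch of how a suspension-based lock's lock() path might call it; the function name example_enqueue_waiter(), the lt_t parameter types, and the acquisition of head->lock around the __-prefixed helper are assumptions for illustration, not part of this patch.

/* Hypothetical usage sketch, not part of the patch above. */
#include <linux/sched.h>
#include <linux/wait.h>
#include <litmus/wait.h>	/* assumed home of prio_wait_queue_t (per the +include above) */

static unsigned int example_enqueue_waiter(wait_queue_head_t *head,
					   lt_t prio, lt_t tie_breaker)
{
	prio_wait_queue_t wait;	/* lives on the waiter's stack while it sleeps */
	unsigned long flags;
	unsigned int passed;

	init_waitqueue_entry(&wait.wq, current);	/* wake 'current' on wake-up */
	wait.priority    = prio;		/* e.g., the job's absolute deadline */
	wait.tie_breaker = tie_breaker;		/* orders waiters of equal priority */

	/* __-prefixed helper: the caller is assumed to hold the wait-queue lock */
	spin_lock_irqsave(&head->lock, flags);
	passed = __add_wait_queue_prio_exclusive(head, &wait);
	spin_unlock_irqrestore(&head->lock, flags);

	/* 'passed' counts the higher-priority waiters queued ahead; the caller
	 * would typically now set its task state and call schedule() to suspend
	 * until it is woken as the next lock holder. */
	return passed;
}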