Diffstat (limited to 'litmus')
-rw-r--r-- | litmus/Kconfig         |  19
-rw-r--r-- | litmus/binheap.c       |  60
-rw-r--r-- | litmus/locking.c       | 339
-rw-r--r-- | litmus/sched_gsn_edf.c | 460
-rw-r--r-- | litmus/sched_plugin.c  |  17
5 files changed, 792 insertions, 103 deletions
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 841a7e4e9723..97200506e31c 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -67,6 +67,25 @@ config LITMUS_NESTED_LOCKING | |||
67 | help | 67 | help |
68 | Enable nested priority inheritance. | 68 | Enable nested priority inheritance. |
69 | 69 | ||
70 | config LITMUS_DGL_SUPPORT | ||
71 | bool "Support for dynamic group locks" | ||
72 | depends on LITMUS_NESTED_LOCKING | ||
73 | default n | ||
74 | help | ||
75 | Enable dynamic group lock support. | ||
76 | |||
77 | config LITMUS_MAX_DGL_SIZE | ||
78 | int "Maximum size of a dynamic group lock." | ||
79 | depends on LITMUS_DGL_SUPPORT | ||
80 | range 1 128 | ||
81 | default "10" | ||
82 | help | ||
83 | Dynamic group lock data structures are allocated on the process | ||
84 | stack when a group is requested. We cap the number of locks | ||
85 | in a dynamic group lock to avoid dynamic allocation. | ||
86 | |||
87 | TODO: Batch DGL requests exceeding LITMUS_MAX_DGL_SIZE. | ||
88 | |||
70 | endmenu | 89 | endmenu |
71 | 90 | ||
72 | menu "Performance Enhancements" | 91 | menu "Performance Enhancements" |
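Editor's note: the size cap above exists because the group-lock request descriptor is kept on the requesting task's kernel stack for the duration of the request (see sys_litmus_dgl_lock() in litmus/locking.c below). The following is a rough sketch of such a fixed-size descriptor, with the field layout inferred from how this commit uses it; the actual definition lives in a header that is not part of this diff.

    /* Assumed layout, reconstructed from the uses below; not the literal
     * header definition. CONFIG_LITMUS_MAX_DGL_SIZE bounds the arrays so
     * the whole descriptor fits on the caller's kernel stack. */
    #define MAX_DGL_SIZE CONFIG_LITMUS_MAX_DGL_SIZE

    typedef struct dgl_wait_state {
        struct task_struct *task;                 /* task requesting the group    */
        struct litmus_lock *locks[MAX_DGL_SIZE];  /* locks in the group           */
        int size;                                 /* number of locks requested    */
        int nr_remaining;                         /* locks not yet acquired       */
        int last_primary;                         /* index of lock we suspend on  */
        wait_queue_t wq_nodes[MAX_DGL_SIZE];      /* one wait-queue node per lock */
    } dgl_wait_state_t;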
diff --git a/litmus/binheap.c b/litmus/binheap.c
index f76260e64b0b..22feea614e50 100644
--- a/litmus/binheap.c
+++ b/litmus/binheap.c
@@ -1,5 +1,7 @@ | |||
1 | #include <litmus/binheap.h> | 1 | #include <litmus/binheap.h> |
2 | 2 | ||
3 | //extern void dump_node_data(struct binheap_node* parent, struct binheap_node* child); | ||
4 | //extern void dump_node_data2(struct binheap_handle *handle, struct binheap_node* bad_node); | ||
3 | 5 | ||
4 | int binheap_is_in_this_heap(struct binheap_node *node, | 6 | int binheap_is_in_this_heap(struct binheap_node *node, |
5 | struct binheap_handle* heap) | 7 | struct binheap_handle* heap) |
@@ -29,6 +31,11 @@ static void __update_ref(struct binheap_node *parent, | |||
29 | static void __binheap_swap(struct binheap_node *parent, | 31 | static void __binheap_swap(struct binheap_node *parent, |
30 | struct binheap_node *child) | 32 | struct binheap_node *child) |
31 | { | 33 | { |
34 | // if(parent == BINHEAP_POISON || child == BINHEAP_POISON) { | ||
35 | // dump_node_data(parent, child); | ||
36 | // BUG(); | ||
37 | // } | ||
38 | |||
32 | swap(parent->data, child->data); | 39 | swap(parent->data, child->data); |
33 | __update_ref(parent, child); | 40 | __update_ref(parent, child); |
34 | } | 41 | } |
@@ -185,12 +192,24 @@ static void __binheap_bubble_up( | |||
185 | struct binheap_handle *handle, | 192 | struct binheap_handle *handle, |
186 | struct binheap_node *node) | 193 | struct binheap_node *node) |
187 | { | 194 | { |
188 | /* Note: NULL data pointers are used internally for arbitrary delete */ | 195 | //BUG_ON(!binheap_is_in_heap(node)); |
196 | // if(!binheap_is_in_heap(node)) | ||
197 | // { | ||
198 | // dump_node_data2(handle, node); | ||
199 | // BUG(); | ||
200 | // } | ||
201 | |||
189 | while((node->parent != NULL) && | 202 | while((node->parent != NULL) && |
190 | ((node->data == BINHEAP_POISON) /* let BINHEAP_POISON data bubble to the top */ || | 203 | ((node->data == BINHEAP_POISON) /* let BINHEAP_POISON data bubble to the top */ || |
191 | handle->compare(node, node->parent))) { | 204 | handle->compare(node, node->parent))) { |
192 | __binheap_swap(node->parent, node); | 205 | __binheap_swap(node->parent, node); |
193 | node = node->parent; | 206 | node = node->parent; |
207 | |||
208 | // if(!binheap_is_in_heap(node)) | ||
209 | // { | ||
210 | // dump_node_data2(handle, node); | ||
211 | // BUG(); | ||
212 | // } | ||
194 | } | 213 | } |
195 | } | 214 | } |
196 | 215 | ||
@@ -228,6 +247,12 @@ void __binheap_add(struct binheap_node *new_node, | |||
228 | struct binheap_handle *handle, | 247 | struct binheap_handle *handle, |
229 | void *data) | 248 | void *data) |
230 | { | 249 | { |
250 | // if(binheap_is_in_heap(new_node)) | ||
251 | // { | ||
252 | // dump_node_data2(handle, new_node); | ||
253 | // BUG(); | ||
254 | // } | ||
255 | |||
231 | new_node->data = data; | 256 | new_node->data = data; |
232 | new_node->ref = new_node; | 257 | new_node->ref = new_node; |
233 | new_node->ref_ptr = &(new_node->ref); | 258 | new_node->ref_ptr = &(new_node->ref); |
@@ -284,6 +309,12 @@ void __binheap_delete_root(struct binheap_handle *handle, | |||
284 | { | 309 | { |
285 | struct binheap_node *root = handle->root; | 310 | struct binheap_node *root = handle->root; |
286 | 311 | ||
312 | // if(!binheap_is_in_heap(container)) | ||
313 | // { | ||
314 | // dump_node_data2(handle, container); | ||
315 | // BUG(); | ||
316 | // } | ||
317 | |||
287 | if(root != container) { | 318 | if(root != container) { |
288 | /* coalesce */ | 319 | /* coalesce */ |
289 | __binheap_swap_safe(handle, root, container); | 320 | __binheap_swap_safe(handle, root, container); |
@@ -366,6 +397,18 @@ void __binheap_delete(struct binheap_node *node_to_delete, | |||
366 | struct binheap_node *target = node_to_delete->ref; | 397 | struct binheap_node *target = node_to_delete->ref; |
367 | void *temp_data = target->data; | 398 | void *temp_data = target->data; |
368 | 399 | ||
400 | // if(!binheap_is_in_heap(node_to_delete)) | ||
401 | // { | ||
402 | // dump_node_data2(handle, node_to_delete); | ||
403 | // BUG(); | ||
404 | // } | ||
405 | // | ||
406 | // if(!binheap_is_in_heap(target)) | ||
407 | // { | ||
408 | // dump_node_data2(handle, target); | ||
409 | // BUG(); | ||
410 | // } | ||
411 | |||
369 | /* temporarily set data to null to allow node to bubble up to the top. */ | 412 | /* temporarily set data to null to allow node to bubble up to the top. */ |
370 | target->data = BINHEAP_POISON; | 413 | target->data = BINHEAP_POISON; |
371 | 414 | ||
@@ -373,7 +416,7 @@ void __binheap_delete(struct binheap_node *node_to_delete, | |||
373 | __binheap_delete_root(handle, node_to_delete); | 416 | __binheap_delete_root(handle, node_to_delete); |
374 | 417 | ||
375 | node_to_delete->data = temp_data; /* restore node data pointer */ | 418 | node_to_delete->data = temp_data; /* restore node data pointer */ |
376 | node_to_delete->parent = BINHEAP_POISON; /* poison the node */ | 419 | //node_to_delete->parent = BINHEAP_POISON; /* poison the node */ |
377 | } | 420 | } |
378 | 421 | ||
379 | /** | 422 | /** |
@@ -383,5 +426,18 @@ void __binheap_decrease(struct binheap_node *orig_node, | |||
383 | struct binheap_handle *handle) | 426 | struct binheap_handle *handle) |
384 | { | 427 | { |
385 | struct binheap_node *target = orig_node->ref; | 428 | struct binheap_node *target = orig_node->ref; |
429 | |||
430 | // if(!binheap_is_in_heap(orig_node)) | ||
431 | // { | ||
432 | // dump_node_data2(handle, orig_node); | ||
433 | // BUG(); | ||
434 | // } | ||
435 | // | ||
436 | // if(!binheap_is_in_heap(target)) | ||
437 | // { | ||
438 | // dump_node_data2(handle, target); | ||
439 | // BUG(); | ||
440 | // } | ||
441 | // | ||
386 | __binheap_bubble_up(handle, target); | 442 | __binheap_bubble_up(handle, target); |
387 | } | 443 | } |
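Editor's note: the BINHEAP_POISON changes above implement arbitrary delete. __binheap_delete() temporarily replaces the target node's data pointer with BINHEAP_POISON, which __binheap_bubble_up() treats as higher-priority than anything else, so the doomed node floats to the root and the ordinary delete-root path removes it. A hedged usage sketch follows; the item struct and compare callback are invented for illustration, and the binheap_add/binheap_delete/binheap_entry wrappers are assumed to mirror the __binheap_* workers and the calls made elsewhere in this commit.

    struct demo_item {
        int prio;
        struct binheap_node node;   /* assumed initialized/zeroed before use */
    };

    /* compare callback: nonzero if 'a' should sit above 'b'; it never sees
     * BINHEAP_POISON, which is short-circuited inside bubble-up. */
    static int demo_higher_prio(struct binheap_node *a, struct binheap_node *b)
    {
        struct demo_item *ia = binheap_entry(a, struct demo_item, node);
        struct demo_item *ib = binheap_entry(b, struct demo_item, node);
        return ia->prio > ib->prio;
    }

    /* heap assumed initialized with demo_higher_prio as its compare callback */
    static void demo(struct binheap_handle *heap)
    {
        struct demo_item x = { .prio = 5 };

        binheap_add(&x.node, heap, struct demo_item, node);
        /* ... */
        binheap_delete(&x.node, heap);  /* arbitrary delete via poison bubble-up */
    }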
diff --git a/litmus/locking.c b/litmus/locking.c
index 19ed5a8e16e9..b2f4a205cd04 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -6,6 +6,10 @@ | |||
6 | #include <litmus/trace.h> | 6 | #include <litmus/trace.h> |
7 | #include <litmus/litmus.h> | 7 | #include <litmus/litmus.h> |
8 | 8 | ||
9 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
10 | #include <linux/uaccess.h> | ||
11 | #endif | ||
12 | |||
9 | static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg); | 13 | static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg); |
10 | static int open_generic_lock(struct od_table_entry* entry, void* __user arg); | 14 | static int open_generic_lock(struct od_table_entry* entry, void* __user arg); |
11 | static int close_generic_lock(struct od_table_entry* entry); | 15 | static int close_generic_lock(struct od_table_entry* entry); |
@@ -31,7 +35,7 @@ static inline struct litmus_lock* get_lock(struct od_table_entry* entry) | |||
31 | 35 | ||
32 | 36 | ||
33 | atomic_t lock_id_gen = ATOMIC_INIT(0); | 37 | atomic_t lock_id_gen = ATOMIC_INIT(0); |
34 | raw_spinlock_t rsm_global_lock; | 38 | //raw_spinlock_t rsm_global_lock; |
35 | 39 | ||
36 | 40 | ||
37 | static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg) | 41 | static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg) |
@@ -50,9 +54,9 @@ static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user ar | |||
50 | 54 | ||
51 | lock->ident = atomic_inc_return(&lock_id_gen); | 55 | lock->ident = atomic_inc_return(&lock_id_gen); |
52 | 56 | ||
53 | if(lock->ident == 1) { | 57 | // if(lock->ident == 1) { |
54 | raw_spin_lock_init(&rsm_global_lock); | 58 | // raw_spin_lock_init(&rsm_global_lock); |
55 | } | 59 | // } |
56 | #endif | 60 | #endif |
57 | *obj_ref = lock; | 61 | *obj_ref = lock; |
58 | } | 62 | } |
@@ -142,25 +146,322 @@ struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq) | |||
142 | } | 146 | } |
143 | 147 | ||
144 | 148 | ||
145 | //#ifdef CONFIG_LITMUS_NESTED_LOCKING | 149 | #ifdef CONFIG_LITMUS_DGL_SUPPORT |
146 | ///* not "lock_nest" ... get it? */ | 150 | |
147 | //void nest_lock(struct litmus_lock *l, struct task_struct *t) | 151 | void select_next_lock(dgl_wait_state_t* dgl_wait, struct litmus_lock* prev_lock) |
148 | //{ | 152 | { |
149 | // if(tsk_rt(t)->last_lock) { | 153 | // int i = dgl_wait->size - 1; |
150 | // /* push new lock to front of old lock */ | 154 | |
151 | // struct litmus_lock *old = tsk_rt(t)->last_lock; | 155 | |
152 | // | 156 | BUG_ON(tsk_rt(dgl_wait->task)->blocked_lock); |
153 | // list_add(&l->lock_chain, &old->lock_chain); | 157 | |
154 | // } | 158 | WARN_ON(dgl_wait->locks[dgl_wait->last_primary] != prev_lock); |
155 | // | 159 | // |
156 | // tsk_rt(t)->last_lock = l; | 160 | // // since dgl_wait->task->blocked_lock, all locks after prev_lock |
161 | // // are already held. | ||
157 | // | 162 | // |
158 | // // local inh now becomes transitive inh | 163 | // // find the lock after prev. |
159 | // tsk_rt(t)->trans_prio = tsk_rt(t)->local_prio; // what about old transitive prio??? | 164 | // if(prev_lock) { |
160 | // tsk_rt(t)->local_prio = NULL; | 165 | // for(/**/; i >= 0; --i) { |
166 | // if(prev_lock == dgl_wait->locks[i]) { | ||
167 | // --i; | ||
168 | // break; | ||
169 | // } | ||
170 | // else { | ||
171 | // BUG_ON(!dgl_wait->locks[i]->ops->is_owner(dgl_wait->locks[i], dgl_wait->task)); | ||
172 | // } | ||
173 | // } | ||
174 | // } | ||
175 | |||
176 | for(dgl_wait->last_primary = dgl_wait->last_primary - 1; | ||
177 | dgl_wait->last_primary >= 0; | ||
178 | --(dgl_wait->last_primary)){ | ||
179 | if(!dgl_wait->locks[dgl_wait->last_primary]->ops->is_owner(dgl_wait->locks[dgl_wait->last_primary], dgl_wait->task)) { | ||
180 | |||
181 | tsk_rt(dgl_wait->task)->blocked_lock = dgl_wait->locks[dgl_wait->last_primary]; | ||
182 | mb(); | ||
183 | |||
184 | TRACE_CUR("New blocked lock is %d\n", dgl_wait->locks[dgl_wait->last_primary]->ident); | ||
185 | |||
186 | break; | ||
187 | } | ||
188 | } | ||
189 | |||
190 | // for(/**/; i >= 0; --i) { | ||
191 | // struct litmus_lock *l = dgl_wait->locks[i]; | ||
192 | // if(!l->ops->is_owner(l, dgl_wait->task)) { | ||
193 | // | ||
194 | // tsk_rt(dgl_wait->task)->blocked_lock = l; | ||
195 | // mb(); | ||
196 | // | ||
197 | // TRACE_CUR("New blocked lock is %d\n", l->ident); | ||
198 | // | ||
199 | // if(dgl_wait->last_primary >= 0) | ||
200 | // { | ||
201 | // TRACE_CUR("old meth = %d; new meth = %d\n", l->ident, dgl_wait->locks[dgl_wait->last_primary]->ident); | ||
202 | // WARN_ON(dgl_wait->locks[dgl_wait->last_primary] != l); | ||
203 | // } | ||
204 | // | ||
205 | // break; | ||
206 | // } | ||
207 | // else { | ||
208 | // TRACE_CUR("Lock %d is actually held!\n", l->ident); | ||
209 | // } | ||
210 | // } | ||
211 | } | ||
212 | |||
213 | int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key) | ||
214 | { | ||
215 | // should never be called. | ||
216 | BUG(); | ||
217 | return 1; | ||
218 | } | ||
219 | |||
220 | void __waitqueue_dgl_remove_first(wait_queue_head_t *wq, dgl_wait_state_t** dgl_wait, struct task_struct **task) | ||
221 | { | ||
222 | wait_queue_t *q; | ||
223 | |||
224 | *dgl_wait = NULL; | ||
225 | *task = NULL; | ||
226 | |||
227 | if (waitqueue_active(wq)) { | ||
228 | q = list_entry(wq->task_list.next, | ||
229 | wait_queue_t, task_list); | ||
230 | |||
231 | if(q->func == dgl_wake_up) { | ||
232 | *dgl_wait = (dgl_wait_state_t*) q->private; | ||
233 | } | ||
234 | else { | ||
235 | *task = (struct task_struct*) q->private; | ||
236 | } | ||
237 | |||
238 | __remove_wait_queue(wq, q); | ||
239 | } | ||
240 | } | ||
241 | |||
242 | void init_dgl_waitqueue_entry(wait_queue_t *wq_node, dgl_wait_state_t* dgl_wait) | ||
243 | { | ||
244 | init_waitqueue_entry(wq_node, dgl_wait->task); | ||
245 | wq_node->private = dgl_wait; | ||
246 | wq_node->func = dgl_wake_up; | ||
247 | } | ||
248 | |||
249 | |||
250 | static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait) | ||
251 | { | ||
252 | int i; | ||
253 | unsigned long irqflags; //, dummyflags; | ||
254 | raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(dgl_wait->task); | ||
255 | |||
256 | BUG_ON(dgl_wait->task != current); | ||
257 | |||
258 | raw_spin_lock_irqsave(dgl_lock, irqflags); | ||
259 | |||
260 | |||
261 | dgl_wait->nr_remaining = dgl_wait->size; | ||
262 | //atomic_set(&dgl_wait->nr_remaining, dgl_wait->size); | ||
263 | |||
264 | // try to acquire each lock. enqueue (non-blocking) if it is unavailable. | ||
265 | for(i = 0; i < dgl_wait->size; ++i) { | ||
266 | struct litmus_lock *l = dgl_wait->locks[i]; | ||
267 | |||
268 | // dgl_lock() must set task state to TASK_UNINTERRUPTIBLE if task blocks. | ||
269 | |||
270 | if(l->ops->dgl_lock(l, dgl_wait, &dgl_wait->wq_nodes[i])) { | ||
271 | --(dgl_wait->nr_remaining); | ||
272 | //atomic_dec(&dgl_wait->nr_remaining); | ||
273 | TRACE_CUR("Acquired lock %d immediatly.\n", l->ident); | ||
274 | } | ||
275 | } | ||
276 | |||
277 | //if(atomic_read(&dgl_wait->nr_remaining) == 0) { | ||
278 | if(dgl_wait->nr_remaining == 0) { | ||
279 | // acquired entire group immediately | ||
280 | TRACE_CUR("Acquired all locks in DGL immediately!\n"); | ||
281 | } | ||
282 | else { | ||
283 | |||
284 | TRACE_CUR("As many as %d locks in DGL are pending. Suspending.\n", dgl_wait->nr_remaining); //atomic_read(&dgl_wait->nr_remaining)); | ||
285 | |||
286 | for(i = dgl_wait->size - 1; i >= 0; --i) { | ||
287 | struct litmus_lock *l = dgl_wait->locks[i]; | ||
288 | if(!l->ops->is_owner(l, dgl_wait->task)) { // double-check to be thread safe | ||
289 | |||
290 | TRACE_CUR("Activating priority inheritance on lock %d\n", l->ident); | ||
291 | |||
292 | TS_DGL_LOCK_SUSPEND; | ||
293 | |||
294 | l->ops->enable_priority(l, dgl_wait); | ||
295 | dgl_wait->last_primary = i; | ||
296 | |||
297 | TRACE_CUR("Suspending for lock %d\n", l->ident); | ||
298 | |||
299 | raw_spin_unlock_irqrestore(dgl_lock, irqflags); // free dgl_lock before suspending | ||
300 | |||
301 | schedule(); // suspend!!! | ||
302 | |||
303 | TS_DGL_LOCK_RESUME; | ||
304 | |||
305 | TRACE_CUR("Woken up from DGL suspension.\n"); | ||
306 | |||
307 | goto all_acquired; // we should hold all locks when we wake up. | ||
308 | } | ||
309 | } | ||
310 | |||
311 | TRACE_CUR("Didn't have to suspend after all, but calling schedule() anyway.\n"); | ||
312 | BUG(); | ||
313 | } | ||
314 | |||
315 | raw_spin_unlock_irqrestore(dgl_lock, irqflags); | ||
316 | |||
317 | all_acquired: | ||
318 | |||
319 | // sanity check (for testing) | ||
320 | for(i = 0; i < dgl_wait->size; ++i) { | ||
321 | struct litmus_lock *l = dgl_wait->locks[i]; | ||
322 | BUG_ON(!l->ops->is_owner(l, dgl_wait->task)); | ||
323 | } | ||
324 | |||
325 | TRACE_CUR("Acquired entire DGL\n"); | ||
326 | |||
327 | return 0; | ||
328 | } | ||
329 | |||
330 | //static int supports_dgl(struct litmus_lock *l) | ||
331 | //{ | ||
332 | // struct litmus_lock_ops* ops = l->ops; | ||
333 | // | ||
334 | // return (ops->dgl_lock && | ||
335 | // ops->is_owner && | ||
336 | // ops->enable_priority); | ||
161 | //} | 337 | //} |
162 | //#endif | ||
163 | 338 | ||
339 | asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size) | ||
340 | { | ||
341 | struct task_struct *t = current; | ||
342 | long err = -EINVAL; | ||
343 | int dgl_ods[MAX_DGL_SIZE]; | ||
344 | int i; | ||
345 | |||
346 | dgl_wait_state_t dgl_wait_state; // lives on the stack until all resources in DGL are held. | ||
347 | |||
348 | if(dgl_size > MAX_DGL_SIZE || dgl_size < 1) | ||
349 | goto out; | ||
350 | |||
351 | if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(int)))) | ||
352 | goto out; | ||
353 | |||
354 | if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(int)))) | ||
355 | goto out; | ||
356 | |||
357 | if (!is_realtime(t)) { | ||
358 | err = -EPERM; | ||
359 | goto out; | ||
360 | } | ||
361 | |||
362 | for(i = 0; i < dgl_size; ++i) { | ||
363 | struct od_table_entry *entry = get_entry_for_od(dgl_ods[i]); | ||
364 | if(entry && is_lock(entry)) { | ||
365 | dgl_wait_state.locks[i] = get_lock(entry); | ||
366 | // if(!supports_dgl(dgl_wait_state.locks[i])) { | ||
367 | // TRACE_CUR("Lock %d does not support all required DGL operations.\n", | ||
368 | // dgl_wait_state.locks[i]->ident); | ||
369 | // goto out; | ||
370 | // } | ||
371 | } | ||
372 | else { | ||
373 | TRACE_CUR("Invalid lock identifier\n"); | ||
374 | goto out; | ||
375 | } | ||
376 | } | ||
377 | |||
378 | dgl_wait_state.task = t; | ||
379 | dgl_wait_state.size = dgl_size; | ||
380 | |||
381 | TS_DGL_LOCK_START; | ||
382 | err = do_litmus_dgl_lock(&dgl_wait_state); | ||
383 | |||
384 | /* Note: task may have been suspended or preempted in between! Take | ||
385 | * this into account when computing overheads. */ | ||
386 | TS_DGL_LOCK_END; | ||
387 | |||
388 | out: | ||
389 | return err; | ||
390 | } | ||
391 | |||
392 | static long do_litmus_dgl_unlock(struct litmus_lock* dgl_locks[], int dgl_size) | ||
393 | { | ||
394 | int i; | ||
395 | long err = 0; | ||
396 | |||
397 | TRACE_CUR("Unlocking a DGL of %d size\n", dgl_size); | ||
398 | |||
399 | for(i = dgl_size - 1; i >= 0; --i) { // unlock in reverse order | ||
400 | |||
401 | struct litmus_lock *l = dgl_locks[i]; | ||
402 | long tmp_err; | ||
403 | |||
404 | TRACE_CUR("Unlocking lock %d of DGL.\n", l->ident); | ||
405 | |||
406 | tmp_err = l->ops->unlock(l); | ||
407 | |||
408 | if(tmp_err) { | ||
409 | TRACE_CUR("There was an error unlocking %d: %d.\n", l->ident, tmp_err); | ||
410 | err = tmp_err; | ||
411 | } | ||
412 | } | ||
413 | |||
414 | TRACE_CUR("DGL unlocked. err = %d\n", err); | ||
415 | |||
416 | return err; | ||
417 | } | ||
418 | |||
419 | asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size) | ||
420 | { | ||
421 | long err = -EINVAL; | ||
422 | int dgl_ods[MAX_DGL_SIZE]; | ||
423 | struct od_table_entry* entry; | ||
424 | int i; | ||
425 | |||
426 | struct litmus_lock* dgl_locks[MAX_DGL_SIZE]; | ||
427 | |||
428 | if(dgl_size > MAX_DGL_SIZE || dgl_size < 1) | ||
429 | goto out; | ||
430 | |||
431 | if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(int)))) | ||
432 | goto out; | ||
433 | |||
434 | if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(int)))) | ||
435 | goto out; | ||
436 | |||
437 | for(i = 0; i < dgl_size; ++i) { | ||
438 | entry = get_entry_for_od(dgl_ods[i]); | ||
439 | if(entry && is_lock(entry)) { | ||
440 | dgl_locks[i] = get_lock(entry); | ||
441 | // if(!supports_dgl(dgl_locks[i])) { | ||
442 | // TRACE_CUR("Lock %d does not support all required DGL operations.\n", | ||
443 | // dgl_locks[i]->ident); | ||
444 | // goto out; | ||
445 | // } | ||
446 | } | ||
447 | else { | ||
448 | TRACE_CUR("Invalid lock identifier\n"); | ||
449 | goto out; | ||
450 | } | ||
451 | } | ||
452 | |||
453 | TS_DGL_UNLOCK_START; | ||
454 | err = do_litmus_dgl_unlock(dgl_locks, dgl_size); | ||
455 | |||
456 | /* Note: task may have been suspended or preempted in between! Take | ||
457 | * this into account when computing overheads. */ | ||
458 | TS_DGL_UNLOCK_END; | ||
459 | |||
460 | out: | ||
461 | return err; | ||
462 | } | ||
463 | |||
464 | #endif | ||
164 | 465 | ||
165 | 466 | ||
166 | #else | 467 | #else |
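Editor's note: the two syscalls above take an array of object descriptors (the same descriptors returned when the individual locks were opened) plus the group size. Below is a hedged userspace sketch of how a task would drive them; the wrapper names follow liblitmus conventions but are assumptions, since this commit only adds the kernel side.

    /* assumed liblitmus-style wrappers around sys_litmus_dgl_lock/unlock */
    extern int litmus_dgl_lock(int *ods, int dgl_size);
    extern int litmus_dgl_unlock(int *ods, int dgl_size);

    int with_both_resources(int od_a, int od_b)
    {
        int group[2] = { od_a, od_b };  /* <= CONFIG_LITMUS_MAX_DGL_SIZE entries */
        int err;

        /* suspends (at most once) until every lock in the group is held */
        err = litmus_dgl_lock(group, 2);
        if (err)
            return err;

        /* ... critical section over both resources ... */

        return litmus_dgl_unlock(group, 2);  /* released in reverse order */
    }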
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 3d653bdca357..c0316c4a1b35 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -120,6 +120,9 @@ static struct binheap_handle gsnedf_cpu_heap; | |||
120 | static rt_domain_t gsnedf; | 120 | static rt_domain_t gsnedf; |
121 | #define gsnedf_lock (gsnedf.ready_lock) | 121 | #define gsnedf_lock (gsnedf.ready_lock) |
122 | 122 | ||
123 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
124 | static raw_spinlock_t dgl_lock; | ||
125 | #endif | ||
123 | 126 | ||
124 | /* Uncomment this if you want to see all scheduling decisions in the | 127 | /* Uncomment this if you want to see all scheduling decisions in the |
125 | * TRACE() log. | 128 | * TRACE() log. |
@@ -835,6 +838,43 @@ void print_hp_waiters(struct binheap_node* n, int depth) | |||
835 | if(n->right) print_hp_waiters(n->right, depth+1); | 838 | if(n->right) print_hp_waiters(n->right, depth+1); |
836 | } | 839 | } |
837 | 840 | ||
841 | void dump_node_data(struct binheap_node* parent, struct binheap_node* child) | ||
842 | { | ||
843 | struct binheap_node *root = (parent != BINHEAP_POISON) ? parent : child; | ||
844 | struct binheap_node *bad_node = (parent == BINHEAP_POISON) ? parent : child; | ||
845 | struct nested_info *nest; | ||
846 | |||
847 | while(root->parent != NULL) { | ||
848 | root = root->parent; | ||
849 | } | ||
850 | |||
851 | if(parent == BINHEAP_POISON) { | ||
852 | TRACE_CUR("parent was bad node.\n"); | ||
853 | } | ||
854 | else { | ||
855 | TRACE_CUR("child was bad node.\n"); | ||
856 | } | ||
857 | TRACE_CUR("Bad node info: data = %p, left = %p, right = %p\n", bad_node->data, bad_node->left, bad_node->right); | ||
858 | |||
859 | nest = binheap_entry(bad_node, struct nested_info, hp_binheap_node); | ||
860 | TRACE_CUR("Lock with bad node: lock = %d\n", (nest->lock) ? nest->lock->ident : -1); | ||
861 | |||
862 | print_hp_waiters(root, 1); | ||
863 | } | ||
864 | |||
865 | void dump_node_data2(struct binheap_handle *handle, struct binheap_node* bad_node) | ||
866 | { | ||
867 | struct binheap_node *root = handle->root; | ||
868 | struct nested_info *nest; | ||
869 | |||
870 | TRACE_CUR("Bad node info: data = %p, left = %p, right = %p\n", bad_node->data, bad_node->left, bad_node->right); | ||
871 | |||
872 | nest = binheap_entry(bad_node, struct nested_info, hp_binheap_node); | ||
873 | TRACE_CUR("Lock with bad node: lock = %d\n", (nest->lock) ? nest->lock->ident : -1); | ||
874 | |||
875 | print_hp_waiters(root, 1); | ||
876 | } | ||
877 | |||
838 | 878 | ||
839 | /* called with IRQs off */ | 879 | /* called with IRQs off */ |
840 | /* preconditions: | 880 | /* preconditions: |
@@ -861,12 +901,12 @@ static void nested_increase_priority_inheritance(struct task_struct* t, struct t | |||
861 | } | 901 | } |
862 | else { | 902 | else { |
863 | TRACE_TASK(t, "Inheritor is blocked on lock (%d) that does not support nesting!\n", blocked_lock->ident); | 903 | TRACE_TASK(t, "Inheritor is blocked on lock (%d) that does not support nesting!\n", blocked_lock->ident); |
864 | raw_spin_unlock_irqrestore(to_unlock, irqflags); | 904 | unlock_fine_irqrestore(to_unlock, irqflags); |
865 | } | 905 | } |
866 | } | 906 | } |
867 | else { | 907 | else { |
868 | TRACE_TASK(t, "is not blocked. No propagation.\n"); | 908 | TRACE_TASK(t, "is not blocked. No propagation.\n"); |
869 | raw_spin_unlock_irqrestore(to_unlock, irqflags); | 909 | unlock_fine_irqrestore(to_unlock, irqflags); |
870 | } | 910 | } |
871 | } | 911 | } |
872 | 912 | ||
@@ -891,12 +931,12 @@ static void nested_decrease_priority_inheritance(struct task_struct* t, struct t | |||
891 | } | 931 | } |
892 | else { | 932 | else { |
893 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", blocked_lock); | 933 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", blocked_lock); |
894 | raw_spin_unlock_irqrestore(to_unlock, irqflags); | 934 | unlock_fine_irqrestore(to_unlock, irqflags); |
895 | } | 935 | } |
896 | } | 936 | } |
897 | else { | 937 | else { |
898 | TRACE_TASK(t, "is not blocked. No propagation.\n"); | 938 | TRACE_TASK(t, "is not blocked. No propagation.\n"); |
899 | raw_spin_unlock_irqrestore(to_unlock, irqflags); | 939 | unlock_fine_irqrestore(to_unlock, irqflags); |
900 | } | 940 | } |
901 | } | 941 | } |
902 | 942 | ||
@@ -930,16 +970,38 @@ static inline struct rsm_mutex* rsm_mutex_from_lock(struct litmus_lock* lock) | |||
930 | struct task_struct* rsm_mutex_find_hp_waiter(struct rsm_mutex *mutex, | 970 | struct task_struct* rsm_mutex_find_hp_waiter(struct rsm_mutex *mutex, |
931 | struct task_struct* skip) | 971 | struct task_struct* skip) |
932 | { | 972 | { |
973 | wait_queue_t *q; | ||
933 | struct list_head *pos; | 974 | struct list_head *pos; |
934 | struct task_struct *queued, *found = NULL; | 975 | struct task_struct *queued = NULL, *found = NULL; |
976 | |||
977 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
978 | dgl_wait_state_t *dgl_wait = NULL; | ||
979 | #endif | ||
935 | 980 | ||
936 | list_for_each(pos, &mutex->wait.task_list) { | 981 | list_for_each(pos, &mutex->wait.task_list) { |
937 | queued = (struct task_struct*) list_entry(pos, wait_queue_t, | 982 | q = list_entry(pos, wait_queue_t, task_list); |
938 | task_list)->private; | 983 | |
984 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
985 | if(q->func == dgl_wake_up) { | ||
986 | dgl_wait = (dgl_wait_state_t*) q->private; | ||
987 | if(tsk_rt(dgl_wait->task)->blocked_lock == &mutex->litmus_lock) { | ||
988 | queued = dgl_wait->task; | ||
989 | } | ||
990 | else { | ||
991 | queued = NULL; // skip it. | ||
992 | } | ||
993 | } | ||
994 | else { | ||
995 | queued = (struct task_struct*) q->private; | ||
996 | } | ||
997 | #else | ||
998 | queued = (struct task_struct*) q->private; | ||
999 | #endif | ||
939 | 1000 | ||
940 | /* Compare task prios, find high prio task. */ | 1001 | /* Compare task prios, find high prio task. */ |
941 | if (queued != skip && edf_higher_prio(queued, found)) | 1002 | if (queued && queued != skip && edf_higher_prio(queued, found)) { |
942 | found = queued; | 1003 | found = queued; |
1004 | } | ||
943 | } | 1005 | } |
944 | return found; | 1006 | return found; |
945 | } | 1007 | } |
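Editor's note: the rewritten rsm_mutex_find_hp_waiter() above distinguishes group-lock waiters from ordinary waiters by the wait-queue entry's wake function. A DGL entry carries dgl_wake_up and stores a dgl_wait_state_t in ->private, and it only competes for hp_waiter if this mutex is the lock its task is actually suspended on. The helper below merely restates that classification for readability; it is not part of the patch.

    static struct task_struct *relevant_waiter(wait_queue_t *q, struct litmus_lock *l)
    {
    #ifdef CONFIG_LITMUS_DGL_SUPPORT
        if (q->func == dgl_wake_up) {
            dgl_wait_state_t *dgl_wait = (dgl_wait_state_t *) q->private;
            /* a DGL waiter counts only on its primary (blocking) lock */
            return (tsk_rt(dgl_wait->task)->blocked_lock == l) ?
                dgl_wait->task : NULL;
        }
    #endif
        return (struct task_struct *) q->private;
    }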
@@ -951,6 +1013,136 @@ static inline struct task_struct* top_priority(struct binheap_handle* handle) { | |||
951 | return NULL; | 1013 | return NULL; |
952 | } | 1014 | } |
953 | 1015 | ||
1016 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1017 | //static void gsnedf_rsm_mutex_reserve(struct litmus_lock *l, unsigned long *irqflags) | ||
1018 | //{ | ||
1019 | // struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1020 | // raw_spin_lock_irqsave(&mutex->lock, *irqflags); | ||
1021 | //} | ||
1022 | // | ||
1023 | //static void gsnedf_rsm_mutex_unreserve(struct litmus_lock *l, unsigned long irqflags) | ||
1024 | //{ | ||
1025 | // struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1026 | // raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | ||
1027 | //} | ||
1028 | |||
1029 | static raw_spinlock_t* gsn_edf_get_dgl_spinlock(struct task_struct *t) | ||
1030 | { | ||
1031 | return(&dgl_lock); | ||
1032 | } | ||
1033 | |||
1034 | static int gsn_edf_rsm_mutex_is_owner(struct litmus_lock *l, struct task_struct *t) | ||
1035 | { | ||
1036 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1037 | return(mutex->owner == t); | ||
1038 | } | ||
1039 | |||
1040 | |||
1041 | // return 1 if resource was immediately acquired. | ||
1042 | // Assumes mutex->lock is held. | ||
1043 | // Must set task state to TASK_UNINTERRUPTIBLE if task blocks. | ||
1044 | static int gsn_edf_rsm_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node) | ||
1045 | { | ||
1046 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1047 | struct task_struct *t = dgl_wait->task; | ||
1048 | |||
1049 | int acquired_immediatly = 0; | ||
1050 | |||
1051 | BUG_ON(t != current); | ||
1052 | |||
1053 | if (mutex->owner) { | ||
1054 | TRACE_TASK(t, "Enqueuing on lock %d.\n", l->ident); | ||
1055 | |||
1056 | init_dgl_waitqueue_entry(wq_node, dgl_wait); | ||
1057 | |||
1058 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
1059 | __add_wait_queue_tail_exclusive(&mutex->wait, wq_node); | ||
1060 | } else { | ||
1061 | TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident); | ||
1062 | |||
1063 | /* it's ours now */ | ||
1064 | mutex->owner = t; | ||
1065 | |||
1066 | raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
1067 | binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node); | ||
1068 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
1069 | |||
1070 | acquired_immediatly = 1; | ||
1071 | } | ||
1072 | |||
1073 | return acquired_immediatly; | ||
1074 | } | ||
1075 | |||
1076 | // Assumes mutex->lock is held. | ||
1077 | static void gsn_edf_rsm_enable_priority(struct litmus_lock *l, dgl_wait_state_t* dgl_wait) | ||
1078 | { | ||
1079 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1080 | struct task_struct *t = dgl_wait->task; | ||
1081 | struct task_struct *owner = mutex->owner; | ||
1082 | unsigned long flags = 0; // these are unused under DGL coarse-grain locking | ||
1083 | |||
1084 | BUG_ON(owner == t); | ||
1085 | |||
1086 | tsk_rt(t)->blocked_lock = l; | ||
1087 | mb(); | ||
1088 | |||
1089 | if (edf_higher_prio(t, mutex->hp_waiter)) { | ||
1090 | |||
1091 | struct task_struct *old_max_eff_prio; | ||
1092 | struct task_struct *new_max_eff_prio; | ||
1093 | struct task_struct *new_prio = NULL; | ||
1094 | |||
1095 | if(mutex->hp_waiter) | ||
1096 | TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", mutex->hp_waiter->comm, mutex->hp_waiter->pid); | ||
1097 | else | ||
1098 | TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); | ||
1099 | |||
1100 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1101 | |||
1102 | //TRACE_TASK(owner, "Heap Before:\n"); | ||
1103 | //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); | ||
1104 | |||
1105 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1106 | |||
1107 | mutex->hp_waiter = t; | ||
1108 | l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); | ||
1109 | |||
1110 | binheap_decrease(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); | ||
1111 | |||
1112 | //TRACE_TASK(owner, "Heap After:\n"); | ||
1113 | //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); | ||
1114 | |||
1115 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1116 | |||
1117 | if(new_max_eff_prio != old_max_eff_prio) { | ||
1118 | TRACE_TASK(t, "is new hp_waiter.\n"); | ||
1119 | |||
1120 | if ((effective_priority(owner) == old_max_eff_prio) || | ||
1121 | (__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))){ | ||
1122 | new_prio = new_max_eff_prio; | ||
1123 | } | ||
1124 | } | ||
1125 | else { | ||
1126 | TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); | ||
1127 | } | ||
1128 | |||
1129 | //raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1130 | |||
1131 | if(new_prio) { | ||
1132 | nested_increase_priority_inheritance(owner, new_prio, &mutex->lock, flags); // unlocks lock. | ||
1133 | } | ||
1134 | else { | ||
1135 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1136 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
1137 | } | ||
1138 | } | ||
1139 | else { | ||
1140 | TRACE_TASK(t, "no change in hp_waiter.\n"); | ||
1141 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
1142 | } | ||
1143 | } | ||
1144 | #endif | ||
1145 | |||
954 | int gsnedf_rsm_mutex_lock(struct litmus_lock* l) | 1146 | int gsnedf_rsm_mutex_lock(struct litmus_lock* l) |
955 | { | 1147 | { |
956 | struct task_struct *t = current; | 1148 | struct task_struct *t = current; |
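Editor's note: the comments above spell out the per-lock contract for joining a group request: dgl_lock() runs under the coarse DGL spinlock, returns 1 only if the lock was taken immediately, and must put the task into TASK_UNINTERRUPTIBLE before enqueuing it; enable_priority() later installs the lock as the task's blocked_lock and starts inheritance. A minimal conforming dgl_lock() for a hypothetical 'simple_mutex' is sketched below; the type is invented for illustration and deliberately omits the priority-inheritance bookkeeping the rsm version performs.

    struct simple_mutex {
        struct litmus_lock litmus_lock;
        struct task_struct *owner;
        wait_queue_head_t wait;
    };

    static int simple_mutex_dgl_lock(struct litmus_lock *l,
                                     dgl_wait_state_t *dgl_wait,
                                     wait_queue_t *wq_node)
    {
        struct simple_mutex *m = container_of(l, struct simple_mutex, litmus_lock);

        if (m->owner) {
            /* caller (do_litmus_dgl_lock) holds the coarse DGL spinlock,
             * so this enqueue cannot race with an unlock of m */
            init_dgl_waitqueue_entry(wq_node, dgl_wait);
            set_task_state(dgl_wait->task, TASK_UNINTERRUPTIBLE);
            __add_wait_queue_tail_exclusive(&m->wait, wq_node);
            return 0;               /* not acquired; caller may suspend */
        }

        m->owner = dgl_wait->task;
        return 1;                   /* acquired immediately */
    }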
@@ -962,9 +1154,10 @@ int gsnedf_rsm_mutex_lock(struct litmus_lock* l) | |||
962 | if (!is_realtime(t)) | 1154 | if (!is_realtime(t)) |
963 | return -EPERM; | 1155 | return -EPERM; |
964 | 1156 | ||
965 | raw_spin_lock_irqsave(&mutex->lock, flags); | 1157 | |
966 | //raw_spin_lock_irqsave(&rsm_global_lock, flags); | 1158 | lock_global_irqsave(&dgl_lock, flags); |
967 | 1159 | lock_fine_irqsave(&mutex->lock, flags); | |
1160 | |||
968 | if (mutex->owner) { | 1161 | if (mutex->owner) { |
969 | TRACE_TASK(t, "Blocking on lock %d.\n", l->ident); | 1162 | TRACE_TASK(t, "Blocking on lock %d.\n", l->ident); |
970 | 1163 | ||
@@ -1023,29 +1216,24 @@ int gsnedf_rsm_mutex_lock(struct litmus_lock* l) | |||
1023 | TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); | 1216 | TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); |
1024 | } | 1217 | } |
1025 | 1218 | ||
1026 | //raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1027 | |||
1028 | if(new_prio) { | 1219 | if(new_prio) { |
1029 | nested_increase_priority_inheritance(owner, new_prio, &mutex->lock, flags); // unlocks lock. | 1220 | nested_increase_priority_inheritance(owner, new_prio, &mutex->lock, flags); // unlocks lock. |
1030 | } | 1221 | } |
1031 | else { | 1222 | else { |
1032 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | 1223 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); |
1033 | raw_spin_unlock_irqrestore(&mutex->lock, flags); | 1224 | unlock_fine_irqrestore(&mutex->lock, flags); |
1034 | } | 1225 | } |
1035 | |||
1036 | } | 1226 | } |
1037 | else { | 1227 | else { |
1038 | TRACE_TASK(t, "no change in hp_waiter.\n"); | 1228 | TRACE_TASK(t, "no change in hp_waiter.\n"); |
1039 | raw_spin_unlock_irqrestore(&mutex->lock, flags); | 1229 | |
1230 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
1040 | } | 1231 | } |
1041 | 1232 | ||
1042 | 1233 | unlock_global_irqrestore(&dgl_lock, flags); | |
1234 | |||
1043 | TS_LOCK_SUSPEND; | 1235 | TS_LOCK_SUSPEND; |
1044 | 1236 | ||
1045 | /* release lock before sleeping */ | ||
1046 | //raw_spin_unlock_irqrestore(&rsm_global_lock, flags); | ||
1047 | //raw_spin_unlock_irqrestore(&mutex->lock, flags); | ||
1048 | |||
1049 | /* We depend on the FIFO order. Thus, we don't need to recheck | 1237 | /* We depend on the FIFO order. Thus, we don't need to recheck |
1050 | * when we wake up; we are guaranteed to have the lock since | 1238 | * when we wake up; we are guaranteed to have the lock since |
1051 | * there is only one wake up per release. | 1239 | * there is only one wake up per release. |
@@ -1072,32 +1260,56 @@ int gsnedf_rsm_mutex_lock(struct litmus_lock* l) | |||
1072 | binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node); | 1260 | binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node); |
1073 | raw_spin_unlock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock); | 1261 | raw_spin_unlock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock); |
1074 | 1262 | ||
1075 | raw_spin_unlock_irqrestore(&mutex->lock, flags); | 1263 | |
1076 | //raw_spin_unlock_irqrestore(&rsm_global_lock, flags); | 1264 | unlock_fine_irqrestore(&mutex->lock, flags); |
1265 | unlock_global_irqrestore(&dgl_lock, flags); | ||
1077 | } | 1266 | } |
1078 | 1267 | ||
1079 | return 0; | 1268 | return 0; |
1080 | } | 1269 | } |
1081 | 1270 | ||
1082 | 1271 | ||
1272 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1273 | void select_next_lock_if_primary(struct litmus_lock *l, dgl_wait_state_t *dgl_wait) | ||
1274 | { | ||
1275 | if(tsk_rt(dgl_wait->task)->blocked_lock == l) { | ||
1276 | TRACE_CUR("Lock %d in DGL was primary for %s/%d.\n", l->ident, dgl_wait->task->comm, dgl_wait->task->pid); | ||
1277 | tsk_rt(dgl_wait->task)->blocked_lock = NULL; | ||
1278 | mb(); | ||
1279 | select_next_lock(dgl_wait, l); // pick the next lock to be blocked on | ||
1280 | } | ||
1281 | else { | ||
1282 | TRACE_CUR("Got lock early! Lock %d in DGL was NOT primary for %s/%d.\n", l->ident, dgl_wait->task->comm, dgl_wait->task->pid); | ||
1283 | } | ||
1284 | } | ||
1285 | #endif | ||
1286 | |||
1287 | |||
1083 | int gsnedf_rsm_mutex_unlock(struct litmus_lock* l) | 1288 | int gsnedf_rsm_mutex_unlock(struct litmus_lock* l) |
1084 | { | 1289 | { |
1085 | struct task_struct *t = current, *next; | 1290 | struct task_struct *t = current, *next = NULL; |
1086 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | 1291 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); |
1087 | unsigned long flags; | 1292 | unsigned long flags; |
1088 | 1293 | ||
1089 | struct task_struct *old_max_eff_prio; | 1294 | struct task_struct *old_max_eff_prio; |
1090 | 1295 | ||
1296 | int wake_up_task = 1; | ||
1297 | |||
1298 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1299 | dgl_wait_state_t *dgl_wait = NULL; | ||
1300 | #endif | ||
1091 | 1301 | ||
1092 | int err = 0; | 1302 | int err = 0; |
1093 | 1303 | ||
1094 | raw_spin_lock_irqsave(&mutex->lock, flags); | 1304 | lock_global_irqsave(&dgl_lock, flags); |
1095 | //raw_spin_lock_irqsave(&rsm_global_lock, flags); | 1305 | lock_fine_irqsave(&mutex->lock, flags); |
1096 | 1306 | ||
1097 | 1307 | ||
1098 | if (mutex->owner != t) { | 1308 | if (mutex->owner != t) { |
1099 | err = -EINVAL; | 1309 | err = -EINVAL; |
1100 | goto out; | 1310 | unlock_fine_irqrestore(&mutex->lock, flags); |
1311 | unlock_global_irqrestore(&dgl_lock, flags); | ||
1312 | return err; | ||
1101 | } | 1313 | } |
1102 | 1314 | ||
1103 | 1315 | ||
@@ -1147,16 +1359,25 @@ int gsnedf_rsm_mutex_unlock(struct litmus_lock* l) | |||
1147 | 1359 | ||
1148 | 1360 | ||
1149 | /* check if there are jobs waiting for this resource */ | 1361 | /* check if there are jobs waiting for this resource */ |
1362 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1363 | __waitqueue_dgl_remove_first(&mutex->wait, &dgl_wait, &next); | ||
1364 | if(dgl_wait) { | ||
1365 | next = dgl_wait->task; | ||
1366 | //select_next_lock_if_primary(l, dgl_wait); | ||
1367 | } | ||
1368 | #else | ||
1150 | next = __waitqueue_remove_first(&mutex->wait); | 1369 | next = __waitqueue_remove_first(&mutex->wait); |
1370 | #endif | ||
1151 | if (next) { | 1371 | if (next) { |
1152 | /* next becomes the resource holder */ | 1372 | /* next becomes the resource holder */ |
1153 | mutex->owner = next; | 1373 | mutex->owner = next; |
1154 | TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid); | 1374 | TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid); |
1155 | 1375 | ||
1156 | 1376 | // if(tsk_rt(next)->blocked_lock == &mutex->litmus_lock) { // might be false for DGL. | |
1157 | tsk_rt(next)->blocked_lock = NULL; | 1377 | // tsk_rt(next)->blocked_lock = NULL; |
1378 | // mb(); | ||
1379 | // } | ||
1158 | 1380 | ||
1159 | |||
1160 | /* determine new hp_waiter if necessary */ | 1381 | /* determine new hp_waiter if necessary */ |
1161 | if (next == mutex->hp_waiter) { | 1382 | if (next == mutex->hp_waiter) { |
1162 | 1383 | ||
@@ -1181,10 +1402,19 @@ int gsnedf_rsm_mutex_unlock(struct litmus_lock* l) | |||
1181 | binheap_add(&l->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, struct nested_info, hp_binheap_node); | 1402 | binheap_add(&l->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, struct nested_info, hp_binheap_node); |
1182 | 1403 | ||
1183 | //TRACE_TASK(next, "Heap After:\n"); | 1404 | //TRACE_TASK(next, "Heap After:\n"); |
1184 | //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); | 1405 | //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); |
1185 | 1406 | ||
1407 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1408 | if(dgl_wait) { | ||
1409 | select_next_lock_if_primary(l, dgl_wait); | ||
1410 | //wake_up_task = atomic_dec_and_test(&dgl_wait->nr_remaining); | ||
1411 | --(dgl_wait->nr_remaining); | ||
1412 | wake_up_task = (dgl_wait->nr_remaining == 0); | ||
1413 | } | ||
1414 | #endif | ||
1186 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); | 1415 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); |
1187 | } else { | 1416 | } |
1417 | else { | ||
1188 | /* Well, if 'next' is not the highest-priority waiter, | 1418 | /* Well, if 'next' is not the highest-priority waiter, |
1189 | * then it (probably) ought to inherit the highest-priority | 1419 | * then it (probably) ought to inherit the highest-priority |
1190 | * waiter's priority. */ | 1420 | * waiter's priority. */ |
@@ -1198,6 +1428,16 @@ int gsnedf_rsm_mutex_unlock(struct litmus_lock* l) | |||
1198 | binheap_add(&l->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, | 1428 | binheap_add(&l->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, |
1199 | struct nested_info, hp_binheap_node); | 1429 | struct nested_info, hp_binheap_node); |
1200 | 1430 | ||
1431 | |||
1432 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1433 | if(dgl_wait) { | ||
1434 | select_next_lock_if_primary(l, dgl_wait); | ||
1435 | // wake_up_task = atomic_dec_and_test(&dgl_wait->nr_remaining); | ||
1436 | --(dgl_wait->nr_remaining); | ||
1437 | wake_up_task = (dgl_wait->nr_remaining == 0); | ||
1438 | } | ||
1439 | #endif | ||
1440 | |||
1201 | //TRACE_TASK(next, "Heap After:\n"); | 1441 | //TRACE_TASK(next, "Heap After:\n"); |
1202 | //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); | 1442 | //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); |
1203 | 1443 | ||
@@ -1209,26 +1449,53 @@ int gsnedf_rsm_mutex_unlock(struct litmus_lock* l) | |||
1209 | * since the effective priority of hp_waiter can change (and the | 1449 | * since the effective priority of hp_waiter can change (and the |
1210 | * update has not made it to this lock).) | 1450 | * update has not made it to this lock).) |
1211 | */ | 1451 | */ |
1452 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1453 | if((l->nest.hp_waiter_eff_prio != NULL) && (top_priority(&tsk_rt(next)->hp_blocked_tasks) == l->nest.hp_waiter_eff_prio)) | ||
1454 | { | ||
1455 | if(dgl_wait && tsk_rt(next)->blocked_lock) { | ||
1456 | BUG_ON(wake_up_task); | ||
1457 | if(__edf_higher_prio(l->nest.hp_waiter_eff_prio, BASE, next, EFFECTIVE)) { | ||
1458 | nested_increase_priority_inheritance(next, l->nest.hp_waiter_eff_prio, &mutex->lock, flags); // unlocks lock && hp_blocked_tasks_lock. | ||
1459 | goto out; // all spinlocks are released. bail out now. | ||
1460 | } | ||
1461 | } | ||
1462 | else { | ||
1463 | increase_priority_inheritance(next, l->nest.hp_waiter_eff_prio); | ||
1464 | } | ||
1465 | } | ||
1466 | |||
1467 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
1468 | #else | ||
1212 | if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) == l->nest.hp_waiter_eff_prio)) | 1469 | if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) == l->nest.hp_waiter_eff_prio)) |
1213 | { | 1470 | { |
1214 | increase_priority_inheritance(next, l->nest.hp_waiter_eff_prio); | 1471 | increase_priority_inheritance(next, l->nest.hp_waiter_eff_prio); |
1215 | } | 1472 | } |
1216 | |||
1217 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); | 1473 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); |
1474 | #endif | ||
1475 | } | ||
1476 | |||
1477 | if(wake_up_task) { | ||
1478 | TRACE_TASK(next, "waking up since it is no longer blocked.\n"); | ||
1479 | |||
1480 | tsk_rt(next)->blocked_lock = NULL; | ||
1481 | mb(); | ||
1482 | |||
1483 | wake_up_process(next); | ||
1484 | } | ||
1485 | else { | ||
1486 | TRACE_TASK(next, "is still blocked.\n"); | ||
1218 | } | 1487 | } |
1219 | |||
1220 | /* wake up next */ | ||
1221 | wake_up_process(next); | ||
1222 | } | 1488 | } |
1223 | else { | 1489 | else { |
1224 | /* becomes available */ | 1490 | /* becomes available */ |
1225 | mutex->owner = NULL; | 1491 | mutex->owner = NULL; |
1226 | } | 1492 | } |
1227 | 1493 | ||
1494 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
1495 | |||
1228 | out: | 1496 | out: |
1229 | raw_spin_unlock_irqrestore(&mutex->lock, flags); | 1497 | unlock_global_irqrestore(&dgl_lock, flags); |
1230 | //raw_spin_unlock_irqrestore(&rsm_global_lock, flags); | 1498 | |
1231 | |||
1232 | return err; | 1499 | return err; |
1233 | } | 1500 | } |
1234 | 1501 | ||
@@ -1241,8 +1508,8 @@ void gsnedf_rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l, | |||
1241 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | 1508 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); |
1242 | 1509 | ||
1243 | // relay-style locking | 1510 | // relay-style locking |
1244 | raw_spin_lock(&mutex->lock); | 1511 | lock_fine(&mutex->lock); |
1245 | raw_spin_unlock(to_unlock); | 1512 | unlock_fine(to_unlock); |
1246 | 1513 | ||
1247 | if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked | 1514 | if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked |
1248 | struct task_struct *owner = mutex->owner; | 1515 | struct task_struct *owner = mutex->owner; |
@@ -1261,6 +1528,10 @@ void gsnedf_rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l, | |||
1261 | if(t == mutex->hp_waiter) { | 1528 | if(t == mutex->hp_waiter) { |
1262 | // reflect the decreased priority in the heap node. | 1529 | // reflect the decreased priority in the heap node. |
1263 | l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); | 1530 | l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); |
1531 | |||
1532 | BUG_ON(!binheap_is_in_heap(&l->nest.hp_binheap_node)); | ||
1533 | BUG_ON(!binheap_is_in_this_heap(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks)); | ||
1534 | |||
1264 | binheap_decrease(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); | 1535 | binheap_decrease(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); |
1265 | } | 1536 | } |
1266 | 1537 | ||
@@ -1280,13 +1551,13 @@ void gsnedf_rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l, | |||
1280 | else { | 1551 | else { |
1281 | TRACE_CUR("Lower priority than holder %s/%d. No propagation.\n", owner->comm, owner->pid); | 1552 | TRACE_CUR("Lower priority than holder %s/%d. No propagation.\n", owner->comm, owner->pid); |
1282 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | 1553 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); |
1283 | raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | 1554 | unlock_fine_irqrestore(&mutex->lock, irqflags); |
1284 | } | 1555 | } |
1285 | } | 1556 | } |
1286 | else { | 1557 | else { |
1287 | TRACE_TASK(mutex->owner, "No change in maximum effective priority.\n"); | 1558 | TRACE_TASK(mutex->owner, "No change in maximum effective priority.\n"); |
1288 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | 1559 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); |
1289 | raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | 1560 | unlock_fine_irqrestore(&mutex->lock, irqflags); |
1290 | } | 1561 | } |
1291 | } | 1562 | } |
1292 | else { | 1563 | else { |
@@ -1303,11 +1574,11 @@ void gsnedf_rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l, | |||
1303 | } | 1574 | } |
1304 | else { | 1575 | else { |
1305 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", still_blocked); | 1576 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", still_blocked); |
1306 | raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | 1577 | unlock_fine_irqrestore(&mutex->lock, irqflags); |
1307 | } | 1578 | } |
1308 | } | 1579 | } |
1309 | else { | 1580 | else { |
1310 | raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | 1581 | unlock_fine_irqrestore(&mutex->lock, irqflags); |
1311 | } | 1582 | } |
1312 | } | 1583 | } |
1313 | } | 1584 | } |
@@ -1321,8 +1592,8 @@ void gsnedf_rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | |||
1321 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | 1592 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); |
1322 | 1593 | ||
1323 | // relay-style locking | 1594 | // relay-style locking |
1324 | raw_spin_lock(&mutex->lock); | 1595 | lock_fine(&mutex->lock); |
1325 | raw_spin_unlock(to_unlock); | 1596 | unlock_fine(to_unlock); |
1326 | 1597 | ||
1327 | if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked | 1598 | if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked |
1328 | if(t == mutex->hp_waiter) { | 1599 | if(t == mutex->hp_waiter) { |
@@ -1377,12 +1648,12 @@ void gsnedf_rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | |||
1377 | } | 1648 | } |
1378 | else { | 1649 | else { |
1379 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | 1650 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); |
1380 | raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | 1651 | unlock_fine_irqrestore(&mutex->lock, irqflags); |
1381 | } | 1652 | } |
1382 | } | 1653 | } |
1383 | else { | 1654 | else { |
1384 | TRACE_TASK(t, "is not hp_waiter. No propagation.\n"); | 1655 | TRACE_TASK(t, "is not hp_waiter. No propagation.\n"); |
1385 | raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | 1656 | unlock_fine_irqrestore(&mutex->lock, irqflags); |
1386 | } | 1657 | } |
1387 | } | 1658 | } |
1388 | else { | 1659 | else { |
@@ -1399,11 +1670,11 @@ void gsnedf_rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | |||
1399 | } | 1670 | } |
1400 | else { | 1671 | else { |
1401 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", still_blocked); | 1672 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", still_blocked); |
1402 | raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | 1673 | unlock_fine_irqrestore(&mutex->lock, irqflags); |
1403 | } | 1674 | } |
1404 | } | 1675 | } |
1405 | else { | 1676 | else { |
1406 | raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | 1677 | unlock_fine_irqrestore(&mutex->lock, irqflags); |
1407 | } | 1678 | } |
1408 | } | 1679 | } |
1409 | } | 1680 | } |
@@ -1418,14 +1689,15 @@ int gsnedf_rsm_mutex_close(struct litmus_lock* l) | |||
1418 | 1689 | ||
1419 | int owner; | 1690 | int owner; |
1420 | 1691 | ||
1421 | raw_spin_lock_irqsave(&mutex->lock, flags); | 1692 | |
1422 | //raw_spin_lock_irqsave(&rsm_global_lock, flags); | 1693 | lock_global_irqsave(&dgl_lock, flags); |
1694 | lock_fine_irqsave(&mutex->lock, flags); | ||
1423 | 1695 | ||
1424 | owner = (mutex->owner == t); | 1696 | owner = (mutex->owner == t); |
1425 | 1697 | ||
1426 | raw_spin_unlock_irqrestore(&mutex->lock, flags); | 1698 | unlock_fine_irqrestore(&mutex->lock, flags); |
1427 | //raw_spin_unlock_irqrestore(&rsm_global_lock, flags); | 1699 | unlock_global_irqrestore(&dgl_lock, flags); |
1428 | 1700 | ||
1429 | if (owner) | 1701 | if (owner) |
1430 | gsnedf_rsm_mutex_unlock(l); | 1702 | gsnedf_rsm_mutex_unlock(l); |
1431 | 1703 | ||
@@ -1443,7 +1715,15 @@ static struct litmus_lock_ops gsnedf_rsm_mutex_lock_ops = { | |||
1443 | .unlock = gsnedf_rsm_mutex_unlock, | 1715 | .unlock = gsnedf_rsm_mutex_unlock, |
1444 | .deallocate = gsnedf_rsm_mutex_free, | 1716 | .deallocate = gsnedf_rsm_mutex_free, |
1445 | .propagate_increase_inheritance = gsnedf_rsm_mutex_propagate_increase_inheritance, | 1717 | .propagate_increase_inheritance = gsnedf_rsm_mutex_propagate_increase_inheritance, |
1446 | .propagate_decrease_inheritance = gsnedf_rsm_mutex_propagate_decrease_inheritance | 1718 | .propagate_decrease_inheritance = gsnedf_rsm_mutex_propagate_decrease_inheritance, |
1719 | |||
1720 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1721 | // .reserve = gsnedf_rsm_mutex_reserve, | ||
1722 | // .unreserve = gsnedf_rsm_mutex_unreserve, | ||
1723 | .dgl_lock = gsn_edf_rsm_mutex_dgl_lock, | ||
1724 | .is_owner = gsn_edf_rsm_mutex_is_owner, | ||
1725 | .enable_priority = gsn_edf_rsm_enable_priority, | ||
1726 | #endif | ||
1447 | }; | 1727 | }; |
1448 | 1728 | ||
1449 | static struct litmus_lock* gsnedf_new_rsm_mutex(void) | 1729 | static struct litmus_lock* gsnedf_new_rsm_mutex(void) |
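Editor's note: continuing the hypothetical simple_mutex sketch, a lock type advertises DGL capability by filling in the three new ops alongside its existing ones, mirroring the gsnedf_rsm_mutex_lock_ops table above. Only fields that appear in this diff are used; simple_mutex_unlock, simple_mutex_free, simple_mutex_is_owner, and simple_mutex_enable_priority are assumed helpers whose bodies are omitted.

    static struct litmus_lock_ops simple_mutex_lock_ops = {
        .unlock     = simple_mutex_unlock,
        .deallocate = simple_mutex_free,
    #ifdef CONFIG_LITMUS_DGL_SUPPORT
        .dgl_lock        = simple_mutex_dgl_lock,
        .is_owner        = simple_mutex_is_owner,
        .enable_priority = simple_mutex_enable_priority,
    #endif
    };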
@@ -1928,7 +2208,7 @@ static void ikglp_refresh_owners_prio_increase(struct task_struct *t, struct fif | |||
1928 | TRACE_TASK(t, "No change in effective priority (is %s/%d). Propagation halted.\n", | 2208 | TRACE_TASK(t, "No change in effective priority (is %s/%d). Propagation halted.\n", |
1929 | new_max_eff_prio->comm, new_max_eff_prio->pid); | 2209 | new_max_eff_prio->comm, new_max_eff_prio->pid); |
1930 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | 2210 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); |
1931 | raw_spin_unlock_irqrestore(&sem->lock, flags); | 2211 | unlock_fine_irqrestore(&sem->lock, flags); |
1932 | } | 2212 | } |
1933 | } | 2213 | } |
1934 | else { | 2214 | else { |
@@ -1936,12 +2216,12 @@ static void ikglp_refresh_owners_prio_increase(struct task_struct *t, struct fif | |||
1936 | fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); | 2216 | fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); |
1937 | 2217 | ||
1938 | TRACE_TASK(t, "no owner??\n"); | 2218 | TRACE_TASK(t, "no owner??\n"); |
1939 | raw_spin_unlock_irqrestore(&sem->lock, flags); | 2219 | unlock_fine_irqrestore(&sem->lock, flags); |
1940 | } | 2220 | } |
1941 | } | 2221 | } |
1942 | else { | 2222 | else { |
1943 | TRACE_TASK(t, "hp_waiter is unaffected.\n"); | 2223 | TRACE_TASK(t, "hp_waiter is unaffected.\n"); |
1944 | raw_spin_unlock_irqrestore(&sem->lock, flags); | 2224 | unlock_fine_irqrestore(&sem->lock, flags); |
1945 | } | 2225 | } |
1946 | } | 2226 | } |
1947 | 2227 | ||
@@ -1955,7 +2235,7 @@ static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq, struct ikg | |||
1955 | 2235 | ||
1956 | if(!owner) { | 2236 | if(!owner) { |
1957 | TRACE_CUR("No owner. Returning.\n"); | 2237 | TRACE_CUR("No owner. Returning.\n"); |
1958 | raw_spin_unlock_irqrestore(&sem->lock, flags); | 2238 | unlock_fine_irqrestore(&sem->lock, flags); |
1959 | return; | 2239 | return; |
1960 | } | 2240 | } |
1961 | 2241 | ||
@@ -2004,7 +2284,7 @@ static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq, struct ikg | |||
2004 | else { | 2284 | else { |
2005 | TRACE_TASK(owner, "No need to propagate priority decrease forward.\n"); | 2285 | TRACE_TASK(owner, "No need to propagate priority decrease forward.\n"); |
2006 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | 2286 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); |
2007 | raw_spin_unlock_irqrestore(&sem->lock, flags); | 2287 | unlock_fine_irqrestore(&sem->lock, flags); |
2008 | } | 2288 | } |
2009 | } | 2289 | } |
2010 | 2290 | ||
@@ -2049,7 +2329,7 @@ static void ikglp_remove_donation_from_owner(struct binheap_node *n, struct fifo | |||
2049 | else { | 2329 | else { |
2050 | TRACE_TASK(owner, "No need to propagate priority decrease forward.\n"); | 2330 | TRACE_TASK(owner, "No need to propagate priority decrease forward.\n"); |
2051 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | 2331 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); |
2052 | raw_spin_unlock_irqrestore(&sem->lock, flags); | 2332 | unlock_fine_irqrestore(&sem->lock, flags); |
2053 | } | 2333 | } |
2054 | } | 2334 | } |
2055 | 2335 | ||
@@ -2103,7 +2383,7 @@ static void ikglp_get_immediate(struct task_struct* t, struct fifo_queue *fq, st | |||
2103 | 2383 | ||
2104 | sem->shortest_fifo_queue = ikglp_find_shortest(sem, sem->shortest_fifo_queue); | 2384 | sem->shortest_fifo_queue = ikglp_find_shortest(sem, sem->shortest_fifo_queue); |
2105 | 2385 | ||
2106 | raw_spin_unlock_irqrestore(&sem->lock, flags); | 2386 | unlock_fine_irqrestore(&sem->lock, flags); |
2107 | } | 2387 | } |
2108 | 2388 | ||
2109 | 2389 | ||
@@ -2136,9 +2416,9 @@ static void __ikglp_enqueue_on_fq( | |||
2136 | } | 2416 | } |
2137 | // update donor eligibility list. | 2417 | // update donor eligibility list. |
2138 | if(likely(donee_heap_node)) { | 2418 | if(likely(donee_heap_node)) { |
2139 | if(binheap_is_in_heap(&donee_heap_node->node)) { | 2419 | // if(binheap_is_in_heap(&donee_heap_node->node)) { |
2140 | WARN_ON(1); | 2420 | // WARN_ON(1); |
2141 | } | 2421 | // } |
2142 | ikglp_add_donees(sem, fq, t, donee_heap_node); | 2422 | ikglp_add_donees(sem, fq, t, donee_heap_node); |
2143 | } | 2423 | } |
2144 | 2424 | ||
@@ -2353,7 +2633,7 @@ static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem, ikglp_wait_state | |||
2353 | TRACE_TASK(t, "No change in effective priority (it is %s/%d). BUG?\n", | 2633 | TRACE_TASK(t, "No change in effective priority (it is %s/%d). BUG?\n", |
2354 | new_max_eff_prio->comm, new_max_eff_prio->pid); | 2634 | new_max_eff_prio->comm, new_max_eff_prio->pid); |
2355 | raw_spin_unlock(&tsk_rt(donee)->hp_blocked_tasks_lock); | 2635 | raw_spin_unlock(&tsk_rt(donee)->hp_blocked_tasks_lock); |
2356 | raw_spin_unlock_irqrestore(&sem->lock, flags); | 2636 | unlock_fine_irqrestore(&sem->lock, flags); |
2357 | } | 2637 | } |
2358 | 2638 | ||
2359 | 2639 | ||
@@ -2366,7 +2646,7 @@ static int gsnedf_ikglp_lock(struct litmus_lock* l) | |||
2366 | { | 2646 | { |
2367 | struct task_struct* t = current; | 2647 | struct task_struct* t = current; |
2368 | struct ikglp_semaphore *sem = ikglp_from_lock(l); | 2648 | struct ikglp_semaphore *sem = ikglp_from_lock(l); |
2369 | unsigned long flags, real_flags; | 2649 | unsigned long flags = 0, real_flags; |
2370 | struct fifo_queue *fq = NULL; | 2650 | struct fifo_queue *fq = NULL; |
2371 | int replica = -EINVAL; | 2651 | int replica = -EINVAL; |
2372 | 2652 | ||
@@ -2376,13 +2656,17 @@ static int gsnedf_ikglp_lock(struct litmus_lock* l) | |||
2376 | return -EPERM; | 2656 | return -EPERM; |
2377 | 2657 | ||
2378 | raw_spin_lock_irqsave(&sem->real_lock, real_flags); | 2658 | raw_spin_lock_irqsave(&sem->real_lock, real_flags); |
2379 | raw_spin_lock_irqsave(&sem->lock, flags); | 2659 | |
2660 | lock_global_irqsave(&dgl_lock, flags); | ||
2661 | lock_fine_irqsave(&sem->lock, flags); | ||
2380 | 2662 | ||
2381 | if(sem->shortest_fifo_queue->count == 0) { | 2663 | if(sem->shortest_fifo_queue->count == 0) { |
2382 | // take available resource | 2664 | // take available resource |
2383 | replica = ikglp_get_idx(sem, sem->shortest_fifo_queue); | 2665 | replica = ikglp_get_idx(sem, sem->shortest_fifo_queue); |
2384 | 2666 | ||
2385 | ikglp_get_immediate(t, sem->shortest_fifo_queue, sem, flags); // unlocks sem->lock | 2667 | ikglp_get_immediate(t, sem->shortest_fifo_queue, sem, flags); // unlocks sem->lock |
2668 | |||
2669 | unlock_global_irqrestore(&dgl_lock, flags); | ||
2386 | raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); | 2670 | raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); |
2387 | } | 2671 | } |
2388 | else | 2672 | else |
@@ -2410,7 +2694,7 @@ static int gsnedf_ikglp_lock(struct litmus_lock* l) | |||
2410 | if(__edf_higher_prio(ikglp_mth_highest(sem), BASE, t, BASE)) { | 2694 | if(__edf_higher_prio(ikglp_mth_highest(sem), BASE, t, BASE)) { |
2411 | // enqueue on PQ | 2695 | // enqueue on PQ |
2412 | ikglp_enqueue_on_pq(sem, &wait); | 2696 | ikglp_enqueue_on_pq(sem, &wait); |
2413 | raw_spin_unlock_irqrestore(&sem->lock, flags); | 2697 | unlock_fine_irqrestore(&sem->lock, flags); |
2414 | } | 2698 | } |
2415 | else { | 2699 | else { |
2416 | // enqueue as donor | 2700 | // enqueue as donor |
@@ -2418,6 +2702,7 @@ static int gsnedf_ikglp_lock(struct litmus_lock* l) | |||
2418 | } | 2702 | } |
2419 | } | 2703 | } |
2420 | 2704 | ||
2705 | unlock_global_irqrestore(&dgl_lock, flags); | ||
2421 | raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); | 2706 | raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); |
2422 | 2707 | ||
2423 | TS_LOCK_SUSPEND; | 2708 | TS_LOCK_SUSPEND; |
@@ -2631,12 +2916,14 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l) | |||
2631 | struct fifo_queue *to_steal = NULL; | 2916 | struct fifo_queue *to_steal = NULL; |
2632 | struct fifo_queue *fq; | 2917 | struct fifo_queue *fq; |
2633 | 2918 | ||
2634 | unsigned long flags, real_flags; | 2919 | unsigned long flags = 0, real_flags; |
2635 | 2920 | ||
2636 | int err = 0; | 2921 | int err = 0; |
2637 | 2922 | ||
2638 | raw_spin_lock_irqsave(&sem->real_lock, real_flags); | 2923 | raw_spin_lock_irqsave(&sem->real_lock, real_flags); |
2639 | raw_spin_lock_irqsave(&sem->lock, flags); | 2924 | |
2925 | lock_global_irqsave(&dgl_lock, flags); // TODO: Push this deeper | ||
2926 | lock_fine_irqsave(&sem->lock, flags); | ||
2640 | 2927 | ||
2641 | fq = ikglp_get_queue(sem, t); // returns NULL if 't' is not owner. | 2928 | fq = ikglp_get_queue(sem, t); // returns NULL if 't' is not owner. |
2642 | 2929 | ||
@@ -2781,7 +3068,7 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l) | |||
2781 | ikglp_get_idx(sem, other_fq)); | 3068 | ikglp_get_idx(sem, other_fq)); |
2782 | 3069 | ||
2783 | ikglp_remove_donation_from_owner(&other_donor_info->prio_donation.hp_binheap_node, other_fq, sem, flags); | 3070 | ikglp_remove_donation_from_owner(&other_donor_info->prio_donation.hp_binheap_node, other_fq, sem, flags); |
2784 | raw_spin_lock_irqsave(&sem->lock, flags); // there should be no contention!!!! | 3071 | lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!! |
2785 | } | 3072 | } |
2786 | else { | 3073 | else { |
2787 | TRACE_TASK(t, "Donee %s/%d is blocked in fq %d.\n", | 3074 | TRACE_TASK(t, "Donee %s/%d is blocked in fq %d.\n", |
@@ -2801,7 +3088,7 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l) | |||
2801 | (other_fq->hp_waiter) ? other_fq->hp_waiter->pid : -1); | 3088 | (other_fq->hp_waiter) ? other_fq->hp_waiter->pid : -1); |
2802 | 3089 | ||
2803 | ikglp_refresh_owners_prio_decrease(other_fq, sem, flags); // unlocks sem->lock. reacquire it. | 3090 | ikglp_refresh_owners_prio_decrease(other_fq, sem, flags); // unlocks sem->lock. reacquire it. |
2804 | raw_spin_lock_irqsave(&sem->lock, flags); // there should be no contention!!!! | 3091 | lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!! |
2805 | } | 3092 | } |
2806 | } | 3093 | } |
2807 | } | 3094 | } |
@@ -2810,7 +3097,7 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l) | |||
2810 | ikglp_get_idx(sem, to_steal)); | 3097 | ikglp_get_idx(sem, to_steal)); |
2811 | 3098 | ||
2812 | ikglp_refresh_owners_prio_decrease(to_steal, sem, flags); // unlocks sem->lock. reacquire it. | 3099 | ikglp_refresh_owners_prio_decrease(to_steal, sem, flags); // unlocks sem->lock. reacquire it. |
2813 | raw_spin_lock_irqsave(&sem->lock, flags); // there should be no contention!!!! | 3100 | lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!! |
2814 | } | 3101 | } |
2815 | 3102 | ||
2816 | // check for new HP waiter. | 3103 | // check for new HP waiter. |
@@ -2930,7 +3217,8 @@ static int gsnedf_ikglp_unlock(struct litmus_lock* l) | |||
2930 | } | 3217 | } |
2931 | 3218 | ||
2932 | out: | 3219 | out: |
2933 | raw_spin_unlock_irqrestore(&sem->lock, flags); | 3220 | unlock_fine_irqrestore(&sem->lock, flags); |
3221 | unlock_global_irqrestore(&dgl_lock, flags); | ||
2934 | 3222 | ||
2935 | raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); | 3223 | raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); |
2936 | 3224 | ||
@@ -2947,7 +3235,7 @@ static int gsnedf_ikglp_close(struct litmus_lock* l) | |||
2947 | int owner = 0; | 3235 | int owner = 0; |
2948 | int i; | 3236 | int i; |
2949 | 3237 | ||
2950 | raw_spin_lock_irqsave(&sem->lock, flags); | 3238 | raw_spin_lock_irqsave(&sem->real_lock, flags); |
2951 | 3239 | ||
2952 | for(i = 0; i < sem->nr_replicas; ++i) { | 3240 | for(i = 0; i < sem->nr_replicas; ++i) { |
2953 | if(sem->fifo_queues[i].owner == t) { | 3241 | if(sem->fifo_queues[i].owner == t) { |
@@ -2956,7 +3244,7 @@ static int gsnedf_ikglp_close(struct litmus_lock* l) | |||
2956 | } | 3244 | } |
2957 | } | 3245 | } |
2958 | 3246 | ||
2959 | raw_spin_unlock_irqrestore(&sem->lock, flags); | 3247 | raw_spin_unlock_irqrestore(&sem->real_lock, flags); |
2960 | 3248 | ||
2961 | if (owner) | 3249 | if (owner) |
2962 | gsnedf_ikglp_unlock(l); | 3250 | gsnedf_ikglp_unlock(l); |
@@ -3384,6 +3672,9 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { | |||
3384 | #ifdef CONFIG_LITMUS_LOCKING | 3672 | #ifdef CONFIG_LITMUS_LOCKING |
3385 | .allocate_lock = gsnedf_allocate_lock, | 3673 | .allocate_lock = gsnedf_allocate_lock, |
3386 | #endif | 3674 | #endif |
3675 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
3676 | .get_dgl_spinlock = gsn_edf_get_dgl_spinlock, | ||
3677 | #endif | ||
3387 | }; | 3678 | }; |
3388 | 3679 | ||
3389 | 3680 | ||
@@ -3401,6 +3692,11 @@ static int __init init_gsn_edf(void) | |||
3401 | 3692 | ||
3402 | INIT_BINHEAP_NODE(&entry->hn); | 3693 | INIT_BINHEAP_NODE(&entry->hn); |
3403 | } | 3694 | } |
3695 | |||
3696 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
3697 | raw_spin_lock_init(&dgl_lock); | ||
3698 | #endif | ||
3699 | |||
3404 | edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); | 3700 | edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); |
3405 | return register_sched_plugin(&gsn_edf_plugin); | 3701 | return register_sched_plugin(&gsn_edf_plugin); |
3406 | } | 3702 | } |
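Note on the locking conversion in the sched_gsn_edf.c hunks above: every direct raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() on sem->lock is replaced by lock_fine_irqsave()/unlock_fine_irqrestore(), and the IKGLP lock/unlock paths are additionally bracketed by lock_global_irqsave(&dgl_lock, flags) so that dynamic group lock (DGL) requests are serialized. The acquisition order visible in gsnedf_ikglp_lock()/gsnedf_ikglp_unlock() is sem->real_lock, then dgl_lock, then sem->lock, with releases in the reverse order. The patch does not show how these helpers are defined; the following is only a minimal sketch of what they could expand to, assuming they reduce to the plain raw-spinlock calls and the global helpers compile away when CONFIG_LITMUS_DGL_SUPPORT is disabled (which would also explain the new "unsigned long flags = 0" initializers that keep the compiler quiet in that configuration).

/* Illustrative sketch only: the real definitions live in the LITMUS^RT
 * locking headers, not in this diff.  These bodies are an assumption
 * inferred from how the helpers are used in the hunks above.
 */
#ifdef CONFIG_LITMUS_DGL_SUPPORT
/* fine-grained lock protecting a single semaphore's state */
#define lock_fine_irqsave(l, f)         raw_spin_lock_irqsave((l), (f))
#define unlock_fine_irqrestore(l, f)    raw_spin_unlock_irqrestore((l), (f))
/* coarse lock serializing dynamic group lock requests */
#define lock_global_irqsave(l, f)       raw_spin_lock_irqsave((l), (f))
#define unlock_global_irqrestore(l, f)  raw_spin_unlock_irqrestore((l), (f))
#else
#define lock_fine_irqsave(l, f)         raw_spin_lock_irqsave((l), (f))
#define unlock_fine_irqrestore(l, f)    raw_spin_unlock_irqrestore((l), (f))
/* without DGL support there is no global lock to take */
#define lock_global_irqsave(l, f)       do { (void)(l); (void)(f); } while (0)
#define unlock_global_irqrestore(l, f)  do { (void)(l); (void)(f); } while (0)
#endif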
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c index 00a1900d6457..77ae3eeb3966 100644 --- a/litmus/sched_plugin.c +++ b/litmus/sched_plugin.c | |||
@@ -120,6 +120,17 @@ static long litmus_dummy_allocate_lock(struct litmus_lock **lock, int type, | |||
120 | 120 | ||
121 | #endif | 121 | #endif |
122 | 122 | ||
123 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
124 | |||
125 | static raw_spinlock_t* litmus_dummy_get_dgl_spinlock(struct task_struct *t) | ||
126 | { | ||
127 | BUG(); | ||
128 | return NULL; | ||
129 | } | ||
130 | |||
131 | #endif | ||
132 | |||
133 | |||
123 | 134 | ||
124 | /* The default scheduler plugin. It doesn't do anything and lets Linux do its | 135 | /* The default scheduler plugin. It doesn't do anything and lets Linux do its |
125 | * job. | 136 | * job. |
@@ -139,6 +150,9 @@ struct sched_plugin linux_sched_plugin = { | |||
139 | #ifdef CONFIG_LITMUS_LOCKING | 150 | #ifdef CONFIG_LITMUS_LOCKING |
140 | .allocate_lock = litmus_dummy_allocate_lock, | 151 | .allocate_lock = litmus_dummy_allocate_lock, |
141 | #endif | 152 | #endif |
153 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
154 | .get_dgl_spinlock = litmus_dummy_get_dgl_spinlock, | ||
155 | #endif | ||
142 | .admit_task = litmus_dummy_admit_task | 156 | .admit_task = litmus_dummy_admit_task |
143 | }; | 157 | }; |
144 | 158 | ||
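The sched_plugin.c hunks add a get_dgl_spinlock operation to the plugin interface: linux_sched_plugin gets a dummy that BUGs (the stub plugin should never be asked for a DGL lock), GSN-EDF exports its plugin-wide lock via gsn_edf_get_dgl_spinlock, and the final hunk below makes register_sched_plugin() CHECK() the hook so that a DGL-enabled build cannot register a plugin that leaves it unset. The body of the GSN-EDF accessor is not shown in this diff; the sketch below assumes it simply returns the address of the static dgl_lock initialized in init_gsn_edf(), and the dgl_lock_of() caller is a hypothetical helper (not part of the patch) illustrating how the lock would be fetched through the active plugin pointer.

#ifdef CONFIG_LITMUS_DGL_SUPPORT
/* Assumed body for GSN-EDF's accessor; the diff only shows the symbol
 * being wired into gsn_edf_plugin and dgl_lock being initialized. */
static raw_spinlock_t dgl_lock;

static raw_spinlock_t* gsn_edf_get_dgl_spinlock(struct task_struct *t)
{
	return &dgl_lock;	/* one plugin-wide lock, independent of t */
}

/* Hypothetical caller: ask the active plugin (the global 'litmus'
 * pointer) for the DGL lock to take on behalf of task t. */
static inline raw_spinlock_t* dgl_lock_of(struct task_struct *t)
{
	return litmus->get_dgl_spinlock(t);
}
#endif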
@@ -177,6 +191,9 @@ int register_sched_plugin(struct sched_plugin* plugin) | |||
177 | #ifdef CONFIG_LITMUS_LOCKING | 191 | #ifdef CONFIG_LITMUS_LOCKING |
178 | CHECK(allocate_lock); | 192 | CHECK(allocate_lock); |
179 | #endif | 193 | #endif |
194 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
195 | CHECK(get_dgl_spinlock); | ||
196 | #endif | ||
180 | CHECK(admit_task); | 197 | CHECK(admit_task); |
181 | 198 | ||
182 | if (!plugin->release_at) | 199 | if (!plugin->release_at) |