Diffstat (limited to 'litmus/locking.c')
-rw-r--r--	litmus/locking.c	447
1 file changed, 443 insertions(+), 4 deletions(-)
diff --git a/litmus/locking.c b/litmus/locking.c
index 43d9aece2e74..c21ec1ae36d7 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -8,8 +8,17 @@
 #include <litmus/litmus.h>
 #include <litmus/sched_plugin.h>
 #include <litmus/trace.h>
+#include <litmus/litmus.h>
 #include <litmus/wait.h>

+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+#include <linux/uaccess.h>
+#endif
+
+#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
+#include <litmus/gpu_affinity.h>
+#endif
+
 static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg);
 static int open_generic_lock(struct od_table_entry* entry, void* __user arg);
 static int close_generic_lock(struct od_table_entry* entry);
@@ -22,6 +31,9 @@ struct fdso_ops generic_lock_ops = {
 	.destroy = destroy_generic_lock
 };

+static atomic_t lock_id_gen = ATOMIC_INIT(0);
+
+
 static inline bool is_lock(struct od_table_entry* entry)
 {
 	return entry->class == &generic_lock_ops;
@@ -39,8 +51,21 @@ static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user ar
 	int err;

 	err = litmus->allocate_lock(&lock, type, arg);
-	if (err == 0)
+	if (err == 0) {
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+		lock->nest.lock = lock;
+		lock->nest.hp_waiter_eff_prio = NULL;
+
+		INIT_BINHEAP_NODE(&lock->nest.hp_binheap_node);
+		if(!lock->nest.hp_waiter_ptr) {
+			TRACE_CUR("BEWARE: hp_waiter_ptr should probably not be NULL in "
+					  "most uses. (exception: IKGLP donors)\n");
+		}
+#endif
+		lock->type = type;
+		lock->ident = atomic_inc_return(&lock_id_gen);
 		*obj_ref = lock;
+	}
 	return err;
 }

@@ -83,7 +108,8 @@ asmlinkage long sys_litmus_lock(int lock_od)
 	entry = get_entry_for_od(lock_od);
 	if (entry && is_lock(entry)) {
 		l = get_lock(entry);
-		TRACE_CUR("attempts to lock 0x%p\n", l);
+		//TRACE_CUR("attempts to lock 0x%p\n", l);
+		TRACE_CUR("attempts to lock %d\n", l->ident);
 		err = l->ops->lock(l);
 	}

@@ -111,7 +137,8 @@ asmlinkage long sys_litmus_unlock(int lock_od)
 	entry = get_entry_for_od(lock_od);
 	if (entry && is_lock(entry)) {
 		l = get_lock(entry);
-		TRACE_CUR("attempts to unlock 0x%p\n", l);
+		//TRACE_CUR("attempts to unlock 0x%p\n", l);
+		TRACE_CUR("attempts to unlock %d\n", l->ident);
 		err = l->ops->unlock(l);
 	}

@@ -138,6 +165,365 @@ struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq)
 	return(t);
 }

+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+
+void print_hp_waiters(struct binheap_node* n, int depth)
+{
+	struct litmus_lock *l;
+	struct nested_info *nest;
+	char padding[81] = "                                                                                ";
+	struct task_struct *hp = NULL;
+	struct task_struct *hp_eff = NULL;
+	struct task_struct *node_prio = NULL;
+
+
+	if(n == NULL) {
+		TRACE("+-> %p\n", NULL);
+		return;
+	}
+
+	nest = binheap_entry(n, struct nested_info, hp_binheap_node);
+	l = nest->lock;
+
+	if(depth*2 <= 80)
+		padding[depth*2] = '\0';
+
+	if(nest->hp_waiter_ptr && *(nest->hp_waiter_ptr)) {
+		hp = *(nest->hp_waiter_ptr);
+
+		if(tsk_rt(hp)->inh_task) {
+			hp_eff = tsk_rt(hp)->inh_task;
+		}
+	}
+
+	node_prio = nest->hp_waiter_eff_prio;
+
+	TRACE("%s+-> %s/%d [waiter = %s/%d] [waiter's inh = %s/%d] (lock = %d)\n",
+		  padding,
+		  (node_prio) ? node_prio->comm : "nil",
+		  (node_prio) ? node_prio->pid : -1,
+		  (hp) ? hp->comm : "nil",
+		  (hp) ? hp->pid : -1,
+		  (hp_eff) ? hp_eff->comm : "nil",
+		  (hp_eff) ? hp_eff->pid : -1,
+		  l->ident);
+
+	if(n->left) print_hp_waiters(n->left, depth+1);
+	if(n->right) print_hp_waiters(n->right, depth+1);
+}
+#endif
+
+
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+
+void select_next_lock(dgl_wait_state_t* dgl_wait /*, struct litmus_lock* prev_lock*/)
+{
+	/*
+	   We pick the next lock in reverse order. This causes inheritance propagation
+	   from locks received earlier to flow in the same direction as regular nested
+	   locking. This might make fine-grained DGL easier in the future.
+	 */
+
+	BUG_ON(tsk_rt(dgl_wait->task)->blocked_lock);
+
+	//WARN_ON(dgl_wait->locks[dgl_wait->last_primary] != prev_lock);
+
+	// note reverse order
+	for(dgl_wait->last_primary = dgl_wait->last_primary - 1;
+		dgl_wait->last_primary >= 0;
+		--(dgl_wait->last_primary)){
+		if(!dgl_wait->locks[dgl_wait->last_primary]->ops->is_owner(
+				dgl_wait->locks[dgl_wait->last_primary], dgl_wait->task)) {
+
+			tsk_rt(dgl_wait->task)->blocked_lock =
+				dgl_wait->locks[dgl_wait->last_primary];
+			mb();
+
+			TRACE_CUR("New blocked lock is %d\n",
+					  dgl_wait->locks[dgl_wait->last_primary]->ident);
+
+			break;
+		}
+	}
+}
+
+int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key)
+{
+	// should never be called.
+	BUG();
+	return 1;
+}
+
+void __waitqueue_dgl_remove_first(wait_queue_head_t *wq,
+				  dgl_wait_state_t** dgl_wait,
+				  struct task_struct **task)
+{
+	wait_queue_t *q;
+
+	*dgl_wait = NULL;
+	*task = NULL;
+
+	if (waitqueue_active(wq)) {
+		q = list_entry(wq->task_list.next,
+			       wait_queue_t, task_list);
+
+		if(q->func == dgl_wake_up) {
+			*dgl_wait = (dgl_wait_state_t*) q->private;
+		}
+		else {
+			*task = (struct task_struct*) q->private;
+		}
+
+		__remove_wait_queue(wq, q);
+	}
+}
+
+void init_dgl_waitqueue_entry(wait_queue_t *wq_node, dgl_wait_state_t* dgl_wait)
+{
+	init_waitqueue_entry(wq_node, dgl_wait->task);
+	wq_node->private = dgl_wait;
+	wq_node->func = dgl_wake_up;
+}
+
+
+static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait)
+{
+	int i;
+	unsigned long irqflags; //, dummyflags;
+	raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(dgl_wait->task);
+
+	BUG_ON(dgl_wait->task != current);
+
+	raw_spin_lock_irqsave(dgl_lock, irqflags);
+
+
+	dgl_wait->nr_remaining = dgl_wait->size;
+
+	TRACE_CUR("Locking DGL with size %d\n", dgl_wait->size);
+
+	// try to acquire each lock. enqueue (non-blocking) if it is unavailable.
+	for(i = 0; i < dgl_wait->size; ++i) {
+		struct litmus_lock *l = dgl_wait->locks[i];
+
+		// dgl_lock() must set task state to TASK_UNINTERRUPTIBLE if task blocks.
+
+		if(l->ops->dgl_lock(l, dgl_wait, &dgl_wait->wq_nodes[i])) {
+			--(dgl_wait->nr_remaining);
+			TRACE_CUR("Acquired lock %d immediately.\n", l->ident);
+		}
+	}
+
+	if(dgl_wait->nr_remaining == 0) {
+		// acquired entire group immediately
+		TRACE_CUR("Acquired all locks in DGL immediately!\n");
+	}
+	else {
+
+		TRACE_CUR("As many as %d locks in DGL are pending. Suspending.\n",
+				  dgl_wait->nr_remaining);
+
+#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
+		// KLUDGE: don't count this suspension as time in the gpu
+		// critical section
+		if(tsk_rt(dgl_wait->task)->held_gpus) {
+			tsk_rt(dgl_wait->task)->suspend_gpu_tracker_on_block = 1;
+		}
+#endif
+
+		// note reverse order. see comments in select_next_lock for reason.
+		for(i = dgl_wait->size - 1; i >= 0; --i) {
+			struct litmus_lock *l = dgl_wait->locks[i];
+			if(!l->ops->is_owner(l, dgl_wait->task)) { // double-check to be thread safe
+
+				TRACE_CUR("Activating priority inheritance on lock %d\n",
+						  l->ident);
+
+				TS_DGL_LOCK_SUSPEND;
+
+				l->ops->enable_priority(l, dgl_wait);
+				dgl_wait->last_primary = i;
+
+				TRACE_CUR("Suspending for lock %d\n", l->ident);
+
+				raw_spin_unlock_irqrestore(dgl_lock, irqflags); // free dgl_lock before suspending
+
+				schedule(); // suspend!!!
+
+				TS_DGL_LOCK_RESUME;
+
+				TRACE_CUR("Woken up from DGL suspension.\n");
+
+				goto all_acquired; // we should hold all locks when we wake up.
+			}
+		}
+
+		TRACE_CUR("Didn't have to suspend after all, but calling schedule() anyway.\n");
+		//BUG();
+	}
+
+	raw_spin_unlock_irqrestore(dgl_lock, irqflags);
+
+all_acquired:
+
+	// FOR SANITY CHECK FOR TESTING
+//	for(i = 0; i < dgl_wait->size; ++i) {
+//		struct litmus_lock *l = dgl_wait->locks[i];
+//		BUG_ON(!l->ops->is_owner(l, dgl_wait->task));
+//	}
+
+	TRACE_CUR("Acquired entire DGL\n");
+
+	return 0;
+}
+
+static int supports_dgl(struct litmus_lock *l)
+{
+	struct litmus_lock_ops* ops = l->ops;
+
+	return (ops->dgl_lock &&
+			ops->is_owner &&
+			ops->enable_priority);
+}
+
+asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
+{
+	struct task_struct *t = current;
+	long err = -EINVAL;
+	int dgl_ods[MAX_DGL_SIZE];
+	int i;
+
+	dgl_wait_state_t dgl_wait_state; // lives on the stack until all resources in DGL are held.
+
+	if(dgl_size > MAX_DGL_SIZE || dgl_size < 1)
+		goto out;
+
+	if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(int))))
+		goto out;
+
+	if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(int))))
+		goto out;
+
+	if (!is_realtime(t)) {
+		err = -EPERM;
+		goto out;
+	}
+
+	for(i = 0; i < dgl_size; ++i) {
+		struct od_table_entry *entry = get_entry_for_od(dgl_ods[i]);
+		if(entry && is_lock(entry)) {
+			dgl_wait_state.locks[i] = get_lock(entry);
+			if(!supports_dgl(dgl_wait_state.locks[i])) {
+				TRACE_CUR("Lock %d does not support all required DGL operations.\n",
+						  dgl_wait_state.locks[i]->ident);
+				goto out;
+			}
+		}
+		else {
+			TRACE_CUR("Invalid lock identifier\n");
+			goto out;
+		}
+	}
+
+	dgl_wait_state.task = t;
+	dgl_wait_state.size = dgl_size;
+
+	TS_DGL_LOCK_START;
+	err = do_litmus_dgl_lock(&dgl_wait_state);
+
+	/* Note: task may have been suspended or preempted in between! Take
+	 * this into account when computing overheads. */
+	TS_DGL_LOCK_END;
+
+out:
+	return err;
+}
+
+static long do_litmus_dgl_unlock(struct litmus_lock* dgl_locks[], int dgl_size)
+{
+	int i;
+	long err = 0;
+
+	TRACE_CUR("Unlocking a DGL of size %d\n", dgl_size);
+
+	for(i = dgl_size - 1; i >= 0; --i) { // unlock in reverse order
+
+		struct litmus_lock *l = dgl_locks[i];
+		long tmp_err;
+
+		TRACE_CUR("Unlocking lock %d of DGL.\n", l->ident);
+
+		tmp_err = l->ops->unlock(l);
+
+		if(tmp_err) {
+			TRACE_CUR("There was an error unlocking %d: %ld.\n", l->ident, tmp_err);
+			err = tmp_err;
+		}
+	}
+
+	TRACE_CUR("DGL unlocked. err = %ld\n", err);
+
+	return err;
+}
+
+asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size)
+{
+	long err = -EINVAL;
+	int dgl_ods[MAX_DGL_SIZE];
+	struct od_table_entry* entry;
+	int i;
+
+	struct litmus_lock* dgl_locks[MAX_DGL_SIZE];
+
+	if(dgl_size > MAX_DGL_SIZE || dgl_size < 1)
+		goto out;
+
+	if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(int))))
+		goto out;
+
+	if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(int))))
+		goto out;
+
+	for(i = 0; i < dgl_size; ++i) {
+		entry = get_entry_for_od(dgl_ods[i]);
+		if(entry && is_lock(entry)) {
+			dgl_locks[i] = get_lock(entry);
+			if(!supports_dgl(dgl_locks[i])) {
+				TRACE_CUR("Lock %d does not support all required DGL operations.\n",
+						  dgl_locks[i]->ident);
+				goto out;
+			}
+		}
+		else {
+			TRACE_CUR("Invalid lock identifier\n");
+			goto out;
+		}
+	}
+
+	TS_DGL_UNLOCK_START;
+	err = do_litmus_dgl_unlock(dgl_locks, dgl_size);
+
+	/* Note: task may have been suspended or preempted in between! Take
+	 * this into account when computing overheads. */
+	TS_DGL_UNLOCK_END;
+
+out:
+	return err;
+}
+
+#else // CONFIG_LITMUS_DGL_SUPPORT
+
+asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
+{
+	return -ENOSYS;
+}
+
+asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size)
+{
+	return -ENOSYS;
+}
+
+#endif
+
 unsigned int __add_wait_queue_prio_exclusive(
 	wait_queue_head_t* head,
 	prio_wait_queue_t *new)
@@ -171,7 +557,60 @@ out:
 }


-#else
+void suspend_for_lock(void)
+{
+#if defined(CONFIG_REALTIME_AUX_TASKS) || defined(CONFIG_LITMUS_NVIDIA)
+	struct task_struct *t = current;
+#endif
+
+#ifdef CONFIG_REALTIME_AUX_TASKS
+	unsigned int aux_restore = 0;
+	unsigned int aux_hide;
+#endif
+
+#ifdef CONFIG_LITMUS_NVIDIA
+	unsigned int gpu_restore = 0;
+	unsigned int gpu_hide;
+#endif
+
+//#ifdef CONFIG_REALTIME_AUX_TASKS
+//	if (tsk_rt(t)->has_aux_tasks) {
+//		/* hide from aux tasks so they can't inherit our priority when we block
+//		 * for a litmus lock. inheritance is already going to a litmus lock
+//		 * holder. */
+//		aux_hide = tsk_rt(t)->hide_from_aux_tasks;
+//		aux_restore = 1;
+//		tsk_rt(t)->hide_from_aux_tasks = 1;
+//	}
+//#endif
+
+#ifdef CONFIG_LITMUS_NVIDIA
+	if (tsk_rt(t)->held_gpus) {
+		gpu_hide = tsk_rt(t)->hide_from_gpu;
+		gpu_restore = 1;
+		tsk_rt(t)->hide_from_gpu = 1;
+	}
+#endif
+
+	schedule();
+
+#ifdef CONFIG_LITMUS_NVIDIA
+	if (gpu_restore) {
+		/* restore our state */
+		tsk_rt(t)->hide_from_gpu = gpu_hide;
+	}
+#endif
+
+#ifdef CONFIG_REALTIME_AUX_TASKS
+	if (aux_restore) {
+		/* restore our state */
+		tsk_rt(t)->hide_from_aux_tasks = aux_hide;
+	}
+#endif
+}
+
+
+#else // CONFIG_LITMUS_LOCKING
 
 struct fdso_ops generic_lock_ops = {};
 
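
For context only, a minimal userspace sketch (not part of this patch) of how the two new DGL syscalls added above might be driven. The od values are assumed to come from the usual FDSO open path (e.g. liblitmus-style lock open helpers), and __NR_litmus_dgl_lock / __NR_litmus_dgl_unlock are assumed syscall numbers for the target LITMUS^RT build; treat this as an illustrative sketch, not a definitive interface.

```c
#include <unistd.h>
#include <sys/syscall.h>

/* Hypothetical wrappers around the new syscalls: acquire and release a
 * two-lock dynamic group lock (DGL).  The kernel suspends the caller until
 * every lock in the group is held, and releases them in reverse order. */
static long dgl_lock_pair(int od_a, int od_b)
{
	int ods[2] = { od_a, od_b };   /* object descriptors for the two locks */
	return syscall(__NR_litmus_dgl_lock, ods, 2);
}

static long dgl_unlock_pair(int od_a, int od_b)
{
	int ods[2] = { od_a, od_b };
	return syscall(__NR_litmus_dgl_unlock, ods, 2);
}
```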