author    | Glenn Elliott <gelliott@cs.unc.edu> | 2013-11-05 12:55:00 -0500
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2014-03-03 10:14:31 -0500
commit    | 1b451ec595b80e5032ae99958393f2ffec3c3fc8 (patch)
tree      | bf92690ac6b432af99f0f3304993a2ca1f4d631c
parent    | a7b7d05f315be94466e23494a0e135da68be5e50 (diff)
Clean up locking.c
Clean up formatting in locking.c
-rw-r--r-- | litmus/locking.c | 121
1 file changed, 58 insertions, 63 deletions
diff --git a/litmus/locking.c b/litmus/locking.c
index 4913309aa74a..a2f38900e02f 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -24,8 +24,10 @@
24 | #include <litmus/jobs.h> | 24 | #include <litmus/jobs.h> |
25 | #endif | 25 | #endif |
26 | 26 | ||
27 | static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg); | 27 | static int create_generic_lock(void** obj_ref, obj_type_t type, |
28 | static int open_generic_lock(struct od_table_entry* entry, void* __user arg); | 28 | void* __user arg); |
29 | static int open_generic_lock(struct od_table_entry* entry, | ||
30 | void* __user arg); | ||
29 | static int close_generic_lock(struct od_table_entry* entry); | 31 | static int close_generic_lock(struct od_table_entry* entry); |
30 | static void destroy_generic_lock(obj_type_t type, void* sem); | 32 | static void destroy_generic_lock(obj_type_t type, void* sem); |
31 | 33 | ||
@@ -50,7 +52,8 @@ static inline struct litmus_lock* get_lock(struct od_table_entry* entry)
50 | return (struct litmus_lock*) entry->obj->obj; | 52 | return (struct litmus_lock*) entry->obj->obj; |
51 | } | 53 | } |
52 | 54 | ||
53 | static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg) | 55 | static int create_generic_lock(void** obj_ref, obj_type_t type, |
56 | void* __user arg) | ||
54 | { | 57 | { |
55 | struct litmus_lock* lock; | 58 | struct litmus_lock* lock; |
56 | int err; | 59 | int err; |
@@ -71,7 +74,8 @@ static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg
71 | lock->ident = atomic_inc_return(&lock_id_gen); | 74 | lock->ident = atomic_inc_return(&lock_id_gen); |
72 | *obj_ref = lock; | 75 | *obj_ref = lock; |
73 | 76 | ||
74 | TRACE_CUR("Lock %d (%p) created. Type = %d\n.", lock->ident, lock, type); | 77 | TRACE_CUR("Lock %d (%p) created. Type = %d\n.", |
78 | lock->ident, lock, type); | ||
75 | 79 | ||
76 | if (lock->proc && lock->proc->add) | 80 | if (lock->proc && lock->proc->add) |
77 | lock->proc->add(lock); | 81 | lock->proc->add(lock); |
@@ -131,14 +135,14 @@ asmlinkage long sys_litmus_lock(int lock_od)
131 | if (tsk_rt(current)->outermost_lock == NULL) { | 135 | if (tsk_rt(current)->outermost_lock == NULL) { |
132 | TRACE_CUR("Lock %d is outermost lock.\n", l->ident); | 136 | TRACE_CUR("Lock %d is outermost lock.\n", l->ident); |
133 | tsk_rt(current)->outermost_lock = l; | 137 | tsk_rt(current)->outermost_lock = l; |
134 | } | 138 | } |
135 | } | 139 | } |
136 | flush_pending_wakes(); | 140 | flush_pending_wakes(); |
137 | local_irq_restore(flags); | 141 | local_irq_restore(flags); |
138 | } | 142 | } |
139 | 143 | ||
140 | /* Note: task my have been suspended or preempted in between! Take | 144 | /* Note: task my have been suspended or preempted in between! Take this |
141 | * this into account when computing overheads. */ | 145 | * into account when computing overheads. */ |
142 | TS_LOCK_END; | 146 | TS_LOCK_END; |
143 | 147 | ||
144 | TS_SYSCALL_OUT_START; | 148 | TS_SYSCALL_OUT_START; |
@@ -277,7 +281,7 @@ void print_hp_waiters(struct binheap_node* n, int depth)
277 | 281 | ||
278 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | 282 | #ifdef CONFIG_LITMUS_DGL_SUPPORT |
279 | 283 | ||
280 | struct litmus_lock* select_next_lock(dgl_wait_state_t* dgl_wait /*, struct litmus_lock* prev_lock*/) | 284 | struct litmus_lock* select_next_lock(dgl_wait_state_t* dgl_wait) |
281 | { | 285 | { |
282 | int num_locks = dgl_wait->size; | 286 | int num_locks = dgl_wait->size; |
283 | int last = dgl_wait->last_primary; | 287 | int last = dgl_wait->last_primary; |
@@ -292,8 +296,8 @@ struct litmus_lock* select_next_lock(dgl_wait_state_t* dgl_wait /*, struct litmu
292 | 296 | ||
293 | BUG_ON(tsk_rt(dgl_wait->task)->blocked_lock); | 297 | BUG_ON(tsk_rt(dgl_wait->task)->blocked_lock); |
294 | 298 | ||
295 | // note reverse order | 299 | /* Try to enable priority on a lock that has an owner. |
296 | // Try to enable priority on a lock that has an owner. | 300 | Note reverse loop iteration order */ |
297 | idx = start = (last != 0) ? last - 1 : num_locks - 1; | 301 | idx = start = (last != 0) ? last - 1 : num_locks - 1; |
298 | do { | 302 | do { |
299 | struct litmus_lock *l = dgl_wait->locks[idx]; | 303 | struct litmus_lock *l = dgl_wait->locks[idx]; |
@@ -309,10 +313,9 @@ struct litmus_lock* select_next_lock(dgl_wait_state_t* dgl_wait /*, struct litmu
309 | idx = (idx != 0) ? idx - 1 : num_locks - 1; | 313 | idx = (idx != 0) ? idx - 1 : num_locks - 1; |
310 | } while(idx != start); | 314 | } while(idx != start); |
311 | 315 | ||
312 | // There was no one to push on. This can happen if the blocked task is | 316 | /* There was no one to push on. This can happen if the blocked task is |
313 | // behind a task that is idling a prioq-mutex. | 317 | behind a task that is idling a prioq-mutex. |
314 | 318 | Note reverse order. */ | |
315 | // note reverse order | ||
316 | idx = (last != 0) ? last - 1 : num_locks - 1; | 319 | idx = (last != 0) ? last - 1 : num_locks - 1; |
317 | do { | 320 | do { |
318 | struct litmus_lock *l = dgl_wait->locks[idx]; | 321 | struct litmus_lock *l = dgl_wait->locks[idx]; |
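The two loops in `select_next_lock()` above share a wrap-around scan: start one slot before `last_primary`, walk the lock array backwards, wrap from slot 0 to the last slot, and stop when the scan returns to its starting index. Below is a minimal standalone sketch of that pattern, assuming a plain array and a non-NULL test; the name `pick_next_candidate` and everything else in it are illustrative stand-ins, not the kernel code.

```c
/* Illustrative only: visit 'num' slots in reverse circular order,
 * starting one position before 'last', and return the first index
 * whose slot passes a simple test (here: non-NULL). */
static int pick_next_candidate(void *candidate[], int num, int last)
{
	int start = (last != 0) ? last - 1 : num - 1;   /* wrap below slot 0 */
	int idx = start;

	do {
		if (candidate[idx])
			return idx;                     /* found a usable slot */
		idx = (idx != 0) ? idx - 1 : num - 1;   /* step back, wrapping */
	} while (idx != start);

	return -1;  /* made a full circle without a match */
}
```

With `num = 4` and `last = 2`, for example, the scan visits indices 1, 0, 3, 2, touching every slot exactly once.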
@@ -333,7 +336,6 @@ struct litmus_lock* select_next_lock(dgl_wait_state_t* dgl_wait /*, struct litmu
333 | 336 | ||
334 | int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key) | 337 | int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key) |
335 | { | 338 | { |
336 | // should never be called. | ||
337 | BUG(); | 339 | BUG(); |
338 | return 1; | 340 | return 1; |
339 | } | 341 | } |
@@ -376,14 +378,14 @@ void init_dgl_waitqueue_entry(wait_queue_t *wq_node, dgl_wait_state_t *dgl_wait)
376 | } | 378 | } |
377 | 379 | ||
378 | #ifdef CONFIG_SCHED_DEBUG_TRACE | 380 | #ifdef CONFIG_SCHED_DEBUG_TRACE |
379 | static void snprintf_dgl(char* buf, size_t bsz, struct litmus_lock* dgl_locks[], int sz) | 381 | static void snprintf_dgl(char* buf, size_t bsz, struct litmus_lock* dgl_locks[], |
382 | int sz) | ||
380 | { | 383 | { |
381 | int i; | 384 | int i; |
382 | char* ptr; | 385 | char* ptr; |
383 | 386 | ||
384 | ptr = buf; | 387 | ptr = buf; |
385 | for(i = 0; i < sz && ptr < buf+bsz; ++i) | 388 | for(i = 0; i < sz && ptr < buf+bsz; ++i) { |
386 | { | ||
387 | struct litmus_lock *l = dgl_locks[i]; | 389 | struct litmus_lock *l = dgl_locks[i]; |
388 | int remaining = bsz - (ptr-buf); | 390 | int remaining = bsz - (ptr-buf); |
389 | int written; | 391 | int written; |
@@ -402,15 +404,15 @@ static void snprintf_dgl(char* buf, size_t bsz, struct litmus_lock* dgl_locks[],
402 | 404 | ||
403 | /* only valid when locks are prioq locks!!! | 405 | /* only valid when locks are prioq locks!!! |
404 | * THE BIG DGL LOCK MUST BE HELD! */ | 406 | * THE BIG DGL LOCK MUST BE HELD! */ |
405 | int __attempt_atomic_dgl_acquire(struct litmus_lock *cur_lock, dgl_wait_state_t *dgl_wait) | 407 | int __attempt_atomic_dgl_acquire(struct litmus_lock *cur_lock, |
408 | dgl_wait_state_t *dgl_wait) | ||
406 | { | 409 | { |
407 | int i; | 410 | int i; |
408 | 411 | ||
409 | /* check to see if we can take all the locks */ | 412 | /* check to see if we can take all the locks */ |
410 | for(i = 0; i < dgl_wait->size; ++i) { | 413 | for(i = 0; i < dgl_wait->size; ++i) { |
411 | struct litmus_lock *l = dgl_wait->locks[i]; | 414 | struct litmus_lock *l = dgl_wait->locks[i]; |
412 | if(!l->ops->dgl_can_quick_lock(l, dgl_wait->task)) | 415 | if(!l->ops->dgl_can_quick_lock(l, dgl_wait->task)) { |
413 | { | ||
414 | return -1; | 416 | return -1; |
415 | } | 417 | } |
416 | } | 418 | } |
@@ -418,7 +420,8 @@ int __attempt_atomic_dgl_acquire(struct litmus_lock *cur_lock, dgl_wait_state_t
418 | /* take the locks */ | 420 | /* take the locks */ |
419 | for(i = 0; i < dgl_wait->size; ++i) { | 421 | for(i = 0; i < dgl_wait->size; ++i) { |
420 | struct litmus_lock *l = dgl_wait->locks[i]; | 422 | struct litmus_lock *l = dgl_wait->locks[i]; |
421 | l->ops->dgl_quick_lock(l, cur_lock, dgl_wait->task, &dgl_wait->wq_nodes[i]); | 423 | l->ops->dgl_quick_lock(l, cur_lock, dgl_wait->task, |
424 | &dgl_wait->wq_nodes[i]); | ||
422 | 425 | ||
423 | sched_trace_lock(dgl_wait->task, l->ident, 1); | 426 | sched_trace_lock(dgl_wait->task, l->ident, 1); |
424 | 427 | ||
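The `__attempt_atomic_dgl_acquire()` changes above keep the usual two-phase shape of an all-or-nothing group acquire: a first pass asks every lock whether it could be taken without modifying any state, and only if all of them agree does a second pass actually take them. A minimal sketch of that shape follows, assuming hypothetical `can_take`/`take` callbacks in place of the kernel's `dgl_can_quick_lock`/`dgl_quick_lock` ops, which run with the big DGL lock held.

```c
#include <stdbool.h>

/* Hypothetical lock descriptor; the real code dispatches through
 * struct litmus_lock_ops. */
struct simple_lock {
	bool (*can_take)(struct simple_lock *l, void *owner);
	void (*take)(struct simple_lock *l, void *owner);
};

/* All-or-nothing acquire: succeeds only if every lock is free for
 * 'owner'.  The caller is assumed to hold whatever outer lock makes
 * the two passes atomic (the "big DGL lock" in litmus/locking.c). */
static int trylock_all(struct simple_lock *locks[], int n, void *owner)
{
	int i;

	/* Phase 1: check availability without changing any state. */
	for (i = 0; i < n; ++i)
		if (!locks[i]->can_take(locks[i], owner))
			return -1;   /* at least one lock is unavailable */

	/* Phase 2: all checks passed; commit by taking every lock. */
	for (i = 0; i < n; ++i)
		locks[i]->take(locks[i], owner);

	return 0;
}
```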
@@ -453,11 +456,12 @@ static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait)
453 | local_irq_save(kludge_flags); | 456 | local_irq_save(kludge_flags); |
454 | raw_spin_lock_irqsave(dgl_lock, irqflags); | 457 | raw_spin_lock_irqsave(dgl_lock, irqflags); |
455 | 458 | ||
456 | // try to acquire each lock. enqueue (non-blocking) if it is unavailable. | 459 | /* Try to acquire each lock. Enqueue (non-blocking) if it is unavailable. */ |
457 | for(i = 0; i < dgl_wait->size; ++i) { | 460 | for(i = 0; i < dgl_wait->size; ++i) { |
458 | struct litmus_lock *tmp = dgl_wait->locks[i]; | 461 | struct litmus_lock *tmp = dgl_wait->locks[i]; |
459 | 462 | ||
460 | // dgl_lock() must set task state to TASK_UNINTERRUPTIBLE if task blocks. | 463 | /* dgl_lock() must set task state to TASK_UNINTERRUPTIBLE |
464 | if task blocks. */ | ||
461 | 465 | ||
462 | if(tmp->ops->dgl_lock(tmp, dgl_wait, &dgl_wait->wq_nodes[i])) { | 466 | if(tmp->ops->dgl_lock(tmp, dgl_wait, &dgl_wait->wq_nodes[i])) { |
463 | sched_trace_lock(dgl_wait->task, tmp->ident, 1); | 467 | sched_trace_lock(dgl_wait->task, tmp->ident, 1); |
@@ -467,7 +471,7 @@ static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait)
467 | } | 471 | } |
468 | 472 | ||
469 | if(dgl_wait->nr_remaining == 0) { | 473 | if(dgl_wait->nr_remaining == 0) { |
470 | // acquired entire group immediatly | 474 | /* acquired entire group immediatly */ |
471 | TRACE_CUR("Acquired all locks in DGL immediatly!\n"); | 475 | TRACE_CUR("Acquired all locks in DGL immediatly!\n"); |
472 | raw_spin_unlock_irqrestore(dgl_lock, irqflags); | 476 | raw_spin_unlock_irqrestore(dgl_lock, irqflags); |
473 | local_irq_restore(kludge_flags); | 477 | local_irq_restore(kludge_flags); |
@@ -486,7 +490,8 @@ static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait)
486 | 490 | ||
487 | TS_DGL_LOCK_SUSPEND; | 491 | TS_DGL_LOCK_SUSPEND; |
488 | 492 | ||
489 | raw_spin_unlock_irqrestore(dgl_lock, irqflags); // free dgl_lock before suspending | 493 | /* free dgl_lock before suspending */ |
494 | raw_spin_unlock_irqrestore(dgl_lock, irqflags); | ||
490 | flush_pending_wakes(); | 495 | flush_pending_wakes(); |
491 | local_irq_restore(kludge_flags); | 496 | local_irq_restore(kludge_flags); |
492 | suspend_for_lock(); | 497 | suspend_for_lock(); |
@@ -496,14 +501,6 @@ static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait)
496 | TRACE_CUR("Woken up from DGL suspension.\n"); | 501 | TRACE_CUR("Woken up from DGL suspension.\n"); |
497 | } | 502 | } |
498 | 503 | ||
499 | #if 0 | ||
500 | /* FOR SANITY CHECK FOR TESTING */ | ||
501 | for(i = 0; i < dgl_wait->size; ++i) { | ||
502 | struct litmus_lock *tmp = dgl_wait->locks[i]; | ||
503 | BUG_ON(!tmp->ops->is_owner(tmp, dgl_wait->task)); | ||
504 | } | ||
505 | #endif | ||
506 | |||
507 | TRACE_CUR("Acquired entire DGL\n"); | 504 | TRACE_CUR("Acquired entire DGL\n"); |
508 | 505 | ||
509 | return 0; | 506 | return 0; |
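For orientation, the `do_litmus_dgl_lock()` path above has this shape: take the global DGL spinlock, attempt each lock with a non-blocking per-lock `dgl_lock()` that enqueues the waiter on failure, and suspend at most once if any lock is still outstanding, dropping the spinlock first so that other tasks can release the contended locks. The sketch below mirrors only that control flow; every type and helper in it (`dgl_global_lock`, `try_or_enqueue`, `suspend_until_group_complete`) is a hypothetical stand-in.

```c
/* Hypothetical opaque types and helpers, declared only for this sketch. */
struct lock;
struct waiter;

void dgl_global_lock(void);
void dgl_global_unlock(void);
int  try_or_enqueue(struct lock *l, struct waiter *w);  /* nonzero = acquired now */
void suspend_until_group_complete(struct waiter *w);    /* sleeps until the last lock is granted */

/* Control-flow sketch of acquiring a dynamic group lock (DGL). */
static long group_lock_sketch(struct lock *locks[], int n, struct waiter *w)
{
	int i, remaining = n;

	dgl_global_lock();   /* serialize updates to whole-group state */

	for (i = 0; i < n; ++i) {
		/* Non-blocking attempt: on failure the waiter is queued on
		 * the lock, but the task does not sleep inside the loop. */
		if (try_or_enqueue(locks[i], w))
			--remaining;
	}

	if (remaining == 0) {
		dgl_global_unlock();   /* acquired the whole group immediately */
		return 0;
	}

	/* Drop the group lock *before* sleeping; otherwise no other task
	 * could release the locks this one is waiting for. */
	dgl_global_unlock();
	suspend_until_group_complete(w);
	return 0;
}
```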
@@ -564,16 +561,17 @@ static long do_litmus_dgl_atomic_lock(dgl_wait_state_t *dgl_wait)
564 | 561 | ||
565 | TS_DGL_LOCK_SUSPEND; | 562 | TS_DGL_LOCK_SUSPEND; |
566 | 563 | ||
567 | raw_spin_unlock_irqrestore(dgl_lock, irqflags); // free dgl_lock before suspending | 564 | /* free dgl_lock before suspending */ |
565 | raw_spin_unlock_irqrestore(dgl_lock, irqflags); | ||
568 | flush_pending_wakes(); | 566 | flush_pending_wakes(); |
569 | local_irq_restore(kludge_flags); | 567 | local_irq_restore(kludge_flags); |
570 | suspend_for_lock(); // suspend!!! | 568 | suspend_for_lock(); |
571 | 569 | ||
572 | TS_DGL_LOCK_RESUME; | 570 | TS_DGL_LOCK_RESUME; |
573 | 571 | ||
574 | TRACE_CUR("Woken up from DGL suspension.\n"); | 572 | TRACE_CUR("Woken up from DGL suspension.\n"); |
575 | 573 | ||
576 | goto all_acquired; // we should hold all locks when we wake up. | 574 | goto all_acquired; /* we should hold all locks when we wake up. */ |
577 | } | 575 | } |
578 | raw_spin_unlock_irqrestore(dgl_lock, irqflags); | 576 | raw_spin_unlock_irqrestore(dgl_lock, irqflags); |
579 | flush_pending_wakes(); | 577 | flush_pending_wakes(); |
@@ -583,14 +581,6 @@ all_acquired:
583 | 581 | ||
584 | dgl_wait->nr_remaining = 0; | 582 | dgl_wait->nr_remaining = 0; |
585 | 583 | ||
586 | #if 0 | ||
587 | /* SANITY CHECK FOR TESTING */ | ||
588 | for(i = 0; i < dgl_wait->size; ++i) { | ||
589 | struct litmus_lock *tmp = dgl_wait->locks[i]; | ||
590 | BUG_ON(!tmp->ops->is_owner(tmp, dgl_wait->task)); | ||
591 | } | ||
592 | #endif | ||
593 | |||
594 | TRACE_CUR("Acquired entire DGL\n"); | 584 | TRACE_CUR("Acquired entire DGL\n"); |
595 | 585 | ||
596 | return 0; | 586 | return 0; |
@@ -625,7 +615,9 @@ asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
625 | else { | 615 | else { |
626 | int i; | 616 | int i; |
627 | int num_need_atomic = 0; | 617 | int num_need_atomic = 0; |
628 | dgl_wait_state_t dgl_wait_state; // lives on the stack until all resources in DGL are held. | 618 | |
619 | /* lives on the stack until all resources in DGL are held. */ | ||
620 | dgl_wait_state_t dgl_wait_state; | ||
629 | 621 | ||
630 | init_dgl_wait_state(&dgl_wait_state); | 622 | init_dgl_wait_state(&dgl_wait_state); |
631 | 623 | ||
@@ -634,8 +626,9 @@ asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
634 | if(entry && is_lock(entry)) { | 626 | if(entry && is_lock(entry)) { |
635 | dgl_wait_state.locks[i] = get_lock(entry); | 627 | dgl_wait_state.locks[i] = get_lock(entry); |
636 | if(!dgl_wait_state.locks[i]->ops->supports_dgl) { | 628 | if(!dgl_wait_state.locks[i]->ops->supports_dgl) { |
637 | TRACE_CUR("Lock %d does not support all required DGL operations.\n", | 629 | TRACE_CUR("Lock %d does not support all required " |
638 | dgl_wait_state.locks[i]->ident); | 630 | "DGL operations.\n", |
631 | dgl_wait_state.locks[i]->ident); | ||
639 | goto out; | 632 | goto out; |
640 | } | 633 | } |
641 | 634 | ||
@@ -650,7 +643,8 @@ asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
650 | } | 643 | } |
651 | 644 | ||
652 | if (num_need_atomic && num_need_atomic != dgl_size) { | 645 | if (num_need_atomic && num_need_atomic != dgl_size) { |
653 | TRACE_CUR("All locks in DGL must support atomic acquire if any one does.\n"); | 646 | TRACE_CUR("All locks in DGL must support atomic " |
647 | "acquire if any one does.\n"); | ||
654 | goto out; | 648 | goto out; |
655 | } | 649 | } |
656 | 650 | ||
@@ -687,7 +681,7 @@ static long do_litmus_dgl_unlock(struct litmus_lock* dgl_locks[], int dgl_size)
687 | #endif | 681 | #endif |
688 | 682 | ||
689 | local_irq_save(flags); | 683 | local_irq_save(flags); |
690 | for(i = dgl_size - 1; i >= 0; --i) { // unlock in reverse order | 684 | for(i = dgl_size - 1; i >= 0; --i) { /* unlock in reverse order */ |
691 | 685 | ||
692 | struct litmus_lock *l = dgl_locks[i]; | 686 | struct litmus_lock *l = dgl_locks[i]; |
693 | long tmp_err; | 687 | long tmp_err; |
@@ -698,7 +692,8 @@ static long do_litmus_dgl_unlock(struct litmus_lock* dgl_locks[], int dgl_size)
698 | sched_trace_lock(current, l->ident, 0); | 692 | sched_trace_lock(current, l->ident, 0); |
699 | 693 | ||
700 | if(tmp_err) { | 694 | if(tmp_err) { |
701 | TRACE_CUR("There was an error unlocking %d: %d.\n", l->ident, tmp_err); | 695 | TRACE_CUR("There was an error unlocking %d: %d.\n", |
696 | l->ident, tmp_err); | ||
702 | err = tmp_err; | 697 | err = tmp_err; |
703 | } | 698 | } |
704 | } | 699 | } |
@@ -738,8 +733,9 @@ asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size)
738 | if(entry && is_lock(entry)) { | 733 | if(entry && is_lock(entry)) { |
739 | dgl_locks[i] = get_lock(entry); | 734 | dgl_locks[i] = get_lock(entry); |
740 | if(!dgl_locks[i]->ops->supports_dgl) { | 735 | if(!dgl_locks[i]->ops->supports_dgl) { |
741 | TRACE_CUR("Lock %d does not support all required DGL operations.\n", | 736 | TRACE_CUR("Lock %d does not support all required " |
742 | dgl_locks[i]->ident); | 737 | "DGL operations.\n", |
738 | dgl_locks[i]->ident); | ||
743 | goto out; | 739 | goto out; |
744 | } | 740 | } |
745 | } | 741 | } |
@@ -761,8 +757,8 @@ out:
761 | return err; | 757 | return err; |
762 | } | 758 | } |
763 | 759 | ||
764 | 760 | asmlinkage long sys_litmus_dgl_should_yield_lock(void* __user usr_dgl_ods, | |
765 | asmlinkage long sys_litmus_dgl_should_yield_lock(void* __user usr_dgl_ods, int dgl_size) | 761 | int dgl_size) |
766 | { | 762 | { |
767 | long err = -EINVAL; | 763 | long err = -EINVAL; |
768 | int dgl_ods[MAX_DGL_SIZE]; | 764 | int dgl_ods[MAX_DGL_SIZE]; |
@@ -794,7 +790,8 @@ asmlinkage long sys_litmus_dgl_should_yield_lock(void* __user usr_dgl_ods, int d
794 | if (entry && is_lock(entry)) { | 790 | if (entry && is_lock(entry)) { |
795 | struct litmus_lock *l = get_lock(entry); | 791 | struct litmus_lock *l = get_lock(entry); |
796 | if (l->ops->should_yield_lock) { | 792 | if (l->ops->should_yield_lock) { |
797 | TRACE_CUR("Checking to see if should yield lock %d\n", l->ident); | 793 | TRACE_CUR("Checking to see if should yield lock %d\n", |
794 | l->ident); | ||
798 | err = l->ops->should_yield_lock(l); | 795 | err = l->ops->should_yield_lock(l); |
799 | } | 796 | } |
800 | else { | 797 | else { |
@@ -827,7 +824,8 @@ asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size)
827 | return -ENOSYS; | 824 | return -ENOSYS; |
828 | } | 825 | } |
829 | 826 | ||
830 | asmlinkage long sys_litmus_dgl_should_yield_lock(void* __user usr_dgl_ods, int dgl_size) | 827 | asmlinkage long sys_litmus_dgl_should_yield_lock(void* __user usr_dgl_ods, |
828 | int dgl_size) | ||
831 | { | 829 | { |
832 | return -ENOSYS; | 830 | return -ENOSYS; |
833 | } | 831 | } |
@@ -880,8 +878,7 @@ void suspend_for_lock(void)
880 | 878 | ||
881 | #if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA) | 879 | #if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA) |
882 | /* disable tracking */ | 880 | /* disable tracking */ |
883 | if(tsk_rt(t)->held_gpus) | 881 | if(tsk_rt(t)->held_gpus) { |
884 | { | ||
885 | /* tracking is actually stopped in schedule(), where it | 882 | /* tracking is actually stopped in schedule(), where it |
886 | is also stopped upon preemption */ | 883 | is also stopped upon preemption */ |
887 | tsk_rt(t)->suspend_gpu_tracker_on_block = 1; | 884 | tsk_rt(t)->suspend_gpu_tracker_on_block = 1; |
@@ -914,8 +911,7 @@ DEFINE_PER_CPU(wake_queue_t, wqueues);
914 | void init_wake_queues() | 911 | void init_wake_queues() |
915 | { | 912 | { |
916 | int cpu = 0; | 913 | int cpu = 0; |
917 | for_each_online_cpu(cpu) | 914 | for_each_online_cpu(cpu) { |
918 | { | ||
919 | wake_queue_t *q = &per_cpu(wqueues, cpu); | 915 | wake_queue_t *q = &per_cpu(wqueues, cpu); |
920 | memset(q, 0, sizeof(*q)); | 916 | memset(q, 0, sizeof(*q)); |
921 | } | 917 | } |
@@ -944,8 +940,7 @@ int flush_pending_wakes()
944 | wake_queue_t *q; | 940 | wake_queue_t *q; |
945 | 941 | ||
946 | q = &per_cpu(wqueues, smp_processor_id()); | 942 | q = &per_cpu(wqueues, smp_processor_id()); |
947 | for(i = 0; i < q->count; ++i) | 943 | for(i = 0; i < q->count; ++i) { |
948 | { | ||
949 | if (q->to_wake[i]) { | 944 | if (q->to_wake[i]) { |
950 | struct task_struct *t = q->to_wake[i]; | 945 | struct task_struct *t = q->to_wake[i]; |
951 | q->to_wake[i] = NULL; | 946 | q->to_wake[i] = NULL; |
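The per-CPU wake queue in these last hunks is what backs the `flush_pending_wakes()` calls sprinkled through the lock paths: wake-ups requested while interrupts are off or a scheduler lock is held are queued, then performed later from a safe context. A self-contained user-space sketch of that idea follows; the capacity, the names, and the `do_wake` helper are all hypothetical (the kernel side uses `DEFINE_PER_CPU` and the scheduler's wake-up primitives).

```c
#include <string.h>

#define MAX_DEFERRED_WAKES 16            /* hypothetical capacity */

struct task;                             /* stand-in for struct task_struct */
void do_wake(struct task *t);            /* stand-in for a real wake-up call */

/* Hypothetical per-CPU structure: wake-ups requested while a lock is
 * held (or IRQs are off) are recorded here instead of being issued. */
struct wake_queue {
	struct task *to_wake[MAX_DEFERRED_WAKES];
	int count;
};

static void wake_queue_init(struct wake_queue *q)
{
	memset(q, 0, sizeof(*q));
}

static void wake_queue_defer(struct wake_queue *q, struct task *t)
{
	if (q->count < MAX_DEFERRED_WAKES)
		q->to_wake[q->count++] = t;
}

/* Called once it is safe to wake tasks: drain everything queued. */
static void wake_queue_flush(struct wake_queue *q)
{
	int i;

	for (i = 0; i < q->count; ++i) {
		if (q->to_wake[i]) {
			struct task *t = q->to_wake[i];
			q->to_wake[i] = NULL;
			do_wake(t);
		}
	}
	q->count = 0;
}
```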