path: root/litmus/kfmlp_lock.c
Diffstat (limited to 'litmus/kfmlp_lock.c')
-rw-r--r--  litmus/kfmlp_lock.c  173
1 file changed, 118 insertions, 55 deletions
diff --git a/litmus/kfmlp_lock.c b/litmus/kfmlp_lock.c
index b30e5b589882..7cdca1b7b50a 100644
--- a/litmus/kfmlp_lock.c
+++ b/litmus/kfmlp_lock.c
@@ -99,12 +99,19 @@ static struct task_struct* kfmlp_select_hp_steal(struct kfmlp_semaphore* sem, wa
     if(*to_steal_from)
     {
         struct list_head *pos;
+        struct task_struct *target = (*to_steal_from)->hp_waiter;
+
+        TRACE_CUR("want to steal hp_waiter (%s/%d) from queue %d\n",
+                  target->comm,
+                  target->pid,
+                  kfmlp_get_idx(sem, *to_steal_from));
+
         list_for_each(pos, &(*to_steal_from)->wait.task_list)
         {
             wait_queue_t *node = list_entry(pos, wait_queue_t, task_list);
             struct task_struct *queued = (struct task_struct*) node->private;
             /* Compare task prios, find high prio task. */
-            if (queued == (*to_steal_from)->hp_waiter)
+            if (queued == target)
             {
                 *to_steal = node;
 
@@ -115,6 +122,11 @@ static struct task_struct* kfmlp_select_hp_steal(struct kfmlp_semaphore* sem, wa
                 return queued;
             }
         }
+
+        TRACE_CUR("Could not find %s/%d in queue %d!!! THIS IS A BUG!\n",
+                  target->comm,
+                  target->pid,
+                  kfmlp_get_idx(sem, *to_steal_from));
     }
 
     return NULL;
@@ -133,6 +145,11 @@ static void kfmlp_steal_node(struct kfmlp_semaphore *sem,
     if(t == src->hp_waiter) {
         src->hp_waiter = kfmlp_find_hp_waiter(src, NULL);
 
+        TRACE_CUR("queue %d: %s/%d is new hp_waiter\n",
+                  kfmlp_get_idx(sem, src),
+                  (src->hp_waiter) ? src->hp_waiter->comm : "nil",
+                  (src->hp_waiter) ? src->hp_waiter->pid : -1);
+
         if(src->owner && tsk_rt(src->owner)->inh_task == t) {
             litmus->decrease_prio(src->owner, src->hp_waiter);
         }
@@ -140,6 +157,7 @@ static void kfmlp_steal_node(struct kfmlp_semaphore *sem,
 
     if(sem->shortest_queue->count > src->count) {
         sem->shortest_queue = src;
+        TRACE_CUR("queue %d is the shortest\n", kfmlp_get_idx(sem, sem->shortest_queue));
     }
 
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
@@ -155,6 +173,10 @@ static void kfmlp_steal_node(struct kfmlp_semaphore *sem,
     if(litmus->compare(t, dst->hp_waiter)) {
         dst->hp_waiter = t;
 
+        TRACE_CUR("queue %d: %s/%d is new hp_waiter\n",
+                  kfmlp_get_idx(sem, dst),
+                  t->comm, t->pid);
+
         if(dst->owner && litmus->compare(t, dst->owner))
         {
             litmus->increase_prio(dst->owner, t);
@@ -264,8 +286,9 @@ int kfmlp_lock(struct litmus_lock* l)
 
     if (my_queue->owner) {
         /* resource is not free => must suspend and wait */
-        TRACE_CUR("queue %d: Resource is not free => must suspend and wait.\n",
-                  kfmlp_get_idx(sem, my_queue));
+        TRACE_CUR("queue %d: Resource is not free => must suspend and wait. (queue size = %d)\n",
+                  kfmlp_get_idx(sem, my_queue),
+                  my_queue->count);
 
         init_waitqueue_entry(&wait, t);
 
@@ -274,29 +297,37 @@ int kfmlp_lock(struct litmus_lock* l)
 
         __add_wait_queue_tail_exclusive(&my_queue->wait, &wait);
 
+        TRACE_CUR("queue %d: hp_waiter is currently %s/%d\n",
+                  kfmlp_get_idx(sem, my_queue),
+                  (my_queue->hp_waiter) ? my_queue->hp_waiter->comm : "nil",
+                  (my_queue->hp_waiter) ? my_queue->hp_waiter->pid : -1);
+
         /* check if we need to activate priority inheritance */
         //if (edf_higher_prio(t, my_queue->hp_waiter))
-        if (litmus->compare(t, my_queue->hp_waiter))
-        {
+        if (litmus->compare(t, my_queue->hp_waiter)) {
             my_queue->hp_waiter = t;
+            TRACE_CUR("queue %d: %s/%d is new hp_waiter\n",
+                      kfmlp_get_idx(sem, my_queue),
+                      t->comm, t->pid);
+
             //if (edf_higher_prio(t, my_queue->owner))
-            if (litmus->compare(t, my_queue->owner))
-            {
+            if (litmus->compare(t, my_queue->owner)) {
                 litmus->increase_prio(my_queue->owner, my_queue->hp_waiter);
             }
         }
 
         ++(my_queue->count);
 
-#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
         if(my_queue == sem->shortest_queue) {
             sem->shortest_queue = kfmlp_find_shortest(sem, my_queue);
+            TRACE_CUR("queue %d is the shortest\n",
+                      kfmlp_get_idx(sem, sem->shortest_queue));
         }
+
+#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
         if(sem->aff_obs) {
             sem->aff_obs->ops->notify_enqueue(sem->aff_obs, my_queue, t);
         }
-#else
-        sem->shortest_queue = kfmlp_find_shortest(sem, my_queue);
 #endif
 
         /* release lock before sleeping */
@@ -309,13 +340,11 @@ int kfmlp_lock(struct litmus_lock* l)
         schedule();
 
 
-        if(my_queue->owner == t)
-        {
+        if(my_queue->owner == t) {
             TRACE_CUR("queue %d: acquired through waiting\n",
                       kfmlp_get_idx(sem, my_queue));
         }
-        else
-        {
+        else {
             /* this case may happen if our wait entry was stolen
                between queues. record where we went. */
             my_queue = kfmlp_get_queue(sem, t);
@@ -325,8 +354,7 @@ int kfmlp_lock(struct litmus_lock* l)
                       kfmlp_get_idx(sem, my_queue));
         }
     }
-    else
-    {
+    else {
         TRACE_CUR("queue %d: acquired immediately\n",
                   kfmlp_get_idx(sem, my_queue));
 
@@ -334,16 +362,17 @@ int kfmlp_lock(struct litmus_lock* l)
 
         ++(my_queue->count);
 
-#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
         if(my_queue == sem->shortest_queue) {
             sem->shortest_queue = kfmlp_find_shortest(sem, my_queue);
+            TRACE_CUR("queue %d is the shortest\n",
+                      kfmlp_get_idx(sem, sem->shortest_queue));
         }
+
+#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
         if(sem->aff_obs) {
             sem->aff_obs->ops->notify_enqueue(sem->aff_obs, my_queue, t);
             sem->aff_obs->ops->notify_acquired(sem->aff_obs, my_queue, t);
         }
-#else
-        sem->shortest_queue = kfmlp_find_shortest(sem, my_queue);
 #endif
 
         spin_unlock_irqrestore(&sem->lock, flags);
@@ -370,12 +399,16 @@ int kfmlp_unlock(struct litmus_lock* l)
         goto out;
     }
 
+    TRACE_CUR("queue %d: unlocking\n", kfmlp_get_idx(sem, my_queue));
+
    my_queue->owner = NULL; // clear ownership
     --(my_queue->count);
 
     if(my_queue->count < sem->shortest_queue->count)
     {
         sem->shortest_queue = my_queue;
+        TRACE_CUR("queue %d is the shortest\n",
+                  kfmlp_get_idx(sem, sem->shortest_queue));
     }
 
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
@@ -424,13 +457,13 @@ RETRY:
         /* wake up next */
         wake_up_process(next);
     }
-    else
-    {
+    else {
         // TODO: put this stealing logic before we attempt to release
         // our resource. (simplifies code and gets rid of ugly goto RETRY.
         wait_queue_t *wait;
 
-        TRACE_CUR("queue %d: looking to steal someone...\n", kfmlp_get_idx(sem, my_queue));
+        TRACE_CUR("queue %d: looking to steal someone...\n",
+                  kfmlp_get_idx(sem, my_queue));
 
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
         next = (sem->aff_obs) ?
@@ -441,16 +474,18 @@ RETRY:
 #endif
 
         if(next) {
-            kfmlp_steal_node(sem, my_queue, wait, to_steal_from);
-
-            TRACE_CUR("queued %d: stole %s/%d from queue %d\n",
+            TRACE_CUR("queue %d: stealing %s/%d from queue %d\n",
+                      kfmlp_get_idx(sem, my_queue),
                       next->comm, next->pid,
                       kfmlp_get_idx(sem, to_steal_from));
+
+            kfmlp_steal_node(sem, my_queue, wait, to_steal_from);
 
             goto RETRY; // will succeed this time.
         }
         else {
-            TRACE_CUR("queue %d: no one to steal.\n", kfmlp_get_idx(sem, my_queue));
+            TRACE_CUR("queue %d: no one to steal.\n",
+                      kfmlp_get_idx(sem, my_queue));
         }
     }
 
@@ -597,7 +632,8 @@ static struct affinity_observer* kfmlp_aff_obs_new(struct affinity_observer_ops*
         return(NULL);
     }
 
-    kfmlp_aff->obs.ops = ops;
+    affinity_observer_new(&kfmlp_aff->obs, ops, &aff_args.obs);
+
     kfmlp_aff->ops = kfmlp_ops;
     kfmlp_aff->offset = aff_args.replica_to_gpu_offset;
 
@@ -645,34 +681,50 @@ struct kfmlp_queue* gpu_kfmlp_advise_enqueue(struct kfmlp_affinity* aff, struct
 
     // simply pick the shortest queue if, we have no affinity, or we have
     // affinity with the shortest
-    if((tsk_rt(t)->last_gpu < 0) ||
-       ((kfmlp_get_idx(sem, aff->shortest_queue->q) + aff->offset) == tsk_rt(t)->last_gpu)) {
+    if(unlikely(tsk_rt(t)->last_gpu < 0)) {
         // we have affinity with the shorest queue. pick it.
-        to_enqueue = aff->shortest_queue->q;
-
-        TRACE_CUR("special case: no affinity or have affinity with shortest\n");
-
+        shortest = aff->shortest_queue;
+        TRACE_CUR("special case: no affinity\n");
         goto out;
     }
 
-    // enqueue where we will have the shortest time to completion
+    // all things being equal, let's start with the queue with which we have
+    // affinity. this helps us maintain affinity even when we don't have
+    // an estiamte for local-affinity execution time (i.e., 2nd time on GPU)
+    shortest = &aff->q_info[tsk_rt(t)->last_gpu - aff->offset];
+
+    if(shortest == aff->shortest_queue) {
+        TRACE_CUR("special case: have affinity with shortest queue\n");
+        goto out;
+    }
 
-    shortest = &aff->q_info[0];
-    min_len = shortest->estimated_len + get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, 0 + aff->offset));
+    min_len = shortest->estimated_len + get_gpu_estimate(t, MIG_LOCAL);
 
-    for(i = 1; i < sem->num_resources; ++i) {
-        lt_t est_len =
-            aff->q_info[i].estimated_len +
-            get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, i + aff->offset));
-
-        if(est_len < min_len) {
-            shortest = &aff->q_info[i];
-            min_len = est_len;
+    TRACE_CUR("cs is %llu on queue %d: est len = %llu\n",
+              get_gpu_estimate(t, MIG_LOCAL),
+              kfmlp_get_idx(sem, shortest->q),
+              min_len);
+
+    for(i = 0; i < sem->num_resources; ++i) {
+        if(&aff->q_info[i] != shortest) {
+
+            lt_t est_len =
+                aff->q_info[i].estimated_len +
+                get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, i + aff->offset));
+            if(est_len < min_len) {
+                shortest = &aff->q_info[i];
+                min_len = est_len;
+            }
+
+            TRACE_CUR("cs is %llu on queue %d: est len = %llu\n",
+                      get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, i + aff->offset)),
+                      kfmlp_get_idx(sem, aff->q_info[i].q),
+                      est_len);
         }
     }
-    to_enqueue = shortest->q;
 
 out:
+    to_enqueue = shortest->q;
     TRACE_CUR("enqueue on fq %d (non-aff wanted fq %d)\n",
               kfmlp_get_idx(sem, to_enqueue),
               kfmlp_get_idx(sem, sem->shortest_queue));
@@ -698,17 +750,20 @@ void gpu_kfmlp_notify_enqueue(struct kfmlp_affinity* aff, struct kfmlp_queue* fq
     int gpu = aff->offset + replica;
     struct kfmlp_queue_info *info = &aff->q_info[replica];
     lt_t est_time;
+    lt_t est_len_before;
 
     if(current == t) {
         tsk_rt(t)->suspend_gpu_tracker_on_block = 1;
     }
 
+    est_len_before = info->estimated_len;
     est_time = get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, gpu));
     info->estimated_len += est_time;
 
-    TRACE_CUR("fq %d est len is now %llu\n",
-              kfmlp_get_idx(sem, aff->shortest_queue->q),
-              aff->shortest_queue->estimated_len);
+    TRACE_CUR("fq %d: q_len (%llu) + est_cs (%llu) = %llu\n",
+              kfmlp_get_idx(sem, info->q),
+              est_len_before, est_time,
+              info->estimated_len);
 
     if(aff->shortest_queue == info) {
         // we may no longer be the shortest
@@ -764,7 +819,7 @@ void gpu_kfmlp_notify_acquired(struct kfmlp_affinity* aff, struct kfmlp_queue* f
     TRACE_CUR("%s/%d acquired gpu %d. migration type = %d\n",
              t->comm, t->pid, gpu, tsk_rt(t)->gpu_migration);
 
-    reg_nv_device(gpu, 1); // register
+    reg_nv_device(gpu, 1, t); // register
 
     tsk_rt(t)->suspend_gpu_tracker_on_block = 0;
     reset_gpu_tracker(t);
@@ -782,7 +837,7 @@ void gpu_kfmlp_notify_freed(struct kfmlp_affinity* aff, struct kfmlp_queue* fq,
     est_time = get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, gpu));
 
     tsk_rt(t)->last_gpu = gpu;
-    reg_nv_device(gpu, 0); // unregister
+    reg_nv_device(gpu, 0, t); // unregister
 
     // update estimates
     update_gpu_estimate(t, get_gpu_time(t));
@@ -822,21 +877,25 @@ struct affinity_observer* kfmlp_gpu_aff_obs_new(struct affinity_observer_ops* op
 struct kfmlp_queue* simple_gpu_kfmlp_advise_enqueue(struct kfmlp_affinity* aff, struct task_struct* t)
 {
     struct kfmlp_semaphore *sem = kfmlp_from_lock(aff->obs.lock);
+//    TRACE_CUR("Simple GPU KFMLP advise_enqueue invoked\n");
     return sem->shortest_queue;
 }
 
 struct task_struct* simple_gpu_kfmlp_advise_steal(struct kfmlp_affinity* aff, wait_queue_t** to_steal, struct kfmlp_queue** to_steal_from)
 {
     struct kfmlp_semaphore *sem = kfmlp_from_lock(aff->obs.lock);
+//    TRACE_CUR("Simple GPU KFMLP advise_steal invoked\n");
     return kfmlp_select_hp_steal(sem, to_steal, to_steal_from);
 }
 
 void simple_gpu_kfmlp_notify_enqueue(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t)
 {
+//    TRACE_CUR("Simple GPU KFMLP notify_enqueue invoked\n");
 }
 
 void simple_gpu_kfmlp_notify_dequeue(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t)
 {
+//    TRACE_CUR("Simple GPU KFMLP notify_dequeue invoked\n");
 }
 
 void simple_gpu_kfmlp_notify_acquired(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t)
@@ -844,7 +903,9 @@ void simple_gpu_kfmlp_notify_acquired(struct kfmlp_affinity* aff, struct kfmlp_q
     struct kfmlp_semaphore *sem = kfmlp_from_lock(aff->obs.lock);
     int gpu = kfmlp_get_idx(sem, fq) + aff->offset;
 
-    reg_nv_device(gpu, 1); // register
+//    TRACE_CUR("Simple GPU KFMLP notify_acquired invoked\n");
+
+    reg_nv_device(gpu, 1, t); // register
 }
 
 void simple_gpu_kfmlp_notify_freed(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t)
@@ -852,7 +913,9 @@ void simple_gpu_kfmlp_notify_freed(struct kfmlp_affinity* aff, struct kfmlp_queu
     struct kfmlp_semaphore *sem = kfmlp_from_lock(aff->obs.lock);
     int gpu = kfmlp_get_idx(sem, fq) + aff->offset;
 
-    reg_nv_device(gpu, 0); // unregister
+//    TRACE_CUR("Simple GPU KFMLP notify_freed invoked\n");
+
+    reg_nv_device(gpu, 0, t); // unregister
 }
 
 struct kfmlp_affinity_ops simple_gpu_kfmlp_affinity =
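
Note on the selection rule introduced in gpu_kfmlp_advise_enqueue() above: the search is now seeded with the queue of the task's last-used GPU, and another replica wins only if its estimated completion time (already-queued work plus a migration-distance-scaled critical-section estimate) is strictly shorter. The fragment below is a minimal standalone sketch of that rule, not the kernel code: queue_info, est_cs(), and advise_enqueue() are invented for illustration and stand in for LITMUS^RT's kfmlp_queue_info, get_gpu_estimate(), and gpu_migration_distance().

/* Standalone illustration only: types and the cost model are hypothetical;
 * the real implementation lives in gpu_kfmlp_advise_enqueue() above. */
#include <stdio.h>

typedef unsigned long long lt_t;   /* time in ns, as in LITMUS^RT */

struct queue_info {
    int gpu;              /* GPU replica served by this queue */
    lt_t estimated_len;   /* estimated critical-section work already queued */
};

/* toy cost model: a farther migration inflates the next critical section */
static lt_t est_cs(lt_t base_cs, int last_gpu, int gpu)
{
    int dist = (last_gpu < 0) ? 0 : (last_gpu > gpu ? last_gpu - gpu : gpu - last_gpu);
    return base_cs + (lt_t)dist * (base_cs / 4);
}

/* mirror the patched policy: start from the last-used GPU's queue, then
 * switch only on a strictly shorter estimated completion time */
static int advise_enqueue(struct queue_info *q, int nr, int last_gpu, lt_t base_cs)
{
    int best = (last_gpu < 0 || last_gpu >= nr) ? 0 : last_gpu;  /* affinity seed */
    lt_t min_len = q[best].estimated_len + est_cs(base_cs, last_gpu, q[best].gpu);

    for (int i = 0; i < nr; ++i) {
        lt_t len = q[i].estimated_len + est_cs(base_cs, last_gpu, q[i].gpu);
        if (i != best && len < min_len) {
            best = i;
            min_len = len;
        }
    }
    return best;
}

int main(void)
{
    struct queue_info q[3] = {
        { .gpu = 0, .estimated_len = 900 },
        { .gpu = 1, .estimated_len = 500 },
        { .gpu = 2, .estimated_len = 450 },
    };
    /* last ran on GPU 1 with a 400ns critical section: queue 1 keeps the task
     * unless another queue is shorter even after the migration penalty */
    printf("enqueue on queue %d\n", advise_enqueue(q, 3, 1, 400));
    return 0;
}

Seeding with the affinity queue means ties and near-ties keep the task on its previous GPU, which matches the patch comment about maintaining affinity when no local-affinity execution estimate exists yet.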