author     Glenn Elliott <gelliott@cs.unc.edu>    2013-01-25 00:04:56 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>    2013-01-25 00:04:56 -0500
commit     e2231dd23a0a8568d7a7bdc5ac0771bf270010f0 (patch)
tree       e3dc0a9c2cd86554024c6664b8ddc564cf26ddbb
parent     e20223fcfd6ad9274e9e1aab11a73eaa72c7a4f5 (diff)
Added TRACE calls to ikglp
-rw-r--r--  litmus/ikglp_lock.c | 79
-rw-r--r--  litmus/sched_cedf.c |  2
2 files changed, 52 insertions, 29 deletions
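
Most of the hunks below follow one of two patterns: debug dumps that had been disabled with per-line "//" comments are wrapped in "#if 0 ... #endif" blocks instead, and new TRACE_CUR/TRACE_TASK calls are added along the IKGLP lock and unlock paths. The snippet below is a minimal, self-contained sketch of the "#if 0" pattern, not code from the patch itself; TRACE_CUR is a printf-based stand-in for the LITMUS^RT tracing macro, and the function and its argument are made up for illustration.

/*
 * Sketch of the tracing pattern applied by this patch: verbose debug
 * dumps are wrapped in "#if 0 ... #endif" so they can be re-enabled by
 * flipping a single preprocessor switch instead of uncommenting each
 * line.  TRACE_CUR is a stand-in defined here only so this compiles.
 */
#include <stdio.h>

#define TRACE_CUR(fmt, ...) printf("[trace] " fmt, ##__VA_ARGS__)

static void add_to_global_list_sketch(int top_m_size_example)
{
	TRACE_CUR("adding a node to the top-m global list\n");

	/* ... binheap manipulation would happen here ... */

#if 0	/* flip to "#if 1" to re-enable the verbose heap dump */
	TRACE_CUR("Top-M After (size = %d):\n", top_m_size_example);
	/* print_global_list(...) would walk and print the heap here */
#endif
}

int main(void)
{
	add_to_global_list_sketch(4);
	return 0;
}
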
diff --git a/litmus/ikglp_lock.c b/litmus/ikglp_lock.c
index 5099f6eae537..ae769bce58d6 100644
--- a/litmus/ikglp_lock.c
+++ b/litmus/ikglp_lock.c
@@ -269,10 +269,12 @@ static void ikglp_add_global_list(struct ikglp_semaphore *sem,
 
 		binheap_add(&node->node, &sem->top_m, ikglp_heap_node_t, node);
 
-//		TRACE_CUR("Top-M After (size = %d):\n", sem->top_m_size);
-//		print_global_list(sem->top_m.root, 1);
-//		TRACE_CUR("Not-Top-M After:\n");
-//		print_global_list(sem->not_top_m.root, 1);
+#if 0
+		TRACE_CUR("Top-M After (size = %d):\n", sem->top_m_size);
+		print_global_list(sem->top_m.root, 1);
+		TRACE_CUR("Not-Top-M After:\n");
+		print_global_list(sem->not_top_m.root, 1);
+#endif
 	}
 	else {
 		TRACE_CUR("Trivially adding %s/%d to not-top-m global list.\n",
@@ -282,8 +284,10 @@ static void ikglp_add_global_list(struct ikglp_semaphore *sem,
 
 		binheap_add(&node->node, &sem->not_top_m, ikglp_heap_node_t, node);
 
-//		TRACE_CUR("Not-Top-M After:\n");
-//		print_global_list(sem->not_top_m.root, 1);
+#if 0
+		TRACE_CUR("Not-Top-M After:\n");
+		print_global_list(sem->not_top_m.root, 1);
+#endif
 	}
 }
 
@@ -324,10 +328,12 @@ static void ikglp_del_global_list(struct ikglp_semaphore *sem,
 			--(sem->top_m_size);
 		}
 
-//		TRACE_CUR("Top-M After (size = %d):\n", sem->top_m_size);
-//		print_global_list(sem->top_m.root, 1);
-//		TRACE_CUR("Not-Top-M After:\n");
-//		print_global_list(sem->not_top_m.root, 1);
+#if 0
+		TRACE_CUR("Top-M After (size = %d):\n", sem->top_m_size);
+		print_global_list(sem->top_m.root, 1);
+		TRACE_CUR("Not-Top-M After:\n");
+		print_global_list(sem->not_top_m.root, 1);
+#endif
 	}
 	else {
 		TRACE_CUR("%s/%d is in not-top-m\n", t->comm, t->pid);
@@ -336,8 +342,10 @@ static void ikglp_del_global_list(struct ikglp_semaphore *sem,
 
 		binheap_delete(&node->node, &sem->not_top_m);
 
-//		TRACE_CUR("Not-Top-M After:\n");
-//		print_global_list(sem->not_top_m.root, 1);
+#if 0
+		TRACE_CUR("Not-Top-M After:\n");
+		print_global_list(sem->not_top_m.root, 1);
+#endif
 	}
 }
 
@@ -358,8 +366,10 @@ static void ikglp_add_donees(struct ikglp_semaphore *sem,
 
 	binheap_add(&node->node, &sem->donees, ikglp_donee_heap_node_t, node);
 
-//	TRACE_CUR("donees After:\n");
-//	print_donees(sem, sem->donees.root, 1);
+#if 0
+	TRACE_CUR("donees After:\n");
+	print_donees(sem, sem->donees.root, 1);
+#endif
 }
 
 
@@ -466,7 +476,7 @@ static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq,
 	old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
 
 	binheap_delete(&fq->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks);
-	fq->nest.hp_waiter_eff_prio = fq->hp_waiter;
+	fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter);
 	binheap_add(&fq->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks,
 				struct nested_info, hp_binheap_node);
 
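
The one-line change in the hunk above is the only change in this patch that affects behavior beyond tracing: fq->nest.hp_waiter_eff_prio is supposed to record the priority the FIFO queue's highest-priority waiter is effectively running with, but the old code stored the raw fq->hp_waiter pointer, which ignores any priority that waiter had itself inherited. The sketch below is a simplified, stand-alone illustration of the difference; the struct and the effective_priority() helper are stand-ins, not the LITMUS^RT definitions.

/*
 * Why storing the raw waiter pointer loses information: here a task's
 * effective priority is the task it currently inherits from, if any,
 * otherwise itself.  All types are simplified stand-ins.
 */
#include <stdio.h>
#include <stddef.h>

struct task {
	const char *comm;
	int prio;              /* smaller value = higher priority */
	struct task *inh_task; /* task we currently inherit priority from */
};

static struct task *effective_priority(struct task *t)
{
	return (t && t->inh_task) ? t->inh_task : t;
}

int main(void)
{
	struct task donor  = { "donor",  10, NULL };
	struct task waiter = { "waiter", 90, &donor };  /* inherits from donor */

	struct task *old_value = &waiter;                     /* old code */
	struct task *new_value = effective_priority(&waiter); /* fixed code */

	printf("old: %s (prio %d), new: %s (prio %d)\n",
	       old_value->comm, old_value->prio,
	       new_value->comm, new_value->prio);
	return 0;
}
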
@@ -571,6 +581,8 @@ static void ikglp_remove_donation_from_fq_waiter(struct task_struct *t,
 
 	raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
 
+	TRACE_CUR("Removing donation from fq waiter %s/%d\n", t->comm, t->pid);
+
 	old_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks);
 
 	binheap_delete(n, &tsk_rt(t)->hp_blocked_tasks);
@@ -590,6 +602,10 @@ static void ikglp_remove_donation_from_fq_waiter(struct task_struct *t,
 		decreased_prio = NULL;
 	}
 
+	// no need to propagate decreased inheritance to AUX
+	// or klmirqd tasks since they cannot (should not) inherit
+	// a priority directly from us while we suspend on a litmus
+	// lock.
 	tsk_rt(t)->inh_task = decreased_prio;
 }
 
@@ -863,7 +879,7 @@ static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem,
 		ikglp_refresh_owners_prio_increase(donee, donee_fq, sem, flags); // unlocks sem->lock
 	}
 	else {
-		TRACE_TASK(t, "%s/%d is the owner. Progatating priority immediatly.\n",
+		TRACE_TASK(t, "%s/%d is the owner. Propagating priority immediatly.\n",
 				   donee->comm, donee->pid);
 		litmus->nested_increase_prio(donee, new_prio, &sem->lock, flags); // unlocks sem->lock and donee's heap lock
 	}
@@ -907,6 +923,8 @@ int ikglp_lock(struct litmus_lock* l)
 	lock_global_irqsave(dgl_lock, flags);
 	lock_fine_irqsave(&sem->lock, flags);
 
+	TRACE_CUR("Requesting a replica from lock %d.\n", l->ident);
+
 	if(sem->nr_in_fifos < sem->m) {
 		// enqueue somwhere
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
@@ -920,6 +938,8 @@ int ikglp_lock(struct litmus_lock* l)
 			// take available resource
 			replica = ikglp_get_idx(sem, fq);
 
+			TRACE_CUR("Getting replica %d\n", replica);
+
 			ikglp_get_immediate(t, fq, sem, flags); // unlocks sem->lock
 
 			unlock_global_irqrestore(dgl_lock, flags);
@@ -927,6 +947,8 @@ int ikglp_lock(struct litmus_lock* l)
 			goto acquired;
 		}
 		else {
+			TRACE_CUR("Will go on FQ somewhere.\n");
+
 			wait.task = t; // THIS IS CRITICALLY IMPORTANT!!!
 
 			tsk_rt(t)->blocked_lock = (struct litmus_lock*)sem; // record where we are blocked
@@ -939,6 +961,8 @@ int ikglp_lock(struct litmus_lock* l)
 		}
 	}
 	else {
+		TRACE_CUR("Going on a heap.\n");
+
 		// donor!
 		wait.task = t; // THIS IS CRITICALLY IMPORTANT!!!
 
@@ -949,12 +973,14 @@ int ikglp_lock(struct litmus_lock* l)
 		set_task_state(t, TASK_UNINTERRUPTIBLE);
 
 		if(litmus->__compare(ikglp_mth_highest(sem), BASE, t, BASE)) {
+			TRACE_CUR("Going on PQ heap.\n");
 			// enqueue on PQ
 			ikglp_enqueue_on_pq(sem, &wait);
 			unlock_fine_irqrestore(&sem->lock, flags);
 		}
 		else {
 			// enqueue as donor
+			TRACE_CUR("Going on donor heap.\n");
 			ikglp_enqueue_on_donor(sem, &wait, flags); // unlocks sem->lock
 		}
 	}
@@ -962,6 +988,8 @@ int ikglp_lock(struct litmus_lock* l)
 	unlock_global_irqrestore(dgl_lock, flags);
 	raw_spin_unlock_irqrestore(&sem->real_lock, real_flags);
 
+	TRACE_CUR("Suspending for replica.\n");
+
 	TS_LOCK_SUSPEND;
 
 	suspend_for_lock();
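
Taken together, the TRACE_CUR calls added to ikglp_lock() mark each branch of the placement decision visible in the hunks above: while fewer than m requests sit in the FIFO queues a request takes or queues on a FIFO queue, otherwise it goes to the priority queue if the m-th highest-priority request outranks it, and becomes a donor otherwise. The following stand-alone sketch mirrors that decision tree; the types, priority encoding, and example values are made up for illustration.

/*
 * Decision-tree sketch of where a new IKGLP request is placed,
 * mirroring the branches traced in ikglp_lock() above.
 */
#include <stdio.h>

enum placement { FIFO_QUEUE, PRIORITY_QUEUE, DONOR_HEAP };

static enum placement place_request(int nr_in_fifos, int m,
				    int mth_highest_prio, int my_prio)
{
	if (nr_in_fifos < m)
		return FIFO_QUEUE;        /* "Will go on FQ somewhere." */
	if (mth_highest_prio < my_prio)   /* smaller value = higher priority */
		return PRIORITY_QUEUE;    /* "Going on PQ heap." */
	return DONOR_HEAP;                /* "Going on donor heap." */
}

int main(void)
{
	static const char *where[] = { "FIFO queue", "priority queue", "donor heap" };
	printf("%s\n", where[place_request(3, 4, 10, 50)]); /* room in FIFOs     */
	printf("%s\n", where[place_request(4, 4, 10, 50)]); /* outranked -> PQ   */
	printf("%s\n", where[place_request(4, 4, 10,  5)]); /* outranks -> donor */
	return 0;
}
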
@@ -1368,7 +1396,6 @@ int ikglp_unlock(struct litmus_lock* l)
 		ikglp_move_donor_to_fq(sem, fq_of_new_on_fq, donor_info);
 	}
 	else if(!binheap_empty(&sem->donors)) { // No donor, so move any donor to FQ
-		// move other donor to FQ
 		// Select a donor
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
 		other_donor_info = (sem->aff_obs) ?
@@ -1405,6 +1432,7 @@ int ikglp_unlock(struct litmus_lock* l)
 				   ikglp_get_idx(sem, fq_of_new_on_fq),
 				   ikglp_get_idx(sem, fq));
 
+
 		ikglp_move_donor_to_fq(sem, fq_of_new_on_fq, other_donor_info);
 	}
 	else if(!binheap_empty(&sem->priority_queue)) { // No donors, so move PQ
@@ -1476,8 +1504,10 @@ int ikglp_unlock(struct litmus_lock* l)
 	// 't' must drop all priority and clean up data structures before hand-off.
 
 	// DROP ALL INHERITANCE. IKGLP MUST BE OUTER-MOST
+	// This kills any inheritance from a donor.
 	raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
 	{
+		TRACE_TASK(t, "discarding _all_ inheritance because IKGLP is outermost\n");
 		int count = 0;
 		while(!binheap_empty(&tsk_rt(t)->hp_blocked_tasks)) {
 			binheap_delete_root(&tsk_rt(t)->hp_blocked_tasks,
@@ -1498,7 +1528,7 @@ int ikglp_unlock(struct litmus_lock* l)
 	// if(to_steal), update owner's prio (hp_waiter has already been set)
 	//
 
-	BUG_ON((other_donor_info != NULL) && (to_steal != NULL));
+	BUG_ON(other_donor_info && to_steal);
 
 	if(other_donor_info) {
 		struct fifo_queue *other_fq = other_donor_info->donee_info->fq;
@@ -1520,7 +1550,7 @@ int ikglp_unlock(struct litmus_lock* l)
 			lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!!
 		}
 		else {
-			TRACE_TASK(t, "Donee %s/%d is an blocked in of fq %d.\n",
+			TRACE_TASK(t, "Donee %s/%d is blocked in of fq %d.\n",
 					   donee->comm, donee->pid,
 					   ikglp_get_idx(sem, other_fq));
 
@@ -1701,6 +1731,8 @@ wake_kludge:
 
 	raw_spin_unlock_irqrestore(&sem->real_lock, real_flags);
 
+	TRACE_CUR("done with freeing replica.\n");
+
 out:
 	return err;
 }
@@ -2967,12 +2999,3 @@ struct affinity_observer* ikglp_simple_gpu_aff_obs_new(struct affinity_observer_
 }
 
 #endif
-
-
-
-
-
-
-
-
-
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index b3281e40df52..0d8773b2495f 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -415,7 +415,7 @@ static noinline void job_completion(struct task_struct *t, int forced)
 
 	sched_trace_task_completion(t, forced);
 
-	TRACE_TASK(t, "job_completion().\n");
+	TRACE_TASK(t, "job_completion() at %llu.\n", litmus_clock());
 
 #ifdef CONFIG_LITMUS_LOCKING
 	if (!is_persistent(t) && tsk_rt(t)->inh_task) {
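
The sched_cedf.c hunk only adds a timestamp to the job-completion trace message; litmus_clock() in LITMUS^RT returns the current time as a 64-bit nanosecond count. The sketch below mimics the shape of the new call in user space; litmus_clock() and TRACE_TASK here are stand-ins built on clock_gettime() and printf, used only for illustration.

/*
 * User-space sketch of the timestamped trace added above.  The kernel
 * helpers are approximated: litmus_clock() is emulated with
 * clock_gettime(CLOCK_MONOTONIC).
 */
#include <stdio.h>
#include <time.h>

typedef unsigned long long lt_t;

static lt_t litmus_clock(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (lt_t)ts.tv_sec * 1000000000ULL + (lt_t)ts.tv_nsec;
}

#define TRACE_TASK(name, fmt, ...) printf("(%s) " fmt, (name), ##__VA_ARGS__)

int main(void)
{
	/* before: TRACE_TASK(t, "job_completion().\n");                  */
	/* after:  the same message, tagged with the completion timestamp */
	TRACE_TASK("example-task", "job_completion() at %llu.\n", litmus_clock());
	return 0;
}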