author	Glenn Elliott <gelliott@cs.unc.edu>	2013-05-06 15:49:14 -0400
committer	Glenn Elliott <gelliott@cs.unc.edu>	2013-05-06 15:49:14 -0400
commit	e841b93a7cb88086840f5d1ee8962ea27aa164e3 (patch)
tree	19c5b486c0f95450f8052b867ff3db6dd414fbcd
parent	16a43c267e49c6edce0c155c98a61fbd44a1c101 (diff)
error checking in ikglp
-rw-r--r--	litmus/aux_tasks.c	2
-rw-r--r--	litmus/ikglp_lock.c	44
-rw-r--r--	litmus/litmus.c	4
-rw-r--r--	litmus/sched_cedf.c	14
4 files changed, 38 insertions, 26 deletions
diff --git a/litmus/aux_tasks.c b/litmus/aux_tasks.c
index 42dee40f8b58..0d837c6bf9f8 100644
--- a/litmus/aux_tasks.c
+++ b/litmus/aux_tasks.c
@@ -513,6 +513,8 @@ static long __do_disable_aux_tasks(int flags)
 				TRACE_CUR("prior error (%d) masks new error (%d)\n", retval, temp_retval);
 			}
 		}
+
+		tsk_rt(t)->task_params.period = 0;
 	}
 
 	tsk_rt(t)->is_aux_task = 0;
diff --git a/litmus/ikglp_lock.c b/litmus/ikglp_lock.c
index c11152c05efb..d9316766c71b 100644
--- a/litmus/ikglp_lock.c
+++ b/litmus/ikglp_lock.c
@@ -93,11 +93,16 @@ static inline int ikglp_get_idx(struct ikglp_semaphore *sem,
 static inline struct fifo_queue* ikglp_get_queue(struct ikglp_semaphore *sem,
 				struct task_struct *holder)
 {
+	struct fifo_queue *fq = NULL;
 	int i;
-	for(i = 0; i < sem->nr_replicas; ++i)
-		if(sem->fifo_queues[i].owner == holder)
-			return(&sem->fifo_queues[i]);
-	return(NULL);
+	for(i = 0; i < sem->nr_replicas; ++i) {
+		if(sem->fifo_queues[i].owner == holder) {
+			fq = &sem->fifo_queues[i];
+			break;
+		}
+	}
+
+	return(fq);
 }
 
 
@@ -622,6 +627,12 @@ static void ikglp_refresh_owners_prio_increase(struct task_struct *t,
 
 //	TRACE_TASK(owner, "Heap Before:\n");
 //	print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0);
+	if (unlikely(binheap_empty(&tsk_rt(owner)->hp_blocked_tasks))) {
+		TRACE_TASK(owner, "not drawing inheritance from fq %d.\n", ikglp_get_idx(sem, fq));
+		raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
+		WARN_ON(1);
+		return;
+	}
 
 	old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
 
@@ -699,6 +710,14 @@ static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq,
 
 	raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock);
 
+	if (unlikely(binheap_empty(&tsk_rt(owner)->hp_blocked_tasks))) {
+		TRACE_TASK(owner, "not drawing inheritance from fq %d.\n", ikglp_get_idx(sem, fq));
+		raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
+		unlock_fine_irqrestore(&sem->lock, flags);
+		WARN_ON(1);
+		return;
+	}
+
 	old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
 
 	binheap_delete(&fq->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks);
@@ -1310,7 +1329,8 @@ static void ikglp_move_pq_to_fq(struct ikglp_semaphore *sem,
 }
 
 static ikglp_wait_state_t* ikglp_find_hp_waiter_to_steal(
-	struct ikglp_semaphore* sem)
+	struct ikglp_semaphore* sem,
+	struct fifo_queue* skip)
 {
 	/* must hold sem->lock */
 
@@ -1320,7 +1340,7 @@ static ikglp_wait_state_t* ikglp_find_hp_waiter_to_steal(
 	int i;
 
 	for(i = 0; i < sem->nr_replicas; ++i) {
-		if( (sem->fifo_queues[i].count > 1) &&
+		if( (sem->fifo_queues[i].count > 1) && (&sem->fifo_queues[i] != skip) &&
 		   (!fq || litmus->compare(sem->fifo_queues[i].hp_waiter, fq->hp_waiter)) ) {
 
 			TRACE_CUR("hp_waiter on fq %d (%s/%d) has higher prio than hp_waiter on fq %d (%s/%d)\n",
@@ -1614,8 +1634,8 @@ void ikglp_move_next_to_fq(struct ikglp_semaphore *sem,
 		// Select a donor
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
 		other_donor_info = (sem->aff_obs) ?
-			sem->aff_obs->ops->advise_donor_to_fq(sem->aff_obs, fq) :
-			binheap_top_entry(&sem->donors, ikglp_wait_state_t, node);
+				sem->aff_obs->ops->advise_donor_to_fq(sem->aff_obs, fq) :
+				binheap_top_entry(&sem->donors, ikglp_wait_state_t, node);
 #else
 		other_donor_info = binheap_top_entry(&sem->donors, ikglp_wait_state_t, node);
 #endif
@@ -1685,10 +1705,10 @@ void ikglp_move_next_to_fq(struct ikglp_semaphore *sem,
 
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
 		fq_wait = (sem->aff_obs) ?
 			sem->aff_obs->ops->advise_steal(sem->aff_obs, fq) :
-			ikglp_find_hp_waiter_to_steal(sem);
+			ikglp_find_hp_waiter_to_steal(sem, fq);
 #else
-		fq_wait = ikglp_find_hp_waiter_to_steal(sem);
+		fq_wait = ikglp_find_hp_waiter_to_steal(sem, fq);
 #endif
 
 		if(fq_wait) {
@@ -3326,7 +3346,7 @@ ikglp_wait_state_t* simple_gpu_ikglp_advise_steal(struct ikglp_affinity* aff,
 {
 	struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock);
 //	TRACE_CUR("Simple GPU ikglp advise_steal invoked\n");
-	return ikglp_find_hp_waiter_to_steal(sem);
+	return ikglp_find_hp_waiter_to_steal(sem, NULL);
 }
 
 ikglp_donee_heap_node_t* simple_gpu_ikglp_advise_donee_selection(struct ikglp_affinity* aff, struct task_struct* donor)
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 6c0ad0b4513a..924853c82343 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -476,9 +476,9 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
 #ifdef CONFIG_REALTIME_AUX_TASKS
 	/* also clear out the aux_data. the !restore case is only called on
 	 * fork (initial thread creation). */
-	if (!restore) {
+//	if (!restore) {
 		memset(&p->aux_data, 0, sizeof(p->aux_data));
-	}
+//	}
 #endif
 
 
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index a42c06c0ad14..b931c83579bc 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -1777,16 +1777,6 @@ static void cedf_task_exit(struct task_struct * t)
 	cedf_change_prio_pai_tasklet(t, NULL);
 #endif
 
-	/*
-BUG: t is forced to exit by another task.
-meanwhile, the scheduler selects to migrate to be scheduled
-
--- this triggers BAD BAD BAD
-
-if (current != t) and t is linked (but not scheduled?), do something.
-
-*/
-
 	/* unlink if necessary */
 	raw_readyq_lock_irqsave(&cluster->cluster_lock, flags);
 
@@ -1887,8 +1877,8 @@ static long cedf_admit_task(struct task_struct* tsk)
 	struct budget_tracker_ops* ops = NULL;
 
 	if (remote_cluster(task_cpu(tsk)) != task_cpu_cluster(tsk)) {
-		printk("rejected admit: incorrect cluster.\n");
-		return -EINVAL;
+//		printk("rejected admit: incorrect cluster.\n");
+//		return -EINVAL;
 	}
 
 	if (budget_enforced(tsk) || budget_signalled(tsk)) {