author    Glenn Elliott <gelliott@cs.unc.edu>  2012-04-27 19:52:34 -0400
committer Glenn Elliott <gelliott@cs.unc.edu>  2012-04-27 19:52:34 -0400
commit    af6eeb156c7da47ff5df03a3da04432c8ac4460c
tree      fd141359332b95dcb2260cbf3704c70c32f96e68
parent    52056e94a94517e250f7f4e36e7470a4b002404e
fix minor bugs. there is still a bug in GEDF PAI.
-rw-r--r--  include/litmus/fpmath.h |  2
-rw-r--r--  litmus/locking.c        | 40
-rw-r--r--  litmus/sched_cedf.c     | 41
3 files changed, 50 insertions, 33 deletions
diff --git a/include/litmus/fpmath.h b/include/litmus/fpmath.h
index ba4121eaa1bf..04d4bcaeae96 100644
--- a/include/litmus/fpmath.h
+++ b/include/litmus/fpmath.h
@@ -19,8 +19,10 @@ typedef struct
 
 #define _fp(x) ((fp_t) {x})
 
+#ifdef __KERNEL__
 static const fp_t LITMUS_FP_ZERO = {.val = 0};
 static const fp_t LITMUS_FP_ONE = {.val = (1 << FP_SHIFT)};
+#endif
 
 static inline fp_t FP(fpbuf_t x)
 {
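The guard added above makes the two fixed-point constants visible to kernel builds only. A minimal stand-alone sketch of the effect (not the actual header; the fp_t layout and FP_SHIFT value below are assumptions for illustration):

#include <stdio.h>

typedef struct { long val; } fp_t;	/* assumed to mirror fpmath.h's fp_t */
#define FP_SHIFT 10			/* assumed value; the real one lives in fpmath.h */

#ifdef __KERNEL__
/* only emitted for kernel builds, as in the patched header */
static const fp_t LITMUS_FP_ZERO = { .val = 0 };
static const fp_t LITMUS_FP_ONE  = { .val = (1 << FP_SHIFT) };
#endif

int main(void)
{
#ifdef __KERNEL__
	printf("kernel build: LITMUS_FP_ONE.val = %ld\n", LITMUS_FP_ONE.val);
#else
	printf("non-kernel build: the constants are not defined here\n");
#endif
	return 0;
}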
diff --git a/litmus/locking.c b/litmus/locking.c
index 48b548a61f63..cb11c04ed0d4 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -282,6 +282,8 @@ static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait)
 
 	dgl_wait->nr_remaining = dgl_wait->size;
 
+	TRACE_CUR("Locking DGL with size %d\n", dgl_wait->size);
+
 	// try to acquire each lock. enqueue (non-blocking) if it is unavailable.
 	for(i = 0; i < dgl_wait->size; ++i) {
 		struct litmus_lock *l = dgl_wait->locks[i];
@@ -339,7 +341,7 @@ static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait)
 		}
 
 		TRACE_CUR("Didn't have to suspend after all, but calling schedule() anyway.\n");
-		BUG();
+		//BUG();
 	}
 
 	raw_spin_unlock_irqrestore(dgl_lock, irqflags);
@@ -357,14 +359,14 @@ all_acquired:
 	return 0;
 }
 
-//static int supports_dgl(struct litmus_lock *l)
-//{
-//	struct litmus_lock_ops* ops = l->ops;
-//
-//	return (ops->dgl_lock &&
-//			ops->is_owner &&
-//			ops->enable_priority);
-//}
+static int supports_dgl(struct litmus_lock *l)
+{
+	struct litmus_lock_ops* ops = l->ops;
+
+	return (ops->dgl_lock &&
+			ops->is_owner &&
+			ops->enable_priority);
+}
 
 asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
 {
@@ -393,11 +395,11 @@ asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
 		struct od_table_entry *entry = get_entry_for_od(dgl_ods[i]);
 		if(entry && is_lock(entry)) {
 			dgl_wait_state.locks[i] = get_lock(entry);
-//			if(!supports_dgl(dgl_wait_state.locks[i])) {
-//				TRACE_CUR("Lock %d does not support all required DGL operations.\n",
-//						  dgl_wait_state.locks[i]->ident);
-//				goto out;
-//			}
+			if(!supports_dgl(dgl_wait_state.locks[i])) {
+				TRACE_CUR("Lock %d does not support all required DGL operations.\n",
+						  dgl_wait_state.locks[i]->ident);
+				goto out;
+			}
 		}
 		else {
 			TRACE_CUR("Invalid lock identifier\n");
@@ -468,11 +470,11 @@ asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size)
 		entry = get_entry_for_od(dgl_ods[i]);
 		if(entry && is_lock(entry)) {
 			dgl_locks[i] = get_lock(entry);
-//			if(!supports_dgl(dgl_locks[i])) {
-//				TRACE_CUR("Lock %d does not support all required DGL operations.\n",
-//						  dgl_locks[i]->ident);
-//				goto out;
-//			}
+			if(!supports_dgl(dgl_locks[i])) {
+				TRACE_CUR("Lock %d does not support all required DGL operations.\n",
+						  dgl_locks[i]->ident);
+				goto out;
+			}
 		}
 		else {
 			TRACE_CUR("Invalid lock identifier\n");
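The locking.c hunks above re-enable supports_dgl() and its callers: a lock may participate in a DGL request only if its ops table provides the dgl_lock, is_owner, and enable_priority callbacks. A stand-alone sketch of that capability test, using toy types and names rather than the LITMUS^RT definitions:

#include <stdio.h>
#include <stddef.h>

/* toy stand-ins for struct litmus_lock_ops / struct litmus_lock */
struct toy_lock_ops {
	int  (*dgl_lock)(void *lock);
	int  (*is_owner)(void *lock);
	void (*enable_priority)(void *lock);
};

struct toy_lock {
	struct toy_lock_ops *ops;
};

/* same shape as supports_dgl(): all three callbacks must be present */
static int toy_supports_dgl(struct toy_lock *l)
{
	struct toy_lock_ops *ops = l->ops;

	return (ops->dgl_lock &&
			ops->is_owner &&
			ops->enable_priority);
}

static int  dummy_dgl_lock(void *l)        { (void)l; return 0; }
static int  dummy_is_owner(void *l)        { (void)l; return 1; }
static void dummy_enable_priority(void *l) { (void)l; }

int main(void)
{
	struct toy_lock_ops dgl_capable = {
		.dgl_lock        = dummy_dgl_lock,
		.is_owner        = dummy_is_owner,
		.enable_priority = dummy_enable_priority,
	};
	struct toy_lock_ops plain = { NULL, NULL, NULL };

	struct toy_lock a = { &dgl_capable };
	struct toy_lock b = { &plain };

	printf("a supports DGL: %d\n", toy_supports_dgl(&a)); /* prints 1 */
	printf("b supports DGL: %d\n", toy_supports_dgl(&b)); /* prints 0 */
	return 0;
}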
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index e78ff0ade237..99f7620925ba 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -504,7 +504,6 @@ static void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_ta
 
 	TS_NV_SCHED_BOTISR_START;
 
-	// remove tasklet at head of list if it has higher priority.
 	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
 
 	if(cluster->pending_tasklets.head != NULL) {
@@ -512,25 +511,35 @@ static void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_ta
 		struct tasklet_struct *prev = NULL;
 		tasklet = cluster->pending_tasklets.head;
 
-		while(tasklet && edf_higher_prio(sched_task, tasklet->owner)) {
+		// Find a tasklet with sufficient priority to execute; skip those
+		// over which sched_task has higher priority.
+		// We use the '!edf' test instead of swapping the function arguments
+		// since both sched_task and owner could be NULL. In that case, we
+		// still want to execute the tasklet.
+		while(tasklet && !edf_higher_prio(tasklet->owner, sched_task)) {
 			prev = tasklet;
 			tasklet = tasklet->next;
 		}
 
-		// remove the tasklet from the queue
-		if(prev) {
-			prev->next = tasklet->next;
-			if(prev->next == NULL) {
-				TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
-				cluster->pending_tasklets.tail = &(prev);
+		if(tasklet) { // found something to execute
+			// remove the tasklet from the queue
+			if(prev) {
+				prev->next = tasklet->next;
+				if(prev->next == NULL) {
+					TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
+					cluster->pending_tasklets.tail = &(prev);
+				}
+			}
+			else {
+				cluster->pending_tasklets.head = tasklet->next;
+				if(tasklet->next == NULL) {
+					TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
+					cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head);
+				}
 			}
 		}
 		else {
-			cluster->pending_tasklets.head = tasklet->next;
-			if(tasklet->next == NULL) {
-				TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
-				cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head);
-			}
+			TRACE("%s: No tasklets with eligible priority.\n", __FUNCTION__);
 		}
 	}
 	else {
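One note on the rewritten loop condition above: for a strict "higher priority" predicate, !edf_higher_prio(tasklet->owner, sched_task) is not equivalent to edf_higher_prio(sched_task, tasklet->owner); the two disagree exactly when neither task beats the other, for example when both pointers are NULL under the rule assumed below. The toy below illustrates the difference; toy_higher_prio() and its NULL handling are assumptions, not LITMUS^RT's edf_higher_prio():

#include <stdio.h>
#include <stddef.h>

struct toy_task { int deadline; };

/* strict comparison: returns 1 iff 'a' beats 'b'; a NULL task never wins */
static int toy_higher_prio(const struct toy_task *a, const struct toy_task *b)
{
	if (!a || !b)
		return a && !b;
	return a->deadline < b->deadline;	/* earlier deadline wins */
}

int main(void)
{
	const struct toy_task *owner = NULL, *sched_task = NULL;

	/* original form of the skip test */
	printf("higher_prio(sched_task, owner)  = %d\n",
	       toy_higher_prio(sched_task, owner));	/* prints 0 */

	/* negated, argument-swapped form used by the patch */
	printf("!higher_prio(owner, sched_task) = %d\n",
	       !toy_higher_prio(owner, sched_task));	/* prints 1 */

	/* with both tasks NULL the two forms disagree, so the choice of
	 * test decides whether such a tasklet is skipped or selected */
	return 0;
}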
@@ -629,6 +638,7 @@ static void cedf_run_tasklets(struct task_struct* sched_task)
 
 static int cedf_enqueue_pai_tasklet(struct tasklet_struct* tasklet)
 {
+#if 0
 	cedf_domain_t *cluster = NULL;
 	cpu_entry_t *targetCPU = NULL;
 	int thisCPU;
@@ -709,7 +719,10 @@ static int cedf_enqueue_pai_tasklet(struct tasklet_struct* tasklet)
 	else {
 		TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__);
 	}
-
+#else
+	TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
+	__do_lit_tasklet(tasklet, 0ul);
+#endif
 	return(1); // success
 }
 
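The final two hunks compile out cedf_enqueue_pai_tasklet()'s target-CPU selection and instead run the tasklet immediately on the CPU that received it. A minimal stand-alone sketch of this #if 0 / #else fallback pattern, with hypothetical names rather than the C-EDF code:

#include <stdio.h>

static void run_locally(int work)
{
	printf("running work item %d on the local CPU\n", work);
}

/* returns 1 on success, mirroring the return(1) convention above */
static int enqueue_work(int work)
{
#if 0
	/* remote-dispatch path, skipped by the preprocessor while it is
	 * being debugged; nothing in this block has to be declared */
	dispatch_to_best_cpu(work);
#else
	/* simple fallback: just do the work where we are */
	run_locally(work);
#endif
	return 1;
}

int main(void)
{
	return enqueue_work(42) ? 0 : 1;
}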