author	Tanya Amert <tamert@cs.unc.edu>	2020-10-26 20:29:47 -0400
committer	Tanya Amert <tamert@cs.unc.edu>	2020-10-26 20:29:47 -0400
commit	307eb923ed6ea1c1b98e73c7c948fbacc044ca71 (patch)
tree	8737ed05c06630ba66d367845c7ef520a4501b3e
parent	9e0f6ba040ab9f9a6d1e2b1dbd0beca8ef4754cc (diff)
Made global OMLP non-preemptive between lock and unlock calls.
Added support for non-preemptivity in EXT-RES.
-rw-r--r--	litmus/reservations/ext_reservation.c	6
-rw-r--r--	litmus/reservations/gedf_reservation.c	19
2 files changed, 21 insertions, 4 deletions
diff --git a/litmus/reservations/ext_reservation.c b/litmus/reservations/ext_reservation.c
index e6685fca20ea..6b4889c40bca 100644
--- a/litmus/reservations/ext_reservation.c
+++ b/litmus/reservations/ext_reservation.c
@@ -24,6 +24,12 @@ int higher_res_prio(struct reservation* first,
 
 #ifdef CONFIG_LITMUS_LOCKING
 
+	// SOMEWHAT HACKY -- guarantee that a non-preemptive reservation
+	// is higher-priority than any other; fall back to regular
+	// priority-checking if both are non-preemptive or both are preemptive
+	if (first->ops->is_np(first, -1) != second->ops->is_np(second, -1))
+		return first->ops->is_np(first, -1);
+
 	/* Check for inherited priorities. Change reservation
 	 * used for comparison in such a case.
 	 */
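
The hunk above makes higher_res_prio() treat any non-preemptive reservation as outranking any preemptive one, falling back to the normal comparison when both sides agree. A minimal userspace sketch of that boost-then-compare pattern follows; the res type, is_np() helper, and prio field are invented for illustration, not the kernel's actual definitions.

    /*
     * Minimal sketch of the boost-then-compare pattern in
     * higher_res_prio() above; types and names are placeholders.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct res {
            bool np;        /* stands in for ops->is_np(res, -1) */
            unsigned prio;  /* lower value = higher priority, EDF-style */
    };

    static bool is_np(const struct res *r) { return r->np; }

    /* returns true if a should run in preference to b */
    static bool higher_prio(const struct res *a, const struct res *b)
    {
            /* exactly one side non-preemptive: it wins outright */
            if (is_np(a) != is_np(b))
                    return is_np(a);
            /* both np, or both preemptive: fall back to normal ordering */
            return a->prio < b->prio;
    }

    int main(void)
    {
            struct res holder = { .np = true,  .prio = 9 };
            struct res urgent = { .np = false, .prio = 1 };
            /* the np lock holder beats a nominally higher-priority job */
            printf("%d\n", higher_prio(&holder, &urgent)); /* prints 1 */
            return 0;
    }
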
diff --git a/litmus/reservations/gedf_reservation.c b/litmus/reservations/gedf_reservation.c
index deb0c9e6e3ef..ef9751542d0a 100644
--- a/litmus/reservations/gedf_reservation.c
+++ b/litmus/reservations/gedf_reservation.c
@@ -438,10 +438,10 @@ static void gedf_env_remove_res(
 	gedf_env = container_of(env, struct gedf_reservation_environment, env);
 	gedf_res = container_of(res, struct gedf_reservation, res);
 
+	raw_spin_lock_irqsave(&gedf_env->domain.ready_lock, flags);
 	gedf_res->will_remove = complete;
 	gedf_res->blocked = !complete;
 
-	raw_spin_lock_irqsave(&gedf_env->domain.ready_lock, flags);
 	list_del_init(&gedf_res->res.all_list);
 	unlink(gedf_env, gedf_res);
 	check_for_preemptions(gedf_env);
@@ -469,10 +469,11 @@ static void gedf_env_add_res(
 	gedf_res = container_of(res, struct gedf_reservation, res);
 
 	res->par_env = env;
+
+	raw_spin_lock_irqsave(&gedf_env->domain.ready_lock, flags);
 	gedf_res->will_remove = 0;
 	gedf_res->blocked = 0;
 
-	raw_spin_lock_irqsave(&gedf_env->domain.ready_lock, flags);
 	list_add_tail(&gedf_res->res.all_list, &env->all_reservations);
 	requeue(gedf_env, gedf_res);
 	check_for_preemptions(gedf_env);
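
Both hunks above widen the ready_lock critical section so that will_remove and blocked are written while the lock is held, rather than just before the list manipulation; presumably this keeps the dispatch path, which reads blocked under the same lock, from observing a half-updated state. A rough userspace sketch of the ordering, using invented env_state/remove_res/should_preempt names (compile with -pthread):

    /*
     * Sketch of the lock-before-write ordering; only the ordering
     * mirrors the diff, the names are invented.
     */
    #include <pthread.h>
    #include <stdbool.h>

    struct env_state {
            pthread_mutex_t ready_lock;
            bool will_remove;
            bool blocked;
    };

    /* writer: shape of gedf_env_remove_res() after the patch */
    static void remove_res(struct env_state *s, bool complete)
    {
            pthread_mutex_lock(&s->ready_lock); /* taken BEFORE the flag writes */
            s->will_remove = complete;
            s->blocked = !complete;
            /* ... list removal, unlink, check_for_preemptions ... */
            pthread_mutex_unlock(&s->ready_lock);
    }

    /* reader: a dispatcher consulting the same flags under the lock */
    static bool should_preempt(struct env_state *s)
    {
            pthread_mutex_lock(&s->ready_lock);
            bool block = s->blocked; /* old or new state, never a torn mix */
            pthread_mutex_unlock(&s->ready_lock);
            return block;
    }

    int main(void)
    {
            struct env_state s = { .ready_lock = PTHREAD_MUTEX_INITIALIZER };
            remove_res(&s, false);             /* blocked-removal path */
            return should_preempt(&s) ? 0 : 1;
    }
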
@@ -529,7 +530,7 @@ static void gedf_env_suspend(
 		goto unlock;
 
 	//TODO: More Graceful way to handle forbidden zone violation?
-	BUG_ON(env->ops->is_np(env, cpu));
+	// BUG_ON(env->ops->is_np(env, cpu));
 
 	gedf_env->num_cpus--;
 	/* on env suspension, we need to preempt scheduled tasks, and unlink linked tasks */
@@ -642,6 +643,7 @@ static struct task_struct* gedf_env_dispatch(
 	struct task_struct* next = NULL;
 	unsigned long flags;
 	int np = 0;
+	int block = 0;
 
 	gedf_env = container_of(env, struct gedf_reservation_environment, env);
 	entry = &gedf_env->cpu_entries[cpu];
@@ -656,6 +658,9 @@ static struct task_struct* gedf_env_dispatch(
 	BUG_ON(!bheap_node_in_heap(entry->hn));
 	BUG_ON(bheap_empty(&gedf_env->cpu_heap));
 
+	if (entry->scheduled)
+		block = entry->scheduled->blocked;
+
 	/* update linked if linked for this cpu is empty */
 	if (!entry->linked)
 		check_for_preemptions(gedf_env);
@@ -663,7 +668,7 @@ static struct task_struct* gedf_env_dispatch(
 	BUG_ON(!entry->linked && __peek_ready_res(&gedf_env->domain));
 
 	/* if linked and scheduled differ, preempt and schedule accordingly */
-	if (!np && entry->scheduled != entry->linked) {
+	if ((!np || block) && entry->scheduled != entry->linked) {
 		if (entry->scheduled) {
 			if (entry->scheduled->res.ops->on_preempt)
 				entry->scheduled->res.ops->on_preempt(&entry->scheduled->res, cpu);
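
The dispatch changes read the scheduled reservation's blocked flag and relax the non-preemptive check: a non-preemptive reservation still pins its CPU, unless it has blocked (e.g., suspended waiting in the OMLP queue), in which case the CPU must be allowed to switch away. A compact model of that decision, with may_switch() as an invented stand-in for the condition in gedf_env_dispatch():

    #include <assert.h>
    #include <stdbool.h>

    /* np mirrors ops->is_np(); block mirrors entry->scheduled->blocked */
    static bool may_switch(bool np, bool block, bool linked_differs)
    {
            return (!np || block) && linked_differs;
    }

    int main(void)
    {
            assert(!may_switch(true,  false, true)); /* np and running: pinned */
            assert( may_switch(true,  true,  true)); /* np but blocked: yields */
            assert( may_switch(false, false, true)); /* ordinary preemption    */
            return 0;
    }
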
@@ -1061,6 +1066,9 @@ int gedf_env_omlp_lock(struct litmus_lock* l)
 		spin_unlock_irqrestore(&sem->fifo_wait.lock, flags);
 	}
 
+	// Make the task non-preemptive until the lock is released
+	make_np(t);
+
 	tsk_rt(t)->num_locks_held++;
 
 	return 0;
@@ -1150,6 +1158,9 @@ static int gedf_env_omlp_unlock(struct litmus_lock* l)
 
 	tsk_rt(t)->num_locks_held--;
 
+	// Make the task preemptive again
+	take_np(t);
+
 	/* check if there are jobs waiting for this resource */
 	next = omlp_dequeue(sem);
 	if (next) {
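
Taken together, the two OMLP hunks bracket the critical section: make_np(t) after the lock is acquired, take_np(t) before the handoff on unlock. The sketch below models the pairing with a plain nesting counter and invented omlp_lock()/omlp_unlock() wrappers; treating make_np()/take_np() as counter operations is an assumption about the LITMUS^RT helpers, not a quote of the kernel code.

    /*
     * Shape of the critical-section protocol after this commit.
     * make_np()/take_np() stand in for the LITMUS^RT helpers,
     * modeled as a nesting counter (an assumption).
     */
    #include <assert.h>

    struct task { int np_count; };

    static void make_np(struct task *t) { t->np_count++; }
    static void take_np(struct task *t) { assert(t->np_count > 0); t->np_count--; }
    static int  is_np(const struct task *t) { return t->np_count > 0; }

    /* invented wrappers showing where the calls land in lock/unlock */
    static void omlp_lock(struct task *t)
    {
            /* ... enqueue on the OMLP FIFO/priority queue, maybe suspend ... */
            make_np(t);  /* no preemption while holding the resource */
    }

    static void omlp_unlock(struct task *t)
    {
            take_np(t);  /* drop the boost before waking the next holder */
            /* ... dequeue the next waiter and wake it ... */
    }

    int main(void)
    {
            struct task t = { 0 };
            omlp_lock(&t);
            assert(is_np(&t));   /* preemptions deferred inside the section */
            omlp_unlock(&t);
            assert(!is_np(&t));
            return 0;
    }
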