author		Tanya Amert <tamert@cs.unc.edu>	2020-10-14 21:00:37 -0400
committer	Tanya Amert <tamert@cs.unc.edu>	2020-10-14 21:00:37 -0400
commit		543a6532f42d9d085967dd4855a65a099438dd1c (patch)
tree		a9696c6a0e7a11e63f52ceff21511608135b2534
parent		27326fafd31f4f0774db86672dd852a90e7ea9b7 (diff)
Added logic to set_priority_inheritance.
Remaining work for global OMLP is just to add a check for inh_res in higher_res_prio.
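For context, a minimal sketch of what that remaining check could look like is given below. It is not part of this commit: the simplified struct res type, its deadline field, and the standalone helper signatures are assumptions made for illustration only; the actual litmus-rt reservation types and comparison code may differ.

/*
 * Hedged sketch only, NOT code from this commit or from litmus-rt:
 * an EDF-style priority comparison that substitutes the inherited
 * reservation (inh_res) when one is set, in the spirit of the check
 * the commit message says still needs to be added to higher_res_prio.
 * The struct layout is a simplification invented for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

struct res {
	uint64_t	deadline;	/* absolute EDF deadline */
	struct res	*inh_res;	/* inherited reservation, or NULL */
};

/* Compare by the inherited reservation's parameters when inheritance is active. */
static inline const struct res *effective_res(const struct res *r)
{
	return (r && r->inh_res) ? r->inh_res : r;
}

/* Returns true if 'a' has higher priority (earlier effective deadline) than 'b'. */
static inline bool higher_res_prio(const struct res *a, const struct res *b)
{
	a = effective_res(a);
	b = effective_res(b);

	if (!a)
		return false;	/* nothing to schedule */
	if (!b)
		return true;	/* anything beats an empty slot */

	return a->deadline < b->deadline;
}

With a check along these lines, the inh_res assignment made in the hunk below would be reflected wherever reservations are compared, e.g. in the ready-queue ordering and the preemption checks.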
-rw-r--r--	litmus/reservations/gedf_reservation.c	55
1 file changed, 50 insertions(+), 5 deletions(-)
diff --git a/litmus/reservations/gedf_reservation.c b/litmus/reservations/gedf_reservation.c
index dc30ea0395d1..416ef050fee8 100644
--- a/litmus/reservations/gedf_reservation.c
+++ b/litmus/reservations/gedf_reservation.c
@@ -725,12 +725,60 @@ static void set_priority_inheritance(struct task_struct* t, struct task_struct*
 	struct gedf_reservation_environment *gedf_env;
 	gedf_env = container_of(env, struct gedf_reservation_environment, env);
 
+	struct gedf_reservation *gedf_res;
+	gedf_res = container_of(t_res, struct gedf_reservation, res);
+
 	raw_spin_lock(&gedf_env->domain.ready_lock);
 
 	TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
 	t_res->inh_res = prio_inh_res;
 
-	// TODO tamert: actually try and handle the change to the priority
+	linked_on = gedf_res->linked_on;
+
+	/* If it is scheduled, then we need to reorder the CPU heap. */
+	if (linked_on) {
+		TRACE_TASK(t, "%s: linked on %d\n",
+			   __FUNCTION__, linked_on->id);
+		/* holder is scheduled; need to re-order CPUs. */
+		update_cpu_position(gedf_res->linked_on, &gedf_env->cpu_heap);
+	} else {
+		/* holder may be queued: first stop queue changes */
+		raw_spin_lock(&gedf_env->domain.release_lock);
+		if (is_queued_res(t_res)) {
+			TRACE_TASK(t, "%s: is queued\n",
+				   __FUNCTION__);
+			/* We need to update the position of holder in some
+			 * heap. Note that this could be a release heap if
+			 * budget enforcement is used and this job overran. */
+			check_preempt =
+				!bheap_decrease(edf_ready_order,
+						t_res->heap_node);
+		} else {
+			/* Nothing to do: if it is not queued and not linked
+			 * then it is either sleeping or currently being moved
+			 * by other code (e.g., a timer interrupt handler) that
+			 * will use the correct priority when enqueuing the
+			 * task. */
+			TRACE_TASK(t, "%s: is NOT queued => Done.\n",
+				   __FUNCTION__);
+		}
+		raw_spin_unlock(&gedf_env->domain.release_lock);
+
+		/* If holder was enqueued in a release heap, then the following
+		 * preemption check is pointless, but we can't easily detect
+		 * that case. If you want to fix this, then consider that
+		 * simply adding a state flag requires O(n) time to update when
+		 * releasing n tasks, which conflicts with the goal to have
+		 * O(log n) merges. */
+		if (check_preempt) {
+			/* heap_decrease() hit the top level of the heap: make
+			 * sure preemption checks get the right task, not the
+			 * potentially stale cache. */
+			bheap_uncache_min(edf_ready_order,
+					  &gedf_env->domain.ready_queue);
+			check_for_preemptions(gedf_env);
+		}
+	}
 
 	raw_spin_unlock(&gedf_env->domain.ready_lock);
 }
@@ -909,9 +957,6 @@ int gedf_env_omlp_lock(struct litmus_lock* l)
 	struct reservation *t_res = (struct reservation *) tsk_rt(t)->plugin_state;
 
 	struct gedf_reservation *gedf_res = container_of(t_res, struct gedf_reservation, res);
-	BUG_ON(!gedf_res->linked_on && !bheap_node_in_heap(t_res->heap_node));
-	BUG_ON(!gedf_res->res.cur_budget);
-	BUG_ON(gedf_res->res.replenishment_time > litmus_clock());
 
 	spin_lock_irqsave(&sem->fifo_wait.lock, flags);
 
@@ -943,7 +988,7 @@ int gedf_env_omlp_lock(struct litmus_lock* l)
 	BUG_ON(!gedf_res->linked_on && !bheap_node_in_heap(t_res->heap_node));
 
 	schedule(); // will have issues if the reservation
-	            // is not linked or on the ready queue (wtf?!)
+	            // is not linked or on the ready queue
 
 	TS_LOCK_RESUME;
 