aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTanya Amert <tamert@cs.unc.edu>2020-10-12 20:35:13 -0400
committerTanya Amert <tamert@cs.unc.edu>2020-10-12 20:35:13 -0400
commit8e23121fca5a987c1e8fbeb941cafa0fbcdc7610 (patch)
treef1f5d6e960e2cf163f62c730a9545272fb2c6df4
parent4f6266335535e47a82a6d9b1d4b0472819bbcab3 (diff)
Added lock/unlock logic to global OMLP for EXT-RES, still needs priority inheritance.
-rw-r--r--litmus/reservations/gedf_reservation.c110
1 files changed, 110 insertions, 0 deletions
diff --git a/litmus/reservations/gedf_reservation.c b/litmus/reservations/gedf_reservation.c
index 5f7c97eb980c..6d713d95131b 100644
--- a/litmus/reservations/gedf_reservation.c
+++ b/litmus/reservations/gedf_reservation.c
@@ -11,6 +11,7 @@
11#include <litmus/np.h> 11#include <litmus/np.h>
12#include <litmus/sched_trace.h> 12#include <litmus/sched_trace.h>
13#include <litmus/debug_trace.h> 13#include <litmus/debug_trace.h>
14#include <litmus/trace.h>
14#include <litmus/reservations/gedf_reservation.h> 15#include <litmus/reservations/gedf_reservation.h>
15 16
16// Needed to store context during cross-CPU function calls 17// Needed to store context during cross-CPU function calls
@@ -719,6 +720,43 @@ static inline struct omlp_semaphore* omlp_from_lock(struct litmus_lock* lock)
719 return container_of(lock, struct omlp_semaphore, litmus_lock); 720 return container_of(lock, struct omlp_semaphore, litmus_lock);
720} 721}
721 722
723/* already locked */
724static void omlp_enqueue(struct omlp_semaphore *sem, prio_wait_queue_t* wait)
725{
726 if (sem->num_free) {
727 /* there is space in the FIFO queue */
728 sem->num_free--;
729 __add_wait_queue_tail_exclusive(&sem->fifo_wait, &wait->wq);
730 } else {
731 /* nope, gotta go to the priority queue */
732 __add_wait_queue_prio_exclusive(&sem->prio_wait, wait);
733 }
734}
735
736/* already locked */
737static int omlp_move(struct omlp_semaphore *sem)
738{
739 struct list_head* first;
740
741 if (waitqueue_active(&sem->prio_wait)) {
742 first = sem->prio_wait.task_list.next;
743 list_move_tail(first, &sem->fifo_wait.task_list);
744 return 1;
745 }
746 else
747 return 0;
748}
749
750static struct task_struct* omlp_dequeue(struct omlp_semaphore *sem)
751{
752 struct task_struct* first = __waitqueue_remove_first(&sem->fifo_wait);
753
754 if (first && !omlp_move(sem))
755 sem->num_free++;
756
757 return first;
758}
759
722int gedf_env_omlp_lock(struct litmus_lock* l) 760int gedf_env_omlp_lock(struct litmus_lock* l)
723{ 761{
724 struct task_struct* t = current; 762 struct task_struct* t = current;
@@ -726,6 +764,51 @@ int gedf_env_omlp_lock(struct litmus_lock* l)
726 prio_wait_queue_t wait; 764 prio_wait_queue_t wait;
727 unsigned long flags; 765 unsigned long flags;
728 766
767 if (!is_realtime(t))
768 return -EPERM;
769
770 /* prevent nested lock acquisition --- not supported by global OMLP
771 by default */
772 if (tsk_rt(t)->num_locks_held)
773 return -EBUSY;
774
775 struct reservation *t_res = (struct reservation *) tsk_rt(t)->plugin_state;
776
777 spin_lock_irqsave(&sem->fifo_wait.lock, flags);
778
779 if (sem->owner) {
780 /* resource is not free => must suspend and wait */
781
782 init_prio_waitqueue_entry(&wait, t, t_res->priority);
783
784 set_task_state(t, TASK_UNINTERRUPTIBLE);
785
786 omlp_enqueue(sem, &wait);
787
788 // TODO tamert: add priority inheritance
789
790 TS_LOCK_SUSPEND;
791
792 /* release lock before sleeping */
793 spin_unlock_irqrestore(&sem->fifo_wait.lock, flags);
794
795 schedule();
796
797 TS_LOCK_RESUME;
798
799 /* Since we hold the lock, no other task will change
800 * ->owner. We can thus check it without acquiring the spin
801 * lock. */
802 BUG_ON(sem->owner != t);
803 } else {
804 /* it's ours now */
805 sem->owner = t;
806
807 spin_unlock_irqrestore(&sem->fifo_wait.lock, flags);
808 }
809
810 tsk_rt(t)->num_locks_held++;
811
729 return 0; 812 return 0;
730} 813}
731 814
@@ -736,6 +819,33 @@ static int gedf_env_omlp_unlock(struct litmus_lock* l)
736 unsigned long flags; 819 unsigned long flags;
737 int err = 0; 820 int err = 0;
738 821
822 spin_lock_irqsave(&sem->fifo_wait.lock, flags);
823
824 if (sem->owner != t) {
825 err = -EINVAL;
826 goto out;
827 }
828
829 tsk_rt(t)->num_locks_held--;
830
831 /* check if there are jobs waiting for this resource */
832 next = omlp_dequeue(sem);
833 if (next) {
834 /* next becomes the resource holder */
835 sem->owner = next;
836 TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid);
837
838 // TODO tamert: add priority inheritance
839
840 /* wake up next */
841 wake_up_process(next);
842 } else
843 /* becomes available */
844 sem->owner = NULL;
845
846out:
847 spin_unlock_irqrestore(&sem->fifo_wait.lock, flags);
848
739 return err; 849 return err;
740} 850}
741 851