-rw-r--r--	litmus/sched_psn_edf.c	264
1 files changed, 262 insertions, 2 deletions
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 178ca0b33b04..2e5b9c9d75d7 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -540,6 +540,251 @@ static struct litmus_lock* psnedf_new_fmlp(void)
 	return &sem->litmus_lock;
 }
 
+
+
+/* ******************** OMLP support **********************/
+
+/* Since jobs spin "virtually" while waiting to acquire a lock,
+ * they first must acquire a local per-cpu resource.
+ */
+static DEFINE_PER_CPU(wait_queue_head_t, omlp_token_wait);
+static DEFINE_PER_CPU(struct task_struct*, omlp_token);
+
+/* called with preemptions off <=> no local modifications */
+static void omlp_grab_token(void)
+{
+	struct task_struct* t = current;
+
+	while (1) {
+		if (__get_cpu_var(omlp_token) == NULL) {
+			/* take it */
+			__get_cpu_var(omlp_token) = t;
+			break;
+		} else {
+			/* some job is spinning => enqueue in request queue */
+			prio_wait_queue_t wait;
+			wait_queue_head_t* token_waiters = &__get_cpu_var(omlp_token_wait);
+			unsigned long flags;
+
+			/* ordered by regular priority; ties broken by lower PID */
+			init_prio_waitqueue_entry_tie(&wait, t, get_deadline(t), t->pid);
+
+			spin_lock_irqsave(&token_waiters->lock, flags);
+
+			set_task_state(t, TASK_UNINTERRUPTIBLE);
+
+			__add_wait_queue_prio_exclusive(token_waiters, &wait);
+
+			TRACE_CUR("waiting for OMLP token\n");
+
+			spin_unlock_irqrestore(&token_waiters->lock, flags);
+
+			TS_LOCK_SUSPEND;
+
+			preempt_enable_no_resched();
+
+			schedule();
+
+			preempt_disable();
+
+			TS_LOCK_RESUME;
+			/* recheck whether we got the token */
+		}
+	}
+	/* ok, now it is ours */
+	TRACE_CUR("got OMLP token\n");
+}
+
+/* called with preemptions off */
+static void omlp_release_token(void)
+{
+	struct task_struct* t = current, *next;
+	unsigned long flags;
+	wait_queue_head_t* token_waiters = &__get_cpu_var(omlp_token_wait);
+
+	BUG_ON(__get_cpu_var(omlp_token) != t);
+
+	__get_cpu_var(omlp_token) = NULL;
+
+	TRACE_CUR("released OMLP token\n");
+
+	spin_lock_irqsave(&token_waiters->lock, flags);
+	next = __waitqueue_remove_first(token_waiters);
+
+	if (next)
+		wake_up_process(next);
+
+	spin_unlock_irqrestore(&token_waiters->lock, flags);
+}
+
+
+struct omlp_semaphore {
+	struct litmus_lock litmus_lock;
+
+	/* current resource holder */
+	struct task_struct *owner;
+
+	/* FIFO queue of waiting tasks */
+	wait_queue_head_t wait;
+};
+
+static inline struct omlp_semaphore* omlp_from_lock(struct litmus_lock* lock)
+{
+	return container_of(lock, struct omlp_semaphore, litmus_lock);
+}
+int psnedf_omlp_lock(struct litmus_lock* l)
+{
+	struct task_struct* t = current;
+	struct omlp_semaphore *sem = omlp_from_lock(l);
+	wait_queue_t wait;
+	unsigned long flags;
+
+	if (!is_realtime(t))
+		return -EPERM;
+
+	preempt_disable();
+
+	omlp_grab_token();
+
+	/* Priority-boost ourself *before* we suspend so that
+	 * our priority is boosted when we resume. */
+	boost_priority(t);
+
+	spin_lock_irqsave(&sem->wait.lock, flags);
+
+	if (sem->owner) {
+		/* resource is not free => must suspend and wait */
+
+		init_waitqueue_entry(&wait, t);
+
+		/* FIXME: interruptible would be nice some day */
+		set_task_state(t, TASK_UNINTERRUPTIBLE);
+
+		__add_wait_queue_tail_exclusive(&sem->wait, &wait);
+
+		/* release lock before sleeping */
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+		/* We depend on the FIFO order. Thus, we don't need to recheck
+		 * when we wake up; we are guaranteed to have the lock since
+		 * there is only one wake up per release.
+		 */
+		TS_LOCK_SUSPEND;
+
+		preempt_enable_no_resched();
+
+		schedule();
+
+		preempt_disable();
+
+		TS_LOCK_RESUME;
+
+		/* Since we hold the lock, no other task will change
+		 * ->owner. We can thus check it without acquiring the spin
+		 * lock. */
+		BUG_ON(sem->owner != t);
+	} else {
+		/* it's ours now */
+		sem->owner = t;
+
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
+	}
+
+	preempt_enable();
+
+	return 0;
+}
+
+int psnedf_omlp_unlock(struct litmus_lock* l)
+{
+	struct task_struct *t = current, *next;
+	struct omlp_semaphore *sem = omlp_from_lock(l);
+	unsigned long flags;
+	int err = 0;
+
+	preempt_disable();
+
+	spin_lock_irqsave(&sem->wait.lock, flags);
+
+	if (sem->owner != t) {
+		err = -EINVAL;
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
+		goto out;
+	}
+
+	/* we lose the benefit of priority boosting */
+
+	unboost_priority(t);
+
+	/* check if there are jobs waiting for this resource */
+	next = __waitqueue_remove_first(&sem->wait);
+	if (next) {
+		/* next becomes the resource holder */
+		sem->owner = next;
+
+		/* Wake up next. The waiting job is already priority-boosted. */
+		wake_up_process(next);
+	} else
+		/* resource becomes available */
+		sem->owner = NULL;
+
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+	omlp_release_token();
+
+out:
+	preempt_enable();
+	return err;
+}
+
+int psnedf_omlp_close(struct litmus_lock* l)
+{
+	struct task_struct *t = current;
+	struct omlp_semaphore *sem = omlp_from_lock(l);
+	unsigned long flags;
+
+	int owner;
+
+	spin_lock_irqsave(&sem->wait.lock, flags);
+
+	owner = sem->owner == t;
+
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+	if (owner)
+		psnedf_omlp_unlock(l);
+
+	return 0;
+}
+
+void psnedf_omlp_free(struct litmus_lock* lock)
+{
+	kfree(omlp_from_lock(lock));
+}
+
+static struct litmus_lock_ops psnedf_omlp_lock_ops = {
+	.close = psnedf_omlp_close,
+	.lock = psnedf_omlp_lock,
+	.unlock = psnedf_omlp_unlock,
+	.deallocate = psnedf_omlp_free,
+};
+
+static struct litmus_lock* psnedf_new_omlp(void)
+{
+	struct omlp_semaphore* sem;
+
+	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
+	if (!sem)
+		return NULL;
+
+	sem->owner = NULL;
+	init_waitqueue_head(&sem->wait);
+	sem->litmus_lock.ops = &psnedf_omlp_lock_ops;
+
+	return &sem->litmus_lock;
+}
+
+
 /* **** lock constructor **** */
 
 
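
[Editor's note] The hunk above implements a two-stage acquisition: a job first claims a per-CPU token (local contention is resolved by deadline, ties broken by PID) and only then competes in the global FIFO queue of the semaphore; release happens in the reverse order. The userspace sketch below mirrors only that nesting with plain POSIX mutexes. It is an illustration, not part of the patch, and the identifiers omlp_style_lock, cpu_token, and resource are invented for the example.

/* Illustration only: the token/FIFO nesting of the OMLP code above,
 * expressed with POSIX mutexes.  The deadline-ordered per-CPU token
 * queue and priority boosting of the real protocol are not modeled. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cpu_token = PTHREAD_MUTEX_INITIALIZER; /* stands in for omlp_grab_token()   */
static pthread_mutex_t resource  = PTHREAD_MUTEX_INITIALIZER; /* stands in for the sem->wait queue */

static void omlp_style_lock(void)
{
	pthread_mutex_lock(&cpu_token);   /* first stage: local token   */
	pthread_mutex_lock(&resource);    /* second stage: global FIFO  */
}

static void omlp_style_unlock(void)
{
	pthread_mutex_unlock(&resource);  /* hand off the resource ...  */
	pthread_mutex_unlock(&cpu_token); /* ... then release the token */
}

int main(void)
{
	omlp_style_lock();
	printf("in critical section\n");
	omlp_style_unlock();
	return 0;
}
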
@@ -561,6 +806,15 @@ static long psnedf_allocate_lock(struct litmus_lock **lock, int type,
 			err = -ENOMEM;
 		break;
 
+	case OMLP_SEM:
+		/* O(m) Locking Protocol */
+		*lock = psnedf_new_omlp();
+		if (*lock)
+			err = 0;
+		else
+			err = -ENOMEM;
+		break;
+
 	case SRP_SEM:
 		/* Baker's Stack Resource Policy */
 		srp = allocate_srp_semaphore();
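
[Editor's note] The new OMLP_SEM case is reached through the generic LITMUS^RT lock-allocation path, so a real-time task obtains one of these semaphores the same way it would an FMLP semaphore. The sketch below outlines the expected userspace flow via liblitmus; the exact helpers and whether the installed liblitmus headers define OMLP_SEM depend on the library version paired with this patch, so treat it as an assumption-laden outline rather than a verified program.

/* Hedged sketch of userspace usage via liblitmus (assumes a liblitmus
 * version whose headers expose OMLP_SEM to match this kernel patch). */
#include <fcntl.h>
#include <litmus.h>

int use_omlp_sem(void)
{
	int fd, od, ret;

	/* shared namespace file through which tasks agree on lock IDs */
	fd = open("semaphores", O_RDONLY | O_CREAT, 0666);
	if (fd < 0)
		return -1;

	/* hits the OMLP_SEM case in psnedf_allocate_lock() above */
	od = od_open(fd, OMLP_SEM, 0);
	if (od < 0)
		return -1;

	ret = litmus_lock(od);            /* kernel side: psnedf_omlp_lock()   */
	if (ret == 0) {
		/* ... critical section ... */
		ret = litmus_unlock(od);  /* kernel side: psnedf_omlp_unlock() */
	}

	od_close(od);
	return ret;
}
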
@@ -580,13 +834,19 @@ static long psnedf_allocate_lock(struct litmus_lock **lock, int type,
 
 static long psnedf_activate_plugin(void)
 {
-#ifdef CONFIG_RELEASE_MASTER
+
 	int cpu;
 
 	for_each_online_cpu(cpu) {
+#ifdef CONFIG_RELEASE_MASTER
 		remote_edf(cpu)->release_master = atomic_read(&release_master_cpu);
-	}
 #endif
+#ifdef CONFIG_LITMUS_LOCKING
+		init_waitqueue_head(&per_cpu(omlp_token_wait, cpu));
+		per_cpu(omlp_token, cpu) = NULL;
+#endif
+	}
+
 
 #ifdef CONFIG_LITMUS_LOCKING
 	get_srp_prio = psnedf_get_srp_prio;
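
[Editor's note] The activate-plugin hunk folds the OMLP per-CPU state into the existing per-CPU loop. For reference, here is a minimal stand-alone module sketch of that general pattern (DEFINE_PER_CPU plus for_each_online_cpu initialization); the identifiers demo_wait and demo_owner are invented for the example and nothing here is part of the patch.

/* Minimal, self-contained sketch of the per-CPU initialization pattern
 * used in psnedf_activate_plugin(); for illustration only. */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DEFINE_PER_CPU(wait_queue_head_t, demo_wait);
static DEFINE_PER_CPU(struct task_struct *, demo_owner);

static int __init percpu_demo_init(void)
{
	int cpu;

	/* initialize every online CPU's instance, as the plugin does */
	for_each_online_cpu(cpu) {
		init_waitqueue_head(&per_cpu(demo_wait, cpu));
		per_cpu(demo_owner, cpu) = NULL;
	}
	return 0;
}

static void __exit percpu_demo_exit(void)
{
}

module_init(percpu_demo_init);
module_exit(percpu_demo_exit);
MODULE_LICENSE("GPL");
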