author     Bjoern B. Brandenburg <bbb@cs.unc.edu>  2011-07-27 00:39:01 -0400
committer  Bjoern B. Brandenburg <bbb@cs.unc.edu>  2011-07-27 00:39:01 -0400
commit     76f8ba0ab085c5f5c4121462a0a237e205c3e51c (patch)
tree       52065af9ac1a04b4902fa304fd1e9f25659b2d65
parent     e7b660efe777acd863442e5d863d2f681db353ff (diff)
fixup MPCP-VS: don't priority-boost until after virtual spinning is over

Otherwise the job already runs at boosted priority while it virtually
spins, and there is no benefit to the virtual spinning...
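The net effect in pfp_mpcp_lock() is to swap two steps; condensed from
the last hunk below (before vs. after):

	/* before: boosted first, so the job virtually spins at the
	 * boosted ceiling priority */
	boost_priority(t, sem->prio_ceiling[get_partition(t)]);
	if (sem->vspin)
		mpcp_vspin_enter();

	/* after: the job virtually spins at its regular priority and is
	 * boosted only once its request is actually issued */
	if (sem->vspin)
		mpcp_vspin_enter();
	boost_priority(t, sem->prio_ceiling[get_partition(t)]);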
-rw-r--r--  litmus/sched_pfp.c | 66
1 file changed, 32 insertions(+), 34 deletions(-)
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
index f5796f3607f4..74a77e7a4959 100644
--- a/litmus/sched_pfp.c
+++ b/litmus/sched_pfp.c
@@ -690,39 +690,42 @@ static void mpcp_vspin_enter(void)
 {
 	struct task_struct* t = current;
 
-	if (__get_cpu_var(mpcpvs_vspin) == NULL) {
-		/* good, we get to issue our request */
-		__get_cpu_var(mpcpvs_vspin) = t;
-	} else {
-		/* some job is spinning => enqueue in request queue */
-		prio_wait_queue_t wait;
-		wait_queue_head_t* vspin = &__get_cpu_var(mpcpvs_vspin_wait);
-		unsigned long flags;
+	while (1) {
+		if (__get_cpu_var(mpcpvs_vspin) == NULL) {
+			/* good, we get to issue our request */
+			__get_cpu_var(mpcpvs_vspin) = t;
+			break;
+		} else {
+			/* some job is spinning => enqueue in request queue */
+			prio_wait_queue_t wait;
+			wait_queue_head_t* vspin = &__get_cpu_var(mpcpvs_vspin_wait);
+			unsigned long flags;
 
-		/* ordered by regular priority */
-		init_prio_waitqueue_entry(&wait, t, prio_point(get_priority(t)));
+			/* ordered by regular priority */
+			init_prio_waitqueue_entry(&wait, t, prio_point(get_priority(t)));
 
-		spin_lock_irqsave(&vspin->lock, flags);
+			spin_lock_irqsave(&vspin->lock, flags);
 
-		set_task_state(t, TASK_UNINTERRUPTIBLE);
+			set_task_state(t, TASK_UNINTERRUPTIBLE);
 
-		__add_wait_queue_prio_exclusive(vspin, &wait);
+			__add_wait_queue_prio_exclusive(vspin, &wait);
 
-		TS_LOCK_SUSPEND;
+			spin_unlock_irqrestore(&vspin->lock, flags);
 
-		spin_unlock_irqrestore(&vspin->lock, flags);
+			TS_LOCK_SUSPEND;
 
-		preempt_enable_no_resched();
+			preempt_enable_no_resched();
 
-		schedule();
+			schedule();
 
-		preempt_disable();
+			preempt_disable();
 
-		TS_LOCK_RESUME;
-
-		/* ok, now it is ours */
-		BUG_ON(__get_cpu_var(mpcpvs_vspin) != t);
+			TS_LOCK_RESUME;
+			/* Recheck if we got it --- some higher-priority process might
+			 * have swooped in. */
+		}
 	}
+	/* ok, now it is ours */
 }
 
 /* called with preemptions off */
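The loop above is the standard wait-loop idiom: being woken does not
imply ownership. As a self-contained illustration only -- plain
pthreads with made-up names (slot_enter, slot_exit, slot_owner),
standing in for the kernel's per-CPU state and priority-ordered wait
queue -- the same pattern reads:

	#include <pthread.h>
	#include <stddef.h>

	static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  slot_free = PTHREAD_COND_INITIALIZER;
	static void *slot_owner = NULL;

	void slot_enter(void *self)
	{
		pthread_mutex_lock(&slot_lock);
		/* re-test after every wakeup: another waiter may have
		 * claimed the slot before we got to run */
		while (slot_owner != NULL)
			pthread_cond_wait(&slot_free, &slot_lock);
		slot_owner = self;
		pthread_mutex_unlock(&slot_lock);
	}

	void slot_exit(void)
	{
		pthread_mutex_lock(&slot_lock);
		slot_owner = NULL;		 /* free the slot first...    */
		pthread_cond_signal(&slot_free); /* ...then let a waiter retry */
		pthread_mutex_unlock(&slot_lock);
	}

Compile with -pthread; slot_enter()/slot_exit() bracket a request the
way mpcp_vspin_enter()/mpcp_vspin_exit() do.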
@@ -734,20 +737,15 @@ static void mpcp_vspin_exit(void)
 
 	BUG_ON(__get_cpu_var(mpcpvs_vspin) != t);
 
+	/* no spinning job */
+	__get_cpu_var(mpcpvs_vspin) = NULL;
+
 	/* see if anyone is waiting for us to stop "spinning" */
 	spin_lock_irqsave(&vspin->lock, flags);
 	next = __waitqueue_remove_first(vspin);
 
-	if (next) {
-		/* grant ownership */
-		__get_cpu_var(mpcpvs_vspin) = next;
-
-		/* let it continue its request */
+	if (next)
 		wake_up_process(next);
-	} else {
-		/* no spinning job */
-		__get_cpu_var(mpcpvs_vspin) = NULL;
-	}
 
 	spin_unlock_irqrestore(&vspin->lock, flags);
 }
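This exit-path change is what forces the retry loop in
mpcp_vspin_enter(): the slot is no longer handed over directly.
Condensed contrast (from the hunk above):

	/* old: direct handover -- the woken task owned the slot by
	 * construction, hence the unconditional BUG_ON() on resume */
	if (next) {
		__get_cpu_var(mpcpvs_vspin) = next;
		wake_up_process(next);
	} else
		__get_cpu_var(mpcpvs_vspin) = NULL;

	/* new: free the slot, then wake -- before the woken task runs,
	 * a higher-priority job on this CPU may enter and take the
	 * slot, so the woken task must re-compete */
	__get_cpu_var(mpcpvs_vspin) = NULL;
	if (next)
		wake_up_process(next);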
@@ -769,14 +767,14 @@ int pfp_mpcp_lock(struct litmus_lock* l)
 
 	preempt_disable();
 
+	if (sem->vspin)
+		mpcp_vspin_enter();
+
 	/* Priority-boost ourself *before* we suspend so that
 	 * our priority is boosted when we resume. Use the priority
 	 * ceiling for the local partition. */
 	boost_priority(t, sem->prio_ceiling[get_partition(t)]);
 
-	if (sem->vspin)
-		mpcp_vspin_enter();
-
 	spin_lock_irqsave(&sem->wait.lock, flags);
 
 	preempt_enable_no_resched();