Diffstat (limited to 'litmus/sched_psn_edf.c')
-rw-r--r--	litmus/sched_psn_edf.c	69
1 file changed, 36 insertions(+), 33 deletions(-)
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 6f4d4adcec01..466d45d9f6bd 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -418,6 +418,9 @@ struct dgl_semaphore {
 	/* bitmask of resources that are currently locked. */
 	resource_mask_t locked;
 
+	/* bitmask of the resources that are logically locked. */
+	resource_mask_t logically_locked;
+
 	/* bitmask of resources in the file descriptor table that are controlled by
 	 * this dgl_semaphore.
 	 */
@@ -429,6 +432,11 @@ struct dgl_semaphore {
 	 */
 	bool boosted[NR_CPUS];
 
+	/* Ensure that a task cannot acquire if there is an earlier-issued request
+	 * on that processor.
+	 */
+	bool logically_boosted[NR_CPUS];
+
 	/* FIFO queue of waiting tasks */
 	wait_queue_head_t wait;
 };
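
Taken together, the patch gives the semaphore a "logical" shadow of its actual state: logically_locked covers resources that are held or already promised to an earlier-issued request, and logically_boosted covers processors that host a boosted lock holder or an earlier-issued waiter. A condensed view of the structure after this patch (field names as in the patch; the embedded struct litmus_lock and other members are elided):

struct dgl_semaphore {
	resource_mask_t locked;            /* resources actually held */
	resource_mask_t logically_locked;  /* held, or promised to an earlier request */

	resource_mask_t dgl_resources;     /* resources controlled by this semaphore */

	bool boosted[NR_CPUS];             /* CPU runs a priority-boosted holder */
	bool logically_boosted[NR_CPUS];   /* boosted, or an earlier request pends there */

	wait_queue_head_t wait;            /* FIFO queue of waiting tasks */
};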
@@ -644,7 +652,7 @@ bool is_mask_valid(struct litmus_lock* l, resource_mask_t mask)
 #define for_each_bit(field, idx) \
 	for (idx = find_first_bit(&field, sizeof(field)*8); \
 	     idx < sizeof(field)*8; \
-	     idx = find_next_bit(&field, sizeof(field)*8, idx))
+	     idx = find_next_bit(&field, sizeof(field)*8, idx+1))
 
 int psnedf_dgl_dynamic_group_lock(struct litmus_lock* l, resource_mask_t resources)
 {
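
The for_each_bit change fixes an infinite loop: find_next_bit(addr, size, offset) returns the first set bit at or above offset, so restarting the search at idx re-finds the same bit forever; the search must resume at idx+1. A minimal userspace sketch, with stand-ins for the kernel helpers (same contract, single-word fields only), shows the difference:

#include <stdio.h>

/* Stand-ins for the kernel's find_first_bit()/find_next_bit(): return
 * the index of the first set bit at or above 'offset', or 'size' when
 * no set bit remains. */
static unsigned long find_next_bit(const unsigned long *field,
				   unsigned long size, unsigned long offset)
{
	while (offset < size && !(*field & (1UL << offset)))
		offset++;
	return offset;
}

static unsigned long find_first_bit(const unsigned long *field,
				    unsigned long size)
{
	return find_next_bit(field, size, 0);
}

#define for_each_bit(field, idx) \
	for (idx = find_first_bit(&field, sizeof(field)*8); \
	     idx < sizeof(field)*8; \
	     idx = find_next_bit(&field, sizeof(field)*8, idx+1))

int main(void)
{
	unsigned long mask = 0x29;	/* bits 0, 3, and 5 set */
	unsigned long bit;

	/* With idx+1 this prints 0, 3, 5 and terminates; with the old
	 * idx it would re-find bit 0 on every iteration. */
	for_each_bit(mask, bit)
		printf("bit %lu is set\n", bit);
	return 0;
}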
@@ -665,12 +673,14 @@ int psnedf_dgl_dynamic_group_lock(struct litmus_lock* l, resource_mask_t resources)
 	t->resources = resources;
 
 	spin_lock_irqsave(&sem->wait.lock, flags);
+
 
 	// if sem->locked & resources == 0, then all resources are available,
 	// otherwise we must suspend.
-	if (sem->locked & resources){
+	if (waitqueue_active(&sem->wait) || sem->logically_locked & resources ||
+	    sem->logically_boosted[task_cpu(t)]){
 
-		STRACE("Resources locked, suspending\n");
+		STRACE("Resources unavailable, suspending\n");
 
 		init_waitqueue_entry(&wait, t);
 
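
The rewritten test is stricter than checking resource availability alone: a request must also suspend when any earlier request is still queued (to preserve FIFO order) or when an earlier-issued request from the same processor is outstanding. A condensed sketch of the decision, reusing the patch's field names; the helper below is hypothetical, the patch inlines this test:

/* May this request be granted immediately? (sketch, not the kernel code) */
static bool can_acquire_now(struct dgl_semaphore *sem,
			    resource_mask_t resources, int cpu)
{
	return !waitqueue_active(&sem->wait)        /* no earlier waiter queued */
	    && !(sem->logically_locked & resources) /* no resource conflict */
	    && !sem->logically_boosted[cpu];        /* no earlier request on this CPU */
}

When this test fails, the task appends itself to the tail of the FIFO wait queue and suspends, as the next hunk shows.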
@@ -678,6 +688,9 @@ int psnedf_dgl_dynamic_group_lock(struct litmus_lock* l, resource_mask_t resources)
 
 		__add_wait_queue_tail_exclusive(&sem->wait, &wait);
 
+		sem->logically_boosted[task_cpu(t)] = true;
+		sem->logically_locked |= resources;
+
 		TS_LOCK_SUSPEND;
 
 		spin_unlock_irqrestore(&sem->wait.lock, flags);
@@ -687,9 +700,10 @@ int psnedf_dgl_dynamic_group_lock(struct litmus_lock* l, resource_mask_t resources)
 		TS_LOCK_RESUME;
 	} else {
 
-		STRACE("Acquired a resource\n");
+		STRACE("Acquired resource(s)\n");
 
-		sem->locked = sem->locked | resources;
+		sem->locked |= resources;
+		sem->logically_locked |= resources;
 
 		// if a job requests a resource, then it was scheduled, and therefore
 		// there was not another boosted job, so this is safe.
@@ -698,10 +712,12 @@ int psnedf_dgl_dynamic_group_lock(struct litmus_lock* l, resource_mask_t resources)
 		boost_priority(t);
 
 		sem->boosted[task_cpu(t)] = true;
+		sem->logically_boosted[task_cpu(t)] = true;
 
 		spin_unlock_irqrestore(&sem->wait.lock, flags);
 	}
 
+	// tracing what resources are used when.
 	for_each_bit(resources, resource)
 		sched_trace_server_switch_to(resource, 0, t->pid, get_job_no(t),
 			get_partition(t));
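
The comment above rests on a per-partition invariant: under PSN-EDF, a job that is currently scheduled cannot coexist with another priority-boosted job on its own partition, so a single bool per CPU suffices and the assignment cannot clobber another holder's flag. A hypothetical assertion (not in the patch) would state this at the boost site:

	/* Hypothetical check: if t was just scheduled, no other job can
	 * currently be boosted on t's partition. */
	BUG_ON(sem->boosted[task_cpu(t)]);

	boost_priority(t);
	sem->boosted[task_cpu(t)] = true;
	sem->logically_boosted[task_cpu(t)] = true;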
@@ -709,23 +725,12 @@ int psnedf_dgl_dynamic_group_lock(struct litmus_lock* l, resource_mask_t resources)
 	return 0;
 }
 
-inline int num_boosted(struct dgl_semaphore *sem)
-{
-	int ret = 0;
-	int i;
-	for(i = 0; i < NR_CPUS; i++){
-		ret += sem->boosted[i];
-	}
-	return ret;
-}
-
 int psnedf_dgl_dynamic_group_unlock(struct litmus_lock* l, resource_mask_t resources)
 {
 	struct task_struct *t = current, *tsk;
 	struct dgl_semaphore *sem = dgl_from_lock(l);
 	unsigned long flags;
-	int err = 0, resource;
-	resource_mask_t logically_locked;
+	int err = 0, resource, i;
 	struct list_head *pos, *tmp;
 
 	TRACE_CUR("Trying to unlock a DGL\n");
@@ -754,31 +759,26 @@ int psnedf_dgl_dynamic_group_unlock(struct litmus_lock* l, resource_mask_t resources)
 		STRACE("Released all resources\n");
 		unboost_priority(t);
 		sem->boosted[task_cpu(t)] = false;
+		//n.b., logically_boosted[task_cpu(t)] may be reset to true in the
+		//subsequent iteration.
+		sem->logically_boosted[task_cpu(t)] = false;
 	} else {
 		// update t->resources to reflect the resources currently owned.
 		STRACE("Unlocked a subset of locked resources\n");
 		t->resources = t->resources & ~resources;
 	}
 
-	logically_locked = sem->locked;
+	sem->logically_locked = sem->locked;
+	for(i = 0; i < NR_CPUS; i++)
+		sem->logically_boosted[i] = sem->boosted[i];
 
+	// iterate through the waitqueue and unlock ready tasks. Also recreate logically_locked.
 	list_for_each_safe(pos, tmp, &sem->wait.task_list) {
 		tsk = (struct task_struct*) list_entry(pos, wait_queue_t,
 			task_list)->private;
-		STRACE_TASK(tsk, "Evaluating\n");
-
-		if ( (logically_locked == -1) || (num_boosted(sem) == NR_CPUS) ){
-			STRACE_TASK(tsk, "All procs boosted, or all resources locked\n");
-			break;
-		}
-
-		//STRACE_TASK(tsk, "Logically locked: %o\n", logically_locked);
-		//STRACE_TASK(tsk, "tsk->resources: %o\n", tsk->resources);
-		//STRACE_TASK(tsk, "!(tsk->resources & logically_locked): %o\n", !(tsk->resources & logically_locked));
-		//STRACE_TASK(tsk, "!sem->boosted: %d\n", !sem->boosted[task_cpu(tsk)]);
 
 		// the resources requested are unlocked, tsk acquires its resources
-		if( !(tsk->resources & logically_locked) && !sem->boosted[task_cpu(tsk)]) {
+		if( !(tsk->resources & sem->logically_locked) && !sem->logically_boosted[task_cpu(tsk)]) {
 
 			STRACE_TASK(tsk, "Acquired a resource\n");
 
@@ -791,8 +791,8 @@ int psnedf_dgl_dynamic_group_unlock(struct litmus_lock* l, resource_mask_t resources)
 
 			wake_up_process(tsk);
 		}
-
-		logically_locked = logically_locked | tsk->resources;
+		sem->logically_locked |= tsk->resources;
+		sem->logically_boosted[task_cpu(tsk)] = true;
 	}
 
 	for_each_bit(resources, resource)
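
Folding every examined waiter, woken or not, into logically_locked and logically_boosted is what preserves FIFO order: a later waiter can never overtake an earlier one, either on a resource or on a processor. A self-contained userspace simulation of the replay (toy types, and a plain array standing in for sem->wait.task_list):

#include <stdio.h>
#include <stdbool.h>

#define NCPUS 2

/* A queued DGL request: the resources it wants and the issuing CPU. */
struct request {
	unsigned mask;
	int cpu;
};

int main(void)
{
	/* Actual state: resources 0 and 1 held by a boosted task on CPU 0. */
	unsigned locked = 0x3;
	bool boosted[NCPUS] = { true, false };

	/* FIFO queue: A (first) wants resource 0 from CPU 1; B (second)
	 * wants resource 2, also from CPU 1. Resource 2 is free, but B
	 * must not overtake A. */
	struct request queue[] = { { 0x1, 1 }, { 0x4, 1 } };
	int i, n = 2;

	/* Rebuild the logical state from the actual state, as the unlock
	 * path does after releasing. */
	unsigned logically_locked = locked;
	bool logically_boosted[NCPUS];
	for (i = 0; i < NCPUS; i++)
		logically_boosted[i] = boosted[i];

	/* Replay the queue in FIFO order. */
	for (i = 0; i < n; i++) {
		struct request *r = &queue[i];

		if (!(r->mask & logically_locked) && !logically_boosted[r->cpu])
			printf("request %d: woken\n", i);
		else
			printf("request %d: stays queued\n", i);

		/* Woken or not, the request reserves its resources and its
		 * CPU; checking only the real 'boosted' array here would
		 * wake B ahead of A on the same partition. */
		logically_locked |= r->mask;
		logically_boosted[r->cpu] = true;
	}
	return 0;
}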
@@ -847,10 +847,13 @@ static struct litmus_lock* psnedf_new_dgl(void)
 		return NULL;
 
 	sem->locked = 0;
+	sem->logically_locked = 0;
 	sem->dgl_resources = 0;
 
-	for(i = 0; i < NR_CPUS; i++)
+	for(i = 0; i < NR_CPUS; i++){
 		sem->boosted[i] = false;
+		sem->logically_boosted[i] = false;
+	}
 
 	init_waitqueue_head(&sem->wait);
 	sem->litmus_lock.ops = &psnedf_dgl_lock_ops;