author		Bryan Ward <bcw@cs.unc.edu>	2012-08-12 16:35:36 -0400
committer	Bryan Ward <bcw@cs.unc.edu>	2013-04-16 14:34:35 -0400
commit		44123a1a3076503bef7666ffc3fdcb3f8e68e8da
tree		4d6d41e315f8394f0970f24314cd6a569c56bdf7
parent		30bb245b67c5be41a53203a5cc874e84985c528a

DGLock and DGUnlock implementation.

These methods haven't been fully tested, but they compile and pass a few
simple tests.
 include/linux/sched.h    |   3
 include/litmus/litmus.h  |   1
 include/litmus/locking.h |   4
 include/litmus/wait.h    |   1
 litmus/locking.c         |  13
 litmus/sched_psn_edf.c   | 131
 6 files changed, 126 insertions(+), 27 deletions(-)
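For orientation before the diffs: this patch adds only the kernel side of the
group-lock API. The sketch below shows the intended calling pattern from a
real-time task; the litmus_dg_lock()/litmus_dg_unlock() wrappers and the
one-bit-per-resource encoding of resource_mask_t are assumptions made for
illustration, not part of this commit.

	/* Hypothetical userspace usage sketch (wrappers assumed, not in this patch). */
	typedef unsigned int resource_mask_t;	/* assumed: one bit per group resource */

	extern int litmus_dg_lock(int od, resource_mask_t resources);
	extern int litmus_dg_unlock(int od, resource_mask_t resources);

	int use_two_resources(int dgl_od)
	{
		/* Atomically request resources 0 and 2 of the group. */
		resource_mask_t mask = (1u << 0) | (1u << 2);
		int err = litmus_dg_lock(dgl_od, mask);
		if (err)
			return err;

		/* ... critical section holding both resources ... */

		/* A job may release a subset first, then the remainder. */
		litmus_dg_unlock(dgl_od, 1u << 0);
		return litmus_dg_unlock(dgl_od, 1u << 2);
	}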
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9c990d13ae35..2b094bdaafa3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -96,6 +96,7 @@ struct sched_param {
 
 #include <litmus/rt_param.h>
 #include <litmus/preempt.h>
+#include <litmus/fdso.h>
 
 struct exec_domain;
 struct futex_pi_state;
@@ -1538,6 +1539,8 @@ struct task_struct {
 	/* references to PI semaphores, etc. */
 	struct od_table_entry *od_table;
 
+	resource_mask_t resources;
+
 #ifdef CONFIG_LATENCYTOP
 	int latency_record_count;
 	struct latency_record latency_record[LT_SAVECOUNT];
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index c9206adb3493..9282f3a8f28f 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -27,6 +27,7 @@ static inline int in_list(struct list_head* list)
 }
 
 struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq);
+struct task_struct* __waitqueue_peek_first(wait_queue_head_t *wq);
 
 #define NO_CPU 0xffffffff
 
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index 8e501c326b8b..968ba6fa828c 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -21,10 +21,6 @@ struct litmus_lock_ops {
 	int (*open)(struct litmus_lock*, void* __user);
 	int (*close)(struct litmus_lock*);
 
-	/* add or remove a resource from control by the dynamic group lock */
-	int (*add)(struct litmus_lock*, int);
-	int (*remove)(struct litmus_lock*, int);
-
 	/* Current tries to lock/unlock this lock (mandatory methods). */
 	int (*lock)(struct litmus_lock*);
 	int (*unlock)(struct litmus_lock*);
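After this change the ops table presumably reduces to the shape below. This is
a hedged reconstruction, not the file's verbatim contents: the
dynamic_group_lock/dynamic_group_unlock signatures are inferred from the calls
in psnedf_dgl_lock()/psnedf_dgl_unlock() further down, and the deallocate hook
is assumed to match psnedf_dgl_free().

	/* Assumed shape of litmus_lock_ops after this patch (reconstruction). */
	struct litmus_lock_ops {
		int (*open)(struct litmus_lock*, void* __user);
		int (*close)(struct litmus_lock*);

		/* Current tries to lock/unlock this lock (mandatory methods). */
		int (*lock)(struct litmus_lock*);
		int (*unlock)(struct litmus_lock*);

		/* Lock/unlock an arbitrary subset of the group's resources. */
		int (*dynamic_group_lock)(struct litmus_lock*, resource_mask_t);
		int (*dynamic_group_unlock)(struct litmus_lock*, resource_mask_t);

		/* The lock is no longer referenced (assumed member). */
		void (*deallocate)(struct litmus_lock*);
	};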
diff --git a/include/litmus/wait.h b/include/litmus/wait.h
index ce1347c355f8..7e20c0a4a1f4 100644
--- a/include/litmus/wait.h
+++ b/include/litmus/wait.h
@@ -2,6 +2,7 @@
 #define _LITMUS_WAIT_H_
 
 struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq);
+struct task_struct* __waitqueue_peek_first(wait_queue_head_t *wq);
 
 /* wrap regular wait_queue_t head */
 struct __prio_wait_queue {
diff --git a/litmus/locking.c b/litmus/locking.c
index 55529064f7ca..65fd51f2c80a 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -169,6 +169,19 @@ struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq)
 	return(t);
 }
 
+struct task_struct* __waitqueue_peek_first(wait_queue_head_t *wq)
+{
+	wait_queue_t* q;
+	struct task_struct* t = NULL;
+
+	if (waitqueue_active(wq)) {
+		q = list_entry(wq->task_list.next,
+			       wait_queue_t, task_list);
+		t = (struct task_struct*) q->private;
+	}
+	return(t);
+}
+
 unsigned int __add_wait_queue_prio_exclusive(
 	wait_queue_head_t* head,
 	prio_wait_queue_t *new)
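The new helper is a non-destructive counterpart to __waitqueue_remove_first():
it lets a caller inspect the head waiter before committing to dequeue it,
which is exactly what the DGL unlock path below needs. A minimal sketch of the
peek-then-remove pattern, in kernel context with the wait-queue lock held;
can_grant() is a hypothetical stand-in for the mask test performed in
psnedf_dgl_dynamic_group_unlock().

	/* Sketch only: wake FIFO waiters until the first one that cannot be
	 * granted. Caller must hold wq->lock; can_grant() is hypothetical. */
	static bool can_grant(struct task_struct *t);

	static void grant_waiters(wait_queue_head_t *wq)
	{
		struct task_struct *next = __waitqueue_peek_first(wq);

		while (next && can_grant(next)) {
			/* The head cannot have changed since the peek (lock
			 * held), so this removes the task just examined. */
			next = __waitqueue_remove_first(wq);
			wake_up_process(next);
			next = __waitqueue_peek_first(wq);
		}
	}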
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 8745682b409f..c76c0412f809 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -421,7 +421,7 @@ struct dgl_semaphore {
 	 * partitioned scheduling, the resource holders are priority boosted, and
 	 * it is impossible to have $>m$ boosted jobs.
 	 */
-	unsigned int resource_holders;
+	bool boosted[NR_CPUS];
 
 	/* FIFO queue of waiting tasks */
 	wait_queue_head_t wait;
@@ -574,24 +574,16 @@ int psnedf_dgl_close(struct litmus_lock* l)
 	return 0;
 }
 
-int psnedf_dgl_add(struct litmus_lock* l, int od)
-{
-	return 0;
-}
-
-int psnedf_dgl_remove(struct litmus_lock* l, int od)
-{
-	return 0;
-}
-
+/* For compatibility, assume lock requests the whole group. */
 int psnedf_dgl_lock(struct litmus_lock* l)
 {
-	return 0;
+	return l->ops->dynamic_group_lock(l, dgl_from_lock(l)->dgl_resources);
 }
 
+/* For compatibility, assume unlock releases the whole group. */
 int psnedf_dgl_unlock(struct litmus_lock* l)
 {
-	return 0;
+	return l->ops->dynamic_group_unlock(l, dgl_from_lock(l)->dgl_resources);
 }
 
 /**
@@ -606,7 +598,7 @@ int psnedf_dgl_unlock(struct litmus_lock* l)
  * is requested, the cache is updated (in the while loop). This is done by
  * checking that the two fdsos point to the same lock object.
  */
-bool check_mask_valid(struct litmus_lock* l, resource_mask_t mask)
+bool is_mask_valid(struct litmus_lock* l, resource_mask_t mask)
 {
 	struct dgl_semaphore* d;
 	struct od_table_entry* entry;
@@ -642,20 +634,114 @@ bool check_mask_valid(struct litmus_lock* l, resource_mask_t mask)
 
 }
 
-int psnedf_dgl_dynamic_group_lock(struct litmus_lock* l, resource_mask_t lock_ods)
+int psnedf_dgl_dynamic_group_lock(struct litmus_lock* l, resource_mask_t resources)
 {
-	//struct task_struct* t = current;
-	//struct dgl_semaphore *sem = dgl_from_lock(l);
+	struct task_struct* t = current;
+	struct dgl_semaphore *sem = dgl_from_lock(l);
+	wait_queue_t wait;
+	unsigned long flags;
+
 	TRACE("Trying to lock a DGL\n");
 
-	check_mask_valid(l, lock_ods);
+	if (!is_realtime(t))
+		return -EPERM;
+
+	if (!is_mask_valid(l, resources))
+		return -EINVAL;
+
+	t->resources = resources;
+
+	spin_lock_irqsave(&sem->wait.lock, flags);
+
+	// if (sem->locked & resources) == 0, then all requested resources are
+	// available; otherwise we must suspend.
+	if (sem->locked & resources) {
+		init_waitqueue_entry(&wait, t);
+
+		set_task_state(t, TASK_UNINTERRUPTIBLE);
+
+		__add_wait_queue_tail_exclusive(&sem->wait, &wait);
+
+		TS_LOCK_SUSPEND;
+
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+		schedule();
+
+		TS_LOCK_RESUME;
+	} else {
+		sem->locked = sem->locked | resources;
+
+		// if a job requests a resource, it was scheduled, and therefore
+		// no other job on this CPU is boosted, so boosting it is safe.
+		BUG_ON(sem->boosted[task_cpu(t)]);
+
+		boost_priority(t);
+
+		sem->boosted[task_cpu(t)] = true;
+
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
+	}
+
 	return 0;
 }
 
-int psnedf_dgl_dynamic_group_unlock(struct litmus_lock* l, resource_mask_t lock_ods)
+int psnedf_dgl_dynamic_group_unlock(struct litmus_lock* l, resource_mask_t resources)
 {
+	struct task_struct *t = current, *next;
+	struct dgl_semaphore *sem = dgl_from_lock(l);
+	unsigned long flags;
+	int err = 0;
+
 	TRACE("Trying to unlock a DGL\n");
-	return 0;
+
+	spin_lock_irqsave(&sem->wait.lock, flags);
+
+	// (~resources | t->resources) == -1 checks that t owns every resource
+	// being released; note that a job may release a subset of what it holds.
+	if (!is_mask_valid(l, resources)) {
+		TRACE("Invalid mask %u\n", resources);
+		err = -EINVAL;
+		goto out;
+	} else if ((~resources | t->resources) != -1) {
+		TRACE("Trying to unlock unowned resources: %u\t%u\n", resources, t->resources);
+		err = -EINVAL;
+		goto out;
+	}
+
+	// if the job released all of the resources it owned, then unboost.
+	if (resources == t->resources) {
+		unboost_priority(t);
+		sem->boosted[task_cpu(t)] = false;
+	} else {
+		// update t->resources to reflect the resources still owned.
+		t->resources = t->resources & ~resources;
+	}
+
+	// the released resources become available to other jobs.
+	sem->locked = sem->locked & ~resources;
+
+	next = __waitqueue_peek_first(&sem->wait);
+	while (next && ~(next->resources & sem->locked) == -1 &&
+	       !sem->boosted[task_cpu(next)]) {
+
+		// next should not change; it is simply removed
+		// from the head of the queue.
+		next = __waitqueue_remove_first(&sem->wait);
+
+		boost_priority(next);
+
+		sem->locked = sem->locked | next->resources;
+		sem->boosted[task_cpu(next)] = true;
+
+		wake_up_process(next);
+
+		next = __waitqueue_peek_first(&sem->wait);
+	}
+
+out:
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
+	return err;
 }
 
 void psnedf_dgl_free(struct litmus_lock* l)
@@ -667,8 +753,6 @@ void psnedf_dgl_free(struct litmus_lock* l)
 
 static struct litmus_lock_ops psnedf_dgl_lock_ops = {
 	.close = psnedf_dgl_close,
-	.add = psnedf_dgl_add,
-	.remove = psnedf_dgl_remove,
 	.lock = psnedf_dgl_lock,
 	.unlock = psnedf_dgl_unlock,
 	.dynamic_group_lock = psnedf_dgl_dynamic_group_lock,
@@ -694,6 +778,7 @@ static struct litmus_lock* psnedf_new_fmlp(void)
 static struct litmus_lock* psnedf_new_dgl(void)
 {
 	struct dgl_semaphore* sem;
+	int i;
 
 	TRACE("Creating another DGL\n");
 
@@ -701,8 +786,13 @@ static struct litmus_lock* psnedf_new_dgl(void)
 	if (!sem)
 		return NULL;
 
-	sem->resource_holders = 0;
+	sem->locked = 0;
 	sem->dgl_resources = 0;
+
+	// This doesn't feel like the preferred way to do it, but it should work.
+	for (i = 0; i < NR_CPUS; i++)
+		sem->boosted[i] = false;
+
 	init_waitqueue_head(&sem->wait);
 	sem->litmus_lock.ops = &psnedf_dgl_lock_ops;
 	sem->litmus_lock.type = DGL_SEM;
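
The lock and unlock paths above rely on three bitmask idioms: a request blocks
iff it overlaps the locked set, a release is valid iff it is a subset of the
owner's mask, and a partial release just clears bits. The small standalone
program below checks this arithmetic, under the assumption that
resource_mask_t is a plain unsigned int with one bit per resource.

	#include <assert.h>

	typedef unsigned int resource_mask_t;	/* assumption: one bit per resource */

	int main(void)
	{
		resource_mask_t locked = 0;

		/* Availability test from psnedf_dgl_dynamic_group_lock():
		 * a request blocks iff it overlaps already-locked resources. */
		resource_mask_t a = (1u << 0) | (1u << 2);	/* job A wants {0,2} */
		assert((locked & a) == 0);			/* free: A proceeds  */
		locked |= a;

		resource_mask_t b = (1u << 2) | (1u << 3);	/* job B wants {2,3} */
		assert((locked & b) != 0);			/* overlap: B waits  */

		/* Ownership test from psnedf_dgl_dynamic_group_unlock():
		 * (~release | owned) == -1 iff release is a subset of owned. */
		resource_mask_t release = 1u << 0;		/* A releases {0}    */
		assert((~release | a) == (resource_mask_t)-1);	/* valid release     */

		resource_mask_t bogus = 1u << 3;		/* A never held {3}  */
		assert((~bogus | a) != (resource_mask_t)-1);	/* rejected: -EINVAL */

		/* Partial release: clear the bits; resource 2 stays held. */
		a &= ~release;
		locked &= ~release;
		assert(a == (1u << 2) && locked == (1u << 2));

		return 0;
	}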