author		Bryan Ward <bcw@cs.unc.edu>	2012-08-10 23:05:11 -0400
committer	Bryan Ward <bcw@cs.unc.edu>	2013-04-16 14:34:35 -0400
commit		30bb245b67c5be41a53203a5cc874e84985c528a
tree		44731df2feb1a5a2b45e194ec0a3291e2a03f099
parent		ba3f616d900d1a8caad96d0fb8c4f168c30a8afd
Check for valid fdsos in a DGL request.

Use bitwise operations to quickly check whether the set of requested resources is contained within a single group.
-rw-r--r--	include/litmus/locking.h	13
-rw-r--r--	litmus/locking.c		25
-rw-r--r--	litmus/sched_psn_edf.c		57
3 files changed, 72 insertions, 23 deletions
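The containment test the commit message refers to is the standard bitwise encoding of logical implication: a request mask is valid for a group iff every bit set in the request is also set in the group's resource mask, i.e. (~request | group) is all ones. Below is a minimal userspace sketch of just that test; the type name resource_mask_t mirrors the patch, while request_within_group() and the example masks are illustrative only.

#include <assert.h>

typedef unsigned int resource_mask_t;

/* request -> group, evaluated bitwise as ~request | group.
 * The implication holds for every bit exactly when the result is all ones. */
static int request_within_group(resource_mask_t request, resource_mask_t group)
{
	return (~request | group) == ~0u;
}

int main(void)
{
	assert( request_within_group(0x05, 0x0f));	/* resources {0,2} lie inside {0,1,2,3} */
	assert(!request_within_group(0x11, 0x0f));	/* bit 4 falls outside the group */
	return 0;
}

In the kernel patch this test is folded into check_mask_valid() in sched_psn_edf.c, after the group's resource cache has been brought up to date.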
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index 203466933f3c..8e501c326b8b 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -5,6 +5,8 @@
 
 struct litmus_lock_ops;
 
+extern struct fdso_ops generic_lock_ops;
+
 /* Generic base struct for LITMUS^RT userspace semaphores.
  * This structure should be embedded in protocol-specific semaphores.
  */
@@ -34,4 +36,15 @@ struct litmus_lock_ops {
 	void (*deallocate)(struct litmus_lock*);
 };
 
+static inline bool is_lock(struct od_table_entry* entry)
+{
+	return entry->class == &generic_lock_ops;
+}
+
+static inline struct litmus_lock* get_lock(struct od_table_entry* entry)
+{
+	BUG_ON(!is_lock(entry));
+	return (struct litmus_lock*) entry->obj->obj;
+}
+
 #endif
diff --git a/litmus/locking.c b/litmus/locking.c
index 348a4be97cf9..55529064f7ca 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -22,17 +22,6 @@ struct fdso_ops generic_lock_ops = {
 	.destroy = destroy_generic_lock
 };
 
-static inline bool is_lock(struct od_table_entry* entry)
-{
-	return entry->class == &generic_lock_ops;
-}
-
-static inline struct litmus_lock* get_lock(struct od_table_entry* entry)
-{
-	BUG_ON(!is_lock(entry));
-	return (struct litmus_lock*) entry->obj->obj;
-}
-
 static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg)
 {
 	struct litmus_lock* lock;
@@ -68,14 +57,6 @@ static void destroy_generic_lock(obj_type_t type, void* obj)
 	lock->ops->deallocate(lock);
 }
 
-bool check_mask_valid(struct litmus_lock* l, resource_mask_t mask)
-{
-	// this should really check if all of the resources requested are
-	// controlled by the dynamic group lock. this can be done with bitwise
-	// magic, by observing that A->B <-> ~A|B
-	return true;
-}
-
 asmlinkage long sys_dynamic_group_lock(resource_mask_t lock_ods)
 {
 	long err = -EINVAL;
@@ -87,7 +68,7 @@ asmlinkage long sys_dynamic_group_lock(resource_mask_t lock_ods)
 	entry = get_entry_for_od(ffs(lock_ods)-1);
 	if (entry && is_lock(entry)) {
 		l = get_lock(entry);
-		if (check_mask_valid(l, mask)){
+		if (l->type == DGL_SEM){
 			TRACE_CUR("attempts to lock %d\n", lock_ods);
 			err = l->ops->dynamic_group_lock(l, lock_ods);
 		}
@@ -109,9 +90,11 @@ asmlinkage long sys_dynamic_group_unlock(resource_mask_t lock_ods)
 	entry = get_entry_for_od(ffs(lock_ods)-1);
 	if (entry && is_lock(entry)) {
 		l = get_lock(entry);
-		if (check_mask_valid(l, mask)){
+		if (l->type == DGL_SEM){
 			TRACE_CUR("attempts to unlock all resources in 0x%p\n",l);
 			err = l->ops->dynamic_group_unlock(l, lock_ods);
+		} else{
+			TRACE_CUR("Wrong Type: %d\n", l->type);
 		}
 	}
 
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 7d81f8a245a1..8745682b409f 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -594,11 +594,61 @@ int psnedf_dgl_unlock(struct litmus_lock* l)
 	return 0;
 }
 
+/**
+ * This function checks to ensure that all resources requested in the mask are
+ * controlled by the dgl in l. This is validated in one of two ways.
+ *
+ * The dgl struct maintains a cache of resources known to be controlled by that
+ * particular dgl. If the requested resources are in that cache, return true.
+ *
+ * Note that this cache is not immediately updated when a resource is added to
+ * a group (because I didn't see an easy way to do it). The first time a resource
+ * is requested, the cache is updated (in the while loop). This is done by
+ * checking that two fdso point to the same lock object.
+ */
+bool check_mask_valid(struct litmus_lock* l, resource_mask_t mask)
+{
+	struct dgl_semaphore* d;
+	struct od_table_entry* entry;
+	resource_mask_t tmp;
+	int prev = -1;
+
+	if (l->type != DGL_SEM)
+		return false;
+
+	d = dgl_from_lock(l);
+
+	// mask -> d->dgl_resources (bitwise logical implication)
+	tmp = ~mask | d->dgl_resources;
+
+	// n.b. if tmp is 0xffffffff, ffs(~tmp) - 1 = -1, and -1 >= prev always, so stop.
+	while (prev < ffs(~tmp) - 1)
+	{
+		prev = ffs(~tmp) - 1;
+		entry = get_entry_for_od(ffs(~tmp) - 1);
+		if (entry && is_lock(entry) && get_lock(entry) == l){
+			d->dgl_resources = d->dgl_resources | (1 << (ffs(~tmp) - 1));
+		}
+		tmp = ~mask | d->dgl_resources;
+	}
+
+	// 2's complement: -1 is 0xffffffff
+
+	if (tmp == -1){
+		return true;
+	} else {
+		return false;
+	}
+
+}
+
 int psnedf_dgl_dynamic_group_lock(struct litmus_lock* l, resource_mask_t lock_ods)
 {
-	struct task_struct* t = current;
-	struct dgl_semaphore *sem = dgl_from_lock(l);
+	//struct task_struct* t = current;
+	//struct dgl_semaphore *sem = dgl_from_lock(l);
 	TRACE("Trying to lock a DGL\n");
+
+	check_mask_valid(l, lock_ods);
 	return 0;
 }
 
@@ -645,6 +695,8 @@ static struct litmus_lock* psnedf_new_dgl(void)
 {
 	struct dgl_semaphore* sem;
 
+	TRACE("Creating another DGL\n");
+
 	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
 	if (!sem)
 		return NULL;
@@ -653,6 +705,7 @@ static struct litmus_lock* psnedf_new_dgl(void)
 	sem->dgl_resources = 0;
 	init_waitqueue_head(&sem->wait);
 	sem->litmus_lock.ops = &psnedf_dgl_lock_ops;
+	sem->litmus_lock.type = DGL_SEM;
 	return &sem->litmus_lock;
 }
 
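For readers puzzling over the while loop in the added check_mask_valid(), here is a hypothetical userspace model of the same lazy cache update, with the od table mocked as a plain array mapping object-descriptor indices to lock pointers. Nothing below is LITMUS^RT API; ffs() comes from <strings.h>, and mock_lock, od_table, and OD_TABLE_SIZE exist only to make the bit manipulation runnable.

#include <stdbool.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

typedef unsigned int resource_mask_t;

struct mock_lock {
	resource_mask_t dgl_resources;	/* cached members of this group */
};

#define OD_TABLE_SIZE 32
static struct mock_lock *od_table[OD_TABLE_SIZE];	/* od index -> lock, NULL if unused */

static bool check_mask_valid(struct mock_lock *l, resource_mask_t mask)
{
	/* mask -> dgl_resources, evaluated bitwise; zero bits in tmp are requested
	 * resources not (yet) known to belong to the group. */
	resource_mask_t tmp = ~mask | l->dgl_resources;
	int prev = -1;

	/* Visit the lowest uncovered bit; absorb it into the cache if its od entry
	 * refers to this same lock. If it does not, the lowest uncovered bit stays
	 * put, the loop condition fails, and we fall through to the final test. */
	while (prev < ffs((int)~tmp) - 1) {
		int idx = ffs((int)~tmp) - 1;
		prev = idx;
		if (idx < OD_TABLE_SIZE && od_table[idx] == l)
			l->dgl_resources |= 1u << idx;
		tmp = ~mask | l->dgl_resources;
	}
	return tmp == ~0u;	/* all ones: every requested resource is in the group */
}

int main(void)
{
	struct mock_lock dgl = { .dgl_resources = 0 };
	struct mock_lock other = { .dgl_resources = 0 };

	od_table[0] = &dgl;
	od_table[1] = &dgl;
	od_table[2] = &other;	/* attached to a different group lock */

	printf("%d\n", check_mask_valid(&dgl, 0x3));	/* 1: ods 0 and 1 are in the group */
	printf("%d\n", check_mask_valid(&dgl, 0x7));	/* 0: od 2 belongs to another lock */
	return 0;
}

As in the patch, the cache only grows: once a resource's descriptor has been seen to map to the group lock, later requests covering that resource hit the cache without consulting the od table again.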