-rw-r--r--  include/litmus/fifo_lock.h   |  1
-rw-r--r--  include/litmus/locking.h     |  2
-rw-r--r--  include/litmus/prioq_lock.h  |  1
-rw-r--r--  include/litmus/unistd_32.h   |  5
-rw-r--r--  include/litmus/unistd_64.h   |  9
-rw-r--r--  litmus/fifo_lock.c           | 20
-rw-r--r--  litmus/locking.c             | 83
-rw-r--r--  litmus/prioq_lock.c          | 20
-rw-r--r--  litmus/sched_cedf.c          |  4
-rw-r--r--  litmus/sched_gsn_edf.c       |  4
10 files changed, 147 insertions(+), 2 deletions(-)
diff --git a/include/litmus/fifo_lock.h b/include/litmus/fifo_lock.h
index fcf53c10ca92..cff2cc663fb7 100644
--- a/include/litmus/fifo_lock.h
+++ b/include/litmus/fifo_lock.h
@@ -51,6 +51,7 @@ void fifo_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
 
 int fifo_mutex_lock(struct litmus_lock* l);
 int fifo_mutex_unlock(struct litmus_lock* l);
+int fifo_mutex_should_yield_lock(struct litmus_lock* l);
 int fifo_mutex_close(struct litmus_lock* l);
 void fifo_mutex_free(struct litmus_lock* l);
 struct litmus_lock* fifo_mutex_new(struct litmus_lock_ops*);
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index 4dd8c66868e6..39e32d3f48c7 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -116,6 +116,7 @@ typedef int (*lock_op_t)(struct litmus_lock *l);
 typedef lock_op_t lock_close_t;
 typedef lock_op_t lock_lock_t;
 typedef lock_op_t lock_unlock_t;
+typedef lock_op_t lock_should_yield_lock_t;
 
 typedef int (*lock_open_t)(struct litmus_lock *l, void* __user arg);
 typedef void (*lock_free_t)(struct litmus_lock *l);
@@ -135,6 +136,7 @@ struct litmus_lock_ops {
 	/* Current tries to lock/unlock this lock (mandatory methods). */
 	lock_lock_t lock;
 	lock_unlock_t unlock;
+	lock_should_yield_lock_t should_yield_lock;
 
 	/* The lock is no longer being referenced (mandatory method). */
 	lock_free_t deallocate;
diff --git a/include/litmus/prioq_lock.h b/include/litmus/prioq_lock.h
index 1128e3aab077..73d1e0dbc9a2 100644
--- a/include/litmus/prioq_lock.h
+++ b/include/litmus/prioq_lock.h
@@ -57,6 +57,7 @@ void prioq_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
 
 int prioq_mutex_lock(struct litmus_lock* l);
 int prioq_mutex_unlock(struct litmus_lock* l);
+int prioq_mutex_should_yield_lock(struct litmus_lock* l);
 int prioq_mutex_close(struct litmus_lock* l);
 void prioq_mutex_free(struct litmus_lock* l);
 struct litmus_lock* prioq_mutex_new(struct litmus_lock_ops*);
diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h
index d1fe84a5d574..4ba93c095c0a 100644
--- a/include/litmus/unistd_32.h
+++ b/include/litmus/unistd_32.h
@@ -24,4 +24,7 @@
 
 #define __NR_sched_trace_event __LSC(15)
 
-#define NR_litmus_syscalls 16
+#define __NR_litmus_should_yield_lock __LSC(16)
+#define __NR_litmus_dgl_should_yield_lock __LSC(17)
+
+#define NR_litmus_syscalls 18
diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h
index 75f9fcb897f5..ec732f99748f 100644
--- a/include/litmus/unistd_64.h
+++ b/include/litmus/unistd_64.h
@@ -40,4 +40,11 @@ __SYSCALL(__NR_set_aux_tasks, sys_set_aux_tasks)
 #define __NR_sched_trace_event __LSC(15)
 __SYSCALL(__NR_sched_trace_event, sys_sched_trace_event)
 
-#define NR_litmus_syscalls 16
+
+#define __NR_litmus_should_yield_lock __LSC(16)
+__SYSCALL(__NR_litmus_should_yield_lock, sys_litmus_should_yield_lock)
+#define __NR_litmus_dgl_should_yield_lock __LSC(17)
+__SYSCALL(__NR_litmus_dgl_should_yield_lock, sys_litmus_dgl_should_yield_lock)
+
+
+#define NR_litmus_syscalls 18
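
Note: the patch only adds the kernel-side syscall numbers and table entries; it ships no userspace bindings. Below is a minimal sketch of hypothetical C wrappers, assuming the __NR_litmus_should_yield_lock / __NR_litmus_dgl_should_yield_lock constants above are visible to userspace (for example through an exported copy of the litmus unistd header) and that the ordinary syscall(2) convention applies:

/* Hypothetical userspace wrappers -- not part of this patch. */
#include <unistd.h>
#include <sys/syscall.h>

/* Returns >0 if the caller should yield the lock, 0 if not, <0 on error. */
static inline int litmus_should_yield_lock(int lock_od)
{
	return (int) syscall(__NR_litmus_should_yield_lock, lock_od);
}

/* Same check over a dynamic group lock (DGL) described by 'dgl_ods'. */
static inline int litmus_dgl_should_yield_lock(int *dgl_ods, int dgl_size)
{
	return (int) syscall(__NR_litmus_dgl_should_yield_lock, dgl_ods, dgl_size);
}
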
diff --git a/litmus/fifo_lock.c b/litmus/fifo_lock.c
index 4598cddbdb79..ebb915621e9c 100644
--- a/litmus/fifo_lock.c
+++ b/litmus/fifo_lock.c
@@ -510,6 +510,26 @@ out:
 	return err;
 }
 
+int fifo_mutex_should_yield_lock(struct litmus_lock* l)
+{
+	int should_yield;
+	struct fifo_mutex *mutex = fifo_mutex_from_lock(l);
+	struct task_struct *t = current;
+	unsigned long flags;
+
+	if (unlikely(mutex->owner != t))
+		return -EINVAL;
+
+	local_irq_save(flags);
+
+	/* Yield if someone is waiting. Check does not need to be atomic. */
+	should_yield = waitqueue_active(&mutex->wait);
+
+	local_irq_restore(flags);
+
+	return should_yield;
+}
+
 
 void fifo_mutex_propagate_increase_inheritance(struct litmus_lock* l,
 		struct task_struct* t,
diff --git a/litmus/locking.c b/litmus/locking.c
index 67f5fda590c0..37aaab5fb7e8 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -184,6 +184,30 @@ asmlinkage long sys_litmus_unlock(int lock_od)
 	return err;
 }
 
+asmlinkage long sys_litmus_should_yield_lock(int lock_od)
+{
+	long err = -EINVAL;
+	struct od_table_entry* entry;
+	struct litmus_lock* l;
+
+	entry = get_entry_for_od(lock_od);
+	if (entry && is_lock(entry)) {
+		l = get_lock(entry);
+
+		if (l->ops->should_yield_lock) {
+			TRACE_CUR("Checking to see if should yield lock %d\n", l->ident);
+			err = l->ops->should_yield_lock(l);
+		}
+		else {
+			TRACE_CUR("Lock %d does not support yielding.\n", l->ident);
+			err = 0; /* report back "no, don't unlock" */
+		}
+	}
+
+	return err;
+}
+
+
 struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq)
 {
 	wait_queue_t* q;
@@ -718,6 +742,60 @@ out:
 	return err;
 }
 
+
+asmlinkage long sys_litmus_dgl_should_yield_lock(void* __user usr_dgl_ods, int dgl_size)
+{
+	long err = -EINVAL;
+	int dgl_ods[MAX_DGL_SIZE];
+
+	if(dgl_size > MAX_DGL_SIZE || dgl_size < 1)
+		goto out;
+
+	if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(*dgl_ods))))
+		goto out;
+
+	if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(*dgl_ods))))
+		goto out;
+
+
+	if (dgl_size == 1) {
+		/* DGL size of 1. Just call regular singular lock. */
+		TRACE_CUR("Treating as regular lock.\n");
+		err = sys_litmus_should_yield_lock(dgl_ods[0]);
+	}
+	else {
+		unsigned long flags;
+		int i;
+		err = 0;
+
+		local_irq_save(flags);
+
+		for(i = 0; (i < dgl_size) && (0 == err); ++i) {
+			struct od_table_entry *entry = get_entry_for_od(dgl_ods[i]);
+			if (entry && is_lock(entry)) {
+				struct litmus_lock *l = get_lock(entry);
+				if (l->ops->should_yield_lock) {
+					TRACE_CUR("Checking to see if should yield lock %d\n", l->ident);
+					err = l->ops->should_yield_lock(l);
+				}
+				else {
+					TRACE_CUR("Lock %d does not support yielding.\n", l->ident);
+				}
+			}
+			else {
+				TRACE_CUR("Invalid lock identifier\n");
+				err = -EINVAL;
+			}
+		}
+
+		local_irq_restore(flags);
+	}
+
+out:
+	return err;
+}
+
+
 #else // CONFIG_LITMUS_DGL_SUPPORT
 
 asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
@@ -730,6 +808,11 @@ asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size)
 	return -ENOSYS;
 }
 
+asmlinkage long sys_litmus_dgl_should_yield_lock(void* __user usr_dgl_ods, int dgl_size)
+{
+	return -ENOSYS;
+}
+
 #endif
 
 unsigned int __add_wait_queue_prio_exclusive(
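
For orientation, the new check is meant to support cooperative yielding inside long critical sections. A sketch of the intended userspace pattern follows (assumptions: litmus_lock()/litmus_unlock() are the usual liblitmus wrappers around sys_litmus_lock/sys_litmus_unlock, litmus_should_yield_lock() is the hypothetical wrapper sketched earlier, and do_a_chunk_of_work()/work_done are placeholders):

/* Sketch of a cooperative yield point inside a long critical section. */
litmus_lock(lock_od);
while (!work_done) {
	do_a_chunk_of_work();	/* placeholder for the real critical-section work */

	/* >0 means a waiter is queued (FIFO case) or already lends us its
	 * priority (PRIOQ case): briefly release so it can take the lock. */
	if (litmus_should_yield_lock(lock_od) > 0) {
		litmus_unlock(lock_od);
		litmus_lock(lock_od);
	}
}
litmus_unlock(lock_od);
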
diff --git a/litmus/prioq_lock.c b/litmus/prioq_lock.c
index 61682d20da51..2d363cffd482 100644
--- a/litmus/prioq_lock.c
+++ b/litmus/prioq_lock.c
@@ -1031,6 +1031,26 @@ out:
 	return err;
 }
 
+int prioq_mutex_should_yield_lock(struct litmus_lock* l)
+{
+	int should_yield;
+	struct prioq_mutex *mutex = prioq_mutex_from_lock(l);
+	struct task_struct *t = current;
+	unsigned long flags;
+
+	if (unlikely(mutex->owner != t))
+		return -EINVAL;
+
+	local_irq_save(flags);
+
+	/* if hp_waiter can preempt 't', then 't' should be inheriting from hp_waiter */
+	should_yield = (NULL != mutex->hp_waiter) &&
+		(effective_priority(t) == effective_priority(mutex->hp_waiter));
+
+	local_irq_restore(flags);
+
+	return should_yield;
+}
 
 void prioq_mutex_propagate_increase_inheritance(struct litmus_lock* l,
 		struct task_struct* t,
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index b931c83579bc..024470a7fb58 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -2380,6 +2380,7 @@ static void nested_decrease_priority_inheritance(struct task_struct* t,
 static struct litmus_lock_ops cedf_fifo_mutex_lock_ops = {
 	.lock = fifo_mutex_lock,
 	.unlock = fifo_mutex_unlock,
+	.should_yield_lock = fifo_mutex_should_yield_lock,
 	.close = fifo_mutex_close,
 	.deallocate = fifo_mutex_free,
 
@@ -2414,6 +2415,7 @@ static struct litmus_lock* cedf_new_fifo_mutex(void)
 static struct litmus_lock_ops cedf_prioq_mutex_lock_ops = {
 	.lock = prioq_mutex_lock,
 	.unlock = prioq_mutex_unlock,
+	.should_yield_lock = prioq_mutex_should_yield_lock,
 	.close = prioq_mutex_close,
 	.deallocate = prioq_mutex_free,
 
@@ -2448,6 +2450,7 @@ static struct litmus_lock* cedf_new_prioq_mutex(void)
 static struct litmus_lock_ops cedf_ikglp_lock_ops = {
 	.lock = ikglp_lock,
 	.unlock = ikglp_unlock,
+	.should_yield_lock = NULL,
 	.close = ikglp_close,
 	.deallocate = ikglp_free,
 
@@ -2479,6 +2482,7 @@ static struct litmus_lock* cedf_new_ikglp(void* __user arg)
 static struct litmus_lock_ops cedf_kfmlp_lock_ops = {
 	.lock = kfmlp_lock,
 	.unlock = kfmlp_unlock,
+	.should_yield_lock = NULL,
 	.close = kfmlp_close,
 	.deallocate = kfmlp_free,
 
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 4aeb7a0db3bd..c652048f8df1 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -1495,6 +1495,7 @@ static void nested_decrease_priority_inheritance(struct task_struct* t,
 static struct litmus_lock_ops gsnedf_fifo_mutex_lock_ops = {
 	.lock = fifo_mutex_lock,
 	.unlock = fifo_mutex_unlock,
+	.should_yield_lock = fifo_mutex_should_yield_lock,
 	.close = fifo_mutex_close,
 	.deallocate = fifo_mutex_free,
 
@@ -1518,6 +1519,7 @@ static struct litmus_lock* gsnedf_new_fifo_mutex(void)
 static struct litmus_lock_ops gsnedf_ikglp_lock_ops = {
 	.lock = ikglp_lock,
 	.unlock = ikglp_unlock,
+	.should_yield_lock = NULL,
 	.close = ikglp_close,
 	.deallocate = ikglp_free,
 
@@ -1539,6 +1541,7 @@ static struct litmus_lock* gsnedf_new_ikglp(void* __user arg)
 static struct litmus_lock_ops gsnedf_kfmlp_lock_ops = {
 	.lock = kfmlp_lock,
 	.unlock = kfmlp_unlock,
+	.should_yield_lock = NULL,
 	.close = kfmlp_close,
 	.deallocate = kfmlp_free,
 
@@ -1741,6 +1744,7 @@ void gsnedf_fmlp_free(struct litmus_lock* lock)
 static struct litmus_lock_ops gsnedf_fmlp_lock_ops = {
 	.close = gsnedf_fmlp_close,
 	.lock = gsnedf_fmlp_lock,
+	.should_yield_lock = NULL,
 	.unlock = gsnedf_fmlp_unlock,
 	.deallocate = gsnedf_fmlp_free,
 