author	Jonathan Herman <hermanjl@cs.unc.edu>	2013-04-19 17:31:52 -0400
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-04-19 17:31:52 -0400
commit	f70a290e8a889caa905ab7650c696f2bb299be1a (patch)
tree	56f0886d839499e9f522f189999024b3e86f9be2 /litmus/locking.c
parent	fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (diff)
parent	7ef4a793a624c6e66c16ca1051847f75161f5bec (diff)
Merge branch 'wip-nested-locking' into tegra-nested-locking
Conflicts:
	Makefile
	include/linux/fs.h
Diffstat (limited to 'litmus/locking.c')
-rw-r--r--	litmus/locking.c	236
1 file changed, 236 insertions(+), 0 deletions(-)
diff --git a/litmus/locking.c b/litmus/locking.c
new file mode 100644
index 00000000000..1d46d148e9e
--- /dev/null
+++ b/litmus/locking.c
@@ -0,0 +1,236 @@
#include <linux/sched.h>
#include <litmus/litmus.h>
#include <litmus/fdso.h>

#ifdef CONFIG_LITMUS_LOCKING

#include <litmus/sched_plugin.h>
#include <litmus/trace.h>
#include <litmus/wait.h>

static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg);
static int open_generic_lock(struct od_table_entry* entry, void* __user arg);
static int close_generic_lock(struct od_table_entry* entry);
static void destroy_generic_lock(obj_type_t type, void* sem);

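/* fdso callbacks for lock objects: locks are exported to user space as
 * file-descriptor-attached shared objects and are created, opened,
 * closed, and destroyed through these generic handlers. */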
struct fdso_ops generic_lock_ops = {
	.create  = create_generic_lock,
	.open    = open_generic_lock,
	.close   = close_generic_lock,
	.destroy = destroy_generic_lock
};

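/* Delegate lock creation to the active scheduler plugin; only the
 * plugin knows which locking protocols it supports for a given type. */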
static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg)
{
	struct litmus_lock* lock;
	int err;

	err = litmus->allocate_lock(&lock, type, arg);
	if (err == 0)
		*obj_ref = lock;
	return err;
}

static int open_generic_lock(struct od_table_entry* entry, void* __user arg)
{
	struct litmus_lock* lock = get_lock(entry);
	if (lock->ops->open)
		return lock->ops->open(lock, arg);
	else
		return 0; /* default: any task can open it */
}

static int close_generic_lock(struct od_table_entry* entry)
{
	struct litmus_lock* lock = get_lock(entry);
	if (lock->ops->close)
		return lock->ops->close(lock);
	else
		return 0; /* default: closing succeeds */
}

static void destroy_generic_lock(obj_type_t type, void* obj)
{
	struct litmus_lock* lock = (struct litmus_lock*) obj;
	lock->ops->deallocate(lock);
}

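/* Acquire a dynamic group lock (DGL) on all resources named in the
 * resource mask. Each set bit in @lock_ods is the object descriptor of
 * one resource; e.g., a task that opened ods 3 and 5 would pass the
 * mask (1 << 3) | (1 << 5). The first od in the mask, ffs(lock_ods) - 1,
 * is used to locate the DGL semaphore that serializes the group. */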
asmlinkage long sys_dynamic_group_lock(resource_mask_t lock_ods)
{
	long err = -EINVAL;
	struct od_table_entry* entry;
	struct litmus_lock* l;

	TS_LOCK_START;

	entry = get_entry_for_od(ffs(lock_ods) - 1);
	if (entry && is_lock(entry)) {
		l = get_lock(entry);
		if (l->type == DGL_SEM) {
			err = l->ops->dynamic_group_lock(l, lock_ods);
		} else {
			TRACE_CUR("Attempted to DG-lock type: %d\n", l->type);
		}
	} else {
		TRACE_CUR("Attempted to lock invalid entry %p\n", entry);
	}

	TS_LOCK_END;

	return err;
}

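/* Release the resources named in @lock_ods; as in sys_dynamic_group_lock(),
 * the group's DGL semaphore is found via the first od in the mask. */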
asmlinkage long sys_dynamic_group_unlock(resource_mask_t lock_ods)
{
	long err = -EINVAL;
	struct od_table_entry* entry;
	struct litmus_lock* l;

	TS_UNLOCK_START;

	entry = get_entry_for_od(ffs(lock_ods) - 1);
	if (entry && is_lock(entry)) {
		l = get_lock(entry);
		if (l->type == DGL_SEM) {
			err = l->ops->dynamic_group_unlock(l, lock_ods);
		} else {
			TRACE_CUR("Attempted to DG-unlock type: %d\n", l->type);
		}
	} else {
		TRACE_CUR("Attempted to unlock invalid entry %p\n", entry);
	}

	TS_UNLOCK_END;

	return err;
}

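/* Acquire the single lock identified by object descriptor @lock_od.
 * Blocking behavior is protocol-specific and lives behind the lock's
 * ops->lock() callback. */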
asmlinkage long sys_litmus_lock(int lock_od)
{
	long err = -EINVAL;
	struct od_table_entry* entry;
	struct litmus_lock* l;

	TS_LOCK_START;

	entry = get_entry_for_od(lock_od);
	if (entry && is_lock(entry)) {
		l = get_lock(entry);
		TRACE_CUR("attempts to lock 0x%p\n", l);
		err = l->ops->lock(l);
	}

	/* Note: task may have been suspended or preempted in between! Take
	 * this into account when computing overheads. */
	TS_LOCK_END;

	TS_SYSCALL_OUT_START;

	return err;
}

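/* Release the lock identified by object descriptor @lock_od via the
 * protocol's ops->unlock() callback. */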
asmlinkage long sys_litmus_unlock(int lock_od)
{
	long err = -EINVAL;
	struct od_table_entry* entry;
	struct litmus_lock* l;

	TS_SYSCALL_IN_START;

	TS_SYSCALL_IN_END;

	TS_UNLOCK_START;

	entry = get_entry_for_od(lock_od);
	if (entry && is_lock(entry)) {
		l = get_lock(entry);
		TRACE_CUR("attempts to unlock 0x%p\n", l);
		err = l->ops->unlock(l);
	}

	/* Note: task may have been preempted in between! Take this into
	 * account when computing overheads. */
	TS_UNLOCK_END;

	TS_SYSCALL_OUT_START;

	return err;
}

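/* Dequeue and return the first task waiting on @wq, or NULL if no task
 * is waiting. Caller must hold the wait-queue lock (this is the
 * double-underscore, unlocked variant). */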
struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq)
{
	wait_queue_t* q;
	struct task_struct* t = NULL;

	if (waitqueue_active(wq)) {
		q = list_entry(wq->task_list.next,
			       wait_queue_t, task_list);
		t = (struct task_struct*) q->private;
		__remove_wait_queue(wq, q);
	}
	return t;
}

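/* Like __waitqueue_remove_first(), but leave the task in the queue. */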
struct task_struct* __waitqueue_peek_first(wait_queue_head_t *wq)
{
	wait_queue_t* q;
	struct task_struct* t = NULL;

	if (waitqueue_active(wq)) {
		q = list_entry(wq->task_list.next,
			       wait_queue_t, task_list);
		t = (struct task_struct*) q->private;
	}
	return t;
}

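/* Enqueue @new in priority order (earlier priority first, ties broken
 * by tie_breaker) and mark it exclusive, so that a wake-up resumes only
 * this one waiter. Returns the number of entries that precede @new. */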
unsigned int __add_wait_queue_prio_exclusive(
	wait_queue_head_t* head,
	prio_wait_queue_t *new)
{
	struct list_head *pos;
	unsigned int passed = 0;

	new->wq.flags |= WQ_FLAG_EXCLUSIVE;

	/* find the first queued entry that does not precede new */
	list_for_each(pos, &head->task_list) {
		prio_wait_queue_t* queued = list_entry(pos, prio_wait_queue_t,
						       wq.task_list);

		if (unlikely(lt_before(new->priority, queued->priority) ||
			     (new->priority == queued->priority &&
			      new->tie_breaker < queued->tie_breaker))) {
			/* new precedes pos; insert it here */
			__list_add(&new->wq.task_list, pos->prev, pos);
			goto out;
		}
		passed++;
	}

	/* If we get here, either the list is empty or every queued
	 * element precedes new; add new at the end. */
	list_add_tail(&new->wq.task_list, &head->task_list);
out:
	return passed;
}

#else

struct fdso_ops generic_lock_ops = {};

/* With CONFIG_LITMUS_LOCKING disabled, the locking syscalls report
 * -ENOSYS. Stubs for the dynamic group lock syscalls are provided as
 * well so that the same entry points exist in both configurations. */
asmlinkage long sys_dynamic_group_lock(resource_mask_t lock_ods)
{
	return -ENOSYS;
}

asmlinkage long sys_dynamic_group_unlock(resource_mask_t lock_ods)
{
	return -ENOSYS;
}

asmlinkage long sys_litmus_lock(int sem_od)
{
	return -ENOSYS;
}

asmlinkage long sys_litmus_unlock(int sem_od)
{
	return -ENOSYS;
}

#endif