path: root/litmus/locking.c
author     Glenn Elliott <gelliott@cs.unc.edu>  2012-04-11 15:57:59 -0400
committer  Glenn Elliott <gelliott@cs.unc.edu>  2012-04-11 15:57:59 -0400
commit     8eb55f8fa1a2c3854f0f77b9b8663178c0129f6c (patch)
tree       40a244d4d80512342a8b307253c5dd58e5b9cd2b /litmus/locking.c
parent     0c80d0acbbc2103a744f2b2b76cb66ddeb28ebbf (diff)
Added support for Dynamic Group Locks (DGLs)
Added support for Dynamic Group Locks. Locks are FIFO-ordered (no timestamps), so a single big DGL lock is needed to enqueue for all requested resources atomically. Unfortunately, this requires nested inheritance to use coarse-grain locking. Coarse-grain locking is used when DGLs are enabled; fine-grain locking is used when DGLs are disabled. TODO: Clean up the IKGLP implementation. There is a lot of needless debug/TRACE work.
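
For context, the sketch below shows how a userspace task might drive the two new system calls added by this patch. The wrapper names litmus_dgl_lock() and litmus_dgl_unlock() are assumptions made for illustration only (they are not part of this diff); they are presumed to pass an array of lock object descriptors and its length straight through to sys_litmus_dgl_lock() and sys_litmus_dgl_unlock().

/* Hypothetical userspace wrappers -- assumed to forward the od array and its
 * size to sys_litmus_dgl_lock()/sys_litmus_dgl_unlock(); not defined by this patch. */
extern int litmus_dgl_lock(int *dgl_ods, int dgl_size);
extern int litmus_dgl_unlock(int *dgl_ods, int dgl_size);

/* Acquire two resources as one atomic group request, run the critical
 * section, then release. od_a and od_b are object descriptors of two
 * already-opened LITMUS^RT locks (at most MAX_DGL_SIZE may be requested). */
int with_both_resources(int od_a, int od_b, void (*critical_section)(void))
{
	int ods[2] = { od_a, od_b };
	int ret = litmus_dgl_lock(ods, 2);	/* suspends until ALL locks are held */

	if (ret == 0) {
		critical_section();		/* both resources are held here */
		ret = litmus_dgl_unlock(ods, 2);	/* locks are released in reverse order */
	}
	return ret;
}

Either the whole group is granted before the task runs again or the task suspends until the remaining locks are handed over; the caller never observes a partial acquisition.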
Diffstat (limited to 'litmus/locking.c')
-rw-r--r--	litmus/locking.c	339
1 file changed, 320 insertions(+), 19 deletions(-)
diff --git a/litmus/locking.c b/litmus/locking.c
index 19ed5a8e16e9..b2f4a205cd04 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -6,6 +6,10 @@
 #include <litmus/trace.h>
 #include <litmus/litmus.h>
 
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+#include <linux/uaccess.h>
+#endif
+
 static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg);
 static int open_generic_lock(struct od_table_entry* entry, void* __user arg);
 static int close_generic_lock(struct od_table_entry* entry);
@@ -31,7 +35,7 @@ static inline struct litmus_lock* get_lock(struct od_table_entry* entry)
 
 
 atomic_t lock_id_gen = ATOMIC_INIT(0);
-raw_spinlock_t rsm_global_lock;
+//raw_spinlock_t rsm_global_lock;
 
 
 static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg)
@@ -50,9 +54,9 @@ static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user ar
 
 		lock->ident = atomic_inc_return(&lock_id_gen);
 
-		if(lock->ident == 1) {
-			raw_spin_lock_init(&rsm_global_lock);
-		}
+//		if(lock->ident == 1) {
+//			raw_spin_lock_init(&rsm_global_lock);
+//		}
 #endif
 		*obj_ref = lock;
 	}
@@ -142,25 +146,322 @@ struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq)
 }
 
 
-//#ifdef CONFIG_LITMUS_NESTED_LOCKING
-///* not "lock_nest" ... get it? */
-//void nest_lock(struct litmus_lock *l, struct task_struct *t)
-//{
-//	if(tsk_rt(t)->last_lock) {
-//		/* push new lock to front of old lock */
-//		struct litmus_lock *old = tsk_rt(t)->last_lock;
-//
-//		list_add(&l->lock_chain, &old->lock_chain);
-//	}
-//
-//	tsk_rt(t)->last_lock = l;
-//
-//	// local inh now becomes transitive inh
-//	tsk_rt(t)->trans_prio = tsk_rt(t)->local_prio;	// what about old transitive prio???
-//	tsk_rt(t)->local_prio = NULL;
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+
+void select_next_lock(dgl_wait_state_t* dgl_wait, struct litmus_lock* prev_lock)
+{
+//	int i = dgl_wait->size - 1;
+
+
+	BUG_ON(tsk_rt(dgl_wait->task)->blocked_lock);
+
+	WARN_ON(dgl_wait->locks[dgl_wait->last_primary] != prev_lock);
+//
+//	// since dgl_wait->task->blocked_lock, all locks after prev_lock
+//	// are already held.
+//
+//	// find the lock after prev.
+//	if(prev_lock) {
+//		for(/**/; i >= 0; --i) {
+//			if(prev_lock == dgl_wait->locks[i]) {
+//				--i;
+//				break;
+//			}
+//			else {
+//				BUG_ON(!dgl_wait->locks[i]->ops->is_owner(dgl_wait->locks[i], dgl_wait->task));
+//			}
+//		}
+//	}
+
+	for(dgl_wait->last_primary = dgl_wait->last_primary - 1;
+		dgl_wait->last_primary >= 0;
+		--(dgl_wait->last_primary)){
+		if(!dgl_wait->locks[dgl_wait->last_primary]->ops->is_owner(dgl_wait->locks[dgl_wait->last_primary], dgl_wait->task)) {
+
+			tsk_rt(dgl_wait->task)->blocked_lock = dgl_wait->locks[dgl_wait->last_primary];
+			mb();
+
+			TRACE_CUR("New blocked lock is %d\n", dgl_wait->locks[dgl_wait->last_primary]->ident);
+
+			break;
+		}
+	}
+
+//	for(/**/; i >= 0; --i) {
+//		struct litmus_lock *l = dgl_wait->locks[i];
+//		if(!l->ops->is_owner(l, dgl_wait->task)) {
+//
+//			tsk_rt(dgl_wait->task)->blocked_lock = l;
+//			mb();
+//
+//			TRACE_CUR("New blocked lock is %d\n", l->ident);
+//
+//			if(dgl_wait->last_primary >= 0)
+//			{
+//				TRACE_CUR("old meth = %d; new meth = %d\n", l->ident, dgl_wait->locks[dgl_wait->last_primary]->ident);
+//				WARN_ON(dgl_wait->locks[dgl_wait->last_primary] != l);
+//			}
+//
+//			break;
+//		}
+//		else {
+//			TRACE_CUR("Lock %d is actually held!\n", l->ident);
+//		}
+//	}
+}
+
+int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key)
+{
+	// should never be called.
+	BUG();
+	return 1;
+}
+
+void __waitqueue_dgl_remove_first(wait_queue_head_t *wq, dgl_wait_state_t** dgl_wait, struct task_struct **task)
+{
+	wait_queue_t *q;
+
+	*dgl_wait = NULL;
+	*task = NULL;
+
+	if (waitqueue_active(wq)) {
+		q = list_entry(wq->task_list.next,
+				wait_queue_t, task_list);
+
+		if(q->func == dgl_wake_up) {
+			*dgl_wait = (dgl_wait_state_t*) q->private;
+		}
+		else {
+			*task = (struct task_struct*) q->private;
+		}
+
+		__remove_wait_queue(wq, q);
+	}
+}
+
+void init_dgl_waitqueue_entry(wait_queue_t *wq_node, dgl_wait_state_t* dgl_wait)
+{
+	init_waitqueue_entry(wq_node, dgl_wait->task);
+	wq_node->private = dgl_wait;
+	wq_node->func = dgl_wake_up;
+}
+
+
+static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait)
+{
+	int i;
+	unsigned long irqflags; //, dummyflags;
+	raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(dgl_wait->task);
+
+	BUG_ON(dgl_wait->task != current);
+
+	raw_spin_lock_irqsave(dgl_lock, irqflags);
+
+
+	dgl_wait->nr_remaining = dgl_wait->size;
+	//atomic_set(&dgl_wait->nr_remaining, dgl_wait->size);
+
+	// try to acquire each lock. enqueue (non-blocking) if it is unavailable.
+	for(i = 0; i < dgl_wait->size; ++i) {
+		struct litmus_lock *l = dgl_wait->locks[i];
+
+		// dgl_lock() must set task state to TASK_UNINTERRUPTIBLE if task blocks.
+
+		if(l->ops->dgl_lock(l, dgl_wait, &dgl_wait->wq_nodes[i])) {
+			--(dgl_wait->nr_remaining);
+			//atomic_dec(&dgl_wait->nr_remaining);
+			TRACE_CUR("Acquired lock %d immediatly.\n", l->ident);
+		}
+	}
+
+	//if(atomic_read(&dgl_wait->nr_remaining) == 0) {
+	if(dgl_wait->nr_remaining == 0) {
+		// acquired entire group immediatly
+		TRACE_CUR("Acquired all locks in DGL immediatly!\n");
+	}
+	else {
+
+		TRACE_CUR("As many as %d locks in DGL are pending. Suspending.\n", dgl_wait->nr_remaining); //atomic_read(&dgl_wait->nr_remaining));
+
+		for(i = dgl_wait->size - 1; i >= 0; --i) {
+			struct litmus_lock *l = dgl_wait->locks[i];
+			if(!l->ops->is_owner(l, dgl_wait->task)) { // double-check to be thread safe
+
+				TRACE_CUR("Activating priority inheritance on lock %d\n", l->ident);
+
+				TS_DGL_LOCK_SUSPEND;
+
+				l->ops->enable_priority(l, dgl_wait);
+				dgl_wait->last_primary = i;
+
+				TRACE_CUR("Suspending for lock %d\n", l->ident);
+
+				raw_spin_unlock_irqrestore(dgl_lock, irqflags); // free dgl_lock before suspending
+
+				schedule(); // suspend!!!
+
+				TS_DGL_LOCK_RESUME;
+
+				TRACE_CUR("Woken up from DGL suspension.\n");
+
+				goto all_acquired; // we should hold all locks when we wake up.
+			}
+		}
+
+		TRACE_CUR("Didn't have to suspend after all, but calling schedule() anyway.\n");
+		BUG();
+	}
+
+	raw_spin_unlock_irqrestore(dgl_lock, irqflags);
+
+all_acquired:
+
+	// FOR SANITY CHECK FOR TESTING
+	for(i = 0; i < dgl_wait->size; ++i) {
+		struct litmus_lock *l = dgl_wait->locks[i];
+		BUG_ON(!l->ops->is_owner(l, dgl_wait->task));
+	}
+
+	TRACE_CUR("Acquired entire DGL\n");
+
+	return 0;
+}
+
+//static int supports_dgl(struct litmus_lock *l)
+//{
+//	struct litmus_lock_ops* ops = l->ops;
+//
+//	return (ops->dgl_lock &&
+//			ops->is_owner &&
+//			ops->enable_priority);
 //}
-//#endif
 
+asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
+{
+	struct task_struct *t = current;
+	long err = -EINVAL;
+	int dgl_ods[MAX_DGL_SIZE];
+	int i;
+
+	dgl_wait_state_t dgl_wait_state;  // lives on the stack until all resources in DGL are held.
+
+	if(dgl_size > MAX_DGL_SIZE || dgl_size < 1)
+		goto out;
+
+	if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(int))))
+		goto out;
+
+	if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(int))))
+		goto out;
+
+	if (!is_realtime(t)) {
+		err = -EPERM;
+		goto out;
+	}
+
+	for(i = 0; i < dgl_size; ++i) {
+		struct od_table_entry *entry = get_entry_for_od(dgl_ods[i]);
+		if(entry && is_lock(entry)) {
+			dgl_wait_state.locks[i] = get_lock(entry);
+//			if(!supports_dgl(dgl_wait_state.locks[i])) {
+//				TRACE_CUR("Lock %d does not support all required DGL operations.\n",
+//						dgl_wait_state.locks[i]->ident);
+//				goto out;
+//			}
+		}
+		else {
+			TRACE_CUR("Invalid lock identifier\n");
+			goto out;
+		}
+	}
+
+	dgl_wait_state.task = t;
+	dgl_wait_state.size = dgl_size;
+
+	TS_DGL_LOCK_START;
+	err = do_litmus_dgl_lock(&dgl_wait_state);
+
+	/* Note: task my have been suspended or preempted in between! Take
+	 * this into account when computing overheads. */
+	TS_DGL_LOCK_END;
+
+out:
+	return err;
+}
+
+static long do_litmus_dgl_unlock(struct litmus_lock* dgl_locks[], int dgl_size)
+{
+	int i;
+	long err = 0;
+
+	TRACE_CUR("Unlocking a DGL of %d size\n", dgl_size);
+
+	for(i = dgl_size - 1; i >= 0; --i) { // unlock in reverse order
+
+		struct litmus_lock *l = dgl_locks[i];
+		long tmp_err;
+
+		TRACE_CUR("Unlocking lock %d of DGL.\n", l->ident);
+
+		tmp_err = l->ops->unlock(l);
+
+		if(tmp_err) {
+			TRACE_CUR("There was an error unlocking %d: %d.\n", l->ident, tmp_err);
+			err = tmp_err;
+		}
+	}
+
+	TRACE_CUR("DGL unlocked. err = %d\n", err);
+
+	return err;
+}
+
+asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size)
+{
+	long err = -EINVAL;
+	int dgl_ods[MAX_DGL_SIZE];
+	struct od_table_entry* entry;
+	int i;
+
+	struct litmus_lock* dgl_locks[MAX_DGL_SIZE];
+
+	if(dgl_size > MAX_DGL_SIZE || dgl_size < 1)
+		goto out;
+
+	if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(int))))
+		goto out;
+
+	if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(int))))
+		goto out;
+
+	for(i = 0; i < dgl_size; ++i) {
+		entry = get_entry_for_od(dgl_ods[i]);
+		if(entry && is_lock(entry)) {
+			dgl_locks[i] = get_lock(entry);
+//			if(!supports_dgl(dgl_locks[i])) {
+//				TRACE_CUR("Lock %d does not support all required DGL operations.\n",
+//						dgl_locks[i]->ident);
+//				goto out;
+//			}
+		}
+		else {
+			TRACE_CUR("Invalid lock identifier\n");
+			goto out;
+		}
+	}
+
+	TS_DGL_UNLOCK_START;
+	err = do_litmus_dgl_unlock(dgl_locks, dgl_size);
+
+	/* Note: task my have been suspended or preempted in between! Take
+	 * this into account when computing overheads. */
+	TS_DGL_UNLOCK_END;
+
+out:
+	return err;
+}
+
+#endif
 
 
 #else