path: root/litmus/locking.c
author     Glenn Elliott <gelliott@cs.unc.edu>  2012-04-13 16:18:03 -0400
committer  Glenn Elliott <gelliott@cs.unc.edu>  2012-04-13 16:18:03 -0400
commit     c0667dc4894e913048cf8904f0ce9a79b481b556 (patch)
tree       1803f6f9a6de45c949f57d1172aab4aa2546393b /litmus/locking.c
parent     8eb55f8fa1a2c3854f0f77b9b8663178c0129f6c (diff)
Move RSM and IKGLP implementations to their own .c files (wip-ikglp)
Also reformatted the code to be slightly more compliant with standard coding practice.
Diffstat (limited to 'litmus/locking.c')
-rw-r--r--  litmus/locking.c  276
1 file changed, 150 insertions(+), 126 deletions(-)
diff --git a/litmus/locking.c b/litmus/locking.c
index b2f4a205cd04..f78169dbbeef 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -22,6 +22,9 @@ struct fdso_ops generic_lock_ops = {
 	.destroy = destroy_generic_lock
 };
 
+static atomic_t lock_id_gen = ATOMIC_INIT(0);
+
+
 static inline bool is_lock(struct od_table_entry* entry)
 {
 	return entry->class == &generic_lock_ops;
@@ -33,11 +36,6 @@ static inline struct litmus_lock* get_lock(struct od_table_entry* entry)
 	return (struct litmus_lock*) entry->obj->obj;
 }
 
-
-atomic_t lock_id_gen = ATOMIC_INIT(0);
-//raw_spinlock_t rsm_global_lock;
-
-
 static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg)
 {
 	struct litmus_lock* lock;
@@ -48,16 +46,11 @@ static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user ar
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 		lock->nest.lock = lock;
 		lock->nest.hp_waiter_eff_prio = NULL;
 
 		INIT_BINHEAP_NODE(&lock->nest.hp_binheap_node);
 		WARN_ON(!(lock->nest.hp_waiter_ptr));
-
-		lock->ident = atomic_inc_return(&lock_id_gen);
-
-//		if(lock->ident == 1) {
-//			raw_spin_lock_init(&rsm_global_lock);
-//		}
 #endif
+		lock->ident = atomic_inc_return(&lock_id_gen);
 		*obj_ref = lock;
 	}
 	return err;
@@ -145,69 +138,86 @@ struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq)
 	return(t);
 }
 
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+
+void print_hp_waiters(struct binheap_node* n, int depth)
+{
+	struct litmus_lock *l;
+	struct nested_info *nest;
+	char padding[81] = "                                                                                ";
+	struct task_struct *hp = NULL;
+	struct task_struct *hp_eff = NULL;
+	struct task_struct *node_prio = NULL;
+
+
+	if(n == NULL) {
+		TRACE("+-> %p\n", NULL);
+		return;
+	}
+
+	nest = binheap_entry(n, struct nested_info, hp_binheap_node);
+	l = nest->lock;
+
+	if(depth*2 <= 80)
+		padding[depth*2] = '\0';
+
+	if(nest->hp_waiter_ptr && *(nest->hp_waiter_ptr)) {
+		hp = *(nest->hp_waiter_ptr);
+
+		if(tsk_rt(hp)->inh_task) {
+			hp_eff = tsk_rt(hp)->inh_task;
+		}
+	}
+
+	node_prio = nest->hp_waiter_eff_prio;
+
+	TRACE("%s+-> %s/%d [waiter = %s/%d] [waiter's inh = %s/%d] (lock = %d)\n",
+		  padding,
+		  (node_prio) ? node_prio->comm : "nil",
+		  (node_prio) ? node_prio->pid : -1,
+		  (hp) ? hp->comm : "nil",
+		  (hp) ? hp->pid : -1,
+		  (hp_eff) ? hp_eff->comm : "nil",
+		  (hp_eff) ? hp_eff->pid : -1,
+		  l->ident);
+
+	if(n->left) print_hp_waiters(n->left, depth+1);
+	if(n->right) print_hp_waiters(n->right, depth+1);
+}
+#endif
+
 
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 
-void select_next_lock(dgl_wait_state_t* dgl_wait, struct litmus_lock* prev_lock)
+void select_next_lock(dgl_wait_state_t* dgl_wait /*, struct litmus_lock* prev_lock*/)
 {
-//	int i = dgl_wait->size - 1;
-
-
+	/*
+	 We pick the next lock in reverse order. This causes inheritance propagation
+	 from locks received earlier to flow in the same direction as regular nested
+	 locking. This might make fine-grain DGL easier in the future.
+	 */
+
 	BUG_ON(tsk_rt(dgl_wait->task)->blocked_lock);
 
-	WARN_ON(dgl_wait->locks[dgl_wait->last_primary] != prev_lock);
-//
-//	// since dgl_wait->task->blocked_lock, all locks after prev_lock
-//	// are already held.
-//
-//	// find the lock after prev.
-//	if(prev_lock) {
-//		for(/**/; i >= 0; --i) {
-//			if(prev_lock == dgl_wait->locks[i]) {
-//				--i;
-//				break;
-//			}
-//			else {
-//				BUG_ON(!dgl_wait->locks[i]->ops->is_owner(dgl_wait->locks[i], dgl_wait->task));
-//			}
-//		}
-//	}
-
+	//WARN_ON(dgl_wait->locks[dgl_wait->last_primary] != prev_lock);
+
+	// note reverse order
 	for(dgl_wait->last_primary = dgl_wait->last_primary - 1;
 		dgl_wait->last_primary >= 0;
 		--(dgl_wait->last_primary)){
-		if(!dgl_wait->locks[dgl_wait->last_primary]->ops->is_owner(dgl_wait->locks[dgl_wait->last_primary], dgl_wait->task)) {
-
-			tsk_rt(dgl_wait->task)->blocked_lock = dgl_wait->locks[dgl_wait->last_primary];
+		if(!dgl_wait->locks[dgl_wait->last_primary]->ops->is_owner(
+					dgl_wait->locks[dgl_wait->last_primary], dgl_wait->task)) {
+
+			tsk_rt(dgl_wait->task)->blocked_lock =
+					dgl_wait->locks[dgl_wait->last_primary];
 			mb();
 
-			TRACE_CUR("New blocked lock is %d\n", dgl_wait->locks[dgl_wait->last_primary]->ident);
-
+			TRACE_CUR("New blocked lock is %d\n",
+					  dgl_wait->locks[dgl_wait->last_primary]->ident);
+
 			break;
 		}
 	}
-
-//	for(/**/; i >= 0; --i) {
-//		struct litmus_lock *l = dgl_wait->locks[i];
-//		if(!l->ops->is_owner(l, dgl_wait->task)) {
-//
-//			tsk_rt(dgl_wait->task)->blocked_lock = l;
-//			mb();
-//
-//			TRACE_CUR("New blocked lock is %d\n", l->ident);
-//
-//			if(dgl_wait->last_primary >= 0)
-//			{
-//				TRACE_CUR("old meth = %d; new meth = %d\n", l->ident, dgl_wait->locks[dgl_wait->last_primary]->ident);
-//				WARN_ON(dgl_wait->locks[dgl_wait->last_primary] != l);
-//			}
-//
-//			break;
-//		}
-//		else {
-//			TRACE_CUR("Lock %d is actually held!\n", l->ident);
-//		}
-//	}
 }
 
 int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key)
@@ -217,24 +227,26 @@ int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key)
 	return 1;
 }
 
-void __waitqueue_dgl_remove_first(wait_queue_head_t *wq, dgl_wait_state_t** dgl_wait, struct task_struct **task)
+void __waitqueue_dgl_remove_first(wait_queue_head_t *wq,
+								  dgl_wait_state_t** dgl_wait,
+								  struct task_struct **task)
 {
 	wait_queue_t *q;
 
 	*dgl_wait = NULL;
 	*task = NULL;
 
 	if (waitqueue_active(wq)) {
 		q = list_entry(wq->task_list.next,
 					   wait_queue_t, task_list);
 
 		if(q->func == dgl_wake_up) {
 			*dgl_wait = (dgl_wait_state_t*) q->private;
 		}
 		else {
 			*task = (struct task_struct*) q->private;
 		}
 
 		__remove_wait_queue(wq, q);
 	}
 }
@@ -252,76 +264,76 @@ static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait)
 	int i;
 	unsigned long irqflags; //, dummyflags;
 	raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(dgl_wait->task);
 
 	BUG_ON(dgl_wait->task != current);
 
 	raw_spin_lock_irqsave(dgl_lock, irqflags);
 
 
 	dgl_wait->nr_remaining = dgl_wait->size;
-	//atomic_set(&dgl_wait->nr_remaining, dgl_wait->size);
-
+
 	// try to acquire each lock. enqueue (non-blocking) if it is unavailable.
 	for(i = 0; i < dgl_wait->size; ++i) {
 		struct litmus_lock *l = dgl_wait->locks[i];
 
 		// dgl_lock() must set task state to TASK_UNINTERRUPTIBLE if task blocks.
 
 		if(l->ops->dgl_lock(l, dgl_wait, &dgl_wait->wq_nodes[i])) {
 			--(dgl_wait->nr_remaining);
-			//atomic_dec(&dgl_wait->nr_remaining);
 			TRACE_CUR("Acquired lock %d immediatly.\n", l->ident);
 		}
 	}
 
-	//if(atomic_read(&dgl_wait->nr_remaining) == 0) {
 	if(dgl_wait->nr_remaining == 0) {
 		// acquired entire group immediatly
 		TRACE_CUR("Acquired all locks in DGL immediatly!\n");
 	}
 	else {
+
+		TRACE_CUR("As many as %d locks in DGL are pending. Suspending.\n",
+				  dgl_wait->nr_remaining);
 
-		TRACE_CUR("As many as %d locks in DGL are pending. Suspending.\n", dgl_wait->nr_remaining); //atomic_read(&dgl_wait->nr_remaining));
-
+		// note reverse order. see comments in select_next_lock for reason.
 		for(i = dgl_wait->size - 1; i >= 0; --i) {
 			struct litmus_lock *l = dgl_wait->locks[i];
 			if(!l->ops->is_owner(l, dgl_wait->task)) { // double-check to be thread safe
 
-				TRACE_CUR("Activating priority inheritance on lock %d\n", l->ident);
-
+				TRACE_CUR("Activating priority inheritance on lock %d\n",
+						  l->ident);
+
 				TS_DGL_LOCK_SUSPEND;
 
 				l->ops->enable_priority(l, dgl_wait);
 				dgl_wait->last_primary = i;
 
 				TRACE_CUR("Suspending for lock %d\n", l->ident);
 
 				raw_spin_unlock_irqrestore(dgl_lock, irqflags); // free dgl_lock before suspending
 
 				schedule(); // suspend!!!
 
 				TS_DGL_LOCK_RESUME;
 
 				TRACE_CUR("Woken up from DGL suspension.\n");
 
 				goto all_acquired; // we should hold all locks when we wake up.
 			}
 		}
 
 		TRACE_CUR("Didn't have to suspend after all, but calling schedule() anyway.\n");
 		BUG();
 	}
 
 	raw_spin_unlock_irqrestore(dgl_lock, irqflags);
 
 all_acquired:
 
 	// FOR SANITY CHECK FOR TESTING
 	for(i = 0; i < dgl_wait->size; ++i) {
 		struct litmus_lock *l = dgl_wait->locks[i];
 		BUG_ON(!l->ops->is_owner(l, dgl_wait->task));
 	}
 
 	TRACE_CUR("Acquired entire DGL\n");
 
 	return 0;
@@ -330,7 +342,7 @@ all_acquired:
 //static int supports_dgl(struct litmus_lock *l)
 //{
 //	struct litmus_lock_ops* ops = l->ops;
 //
 //	return (ops->dgl_lock &&
 //			ops->is_owner &&
 //			ops->enable_priority);
@@ -342,23 +354,23 @@ asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
 	long err = -EINVAL;
 	int dgl_ods[MAX_DGL_SIZE];
 	int i;
 
 	dgl_wait_state_t dgl_wait_state;  // lives on the stack until all resources in DGL are held.
 
 	if(dgl_size > MAX_DGL_SIZE || dgl_size < 1)
 		goto out;
 
 	if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(int))))
 		goto out;
 
 	if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(int))))
 		goto out;
 
 	if (!is_realtime(t)) {
 		err = -EPERM;
 		goto out;
 	}
 
 	for(i = 0; i < dgl_size; ++i) {
 		struct od_table_entry *entry = get_entry_for_od(dgl_ods[i]);
 		if(entry && is_lock(entry)) {
@@ -374,17 +386,17 @@ asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
 			goto out;
 		}
 	}
 
 	dgl_wait_state.task = t;
 	dgl_wait_state.size = dgl_size;
 
 	TS_DGL_LOCK_START;
 	err = do_litmus_dgl_lock(&dgl_wait_state);
 
 	/* Note: task my have been suspended or preempted in between! Take
 	 * this into account when computing overheads. */
 	TS_DGL_LOCK_END;
 
 out:
 	return err;
 }
@@ -393,26 +405,26 @@ static long do_litmus_dgl_unlock(struct litmus_lock* dgl_locks[], int dgl_size)
 {
 	int i;
 	long err = 0;
 
 	TRACE_CUR("Unlocking a DGL of %d size\n", dgl_size);
 
 	for(i = dgl_size - 1; i >= 0; --i) {  // unlock in reverse order
 
 		struct litmus_lock *l = dgl_locks[i];
 		long tmp_err;
 
 		TRACE_CUR("Unlocking lock %d of DGL.\n", l->ident);
 
 		tmp_err = l->ops->unlock(l);
 
 		if(tmp_err) {
 			TRACE_CUR("There was an error unlocking %d: %d.\n", l->ident, tmp_err);
 			err = tmp_err;
 		}
 	}
 
 	TRACE_CUR("DGL unlocked. err = %d\n", err);
 
 	return err;
 }
 
@@ -422,18 +434,18 @@ asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size)
 	int dgl_ods[MAX_DGL_SIZE];
 	struct od_table_entry* entry;
 	int i;
 
 	struct litmus_lock* dgl_locks[MAX_DGL_SIZE];
 
 	if(dgl_size > MAX_DGL_SIZE || dgl_size < 1)
 		goto out;
 
 	if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(int))))
 		goto out;
 
 	if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(int))))
 		goto out;
 
 	for(i = 0; i < dgl_size; ++i) {
 		entry = get_entry_for_od(dgl_ods[i]);
 		if(entry && is_lock(entry)) {
@@ -449,16 +461,28 @@ asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size)
 			goto out;
 		}
 	}
 
 	TS_DGL_UNLOCK_START;
 	err = do_litmus_dgl_unlock(dgl_locks, dgl_size);
 
 	/* Note: task my have been suspended or preempted in between! Take
 	 * this into account when computing overheads. */
 	TS_DGL_UNLOCK_END;
 
 out:
 	return err;
+}
+
+#else
+
+asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
+{
+	return -ENOSYS;
+}
+
+asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size)
+{
+	return -ENOSYS;
 }
 
 #endif
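
Aside from moving code around, the one behavioral tweak to locking.c in this patch is that lock->ident is now assigned outside the CONFIG_LITMUS_NESTED_LOCKING block, so every lock created through create_generic_lock() gets a unique ID from the file-local atomic counter. Below is a minimal user-space sketch of that ID-generation pattern; it uses the GCC/Clang __atomic builtins as a stand-in for the kernel's atomic_t/atomic_inc_return(), and the names demo_lock and demo_lock_init are hypothetical, not part of the patch.

/* Sketch only: user-space stand-in for the per-lock ID scheme in this patch.
 * lock_id_gen plays the role of the file-local "static atomic_t lock_id_gen",
 * and __atomic_add_fetch() stands in for atomic_inc_return(). */
#include <stdio.h>

static int lock_id_gen = 0;

struct demo_lock {
	int ident;	/* unique identifier, assigned once at creation */
};

static void demo_lock_init(struct demo_lock *l)
{
	/* increment-and-read in one atomic step, so concurrent creators
	 * can never hand out the same ID twice */
	l->ident = __atomic_add_fetch(&lock_id_gen, 1, __ATOMIC_SEQ_CST);
}

int main(void)
{
	struct demo_lock a, b;
	demo_lock_init(&a);
	demo_lock_init(&b);
	printf("a.ident = %d, b.ident = %d\n", a.ident, b.ident);	/* prints 1, 2 */
	return 0;
}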