aboutsummaryrefslogtreecommitdiffstats
path: root/litmus/litmus.c
diff options
context:
space:
mode:
authorGlenn Elliott <gelliott@cs.unc.edu>2012-03-30 16:43:52 -0400
committerGlenn Elliott <gelliott@cs.unc.edu>2012-03-30 16:43:52 -0400
commit62f2907f445b08f958acf1cc1a0c29736d4ba206 (patch)
treea11743eddcc125c9c3ac0c527078338e3d01b295 /litmus/litmus.c
parentd0961e328a2a4c026c884c768b798cb882922708 (diff)
Nested inheritance with fine-grained locking.
Minor hack to lockdep was required to allow the inheritance propagation locking logic to work.
Diffstat (limited to 'litmus/litmus.c')
-rw-r--r--litmus/litmus.c31
1 files changed, 23 insertions, 8 deletions
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 40340dfa9d67..9d4fbd2f8a65 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -291,7 +291,12 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
291{ 291{
292 struct rt_task user_config = {}; 292 struct rt_task user_config = {};
293 void* ctrl_page = NULL; 293 void* ctrl_page = NULL;
294 294
295#ifdef CONFIG_LITMUS_NESTED_LOCKING
296 binheap_order_t prio_order = NULL;
297#endif
298
299
295 if (restore) { 300 if (restore) {
296 /* Safe user-space provided configuration data. 301 /* Safe user-space provided configuration data.
297 * and allocated page. */ 302 * and allocated page. */
@@ -299,24 +304,33 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
299 ctrl_page = p->rt_param.ctrl_page; 304 ctrl_page = p->rt_param.ctrl_page;
300 } 305 }
301 306
307#ifdef CONFIG_LITMUS_NESTED_LOCKING
308 prio_order = p->rt_param.hp_blocked_tasks.compare;
309#endif
310
302 /* We probably should not be inheriting any task's priority 311 /* We probably should not be inheriting any task's priority
303 * at this point in time. 312 * at this point in time.
304 */ 313 */
305 WARN_ON(p->rt_param.eff_prio); 314 WARN_ON(p->rt_param.inh_task);
306 315
307#ifdef CONFIG_LITMUS_NESTED_LOCKING 316#ifdef CONFIG_LITMUS_NESTED_LOCKING
308 WARN_ON(p->rt_param.local_prio); 317 WARN_ON(p->rt_param.blocked_lock);
309 WARN_ON(p->rt_param.trans_prio); 318 WARN_ON(!binheap_empty(&p->rt_param.hp_blocked_tasks));
310#endif 319#endif
311 320
312 /* Cleanup everything else. */ 321 /* Cleanup everything else. */
313 memset(&p->rt_param, 0, sizeof(p->rt_param)); 322 memset(&p->rt_param, 0, sizeof(p->rt_param));
314 323
315 /* Restore preserved fields. */ 324 /* Restore preserved fields. */
316 if (restore) { 325 if (restore) {
317 p->rt_param.task_params = user_config; 326 p->rt_param.task_params = user_config;
318 p->rt_param.ctrl_page = ctrl_page; 327 p->rt_param.ctrl_page = ctrl_page;
319 } 328 }
329
330#ifdef CONFIG_LITMUS_NESTED_LOCKING
331 INIT_BINHEAP_HANDLE(&p->rt_param.hp_blocked_tasks, prio_order);
332 raw_spin_lock_init(&p->rt_param.hp_blocked_tasks_lock);
333#endif
320} 334}
321 335
322long litmus_admit_task(struct task_struct* tsk) 336long litmus_admit_task(struct task_struct* tsk)
@@ -363,6 +377,9 @@ long litmus_admit_task(struct task_struct* tsk)
363 bheap_node_init(&tsk_rt(tsk)->heap_node, tsk); 377 bheap_node_init(&tsk_rt(tsk)->heap_node, tsk);
364 } 378 }
365 379
380 tsk_rt(tsk)->blocked_lock = NULL;
381 raw_spin_lock_init(&tsk_rt(tsk)->hp_blocked_tasks_lock);
382
366 retval = litmus->admit_task(tsk); 383 retval = litmus->admit_task(tsk);
367 384
368 if (!retval) { 385 if (!retval) {
@@ -473,9 +490,7 @@ void litmus_exec(void)
473 struct task_struct* p = current; 490 struct task_struct* p = current;
474 491
475 if (is_realtime(p)) { 492 if (is_realtime(p)) {
476 WARN_ON(p->rt_param.eff_prio); 493 WARN_ON(p->rt_param.inh_task);
477 WARN_ON(p->rt_param.local_prio);
478 WARN_ON(p->rt_param.trans_prio);
479 if (tsk_rt(p)->ctrl_page) { 494 if (tsk_rt(p)->ctrl_page) {
480 free_page((unsigned long) tsk_rt(p)->ctrl_page); 495 free_page((unsigned long) tsk_rt(p)->ctrl_page);
481 tsk_rt(p)->ctrl_page = NULL; 496 tsk_rt(p)->ctrl_page = NULL;