author     Glenn Elliott <gelliott@cs.unc.edu>    2012-09-14 08:34:36 -0400
committer  Glenn Elliott <gelliott@cs.unc.edu>    2012-09-14 08:34:36 -0400
commit     4ad6ba08f0dab67bbd89a26b27f1cc86e3c45c13 (patch)
tree       fd982c7a12f7a947278e05d0b126a015c24793f4 /litmus/litmus.c
parent     c1d1979c99ca397241da4e3d7e0cb77f7ec28240 (diff)
checkpoint for aux_tasks. can still deadlock
Diffstat (limited to 'litmus/litmus.c')
-rw-r--r--  litmus/litmus.c  111
1 file changed, 50 insertions(+), 61 deletions(-)
```diff
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 83e8ef3f42af..1b4182ac3337 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -25,6 +25,10 @@
 #include <litmus/nvidia_info.h>
 #endif
 
+#ifdef CONFIG_LITMUS_LOCKING
+#include <litmus/aux_tasks.h>
+#endif
+
 /* Number of RT tasks that exist in the system */
 atomic_t rt_task_count = ATOMIC_INIT(0);
 static DEFINE_RAW_SPINLOCK(task_transition_lock);
@@ -327,60 +331,6 @@ asmlinkage long sys_null_call(cycles_t __user *ts)
 	return ret;
 }
 
-
-long __litmus_admit_task(struct task_struct* tsk);
-
-asmlinkage long sys_slave_non_rt_threads(void)
-{
-	long retval = 0;
-	struct task_struct *leader = current->group_leader;
-	struct task_struct *t;
-	struct task_struct *hp = NULL;
-
-	read_lock_irq(&tasklist_lock);
-
-	t = leader;
-	do {
-		TRACE_CUR("threads in %s/%d: %s/%d:\n", leader->comm, leader->pid, t->comm, t->pid);
-
-		if (tsk_rt(t)->heap_node == NULL) {
-			retval = __litmus_admit_task(t);
-
-			if (retval != 0) break;
-
-			/* hasn't been admitted into rt. make it a slave. */
-			tsk_rt(t)->slave = 1;
-		}
-		else {
-			tsk_rt(t)->has_slaves = 1;
-
-			if (is_realtime(t) && litmus->compare(t, hp)) {
-				hp = t;
-			}
-		}
-
-		t = next_thread(t);
-	} while(t != leader);
-
-	if (hp) {
-		TRACE_CUR("found hp in group: %s/%d\n", hp->comm, hp->pid);
-
-		/* set up inheritance */
-		leader->hp_group = hp;
-
-		t = leader;
-		do {
-			if (tsk_rt(t)->slave) {
-				litmus->increase_prio(t);
-			}
-		} while(t != leader);
-	}
-
-	read_unlock_irq(&tasklist_lock);
-
-	return 0;
-}
-
 #if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING)
 void init_gpu_affinity_state(struct task_struct* p)
 {
```
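The removed sys_slave_non_rt_threads() walks the caller's thread group twice: a first pass admits not-yet-admitted threads as "slaves" and tracks the highest-priority real-time thread, and a second pass is meant to boost every slave to that priority. Note that the second do/while never advances `t`, so it only ever visits the group leader. As a reference, the sketch below is not part of the patch; it assumes the usual next_thread()/tasklist_lock idiom of kernels of this generation and a hypothetical visit() callback, and shows the group walk with the advancing step included.

```c
#include <linux/sched.h>

/* Hypothetical helper, not part of this patch: visit every thread in the
 * caller's thread group under tasklist_lock. This is the idiom the removed
 * syscall uses in its first loop; its second loop omits the next_thread()
 * step and therefore never gets past the group leader. */
static void visit_group_threads(void (*visit)(struct task_struct *t))
{
	struct task_struct *leader = current->group_leader;
	struct task_struct *t = leader;

	read_lock_irq(&tasklist_lock);
	do {
		visit(t);
		t = next_thread(t);	/* advance, or the walk stops at the leader */
	} while (t != leader);
	read_unlock_irq(&tasklist_lock);
}
```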
```diff
@@ -412,11 +362,13 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
 {
 	struct rt_task user_config = {};
 	void* ctrl_page = NULL;
 
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	binheap_order_t prio_order = NULL;
 #endif
 
+	TRACE_TASK(p, "reinit_litmus_state: restore = %d\n", restore);
+
 	if (restore) {
 		/* Safe user-space provided configuration data.
 		 * and allocated page. */
@@ -428,10 +380,12 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
 	prio_order = p->rt_param.hp_blocked_tasks.compare;
 #endif
 
+#ifdef CONFIG_LITMUS_LOCKING
 	/* We probably should not be inheriting any task's priority
 	 * at this point in time.
 	 */
 	WARN_ON(p->rt_param.inh_task);
+#endif
 
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	WARN_ON(p->rt_param.blocked_lock);
@@ -459,6 +413,13 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
 	/* Cleanup everything else. */
 	memset(&p->rt_param, 0, sizeof(p->rt_param));
 
+#ifdef CONFIG_LITMUS_LOCKING
+	/* also clear out the aux_data. the !restore case is only called on
+	 * fork (initial thread creation). */
+	if (!restore)
+		memset(&p->aux_data, 0, sizeof(p->aux_data));
+#endif
+
 	/* Restore preserved fields. */
 	if (restore) {
 		p->rt_param.task_params = user_config;
@@ -475,7 +436,12 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
 #endif
 }
 
+
+#ifdef CONFIG_LITMUS_LOCKING
+long __litmus_admit_task(struct task_struct* tsk, int clear_aux)
+#else
 long __litmus_admit_task(struct task_struct* tsk)
+#endif
 {
 	long retval = 0;
 	unsigned long flags;
@@ -520,6 +486,14 @@ long __litmus_admit_task(struct task_struct* tsk)
 	atomic_set(&tsk_rt(tsk)->klitirqd_sem_stat, NOT_HELD);
 #endif
 
+#ifdef CONFIG_LITMUS_LOCKING
+	/* turns out our aux thread isn't really an aux thread. */
+	if (clear_aux && tsk_rt(tsk)->is_aux_task) {
+		exit_aux_task(tsk);
+		tsk_rt(tsk)->has_aux_tasks = 1;
+	}
+#endif
+
 	retval = litmus->admit_task(tsk);
 
 	if (!retval) {
```
```diff
@@ -537,8 +511,7 @@ out_unlock:
 long litmus_admit_task(struct task_struct* tsk)
 {
 	long retval = 0;
-	unsigned long flags;
-
+
 	BUG_ON(is_realtime(tsk));
 
 	if (get_rt_relative_deadline(tsk) == 0 ||
@@ -560,8 +533,12 @@ long litmus_admit_task(struct task_struct* tsk)
 		goto out;
 	}
 
-	retval = __litmus_admit_task(tsk);
-
+#ifdef CONFIG_LITMUS_LOCKING
+	retval = __litmus_admit_task(tsk, (tsk_rt(tsk)->task_params.period != MAGIC_AUX_TASK_PERIOD));
+#else
+	retval = __litmus_admit_task(tsk);
+#endif
+
 out:
 	return retval;
 }
```
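Pieced together from the hunks above, the admission path under CONFIG_LITMUS_LOCKING now works as follows: litmus_admit_task() passes clear_aux = 1 unless the task was configured with the MAGIC_AUX_TASK_PERIOD period (reading that value as a sentinel is an inference from this call site), and __litmus_admit_task() then strips aux-task status from a thread that turns out to be a regular real-time task. The sketch below is reassembled for readability, not a verbatim copy; code outside the diff context is elided in comments.

```c
/* Sketch reassembled from the hunks above; "elided" marks code that lies
 * outside the diff context of this commit. */
#ifdef CONFIG_LITMUS_LOCKING
long __litmus_admit_task(struct task_struct* tsk, int clear_aux)
#else
long __litmus_admit_task(struct task_struct* tsk)
#endif
{
	long retval = 0;
	/* ... per-task allocation and locking elided ... */

#ifdef CONFIG_LITMUS_LOCKING
	/* turns out our aux thread isn't really an aux thread. */
	if (clear_aux && tsk_rt(tsk)->is_aux_task) {
		exit_aux_task(tsk);
		tsk_rt(tsk)->has_aux_tasks = 1;
	}
#endif

	retval = litmus->admit_task(tsk);
	/* ... bookkeeping and unlocking elided ... */
	return retval;
}

long litmus_admit_task(struct task_struct* tsk)
{
	long retval = 0;
	/* ... parameter sanity checks elided ... */

#ifdef CONFIG_LITMUS_LOCKING
	/* tasks admitted with the MAGIC_AUX_TASK_PERIOD period keep their
	 * aux-task state; everything else has it cleared on admission */
	retval = __litmus_admit_task(tsk,
			(tsk_rt(tsk)->task_params.period != MAGIC_AUX_TASK_PERIOD));
#else
	retval = __litmus_admit_task(tsk);
#endif

	return retval;
}
```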
```diff
@@ -574,7 +551,7 @@ void litmus_exit_task(struct task_struct* tsk)
 		litmus->task_exit(tsk);
 
 		BUG_ON(bheap_node_in_heap(tsk_rt(tsk)->heap_node));
-		bheap_node_free(tsk_rt(tsk)->heap_node);
+		bheap_node_free(tsk_rt(tsk)->heap_node);
 		release_heap_free(tsk_rt(tsk)->rel_heap);
 
 		atomic_dec(&rt_task_count);
```
```diff
@@ -647,14 +624,22 @@ out:
  */
 void litmus_fork(struct task_struct* p)
 {
+	reinit_litmus_state(p, 0);
+
 	if (is_realtime(p)) {
+		TRACE_TASK(p, "fork, is real-time\n");
 		/* clean out any litmus related state, don't preserve anything */
-		reinit_litmus_state(p, 0);
+		//reinit_litmus_state(p, 0);
 		/* Don't let the child be a real-time task. */
 		p->sched_reset_on_fork = 1;
-	} else
+	} else {
 		/* non-rt tasks might have ctrl_page set */
 		tsk_rt(p)->ctrl_page = NULL;
+
+		/* still don't inherit any parental parameters */
+		//memset(&p->rt_param, 0, sizeof(p->rt_param));
+		//memset(&p->aux_data, 0, sizeof(p->aux_data));
+	}
 
 	/* od tables are never inherited across a fork */
 	p->od_table = NULL;
```
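For readability, here is litmus_fork() as it reads after this change, reassembled from the hunk above. The net effect: reinit_litmus_state(p, 0) now runs for every forked child rather than only for real-time parents, the real-time branch merely logs and marks the child for scheduler reset, and the non-RT branch clears ctrl_page (the extra parameter-clearing memsets are left commented out).

```c
/* litmus_fork() after this commit, reassembled from the hunk above; the
 * remainder of the function is outside the diff context and not shown. */
void litmus_fork(struct task_struct* p)
{
	reinit_litmus_state(p, 0);

	if (is_realtime(p)) {
		TRACE_TASK(p, "fork, is real-time\n");
		/* clean out any litmus related state, don't preserve anything */
		//reinit_litmus_state(p, 0);
		/* Don't let the child be a real-time task. */
		p->sched_reset_on_fork = 1;
	} else {
		/* non-rt tasks might have ctrl_page set */
		tsk_rt(p)->ctrl_page = NULL;

		/* still don't inherit any parental parameters */
		//memset(&p->rt_param, 0, sizeof(p->rt_param));
		//memset(&p->aux_data, 0, sizeof(p->aux_data));
	}

	/* od tables are never inherited across a fork */
	p->od_table = NULL;

	/* ... rest of the function unchanged by this commit and not shown ... */
}
```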
```diff
@@ -751,6 +736,10 @@ static int __init _init_litmus(void)
 	init_topology();
 #endif
 
+#ifdef CONFIG_LITMUS_NVIDIA
+	//init_nvidia_info();
+#endif
+
 	return 0;
 }
 
```