author:    Jonathan Herman <hermanjl@cs.unc.edu>  2011-08-29 15:15:05 -0400
committer: Jonathan Herman <hermanjl@cs.unc.edu>  2011-08-29 15:15:05 -0400
commit:    0db09db5c908a470a8746fab0b0e6b8fa8e131f2 (patch)
tree:      b4e8d0f696037648916caf959fb48a3f5e2c2883 /litmus
parent:    4f07dfbc024de2afdcd81c20c1607f32b79b61fe (diff)
modifications for easier debugging
Diffstat (limited to 'litmus')

 litmus/sched_mc.c | 28 +++++++++++++++++-----------
 1 file changed, 17 insertions(+), 11 deletions(-)
```diff
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 457bd2a94a29..eb5728c97c87 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -127,6 +127,10 @@ cpu_entry_t* mc_cpus[NR_CPUS];
 
 #define tsk_mc_data(t) (tsk_rt(t)->mc_data)
 #define tsk_mc_crit(t) (tsk_mc_data(t)->mc_task.crit)
+#define TRACE_TASK(t, fmt, args...) \
+        TRACE("(%s/%d:%d:%d) " fmt, (t)->comm, (t)->pid, \
+              (t)->rt_param.job_params.job_no, \
+              (tsk_mc_data(t)) ? tsk_mc_crit(t) : -1, ##args)
 
 /* need to do a short-circuit null check on mc_data before checking is_ghost */
 static inline int is_ghost(struct task_struct *t)
```
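The added TRACE_TASK shadows LITMUS^RT's generic definition with a mixed-criticality-aware one: each message is prefixed with the task's comm, PID, job number, and criticality level, and the ternary short-circuits to -1 when mc_data is not yet set, so tasks without MC parameters remain traceable. A minimal user-space sketch of the same macro pattern follows; the struct layout and the trace() helper are stand-ins invented for illustration, not LITMUS^RT code.

```c
/* Minimal, self-contained sketch of the macro pattern above; the
 * struct layout and trace() helper are assumptions, not LITMUS^RT code. */
#include <stdio.h>

struct mc_data { int crit; };            /* stand-in for the MC bookkeeping */
struct task    { const char *comm; int pid; int job_no;
                 struct mc_data *mc; };  /* stand-in for task_struct fields */

#define trace(fmt, args...) printf(fmt, ##args)

/* Same shape as the new TRACE_TASK: prefix every message with
 * (comm/pid:job:crit), and short-circuit to -1 when mc is NULL so
 * tasks without MC data can still be traced safely. */
#define TRACE_TASK(t, fmt, args...) \
        trace("(%s/%d:%d:%d) " fmt, (t)->comm, (t)->pid, \
              (t)->job_no, (t)->mc ? (t)->mc->crit : -1, ##args)

int main(void)
{
	struct task t = { "rtspin", 42, 7, NULL };
	TRACE_TASK(&t, "no MC data yet\n");          /* prints crit = -1 */
	struct mc_data d = { 2 };
	t.mc = &d;
	TRACE_TASK(&t, "now at criticality %d\n", t.mc->crit);
	return 0;
}
```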
```diff
@@ -328,15 +332,14 @@ static void update_ghost_time(struct task_struct *p)
 		TRACE_TASK(p, "WARNING: negative time delta.\n");
 	}
 	if (tsk_mc_data(p)->mc_job.ghost_budget <= delta) {
-		/*Currently will just set ghost budget to zero since
+		/* Currently will just set ghost budget to zero since
 		 * task has already been queued. Could probably do
 		 * more efficiently with significant reworking.
 		 */
 		TRACE_TASK(p, "Ghost job could have ended\n");
 		tsk_mc_data(p)->mc_job.ghost_budget = 0;
 		p->se.exec_start = clock;
-	}
-	else{
+	} else {
 		TRACE_TASK(p, "Ghost jub updated, but didn't finish\n");
 		tsk_mc_data(p)->mc_job.ghost_budget -= delta;
 		p->se.exec_start = clock;
```
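The reworked branch charges the elapsed interval against the ghost job's remaining budget: if the delta meets or exceeds the budget, the budget is clamped to zero (the job is already queued, so, as the comment notes, completing it precisely would need more rework); otherwise the budget is decremented and the accounting interval restarted. A hedged sketch of that drain logic, with simplified stand-in types rather than the LITMUS^RT ones:

```c
/* Sketch of the budget-drain logic in update_ghost_time(); the time
 * type and clock source are simplified stand-ins, not the LITMUS^RT ones. */
typedef long long lt_t;                 /* time in ns, signed for the demo */

struct ghost_job {
	lt_t budget;                    /* remaining ghost budget      */
	lt_t exec_start;                /* start of current interval   */
};

/* Charge the interval [g->exec_start, now) against the budget.
 * Returns 1 if the ghost job's budget is exhausted. */
static int charge_ghost(struct ghost_job *g, lt_t now)
{
	lt_t delta = now - g->exec_start;

	if (delta < 0)
		delta = 0;              /* mirrors the "negative delta" warning path */
	if (g->budget <= delta) {
		g->budget = 0;          /* job already queued; just clamp */
		g->exec_start = now;
		return 1;
	}
	g->budget -= delta;
	g->exec_start = now;
	return 0;
}
```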
```diff
@@ -397,6 +400,7 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 			   entry->cpu);
 		BUG_ON(entry->linked &&
 		       tsk_mc_crit(entry->linked) < tsk_mc_crit(linked));
+
 		tmp = entry->ghost_tasks[tsk_mc_crit(linked)];
 		if (tmp) {
 			unlink(tmp);
```
```diff
@@ -578,9 +582,7 @@ static void preempt(cpu_entry_t *entry)
  */
 static noinline void requeue(struct task_struct* task)
 {
-	/* BUG_ON(!task || !is_realtime(task));*/
-	BUG_ON(!task);
-	BUG_ON(!is_realtime(task));
+	BUG_ON(!task || !is_realtime(task));
 	/* sanity check before insertion */
 	BUG_ON(is_queued(task));
 
```
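Folding the two assertions into one BUG_ON is safe because || short-circuits: !task is evaluated first, so is_realtime() is never reached with a NULL pointer, and the combined check trips in exactly the same cases as the pair it replaces. A small user-space illustration, with assert() standing in for BUG_ON():

```c
/* Why the fold is safe: || short-circuits, so is_realtime() never sees
 * a NULL pointer. assert() is a user-space stand-in for BUG_ON(). */
#include <assert.h>

#define BUG_ON(cond) assert(!(cond))

struct task { int rt; };
static int is_realtime(struct task *t) { return t->rt; }  /* dereferences t */

static void requeue_check(struct task *task)
{
	/* Equivalent to BUG_ON(!task); BUG_ON(!is_realtime(task));
	 * !task is tested first, so the dereference in is_realtime()
	 * is reached only for non-NULL tasks. */
	BUG_ON(!task || !is_realtime(task));
}

int main(void)
{
	struct task t = { .rt = 1 };
	requeue_check(&t);              /* passes: non-NULL and realtime */
	return 0;
}
```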
```diff
@@ -623,8 +625,9 @@ static void prepare_preemption(rt_domain_t *dom, cpu_entry_t *cpu,
 }
 
 /* Callers always have global lock for functions in this section*/
-static void check_for_c_preemptions(rt_domain_t *dom) {
+static noinline void check_for_c_preemptions(rt_domain_t *dom) {
 	cpu_entry_t* last;
+	TRACE("Checking for c preempt");
 	for (last = lowest_prio_cpu_c();
 	     mc_edf_preemption_needed(dom, CRIT_LEVEL_C,
 				      last);
```
```diff
@@ -633,8 +636,9 @@ static void check_for_c_preemptions(rt_domain_t *dom) {
 	}
 }
 
-static void check_for_d_preemptions(rt_domain_t *dom) {
+static noinline void check_for_d_preemptions(rt_domain_t *dom) {
 	cpu_entry_t* last;
+	TRACE("Checking for d preempt");
 	for (last = lowest_prio_cpu_d();
 	     mc_edf_preemption_needed(dom, CRIT_LEVEL_D,
 				      last);
```
```diff
@@ -643,13 +647,15 @@ static void check_for_d_preemptions(rt_domain_t *dom) {
 	}
 }
 
-static void check_for_a_preemption(rt_domain_t *dom, cpu_entry_t *cpu) {
+static noinline void check_for_a_preemption(rt_domain_t *dom, cpu_entry_t *cpu) {
+	TRACE("Checking for a preempt");
 	if (mc_edf_preemption_needed(dom, CRIT_LEVEL_A, cpu)) {
 		prepare_preemption(dom, cpu, CRIT_LEVEL_A);
 	}
 }
 
-static void check_for_b_preemption(rt_domain_t *dom, cpu_entry_t *cpu) {
+static noinline void check_for_b_preemption(rt_domain_t *dom, cpu_entry_t *cpu) {
+	TRACE("Checking for b preempt");
 	if (mc_edf_preemption_needed(dom, CRIT_LEVEL_B, cpu)) {
 		prepare_preemption(dom, cpu, CRIT_LEVEL_B);
 	}
```
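All four check_for_* helpers gain noinline and an entry TRACE. noinline keeps each helper as a distinct symbol and stack frame, so backtraces and function tracers show exactly which criticality level's check was running, matching the commit's stated goal of easier debugging. The sketch below collapses the per-CPU (A/B) shape into one parameterized helper to show the shared pattern; it is a user-space illustration with stand-in types, and the consolidation itself is not part of this commit.

```c
/* User-space sketch of the shared shape of check_for_{a,b}_preemption()
 * after this commit; rt_domain_t, cpu_entry_t and the two helpers are
 * opaque stand-ins, and noinline is spelled with the GCC attribute. */
#include <stdio.h>

typedef struct { int dummy; } rt_domain_t;
typedef struct { int cpu; }   cpu_entry_t;

enum { CRIT_LEVEL_A, CRIT_LEVEL_B };

static int mc_edf_preemption_needed(rt_domain_t *dom, int level,
                                    cpu_entry_t *cpu)
{
	(void)dom; (void)cpu;
	return level == CRIT_LEVEL_A;   /* canned answer for the demo */
}

static void prepare_preemption(rt_domain_t *dom, cpu_entry_t *cpu, int level)
{
	(void)dom;
	printf("preempting cpu %d at level %d\n", cpu->cpu, level);
}

/* noinline keeps the helper as its own frame in backtraces and its own
 * symbol for tracing -- the point of adding it in this commit. */
static __attribute__((noinline)) void
check_for_preemption(rt_domain_t *dom, cpu_entry_t *cpu, int level)
{
	printf("Checking for preempt at level %d\n", level);
	if (mc_edf_preemption_needed(dom, level, cpu))
		prepare_preemption(dom, cpu, level);
}

int main(void)
{
	rt_domain_t dom = { 0 };
	cpu_entry_t cpu = { 3 };
	check_for_preemption(&dom, &cpu, CRIT_LEVEL_A);
	check_for_preemption(&dom, &cpu, CRIT_LEVEL_B);
	return 0;
}
```

The C/D variants differ only in iterating over lowest_prio_cpu_c()/lowest_prio_cpu_d() instead of taking the CPU entry as an argument.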
```diff
@@ -733,7 +739,7 @@ static noinline void job_completion(struct task_struct *t, int forced)
 	 * needed.
 	 */
 	if (!is_ghost(t)) {
-		TRACE_TASK(t, "Converting to ghost.\n");
+		TRACE_TASK(t, "Converting to ghost from %d.\n", t->rt_param.scheduled_on);
 		cpu = remote_cpu_entry(t->rt_param.scheduled_on);
 		/*Unlink first while it's not a ghost job.*/
 		unlink(t);
```
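The trace message now also records scheduled_on, the CPU the completing job last ran on, which is the same index that remote_cpu_entry() resolves to per-CPU state on the next line. The following is a hypothetical sketch of such a lookup over the mc_cpus[NR_CPUS] table visible in the first hunk's context line; the actual remote_cpu_entry() definition is not shown in this diff and may differ.

```c
/* Hypothetical sketch of the lookup behind remote_cpu_entry(), built on
 * the mc_cpus[NR_CPUS] table from the first hunk's context line; the
 * real LITMUS^RT definition may differ. */
#define NR_CPUS 4

typedef struct { int cpu; } cpu_entry_t;

static cpu_entry_t  entries[NR_CPUS];
static cpu_entry_t *mc_cpus[NR_CPUS];    /* as declared in sched_mc.c */

static cpu_entry_t *remote_cpu_entry(int cpu)
{
	return mc_cpus[cpu];             /* one entry per logical CPU */
}

int main(void)
{
	for (int i = 0; i < NR_CPUS; i++) {
		entries[i].cpu = i;
		mc_cpus[i] = &entries[i];
	}
	/* job_completion() resolves scheduled_on -- the CPU now reported
	 * in the trace message -- to its per-CPU state before unlinking. */
	int scheduled_on = 2;
	cpu_entry_t *cpu = remote_cpu_entry(scheduled_on);
	return cpu->cpu == scheduled_on ? 0 : 1;
}
```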