From f5264e2cb8213dad425cb2d2db564edbc443a51a Mon Sep 17 00:00:00 2001
From: Glenn Elliott
Date: Fri, 20 Jan 2012 11:09:15 -0500
Subject: Fix bugs in tracing and PAI handling

---
 include/litmus/sched_plugin.h |  2 --
 include/litmus/trace.h        |  6 ++++
 litmus/sched_cedf.c           | 71 +++++++++++++++++++++++++++++++++---------
 litmus/sched_cfifo.c          | 14 ++++++---
 litmus/sched_crm.c            | 72 ++++++++++++++++++++++++++++++++++--------
 litmus/sched_crm_srt.c        | 14 ++++++---
 litmus/sched_gsn_edf.c        | 15 ++++++---
 7 files changed, 151 insertions(+), 43 deletions(-)

diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index 3fc64f832fef..8fdf05dd7cd3 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -11,9 +11,7 @@
 #include
 #endif
 
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
 #include
-#endif
 
 /************************ setup/tear down ********************/
 
diff --git a/include/litmus/trace.h b/include/litmus/trace.h
index aa3ee4a6757b..09d409b60268 100644
--- a/include/litmus/trace.h
+++ b/include/litmus/trace.h
@@ -114,4 +114,10 @@ feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu)
 
 #endif
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+#define TS_NV_SCHED_BOTISR_START    TIMESTAMP(206)
+#define TS_NV_SCHED_BOTISR_END      TIMESTAMP(207)
+#endif
+
+
 #endif /* !_SYS_TRACE_H_ */
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 02106f455c0f..9aa5822c3834 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -57,6 +57,7 @@
 
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
 #include
+#include
 #endif
 
 #ifdef CONFIG_LITMUS_NVIDIA
@@ -432,17 +433,24 @@ static void cedf_tick(struct task_struct* t)
 static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
 {
     if (!atomic_read(&tasklet->count)) {
-        sched_trace_tasklet_begin(tasklet->owner);
+        if(tasklet->owner) {
+            sched_trace_tasklet_begin(tasklet->owner);
+        }
 
         if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state))
         {
             BUG();
         }
-        TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n", __FUNCTION__, tasklet->owner->pid, flushed);
+        TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n",
+              __FUNCTION__,
+              (tasklet->owner) ? tasklet->owner->pid : -1,
+              (tasklet->owner) ? 0 : 1);
         tasklet->func(tasklet->data);
         tasklet_unlock(tasklet);
-
-        sched_trace_tasklet_end(tasklet->owner, flushed);
+
+        if(tasklet->owner) {
+            sched_trace_tasklet_end(tasklet->owner, flushed);
+        }
     }
     else {
         BUG();
@@ -498,6 +506,7 @@ static void __extract_tasklets(cedf_domain_t* cluster, struct task_struct* task,
 
 static void flush_tasklets(cedf_domain_t* cluster, struct task_struct* task)
 {
+#if 0
     unsigned long flags;
     struct tasklet_head task_tasklets;
     struct tasklet_struct* step;
@@ -520,6 +529,27 @@ static void flush_tasklets(cedf_domain_t* cluster, struct task_struct* task)
         step = temp;
     }
 
+#endif
+
+    // lazy flushing.
+    // just change ownership to NULL and let an idle processor
+    // take care of it. :P
+
+    struct tasklet_struct* step;
+    unsigned long flags;
+
+    raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
+
+    for(step = cluster->pending_tasklets.head; step != NULL; step = step->next)
+    {
+        if(step->owner == task)
+        {
+            TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid);
+            step->owner = NULL;
+        }
+    }
+
+    raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags);
 }
 
@@ -531,6 +561,9 @@ static void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_ta
     unsigned long flags;
 
     while(work_to_do) {
+
+        TS_NV_SCHED_BOTISR_START;
+
         // remove tasklet at head of list if it has higher priority.
         raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
@@ -544,7 +577,6 @@ static void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_ta
     TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1);
     TRACE("%s: done.\n", __FUNCTION__);
     */
-
     if(cluster->pending_tasklets.head != NULL) {
         // remove tasklet at head.
@@ -554,17 +586,17 @@ static void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_ta
 
             if(NULL == tasklet->next) {
                 // tasklet is at the head, list only has one element
-                TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
+                TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, (tasklet->owner) ? tasklet->owner->pid : -1);
                 cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head);
             }
 
             // remove the tasklet from the queue
             cluster->pending_tasklets.head = tasklet->next;
 
-            TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
+            TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, (tasklet->owner) ? tasklet->owner->pid : -1);
         }
         else {
-            TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id());
+            TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, (tasklet->owner) ? tasklet->owner->pid : -1, smp_processor_id());
             tasklet = NULL;
         }
     }
@@ -584,9 +616,11 @@ static void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_ta
     TRACE("%s: done.\n", __FUNCTION__);
     */
-
     raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags);
 
+
+    TS_NV_SCHED_BOTISR_END;
+
     if(tasklet) {
         __do_lit_tasklet(tasklet, 0ul);
         tasklet = NULL;
@@ -690,7 +724,16 @@ static void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* clu
 
         // insert tasklet right before step->next.
 
-        TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1);
+        TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__,
+              tasklet->owner->pid,
+              (step->owner) ?
+                  step->owner->pid :
+                  -1,
+              (step->next) ?
+                  ((step->next->owner) ?
+                   step->next->owner->pid :
+                   -1) :
+                  -1);
 
         tasklet->next = step->next;
         step->next = tasklet;
@@ -1080,6 +1123,10 @@ static void cedf_task_exit(struct task_struct * t)
     unsigned long flags;
     cedf_domain_t *cluster = task_cpu_cluster(t);
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+    flush_tasklets(cluster, t);
+#endif
+
     /* unlink if necessary */
     raw_spin_lock_irqsave(&cluster->cedf_lock, flags);
     unlink(t);
@@ -1091,10 +1138,6 @@ static void cedf_task_exit(struct task_struct * t)
     }
     raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags);
 
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-    flush_tasklets(cluster, t);
-#endif
-
     BUG_ON(!is_realtime(t));
     TRACE_TASK(t, "RIP\n");
 }
diff --git a/litmus/sched_cfifo.c b/litmus/sched_cfifo.c
index 689b2dbe5fae..7fbdec3f1d15 100644
--- a/litmus/sched_cfifo.c
+++ b/litmus/sched_cfifo.c
@@ -57,6 +57,7 @@
 
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
 #include
+#include
 #endif
 
 #ifdef CONFIG_LITMUS_NVIDIA
@@ -530,6 +531,9 @@ static void do_lit_tasklets(cfifo_domain_t* cluster, struct task_struct* sched_t
     unsigned long flags;
 
     while(work_to_do) {
+
+        TS_NV_SCHED_BOTISR_START;
+
         // remove tasklet at head of list if it has higher priority.
         raw_spin_lock_irqsave(&cluster->cfifo_lock, flags);
@@ -584,6 +588,8 @@ static void do_lit_tasklets(cfifo_domain_t* cluster, struct task_struct* sched_t
 
     raw_spin_unlock_irqrestore(&cluster->cfifo_lock, flags);
 
+    TS_NV_SCHED_BOTISR_END;
+
     if(tasklet) {
         __do_lit_tasklet(tasklet, 0ul);
         tasklet = NULL;
@@ -1071,6 +1077,10 @@ static void cfifo_task_exit(struct task_struct * t)
     unsigned long flags;
     cfifo_domain_t *cluster = task_cpu_cluster(t);
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+    flush_tasklets(cluster, t);
+#endif
+
     /* unlink if necessary */
     raw_spin_lock_irqsave(&cluster->cfifo_lock, flags);
     unlink(t);
@@ -1081,10 +1091,6 @@ static void cfifo_task_exit(struct task_struct * t)
         tsk_rt(t)->scheduled_on = NO_CPU;
     }
     raw_spin_unlock_irqrestore(&cluster->cfifo_lock, flags);
-
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-    flush_tasklets(cluster, t);
-#endif
 
     BUG_ON(!is_realtime(t));
     TRACE_TASK(t, "RIP\n");
diff --git a/litmus/sched_crm.c b/litmus/sched_crm.c
index fd7fab982998..e51de10557f9 100644
--- a/litmus/sched_crm.c
+++ b/litmus/sched_crm.c
@@ -57,6 +57,7 @@
 
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
 #include
+#include
 #endif
 
 #ifdef CONFIG_LITMUS_NVIDIA
@@ -425,17 +426,24 @@ static void crm_tick(struct task_struct* t)
 static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
 {
     if (!atomic_read(&tasklet->count)) {
-        sched_trace_tasklet_begin(tasklet->owner);
+        if(tasklet->owner) {
+            sched_trace_tasklet_begin(tasklet->owner);
+        }
 
         if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state))
         {
             BUG();
         }
-        TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n", __FUNCTION__, tasklet->owner->pid, flushed);
+        TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n",
+              __FUNCTION__,
+              (tasklet->owner) ? tasklet->owner->pid : -1,
+              (tasklet->owner) ? 0 : 1);
         tasklet->func(tasklet->data);
         tasklet_unlock(tasklet);
 
-        sched_trace_tasklet_end(tasklet->owner, flushed);
+        if(tasklet->owner) {
+            sched_trace_tasklet_end(tasklet->owner, flushed);
+        }
     }
     else {
         BUG();
@@ -491,6 +499,7 @@ static void __extract_tasklets(crm_domain_t* cluster, struct task_struct* task,
 
 static void flush_tasklets(crm_domain_t* cluster, struct task_struct* task)
 {
+#if 0
     unsigned long flags;
     struct tasklet_head task_tasklets;
     struct tasklet_struct* step;
@@ -513,6 +522,27 @@ static void flush_tasklets(crm_domain_t* cluster, struct task_struct* task)
         step = temp;
     }
 
+#endif
+
+    // lazy flushing.
+    // just change ownership to NULL and let an idle processor
+    // take care of it. :P
+
+    struct tasklet_struct* step;
+    unsigned long flags;
+
+    raw_spin_lock_irqsave(&cluster->crm_lock, flags);
+
+    for(step = cluster->pending_tasklets.head; step != NULL; step = step->next)
+    {
+        if(step->owner == task)
+        {
+            TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid);
+            step->owner = NULL;
+        }
+    }
+
+    raw_spin_unlock_irqrestore(&cluster->crm_lock, flags);
 }
 
@@ -524,6 +554,9 @@ static void do_lit_tasklets(crm_domain_t* cluster, struct task_struct* sched_tas
     unsigned long flags;
 
     while(work_to_do) {
+
+        TS_NV_SCHED_BOTISR_START;
+
         // remove tasklet at head of list if it has higher priority.
         raw_spin_lock_irqsave(&cluster->crm_lock, flags);
@@ -546,17 +579,17 @@ static void do_lit_tasklets(crm_domain_t* cluster, struct task_struct* sched_tas
 
             if(NULL == tasklet->next) {
                 // tasklet is at the head, list only has one element
-                TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
+                TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, (tasklet->owner) ? tasklet->owner->pid : -1);
                 cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head);
             }
 
             // remove the tasklet from the queue
             cluster->pending_tasklets.head = tasklet->next;
 
-            TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
+            TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, (tasklet->owner) ? tasklet->owner->pid : -1);
         }
         else {
-            TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id());
+            TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, (tasklet->owner) ? tasklet->owner->pid : -1, smp_processor_id());
             tasklet = NULL;
         }
     }
@@ -577,6 +610,8 @@ static void do_lit_tasklets(crm_domain_t* cluster, struct task_struct* sched_tas
 
     raw_spin_unlock_irqrestore(&cluster->crm_lock, flags);
 
+    TS_NV_SCHED_BOTISR_END;
+
     if(tasklet) {
         __do_lit_tasklet(tasklet, 0ul);
         tasklet = NULL;
@@ -619,8 +654,8 @@ static void run_tasklets(struct task_struct* sched_task)
     preempt_disable();
 
     cluster = (is_realtime(sched_task)) ?
-                    task_cpu_cluster(sched_task) :
-                    remote_cluster(smp_processor_id());
+        task_cpu_cluster(sched_task) :
+        remote_cluster(smp_processor_id());
 
     if(cluster && cluster->pending_tasklets.head != NULL) {
         TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
@@ -679,8 +714,17 @@ static void __add_pai_tasklet(struct tasklet_struct* tasklet, crm_domain_t* clus
 
         // insert tasklet right before step->next.
 
-        TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1);
-
+        TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__,
+              tasklet->owner->pid,
+              (step->owner) ?
+                  step->owner->pid :
+                  -1,
+              (step->next) ?
+                  ((step->next->owner) ?
+                   step->next->owner->pid :
+                   -1) :
+                  -1);
+
         tasklet->next = step->next;
         step->next = tasklet;
@@ -1070,6 +1114,10 @@ static void crm_task_exit(struct task_struct * t)
     unsigned long flags;
     crm_domain_t *cluster = task_cpu_cluster(t);
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+    flush_tasklets(cluster, t);
+#endif
+
     /* unlink if necessary */
     raw_spin_lock_irqsave(&cluster->crm_lock, flags);
     unlink(t);
@@ -1080,10 +1128,6 @@ static void crm_task_exit(struct task_struct * t)
         tsk_rt(t)->scheduled_on = NO_CPU;
     }
     raw_spin_unlock_irqrestore(&cluster->crm_lock, flags);
-
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-    flush_tasklets(cluster, t);
-#endif
 
     BUG_ON(!is_realtime(t));
     TRACE_TASK(t, "RIP\n");
diff --git a/litmus/sched_crm_srt.c b/litmus/sched_crm_srt.c
index c0004354573d..f0064d486953 100644
--- a/litmus/sched_crm_srt.c
+++ b/litmus/sched_crm_srt.c
@@ -57,6 +57,7 @@
 
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
 #include
+#include
 #endif
 
 #ifdef CONFIG_LITMUS_NVIDIA
@@ -529,6 +530,9 @@ static void do_lit_tasklets(crm_srt_domain_t* cluster, struct task_struct* sched
     unsigned long flags;
 
     while(work_to_do) {
+
+        TS_NV_SCHED_BOTISR_START;
+
         // remove tasklet at head of list if it has higher priority.
         raw_spin_lock_irqsave(&cluster->crm_srt_lock, flags);
@@ -582,6 +586,8 @@ static void do_lit_tasklets(crm_srt_domain_t* cluster, struct task_struct* sched
 
     raw_spin_unlock_irqrestore(&cluster->crm_srt_lock, flags);
 
+    TS_NV_SCHED_BOTISR_END;
+
     if(tasklet) {
         __do_lit_tasklet(tasklet, 0ul);
         tasklet = NULL;
@@ -1067,6 +1073,10 @@ static void crm_srt_task_exit(struct task_struct * t)
     unsigned long flags;
     crm_srt_domain_t *cluster = task_cpu_cluster(t);
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+    flush_tasklets(cluster, t);
+#endif
+
     /* unlink if necessary */
     raw_spin_lock_irqsave(&cluster->crm_srt_lock, flags);
     unlink(t);
@@ -1077,10 +1087,6 @@ static void crm_srt_task_exit(struct task_struct * t)
         tsk_rt(t)->scheduled_on = NO_CPU;
     }
     raw_spin_unlock_irqrestore(&cluster->crm_srt_lock, flags);
-
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-    flush_tasklets(cluster, t);
-#endif
 
     BUG_ON(!is_realtime(t));
     TRACE_TASK(t, "RIP\n");
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index b40ff7ba4f0e..30c745fe33a7 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -37,6 +37,7 @@
 
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
 #include
+#include
 #endif
 
 #ifdef CONFIG_LITMUS_NVIDIA
@@ -523,6 +524,9 @@ static void do_lit_tasklets(struct task_struct* sched_task)
     unsigned long flags;
 
     while(work_to_do) {
+
+        TS_NV_SCHED_BOTISR_START;
+
         // remove tasklet at head of list if it has higher priority.
         raw_spin_lock_irqsave(&gsnedf_lock, flags);
@@ -576,9 +580,10 @@ static void do_lit_tasklets(struct task_struct* sched_task)
     TRACE("%s: done.\n", __FUNCTION__);
     */
-
     raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 
+    TS_NV_SCHED_BOTISR_END;
+
     if(tasklet) {
         __do_lit_tasklet(tasklet, 0ul);
         tasklet = NULL;
@@ -1069,6 +1074,10 @@ static void gsnedf_task_exit(struct task_struct * t)
 {
     unsigned long flags;
 
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+    flush_tasklets(t);
+#endif
+
     /* unlink if necessary */
     raw_spin_lock_irqsave(&gsnedf_lock, flags);
     unlink(t);
@@ -1077,10 +1086,6 @@ static void gsnedf_task_exit(struct task_struct * t)
         tsk_rt(t)->scheduled_on = NO_CPU;
     }
     raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
-
-#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
-    flush_tasklets(t);
-#endif
 
     BUG_ON(!is_realtime(t));
     TRACE_TASK(t, "RIP\n");
-- 
cgit v1.2.2
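
Note on the approach taken above: flush_tasklets() no longer extracts and runs a dying
task's pending PAI tasklets. It only walks the cluster's pending list under the cluster
lock and sets each matching tasklet's owner to NULL, so whichever CPU later drains the
queue runs the tasklet without owner-specific tracing. The sketch below restates that
pattern in isolation. It is a simplified userspace illustration, not the kernel code:
the names (lit_tasklet, tasklet_queue, lazy_flush_tasklets, run_one_tasklet) are
invented for this example, and a pthread mutex stands in for the per-cluster raw
spinlock (cedf_lock/crm_lock/gsnedf_lock) used in the patch.

/*
 * Simplified, self-contained illustration of the lazy-flush idea.
 * All identifiers here are invented for the sketch; the real code
 * operates on struct tasklet_struct under a raw spinlock.
 */
#include <stdio.h>
#include <pthread.h>

struct task { int pid; };

struct lit_tasklet {
    struct lit_tasklet *next;
    struct task *owner;            /* may become NULL after a lazy flush */
    void (*func)(unsigned long);
    unsigned long data;
};

struct tasklet_queue {
    struct lit_tasklet *head;
    pthread_mutex_t lock;          /* stands in for the cluster raw spinlock */
};

/* Exit path: disown the dying task's tasklets instead of running them now. */
static void lazy_flush_tasklets(struct tasklet_queue *q, struct task *dying)
{
    struct lit_tasklet *step;

    pthread_mutex_lock(&q->lock);
    for (step = q->head; step != NULL; step = step->next) {
        if (step->owner == dying) {
            printf("flush: disowning tasklet of pid %d\n", step->owner->pid);
            step->owner = NULL;    /* a later/idle CPU will run it */
        }
    }
    pthread_mutex_unlock(&q->lock);
}

/* Consumer path: must tolerate owner == NULL (hence the -1 in the TRACEs). */
static void run_one_tasklet(struct lit_tasklet *t)
{
    printf("run: owner pid = %d\n", t->owner ? t->owner->pid : -1);
    t->func(t->data);
}

static void work(unsigned long data)
{
    printf("  tasklet body, data = %lu\n", data);
}

int main(void)
{
    struct task a = { 1001 }, b = { 1002 };
    struct lit_tasklet t2 = { NULL, &b, work, 2 };
    struct lit_tasklet t1 = { &t2, &a, work, 1 };
    struct tasklet_queue q = { &t1, PTHREAD_MUTEX_INITIALIZER };
    struct lit_tasklet *step;

    lazy_flush_tasklets(&q, &a);   /* task a exits; t1 is disowned, not run */

    for (step = q.head; step != NULL; step = step->next)
        run_one_tasklet(step);     /* t1 now reports owner pid = -1 */

    return 0;
}

The trade-off is visible in the hunks above: lazy flushing keeps the exit path short
(no tasklet is executed or freed while the task is being torn down), but every place
that dereferences tasklet->owner afterwards must tolerate a NULL owner, which is
exactly what the added (tasklet->owner) ? ... : -1 guards around the TRACE and
sched_trace_tasklet_begin/end calls provide.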