From 1a582a2c5e361e01a4c64f185bb1a23c3f70701a Mon Sep 17 00:00:00 2001 From: Glenn Elliott Date: Sat, 14 Jan 2012 16:56:47 -0500 Subject: Port PAI interrupts to GSN-EDF, C-RM/RM-SRT/FIFO. --- litmus/sched_cedf.c | 32 ++-- litmus/sched_cfifo.c | 450 ++++++++++++++++++++++++++++++++++++++++++++++++- litmus/sched_crm.c | 448 +++++++++++++++++++++++++++++++++++++++++++++++- litmus/sched_crm_srt.c | 445 +++++++++++++++++++++++++++++++++++++++++++++++- litmus/sched_gsn_edf.c | 434 ++++++++++++++++++++++++++++++++++++++++++++++- 5 files changed, 1787 insertions(+), 22 deletions(-) diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index 4924da21865e..02106f455c0f 100644 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c @@ -124,7 +124,6 @@ typedef struct clusterdomain { #ifdef CONFIG_LITMUS_PAI_SOFTIRQD - raw_spinlock_t tasklet_lock; struct tasklet_head pending_tasklets; #endif @@ -430,7 +429,7 @@ static void cedf_tick(struct task_struct* t) #ifdef CONFIG_LITMUS_PAI_SOFTIRQD -void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed) +static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed) { if (!atomic_read(&tasklet->count)) { sched_trace_tasklet_begin(tasklet->owner); @@ -451,7 +450,7 @@ void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed) } -void __extract_tasklets(cedf_domain_t* cluster, struct task_struct* task, struct tasklet_head* task_tasklets) +static void __extract_tasklets(cedf_domain_t* cluster, struct task_struct* task, struct tasklet_head* task_tasklets) { struct tasklet_struct* step; struct tasklet_struct* tasklet; @@ -497,7 +496,7 @@ void __extract_tasklets(cedf_domain_t* cluster, struct task_struct* task, struct } } -void flush_tasklets(cedf_domain_t* cluster, struct task_struct* task) +static void flush_tasklets(cedf_domain_t* cluster, struct task_struct* task) { unsigned long flags; struct tasklet_head task_tasklets; @@ -524,18 +523,18 @@ void flush_tasklets(cedf_domain_t* cluster, struct task_struct* task) } -void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_task) +static void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_task) { int work_to_do = 1; struct tasklet_struct *tasklet = NULL; - struct tasklet_struct *step; + //struct tasklet_struct *step; unsigned long flags; while(work_to_do) { // remove tasklet at head of list if it has higher priority. raw_spin_lock_irqsave(&cluster->cedf_lock, flags); - +/* step = cluster->pending_tasklets.head; TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__); while(step != NULL){ @@ -544,6 +543,7 @@ void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_task) } TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1); TRACE("%s: done.\n", __FUNCTION__); + */ if(cluster->pending_tasklets.head != NULL) { @@ -573,6 +573,7 @@ void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_task) } + /* step = cluster->pending_tasklets.head; TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__); while(step != NULL){ @@ -581,6 +582,7 @@ void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_task) } TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? 
(*(cluster->pending_tasklets.tail))->owner->pid : -1); TRACE("%s: done.\n", __FUNCTION__); + */ raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags); @@ -598,7 +600,7 @@ void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_task) } -void run_tasklets(struct task_struct* sched_task) +static void run_tasklets(struct task_struct* sched_task) { cedf_domain_t* cluster; @@ -641,10 +643,11 @@ void run_tasklets(struct task_struct* sched_task) } -void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* cluster) +static void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* cluster) { struct tasklet_struct* step; + /* step = cluster->pending_tasklets.head; TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__); while(step != NULL){ @@ -653,6 +656,7 @@ void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* cluster) } TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1); TRACE("%s: done.\n", __FUNCTION__); + */ tasklet->next = NULL; // make sure there are no old values floating around @@ -674,7 +678,7 @@ void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* cluster) } else { - WARN_ON(1 == 1); + //WARN_ON(1 == 1); // insert the tasklet somewhere in the middle. @@ -699,7 +703,7 @@ void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* cluster) } } - + /* step = cluster->pending_tasklets.head; TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__); while(step != NULL){ @@ -707,7 +711,8 @@ void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* cluster) step = step->next; } TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1); - TRACE("%s: done.\n", __FUNCTION__); + TRACE("%s: done.\n", __FUNCTION__); + */ // TODO: Maintain this list in priority order. 
// tasklet->next = NULL; @@ -715,7 +720,7 @@ void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* cluster) // cluster->pending_tasklets.tail = &tasklet->next; } -int enqueue_pai_tasklet(struct tasklet_struct* tasklet) +static int enqueue_pai_tasklet(struct tasklet_struct* tasklet) { cedf_domain_t *cluster = NULL; cpu_entry_t *targetCPU = NULL; @@ -1909,7 +1914,6 @@ static long cedf_activate_plugin(void) #ifdef CONFIG_LITMUS_PAI_SOFTIRQD - raw_spin_lock_init(&(cedf[i].tasklet_lock)); cedf[i].pending_tasklets.head = NULL; cedf[i].pending_tasklets.tail = &(cedf[i].pending_tasklets.head); #endif diff --git a/litmus/sched_cfifo.c b/litmus/sched_cfifo.c index f515446f76ed..689b2dbe5fae 100644 --- a/litmus/sched_cfifo.c +++ b/litmus/sched_cfifo.c @@ -55,6 +55,10 @@ #include #endif +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD +#include +#endif + #ifdef CONFIG_LITMUS_NVIDIA #include #endif @@ -91,6 +95,15 @@ DEFINE_PER_CPU(cpu_entry_t, cfifo_cpu_entries); #define test_will_schedule(cpu) \ (atomic_read(&per_cpu(cfifo_cpu_entries, cpu).will_schedule)) + +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD +struct tasklet_head +{ + struct tasklet_struct *head; + struct tasklet_struct **tail; +}; +#endif + /* * In C-FIFO there is a cfifo domain _per_ cluster * The number of clusters is dynamically determined accordingly to the @@ -108,6 +121,12 @@ typedef struct clusterdomain { struct bheap cpu_heap; /* lock for this cluster */ #define cfifo_lock domain.ready_lock + + +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD + struct tasklet_head pending_tasklets; +#endif + } cfifo_domain_t; /* a cfifo_domain per cluster; allocation is done at init/activation time */ @@ -251,7 +270,7 @@ static void preempt(cpu_entry_t *entry) preempt_if_preemptable(entry->scheduled, entry->cpu); } -/* requeue - Put an unlinked task into gsn-edf domain. +/* requeue - Put an unlinked task into c-fifo domain. * Caller must hold cfifo_lock. */ static noinline void requeue(struct task_struct* task) @@ -395,6 +414,419 @@ static void cfifo_tick(struct task_struct* t) } } + + + + + + + + + + + +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD + + +static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed) +{ + if (!atomic_read(&tasklet->count)) { + sched_trace_tasklet_begin(tasklet->owner); + + if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state)) + { + BUG(); + } + TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n", __FUNCTION__, tasklet->owner->pid, flushed); + tasklet->func(tasklet->data); + tasklet_unlock(tasklet); + + sched_trace_tasklet_end(tasklet->owner, flushed); + } + else { + BUG(); + } +} + + +static void __extract_tasklets(cfifo_domain_t* cluster, struct task_struct* task, struct tasklet_head* task_tasklets) +{ + struct tasklet_struct* step; + struct tasklet_struct* tasklet; + struct tasklet_struct* prev; + + task_tasklets->head = NULL; + task_tasklets->tail = &(task_tasklets->head); + + prev = NULL; + for(step = cluster->pending_tasklets.head; step != NULL; step = step->next) + { + if(step->owner == task) + { + TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid); + + tasklet = step; + + if(prev) { + prev->next = tasklet->next; + } + else if(cluster->pending_tasklets.head == tasklet) { + // we're at the head. 
+ cluster->pending_tasklets.head = tasklet->next; + } + + if(cluster->pending_tasklets.tail == &tasklet) { + // we're at the tail + if(prev) { + cluster->pending_tasklets.tail = &prev; + } + else { + cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head); + } + } + + tasklet->next = NULL; + *(task_tasklets->tail) = tasklet; + task_tasklets->tail = &(tasklet->next); + } + else { + prev = step; + } + } +} + +static void flush_tasklets(cfifo_domain_t* cluster, struct task_struct* task) +{ + unsigned long flags; + struct tasklet_head task_tasklets; + struct tasklet_struct* step; + + raw_spin_lock_irqsave(&cluster->cfifo_lock, flags); + __extract_tasklets(cluster, task, &task_tasklets); + raw_spin_unlock_irqrestore(&cluster->cfifo_lock, flags); + + if(cluster->pending_tasklets.head != NULL) { + TRACE("%s: Flushing tasklets for %d...\n", __FUNCTION__, task->pid); + } + + // now execute any flushed tasklets. + for(step = cluster->pending_tasklets.head; step != NULL; /**/) + { + struct tasklet_struct* temp = step->next; + + step->next = NULL; + __do_lit_tasklet(step, 1ul); + + step = temp; + } +} + + +static void do_lit_tasklets(cfifo_domain_t* cluster, struct task_struct* sched_task) +{ + int work_to_do = 1; + struct tasklet_struct *tasklet = NULL; + //struct tasklet_struct *step; + unsigned long flags; + + while(work_to_do) { + // remove tasklet at head of list if it has higher priority. + raw_spin_lock_irqsave(&cluster->cfifo_lock, flags); + + /* + step = cluster->pending_tasklets.head; + TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__); + while(step != NULL){ + TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); + step = step->next; + } + TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1); + TRACE("%s: done.\n", __FUNCTION__); + */ + + + if(cluster->pending_tasklets.head != NULL) { + // remove tasklet at head. + tasklet = cluster->pending_tasklets.head; + + if(fifo_higher_prio(tasklet->owner, sched_task)) { + + if(NULL == tasklet->next) { + // tasklet is at the head, list only has one element + TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); + cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head); + } + + // remove the tasklet from the queue + cluster->pending_tasklets.head = tasklet->next; + + TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); + } + else { + TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id()); + tasklet = NULL; + } + } + else { + TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__); + } + + /* + step = cluster->pending_tasklets.head; + TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__); + while(step != NULL){ + TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); + step = step->next; + } + TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? 
(*(cluster->pending_tasklets.tail))->owner->pid : -1); + TRACE("%s: done.\n", __FUNCTION__); + */ + + raw_spin_unlock_irqrestore(&cluster->cfifo_lock, flags); + + if(tasklet) { + __do_lit_tasklet(tasklet, 0ul); + tasklet = NULL; + } + else { + work_to_do = 0; + } + } + + //TRACE("%s: exited.\n", __FUNCTION__); +} + + +static void run_tasklets(struct task_struct* sched_task) +{ + cfifo_domain_t* cluster; + +#if 0 + int task_is_rt = is_realtime(sched_task); + cfifo_domain_t* cluster; + + if(is_realtime(sched_task)) { + cluster = task_cpu_cluster(sched_task); + } + else { + cluster = remote_cluster(get_cpu()); + } + + if(cluster && cluster->pending_tasklets.head != NULL) { + TRACE("%s: There are tasklets to process.\n", __FUNCTION__); + + do_lit_tasklets(cluster, sched_task); + } + + if(!task_is_rt) { + put_cpu_no_resched(); + } +#else + + preempt_disable(); + + cluster = (is_realtime(sched_task)) ? + task_cpu_cluster(sched_task) : + remote_cluster(smp_processor_id()); + + if(cluster && cluster->pending_tasklets.head != NULL) { + TRACE("%s: There are tasklets to process.\n", __FUNCTION__); + do_lit_tasklets(cluster, sched_task); + } + + preempt_enable_no_resched(); + +#endif +} + + +static void __add_pai_tasklet(struct tasklet_struct* tasklet, cfifo_domain_t* cluster) +{ + struct tasklet_struct* step; + + /* + step = cluster->pending_tasklets.head; + TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__); + while(step != NULL){ + TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); + step = step->next; + } + TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1); + TRACE("%s: done.\n", __FUNCTION__); + */ + + + tasklet->next = NULL; // make sure there are no old values floating around + + step = cluster->pending_tasklets.head; + if(step == NULL) { + TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid); + // insert at tail. + *(cluster->pending_tasklets.tail) = tasklet; + cluster->pending_tasklets.tail = &(tasklet->next); + } + else if((*(cluster->pending_tasklets.tail) != NULL) && + fifo_higher_prio((*(cluster->pending_tasklets.tail))->owner, tasklet->owner)) { + // insert at tail. + TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid); + + *(cluster->pending_tasklets.tail) = tasklet; + cluster->pending_tasklets.tail = &(tasklet->next); + } + else { + + //WARN_ON(1 == 1); + + // insert the tasklet somewhere in the middle. + + TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__); + + while(step->next && fifo_higher_prio(step->next->owner, tasklet->owner)) { + step = step->next; + } + + // insert tasklet right before step->next. + + TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1); + + tasklet->next = step->next; + step->next = tasklet; + + // patch up the head if needed. 
+ if(cluster->pending_tasklets.head == step) + { + TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid); + cluster->pending_tasklets.head = tasklet; + } + } + + /* + step = cluster->pending_tasklets.head; + TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__); + while(step != NULL){ + TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); + step = step->next; + } + TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1); + TRACE("%s: done.\n", __FUNCTION__); + */ + + // TODO: Maintain this list in priority order. + // tasklet->next = NULL; + // *(cluster->pending_tasklets.tail) = tasklet; + // cluster->pending_tasklets.tail = &tasklet->next; +} + +static int enqueue_pai_tasklet(struct tasklet_struct* tasklet) +{ + cfifo_domain_t *cluster = NULL; + cpu_entry_t *targetCPU = NULL; + int thisCPU; + int runLocal = 0; + int runNow = 0; + unsigned long flags; + + if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner))) + { + TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); + return 0; + } + + cluster = task_cpu_cluster(tasklet->owner); + + raw_spin_lock_irqsave(&cluster->cfifo_lock, flags); + + thisCPU = smp_processor_id(); + +#if 1 +#ifdef CONFIG_SCHED_CPU_AFFINITY + { + cpu_entry_t* affinity = NULL; + + // use this CPU if it is in our cluster and isn't running any RT work. + if(cpu_isset(thisCPU, *cluster->cpu_map) && (__get_cpu_var(cfifo_cpu_entries).linked == NULL)) { + affinity = &(__get_cpu_var(cfifo_cpu_entries)); + } + else { + // this CPU is busy or shouldn't run tasklet in this cluster. + // look for available near by CPUs. + // NOTE: Affinity towards owner and not this CPU. Is this right? + affinity = + cfifo_get_nearest_available_cpu(cluster, + &per_cpu(cfifo_cpu_entries, task_cpu(tasklet->owner))); + } + + targetCPU = affinity; + } +#endif +#endif + + if (targetCPU == NULL) { + targetCPU = lowest_prio_cpu(cluster); + } + + if (fifo_higher_prio(tasklet->owner, targetCPU->linked)) { + if (thisCPU == targetCPU->cpu) { + TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__); + runLocal = 1; + runNow = 1; + } + else { + TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__); + runLocal = 0; + runNow = 1; + } + } + else { + runLocal = 0; + runNow = 0; + } + + if(!runLocal) { + // enqueue the tasklet + __add_pai_tasklet(tasklet, cluster); + } + + raw_spin_unlock_irqrestore(&cluster->cfifo_lock, flags); + + + if (runLocal /*&& runNow */) { // runNow == 1 is implied + TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__); + __do_lit_tasklet(tasklet, 0ul); + } + else if (runNow /*&& !runLocal */) { // runLocal == 0 is implied + TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu); + preempt(targetCPU); // need to be protected by cfifo_lock? + } + else { + TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__); + } + + return(1); // success +} + + +#endif + + + + + + + + + + + + + + + + + + + + /* Getting schedule() right is a bit tricky. schedule() may not make any * assumptions on the state of the current task since it may be called for a * number of reasons. 
The reasons include a scheduler_tick() determined that it @@ -544,7 +976,7 @@ static void cfifo_task_new(struct task_struct * t, int on_rq, int running) cpu_entry_t* entry; cfifo_domain_t* cluster; - TRACE("gsn edf: task new %d\n", t->pid); + TRACE("cfifo: task new %d\n", t->pid); /* the cluster doesn't change even if t is running */ cluster = task_cpu_cluster(t); @@ -650,6 +1082,10 @@ static void cfifo_task_exit(struct task_struct * t) } raw_spin_unlock_irqrestore(&cluster->cfifo_lock, flags); +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD + flush_tasklets(cluster, t); +#endif + BUG_ON(!is_realtime(t)); TRACE_TASK(t, "RIP\n"); } @@ -1467,6 +1903,12 @@ static long cfifo_activate_plugin(void) bheap_init(&(cfifo[i].cpu_heap)); fifo_domain_init(&(cfifo[i].domain), NULL, cfifo_release_jobs); + +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD + cfifo[i].pending_tasklets.head = NULL; + cfifo[i].pending_tasklets.tail = &(cfifo[i].pending_tasklets.head); +#endif + if(!zalloc_cpumask_var(&cfifo[i].cpu_map, GFP_ATOMIC)) return -ENOMEM; } @@ -1578,6 +2020,10 @@ static struct sched_plugin cfifo_plugin __cacheline_aligned_in_smp = { #ifdef CONFIG_LITMUS_SOFTIRQD .set_prio_inh_klitirqd = set_priority_inheritance_klitirqd, .clear_prio_inh_klitirqd = clear_priority_inheritance_klitirqd, +#endif +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD + .enqueue_pai_tasklet = enqueue_pai_tasklet, + .run_tasklets = run_tasklets, #endif }; diff --git a/litmus/sched_crm.c b/litmus/sched_crm.c index 061b29eaff7e..fd7fab982998 100644 --- a/litmus/sched_crm.c +++ b/litmus/sched_crm.c @@ -55,6 +55,10 @@ #include #endif +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD +#include +#endif + #ifdef CONFIG_LITMUS_NVIDIA #include #endif @@ -91,6 +95,14 @@ DEFINE_PER_CPU(cpu_entry_t, crm_cpu_entries); #define test_will_schedule(cpu) \ (atomic_read(&per_cpu(crm_cpu_entries, cpu).will_schedule)) +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD +struct tasklet_head +{ + struct tasklet_struct *head; + struct tasklet_struct **tail; +}; +#endif + /* * In C-RM there is a crm domain _per_ cluster * The number of clusters is dynamically determined accordingly to the @@ -108,6 +120,10 @@ typedef struct clusterdomain { struct bheap cpu_heap; /* lock for this cluster */ #define crm_lock domain.ready_lock + +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD + struct tasklet_head pending_tasklets; +#endif } crm_domain_t; /* a crm_domain per cluster; allocation is done at init/activation time */ @@ -251,7 +267,7 @@ static void preempt(cpu_entry_t *entry) preempt_if_preemptable(entry->scheduled, entry->cpu); } -/* requeue - Put an unlinked task into gsn-edf domain. +/* requeue - Put an unlinked task into c-rm domain. * Caller must hold crm_lock. 
*/ static noinline void requeue(struct task_struct* task) @@ -394,6 +410,421 @@ static void crm_tick(struct task_struct* t) } } } + + + + + + + + + +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD + + +static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed) +{ + if (!atomic_read(&tasklet->count)) { + sched_trace_tasklet_begin(tasklet->owner); + + if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state)) + { + BUG(); + } + TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n", __FUNCTION__, tasklet->owner->pid, flushed); + tasklet->func(tasklet->data); + tasklet_unlock(tasklet); + + sched_trace_tasklet_end(tasklet->owner, flushed); + } + else { + BUG(); + } +} + + +static void __extract_tasklets(crm_domain_t* cluster, struct task_struct* task, struct tasklet_head* task_tasklets) +{ + struct tasklet_struct* step; + struct tasklet_struct* tasklet; + struct tasklet_struct* prev; + + task_tasklets->head = NULL; + task_tasklets->tail = &(task_tasklets->head); + + prev = NULL; + for(step = cluster->pending_tasklets.head; step != NULL; step = step->next) + { + if(step->owner == task) + { + TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid); + + tasklet = step; + + if(prev) { + prev->next = tasklet->next; + } + else if(cluster->pending_tasklets.head == tasklet) { + // we're at the head. + cluster->pending_tasklets.head = tasklet->next; + } + + if(cluster->pending_tasklets.tail == &tasklet) { + // we're at the tail + if(prev) { + cluster->pending_tasklets.tail = &prev; + } + else { + cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head); + } + } + + tasklet->next = NULL; + *(task_tasklets->tail) = tasklet; + task_tasklets->tail = &(tasklet->next); + } + else { + prev = step; + } + } +} + +static void flush_tasklets(crm_domain_t* cluster, struct task_struct* task) +{ + unsigned long flags; + struct tasklet_head task_tasklets; + struct tasklet_struct* step; + + raw_spin_lock_irqsave(&cluster->crm_lock, flags); + __extract_tasklets(cluster, task, &task_tasklets); + raw_spin_unlock_irqrestore(&cluster->crm_lock, flags); + + if(cluster->pending_tasklets.head != NULL) { + TRACE("%s: Flushing tasklets for %d...\n", __FUNCTION__, task->pid); + } + + // now execute any flushed tasklets. + for(step = cluster->pending_tasklets.head; step != NULL; /**/) + { + struct tasklet_struct* temp = step->next; + + step->next = NULL; + __do_lit_tasklet(step, 1ul); + + step = temp; + } +} + + +static void do_lit_tasklets(crm_domain_t* cluster, struct task_struct* sched_task) +{ + int work_to_do = 1; + struct tasklet_struct *tasklet = NULL; + //struct tasklet_struct *step; + unsigned long flags; + + while(work_to_do) { + // remove tasklet at head of list if it has higher priority. + raw_spin_lock_irqsave(&cluster->crm_lock, flags); + + /* + step = cluster->pending_tasklets.head; + TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__); + while(step != NULL){ + TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); + step = step->next; + } + TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1); + TRACE("%s: done.\n", __FUNCTION__); + */ + + if(cluster->pending_tasklets.head != NULL) { + // remove tasklet at head. 
+ tasklet = cluster->pending_tasklets.head; + + if(rm_higher_prio(tasklet->owner, sched_task)) { + + if(NULL == tasklet->next) { + // tasklet is at the head, list only has one element + TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); + cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head); + } + + // remove the tasklet from the queue + cluster->pending_tasklets.head = tasklet->next; + + TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); + } + else { + TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id()); + tasklet = NULL; + } + } + else { + TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__); + } + + /* + step = cluster->pending_tasklets.head; + TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__); + while(step != NULL){ + TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); + step = step->next; + } + TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1); + TRACE("%s: done.\n", __FUNCTION__); + */ + + raw_spin_unlock_irqrestore(&cluster->crm_lock, flags); + + if(tasklet) { + __do_lit_tasklet(tasklet, 0ul); + tasklet = NULL; + } + else { + work_to_do = 0; + } + } + + //TRACE("%s: exited.\n", __FUNCTION__); +} + + +static void run_tasklets(struct task_struct* sched_task) +{ + crm_domain_t* cluster; + +#if 0 + int task_is_rt = is_realtime(sched_task); + crm_domain_t* cluster; + + if(is_realtime(sched_task)) { + cluster = task_cpu_cluster(sched_task); + } + else { + cluster = remote_cluster(get_cpu()); + } + + if(cluster && cluster->pending_tasklets.head != NULL) { + TRACE("%s: There are tasklets to process.\n", __FUNCTION__); + + do_lit_tasklets(cluster, sched_task); + } + + if(!task_is_rt) { + put_cpu_no_resched(); + } +#else + + preempt_disable(); + + cluster = (is_realtime(sched_task)) ? + task_cpu_cluster(sched_task) : + remote_cluster(smp_processor_id()); + + if(cluster && cluster->pending_tasklets.head != NULL) { + TRACE("%s: There are tasklets to process.\n", __FUNCTION__); + do_lit_tasklets(cluster, sched_task); + } + + preempt_enable_no_resched(); + +#endif +} + + +static void __add_pai_tasklet(struct tasklet_struct* tasklet, crm_domain_t* cluster) +{ + struct tasklet_struct* step; + + /* + step = cluster->pending_tasklets.head; + TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__); + while(step != NULL){ + TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); + step = step->next; + } + TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1); + TRACE("%s: done.\n", __FUNCTION__); + */ + + tasklet->next = NULL; // make sure there are no old values floating around + + step = cluster->pending_tasklets.head; + if(step == NULL) { + TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid); + // insert at tail. + *(cluster->pending_tasklets.tail) = tasklet; + cluster->pending_tasklets.tail = &(tasklet->next); + } + else if((*(cluster->pending_tasklets.tail) != NULL) && + rm_higher_prio((*(cluster->pending_tasklets.tail))->owner, tasklet->owner)) { + // insert at tail. + TRACE("%s: tasklet belongs at end. 
inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid); + + *(cluster->pending_tasklets.tail) = tasklet; + cluster->pending_tasklets.tail = &(tasklet->next); + } + else { + + //WARN_ON(1 == 1); + + // insert the tasklet somewhere in the middle. + + TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__); + + while(step->next && rm_higher_prio(step->next->owner, tasklet->owner)) { + step = step->next; + } + + // insert tasklet right before step->next. + + TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1); + + tasklet->next = step->next; + step->next = tasklet; + + // patch up the head if needed. + if(cluster->pending_tasklets.head == step) + { + TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid); + cluster->pending_tasklets.head = tasklet; + } + } + + /* + step = cluster->pending_tasklets.head; + TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__); + while(step != NULL){ + TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); + step = step->next; + } + TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1); + TRACE("%s: done.\n", __FUNCTION__); + */ + + // TODO: Maintain this list in priority order. + // tasklet->next = NULL; + // *(cluster->pending_tasklets.tail) = tasklet; + // cluster->pending_tasklets.tail = &tasklet->next; +} + +static int enqueue_pai_tasklet(struct tasklet_struct* tasklet) +{ + crm_domain_t *cluster = NULL; + cpu_entry_t *targetCPU = NULL; + int thisCPU; + int runLocal = 0; + int runNow = 0; + unsigned long flags; + + if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner))) + { + TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); + return 0; + } + + cluster = task_cpu_cluster(tasklet->owner); + + raw_spin_lock_irqsave(&cluster->crm_lock, flags); + + thisCPU = smp_processor_id(); + +#if 1 +#ifdef CONFIG_SCHED_CPU_AFFINITY + { + cpu_entry_t* affinity = NULL; + + // use this CPU if it is in our cluster and isn't running any RT work. + if(cpu_isset(thisCPU, *cluster->cpu_map) && (__get_cpu_var(crm_cpu_entries).linked == NULL)) { + affinity = &(__get_cpu_var(crm_cpu_entries)); + } + else { + // this CPU is busy or shouldn't run tasklet in this cluster. + // look for available near by CPUs. + // NOTE: Affinity towards owner and not this CPU. Is this right? 
+ affinity = + crm_get_nearest_available_cpu(cluster, + &per_cpu(crm_cpu_entries, task_cpu(tasklet->owner))); + } + + targetCPU = affinity; + } +#endif +#endif + + if (targetCPU == NULL) { + targetCPU = lowest_prio_cpu(cluster); + } + + if (rm_higher_prio(tasklet->owner, targetCPU->linked)) { + if (thisCPU == targetCPU->cpu) { + TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__); + runLocal = 1; + runNow = 1; + } + else { + TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__); + runLocal = 0; + runNow = 1; + } + } + else { + runLocal = 0; + runNow = 0; + } + + if(!runLocal) { + // enqueue the tasklet + __add_pai_tasklet(tasklet, cluster); + } + + raw_spin_unlock_irqrestore(&cluster->crm_lock, flags); + + + if (runLocal /*&& runNow */) { // runNow == 1 is implied + TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__); + __do_lit_tasklet(tasklet, 0ul); + } + else if (runNow /*&& !runLocal */) { // runLocal == 0 is implied + TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu); + preempt(targetCPU); // need to be protected by crm_lock? + } + else { + TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__); + } + + return(1); // success +} + + +#endif + + + + + + + + + + + + + + + + + + + + + + + + + + /* Getting schedule() right is a bit tricky. schedule() may not make any * assumptions on the state of the current task since it may be called for a @@ -544,7 +975,7 @@ static void crm_task_new(struct task_struct * t, int on_rq, int running) cpu_entry_t* entry; crm_domain_t* cluster; - TRACE("gsn edf: task new %d\n", t->pid); + TRACE("crm: task new %d\n", t->pid); /* the cluster doesn't change even if t is running */ cluster = task_cpu_cluster(t); @@ -650,6 +1081,10 @@ static void crm_task_exit(struct task_struct * t) } raw_spin_unlock_irqrestore(&cluster->crm_lock, flags); +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD + flush_tasklets(cluster, t); +#endif + BUG_ON(!is_realtime(t)); TRACE_TASK(t, "RIP\n"); } @@ -1467,6 +1902,11 @@ static long crm_activate_plugin(void) bheap_init(&(crm[i].cpu_heap)); rm_domain_init(&(crm[i].domain), NULL, crm_release_jobs); +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD + crm[i].pending_tasklets.head = NULL; + crm[i].pending_tasklets.tail = &(crm[i].pending_tasklets.head); +#endif + if(!zalloc_cpumask_var(&crm[i].cpu_map, GFP_ATOMIC)) return -ENOMEM; } @@ -1578,6 +2018,10 @@ static struct sched_plugin crm_plugin __cacheline_aligned_in_smp = { #ifdef CONFIG_LITMUS_SOFTIRQD .set_prio_inh_klitirqd = set_priority_inheritance_klitirqd, .clear_prio_inh_klitirqd = clear_priority_inheritance_klitirqd, +#endif +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD + .enqueue_pai_tasklet = enqueue_pai_tasklet, + .run_tasklets = run_tasklets, #endif }; diff --git a/litmus/sched_crm_srt.c b/litmus/sched_crm_srt.c index 4473f35e64cd..c0004354573d 100644 --- a/litmus/sched_crm_srt.c +++ b/litmus/sched_crm_srt.c @@ -55,6 +55,10 @@ #include #endif +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD +#include +#endif + #ifdef CONFIG_LITMUS_NVIDIA #include #endif @@ -91,6 +95,15 @@ DEFINE_PER_CPU(cpu_entry_t, crm_srt_cpu_entries); #define test_will_schedule(cpu) \ (atomic_read(&per_cpu(crm_srt_cpu_entries, cpu).will_schedule)) + +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD +struct tasklet_head +{ + struct tasklet_struct *head; + struct tasklet_struct **tail; +}; +#endif + /* * In C-RM-SRT there is a crm_srt domain _per_ cluster * The number of clusters is dynamically determined accordingly to the @@ -108,6 +121,12 @@ typedef struct clusterdomain { struct bheap cpu_heap; /* 
lock for this cluster */ #define crm_srt_lock domain.ready_lock + + +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD + struct tasklet_head pending_tasklets; +#endif + } crm_srt_domain_t; /* a crm_srt_domain per cluster; allocation is done at init/activation time */ @@ -251,7 +270,7 @@ static void preempt(cpu_entry_t *entry) preempt_if_preemptable(entry->scheduled, entry->cpu); } -/* requeue - Put an unlinked task into gsn-edf domain. +/* requeue - Put an unlinked task into c-rm-srt domain. * Caller must hold crm_srt_lock. */ static noinline void requeue(struct task_struct* task) @@ -395,6 +414,415 @@ static void crm_srt_tick(struct task_struct* t) } } + + + + + + + + + + +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD + + +static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed) +{ + if (!atomic_read(&tasklet->count)) { + sched_trace_tasklet_begin(tasklet->owner); + + if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state)) + { + BUG(); + } + TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n", __FUNCTION__, tasklet->owner->pid, flushed); + tasklet->func(tasklet->data); + tasklet_unlock(tasklet); + + sched_trace_tasklet_end(tasklet->owner, flushed); + } + else { + BUG(); + } +} + + +static void __extract_tasklets(crm_srt_domain_t* cluster, struct task_struct* task, struct tasklet_head* task_tasklets) +{ + struct tasklet_struct* step; + struct tasklet_struct* tasklet; + struct tasklet_struct* prev; + + task_tasklets->head = NULL; + task_tasklets->tail = &(task_tasklets->head); + + prev = NULL; + for(step = cluster->pending_tasklets.head; step != NULL; step = step->next) + { + if(step->owner == task) + { + TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid); + + tasklet = step; + + if(prev) { + prev->next = tasklet->next; + } + else if(cluster->pending_tasklets.head == tasklet) { + // we're at the head. + cluster->pending_tasklets.head = tasklet->next; + } + + if(cluster->pending_tasklets.tail == &tasklet) { + // we're at the tail + if(prev) { + cluster->pending_tasklets.tail = &prev; + } + else { + cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head); + } + } + + tasklet->next = NULL; + *(task_tasklets->tail) = tasklet; + task_tasklets->tail = &(tasklet->next); + } + else { + prev = step; + } + } +} + +static void flush_tasklets(crm_srt_domain_t* cluster, struct task_struct* task) +{ + unsigned long flags; + struct tasklet_head task_tasklets; + struct tasklet_struct* step; + + raw_spin_lock_irqsave(&cluster->crm_srt_lock, flags); + __extract_tasklets(cluster, task, &task_tasklets); + raw_spin_unlock_irqrestore(&cluster->crm_srt_lock, flags); + + if(cluster->pending_tasklets.head != NULL) { + TRACE("%s: Flushing tasklets for %d...\n", __FUNCTION__, task->pid); + } + + // now execute any flushed tasklets. + for(step = cluster->pending_tasklets.head; step != NULL; /**/) + { + struct tasklet_struct* temp = step->next; + + step->next = NULL; + __do_lit_tasklet(step, 1ul); + + step = temp; + } +} + + +static void do_lit_tasklets(crm_srt_domain_t* cluster, struct task_struct* sched_task) +{ + int work_to_do = 1; + struct tasklet_struct *tasklet = NULL; + //struct tasklet_struct *step; + unsigned long flags; + + while(work_to_do) { + // remove tasklet at head of list if it has higher priority. 
+ raw_spin_lock_irqsave(&cluster->crm_srt_lock, flags); + + /* + step = cluster->pending_tasklets.head; + TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__); + while(step != NULL){ + TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); + step = step->next; + } + TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1); + TRACE("%s: done.\n", __FUNCTION__); + */ + + if(cluster->pending_tasklets.head != NULL) { + // remove tasklet at head. + tasklet = cluster->pending_tasklets.head; + + if(rm_srt_higher_prio(tasklet->owner, sched_task)) { + + if(NULL == tasklet->next) { + // tasklet is at the head, list only has one element + TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); + cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head); + } + + // remove the tasklet from the queue + cluster->pending_tasklets.head = tasklet->next; + + TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); + } + else { + TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id()); + tasklet = NULL; + } + } + else { + TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__); + } + + /* + step = cluster->pending_tasklets.head; + TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__); + while(step != NULL){ + TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); + step = step->next; + } + TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1); + TRACE("%s: done.\n", __FUNCTION__); + */ + + raw_spin_unlock_irqrestore(&cluster->crm_srt_lock, flags); + + if(tasklet) { + __do_lit_tasklet(tasklet, 0ul); + tasklet = NULL; + } + else { + work_to_do = 0; + } + } + + //TRACE("%s: exited.\n", __FUNCTION__); +} + + +static void run_tasklets(struct task_struct* sched_task) +{ + crm_srt_domain_t* cluster; + +#if 0 + int task_is_rt = is_realtime(sched_task); + crm_srt_domain_t* cluster; + + if(is_realtime(sched_task)) { + cluster = task_cpu_cluster(sched_task); + } + else { + cluster = remote_cluster(get_cpu()); + } + + if(cluster && cluster->pending_tasklets.head != NULL) { + TRACE("%s: There are tasklets to process.\n", __FUNCTION__); + + do_lit_tasklets(cluster, sched_task); + } + + if(!task_is_rt) { + put_cpu_no_resched(); + } +#else + + preempt_disable(); + + cluster = (is_realtime(sched_task)) ? + task_cpu_cluster(sched_task) : + remote_cluster(smp_processor_id()); + + if(cluster && cluster->pending_tasklets.head != NULL) { + TRACE("%s: There are tasklets to process.\n", __FUNCTION__); + do_lit_tasklets(cluster, sched_task); + } + + preempt_enable_no_resched(); + +#endif +} + + +static void __add_pai_tasklet(struct tasklet_struct* tasklet, crm_srt_domain_t* cluster) +{ + struct tasklet_struct* step; + + /* + step = cluster->pending_tasklets.head; + TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__); + while(step != NULL){ + TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); + step = step->next; + } + TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? 
(*(cluster->pending_tasklets.tail))->owner->pid : -1); + TRACE("%s: done.\n", __FUNCTION__); + */ + + tasklet->next = NULL; // make sure there are no old values floating around + + step = cluster->pending_tasklets.head; + if(step == NULL) { + TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid); + // insert at tail. + *(cluster->pending_tasklets.tail) = tasklet; + cluster->pending_tasklets.tail = &(tasklet->next); + } + else if((*(cluster->pending_tasklets.tail) != NULL) && + rm_srt_higher_prio((*(cluster->pending_tasklets.tail))->owner, tasklet->owner)) { + // insert at tail. + TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid); + + *(cluster->pending_tasklets.tail) = tasklet; + cluster->pending_tasklets.tail = &(tasklet->next); + } + else { + + //WARN_ON(1 == 1); + + // insert the tasklet somewhere in the middle. + + TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__); + + while(step->next && rm_srt_higher_prio(step->next->owner, tasklet->owner)) { + step = step->next; + } + + // insert tasklet right before step->next. + + TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1); + + tasklet->next = step->next; + step->next = tasklet; + + // patch up the head if needed. + if(cluster->pending_tasklets.head == step) + { + TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid); + cluster->pending_tasklets.head = tasklet; + } + } + + /* + step = cluster->pending_tasklets.head; + TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__); + while(step != NULL){ + TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); + step = step->next; + } + TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(cluster->pending_tasklets.tail), (*(cluster->pending_tasklets.tail) != NULL) ? (*(cluster->pending_tasklets.tail))->owner->pid : -1); + TRACE("%s: done.\n", __FUNCTION__); + */ + + // TODO: Maintain this list in priority order. + // tasklet->next = NULL; + // *(cluster->pending_tasklets.tail) = tasklet; + // cluster->pending_tasklets.tail = &tasklet->next; +} + +static int enqueue_pai_tasklet(struct tasklet_struct* tasklet) +{ + crm_srt_domain_t *cluster = NULL; + cpu_entry_t *targetCPU = NULL; + int thisCPU; + int runLocal = 0; + int runNow = 0; + unsigned long flags; + + if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner))) + { + TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); + return 0; + } + + cluster = task_cpu_cluster(tasklet->owner); + + raw_spin_lock_irqsave(&cluster->crm_srt_lock, flags); + + thisCPU = smp_processor_id(); + +#if 1 +#ifdef CONFIG_SCHED_CPU_AFFINITY + { + cpu_entry_t* affinity = NULL; + + // use this CPU if it is in our cluster and isn't running any RT work. + if(cpu_isset(thisCPU, *cluster->cpu_map) && (__get_cpu_var(crm_srt_cpu_entries).linked == NULL)) { + affinity = &(__get_cpu_var(crm_srt_cpu_entries)); + } + else { + // this CPU is busy or shouldn't run tasklet in this cluster. + // look for available near by CPUs. + // NOTE: Affinity towards owner and not this CPU. Is this right? 
+ affinity = + crm_srt_get_nearest_available_cpu(cluster, + &per_cpu(crm_srt_cpu_entries, task_cpu(tasklet->owner))); + } + + targetCPU = affinity; + } +#endif +#endif + + if (targetCPU == NULL) { + targetCPU = lowest_prio_cpu(cluster); + } + + if (rm_srt_higher_prio(tasklet->owner, targetCPU->linked)) { + if (thisCPU == targetCPU->cpu) { + TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__); + runLocal = 1; + runNow = 1; + } + else { + TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__); + runLocal = 0; + runNow = 1; + } + } + else { + runLocal = 0; + runNow = 0; + } + + if(!runLocal) { + // enqueue the tasklet + __add_pai_tasklet(tasklet, cluster); + } + + raw_spin_unlock_irqrestore(&cluster->crm_srt_lock, flags); + + + if (runLocal /*&& runNow */) { // runNow == 1 is implied + TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__); + __do_lit_tasklet(tasklet, 0ul); + } + else if (runNow /*&& !runLocal */) { // runLocal == 0 is implied + TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu); + preempt(targetCPU); // need to be protected by crm_srt_lock? + } + else { + TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__); + } + + return(1); // success +} + + +#endif + + + + + + + + + + + + + + + + + + + /* Getting schedule() right is a bit tricky. schedule() may not make any * assumptions on the state of the current task since it may be called for a * number of reasons. The reasons include a scheduler_tick() determined that it @@ -544,7 +972,7 @@ static void crm_srt_task_new(struct task_struct * t, int on_rq, int running) cpu_entry_t* entry; crm_srt_domain_t* cluster; - TRACE("gsn edf: task new %d\n", t->pid); + TRACE("crm srt: task new %d\n", t->pid); /* the cluster doesn't change even if t is running */ cluster = task_cpu_cluster(t); @@ -650,6 +1078,10 @@ static void crm_srt_task_exit(struct task_struct * t) } raw_spin_unlock_irqrestore(&cluster->crm_srt_lock, flags); +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD + flush_tasklets(cluster, t); +#endif + BUG_ON(!is_realtime(t)); TRACE_TASK(t, "RIP\n"); } @@ -1467,6 +1899,11 @@ static long crm_srt_activate_plugin(void) bheap_init(&(crm_srt[i].cpu_heap)); rm_srt_domain_init(&(crm_srt[i].domain), NULL, crm_srt_release_jobs); +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD + crm_srt[i].pending_tasklets.head = NULL; + crm_srt[i].pending_tasklets.tail = &(crm_srt[i].pending_tasklets.head); +#endif + if(!zalloc_cpumask_var(&crm_srt[i].cpu_map, GFP_ATOMIC)) return -ENOMEM; } @@ -1578,6 +2015,10 @@ static struct sched_plugin crm_srt_plugin __cacheline_aligned_in_smp = { #ifdef CONFIG_LITMUS_SOFTIRQD .set_prio_inh_klitirqd = set_priority_inheritance_klitirqd, .clear_prio_inh_klitirqd = clear_priority_inheritance_klitirqd, +#endif +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD + .enqueue_pai_tasklet = enqueue_pai_tasklet, + .run_tasklets = run_tasklets, #endif }; diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index ac7685fe69f0..b40ff7ba4f0e 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c @@ -35,6 +35,10 @@ #include #endif +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD +#include +#endif + #ifdef CONFIG_LITMUS_NVIDIA #include #endif @@ -126,6 +130,16 @@ static struct bheap gsnedf_cpu_heap; static rt_domain_t gsnedf; #define gsnedf_lock (gsnedf.ready_lock) +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD +struct tasklet_head +{ + struct tasklet_struct *head; + struct tasklet_struct **tail; +}; + +struct tasklet_head gsnedf_pending_tasklets; +#endif + /* Uncomment this if you want to see all scheduling 
decisions in the * TRACE() log. @@ -393,6 +407,410 @@ static void gsnedf_tick(struct task_struct* t) } } + + + + + + + + + + + +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD + + +static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed) +{ + if (!atomic_read(&tasklet->count)) { + sched_trace_tasklet_begin(tasklet->owner); + + if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state)) + { + BUG(); + } + TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n", __FUNCTION__, tasklet->owner->pid, flushed); + tasklet->func(tasklet->data); + tasklet_unlock(tasklet); + + sched_trace_tasklet_end(tasklet->owner, flushed); + } + else { + BUG(); + } +} + + +static void __extract_tasklets(struct task_struct* task, struct tasklet_head* task_tasklets) +{ + struct tasklet_struct* step; + struct tasklet_struct* tasklet; + struct tasklet_struct* prev; + + task_tasklets->head = NULL; + task_tasklets->tail = &(task_tasklets->head); + + prev = NULL; + for(step = gsnedf_pending_tasklets.head; step != NULL; step = step->next) + { + if(step->owner == task) + { + TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid); + + tasklet = step; + + if(prev) { + prev->next = tasklet->next; + } + else if(gsnedf_pending_tasklets.head == tasklet) { + // we're at the head. + gsnedf_pending_tasklets.head = tasklet->next; + } + + if(gsnedf_pending_tasklets.tail == &tasklet) { + // we're at the tail + if(prev) { + gsnedf_pending_tasklets.tail = &prev; + } + else { + gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head); + } + } + + tasklet->next = NULL; + *(task_tasklets->tail) = tasklet; + task_tasklets->tail = &(tasklet->next); + } + else { + prev = step; + } + } +} + +static void flush_tasklets(struct task_struct* task) +{ + unsigned long flags; + struct tasklet_head task_tasklets; + struct tasklet_struct* step; + + raw_spin_lock_irqsave(&gsnedf_lock, flags); + __extract_tasklets(task, &task_tasklets); + raw_spin_unlock_irqrestore(&gsnedf_lock, flags); + + if(gsnedf_pending_tasklets.head != NULL) { + TRACE("%s: Flushing tasklets for %d...\n", __FUNCTION__, task->pid); + } + + // now execute any flushed tasklets. + for(step = gsnedf_pending_tasklets.head; step != NULL; /**/) + { + struct tasklet_struct* temp = step->next; + + step->next = NULL; + __do_lit_tasklet(step, 1ul); + + step = temp; + } +} + + +static void do_lit_tasklets(struct task_struct* sched_task) +{ + int work_to_do = 1; + struct tasklet_struct *tasklet = NULL; + //struct tasklet_struct *step; + unsigned long flags; + + while(work_to_do) { + // remove tasklet at head of list if it has higher priority. + raw_spin_lock_irqsave(&gsnedf_lock, flags); + + /* + step = gsnedf_pending_tasklets.head; + TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__); + while(step != NULL){ + TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); + step = step->next; + } + TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(gsnedf_pending_tasklets.tail), (*(gsnedf_pending_tasklets.tail) != NULL) ? (*(gsnedf_pending_tasklets.tail))->owner->pid : -1); + TRACE("%s: done.\n", __FUNCTION__); + */ + + + if(gsnedf_pending_tasklets.head != NULL) { + // remove tasklet at head. 
+ tasklet = gsnedf_pending_tasklets.head; + + if(edf_higher_prio(tasklet->owner, sched_task)) { + + if(NULL == tasklet->next) { + // tasklet is at the head, list only has one element + TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); + gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head); + } + + // remove the tasklet from the queue + gsnedf_pending_tasklets.head = tasklet->next; + + TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); + } + else { + TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id()); + tasklet = NULL; + } + } + else { + TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__); + } + + + /* + step = gsnedf_pending_tasklets.head; + TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__); + while(step != NULL){ + TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); + step = step->next; + } + TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(gsnedf_pending_tasklets.tail), (*(gsnedf_pending_tasklets.tail) != NULL) ? (*(gsnedf_pending_tasklets.tail))->owner->pid : -1); + TRACE("%s: done.\n", __FUNCTION__); + */ + + + raw_spin_unlock_irqrestore(&gsnedf_lock, flags); + + if(tasklet) { + __do_lit_tasklet(tasklet, 0ul); + tasklet = NULL; + } + else { + work_to_do = 0; + } + } + + //TRACE("%s: exited.\n", __FUNCTION__); +} + + +static void run_tasklets(struct task_struct* sched_task) +{ +#if 0 + int task_is_rt = is_realtime(sched_task); + cedf_domain_t* cluster; + + if(is_realtime(sched_task)) { + cluster = task_cpu_cluster(sched_task); + } + else { + cluster = remote_cluster(get_cpu()); + } + + if(cluster && gsnedf_pending_tasklets.head != NULL) { + TRACE("%s: There are tasklets to process.\n", __FUNCTION__); + + do_lit_tasklets(cluster, sched_task); + } + + if(!task_is_rt) { + put_cpu_no_resched(); + } +#else + + preempt_disable(); + + if(gsnedf_pending_tasklets.head != NULL) { + TRACE("%s: There are tasklets to process.\n", __FUNCTION__); + do_lit_tasklets(sched_task); + } + + preempt_enable_no_resched(); + +#endif +} + + +static void __add_pai_tasklet(struct tasklet_struct* tasklet) +{ + struct tasklet_struct* step; + + /* + step = gsnedf_pending_tasklets.head; + TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__); + while(step != NULL){ + TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); + step = step->next; + } + TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(gsnedf_pending_tasklets.tail), (*(gsnedf_pending_tasklets.tail) != NULL) ? (*(gsnedf_pending_tasklets.tail))->owner->pid : -1); + TRACE("%s: done.\n", __FUNCTION__); + */ + + + tasklet->next = NULL; // make sure there are no old values floating around + + step = gsnedf_pending_tasklets.head; + if(step == NULL) { + TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid); + // insert at tail. + *(gsnedf_pending_tasklets.tail) = tasklet; + gsnedf_pending_tasklets.tail = &(tasklet->next); + } + else if((*(gsnedf_pending_tasklets.tail) != NULL) && + edf_higher_prio((*(gsnedf_pending_tasklets.tail))->owner, tasklet->owner)) { + // insert at tail. + TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid); + + *(gsnedf_pending_tasklets.tail) = tasklet; + gsnedf_pending_tasklets.tail = &(tasklet->next); + } + else { + + //WARN_ON(1 == 1); + + // insert the tasklet somewhere in the middle. 
+ + TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__); + + while(step->next && edf_higher_prio(step->next->owner, tasklet->owner)) { + step = step->next; + } + + // insert tasklet right before step->next. + + TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1); + + tasklet->next = step->next; + step->next = tasklet; + + // patch up the head if needed. + if(gsnedf_pending_tasklets.head == step) + { + TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid); + gsnedf_pending_tasklets.head = tasklet; + } + } + + /* + step = gsnedf_pending_tasklets.head; + TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__); + while(step != NULL){ + TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); + step = step->next; + } + TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(gsnedf_pending_tasklets.tail), (*(gsnedf_pending_tasklets.tail) != NULL) ? (*(gsnedf_pending_tasklets.tail))->owner->pid : -1); + TRACE("%s: done.\n", __FUNCTION__); + */ + + // TODO: Maintain this list in priority order. + // tasklet->next = NULL; + // *(gsnedf_pending_tasklets.tail) = tasklet; + // gsnedf_pending_tasklets.tail = &tasklet->next; +} + +static int enqueue_pai_tasklet(struct tasklet_struct* tasklet) +{ + cpu_entry_t *targetCPU = NULL; + int thisCPU; + int runLocal = 0; + int runNow = 0; + unsigned long flags; + + if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner))) + { + TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); + return 0; + } + + + raw_spin_lock_irqsave(&gsnedf_lock, flags); + + thisCPU = smp_processor_id(); + +#if 1 +#ifdef CONFIG_SCHED_CPU_AFFINITY + { + cpu_entry_t* affinity = NULL; + + // use this CPU if it is in our cluster and isn't running any RT work. + if( +#ifdef CONFIG_RELEASE_MASTER + (thisCPU != gsnedf.release_master) && +#endif + (__get_cpu_var(gsnedf_cpu_entries).linked == NULL)) { + affinity = &(__get_cpu_var(gsnedf_cpu_entries)); + } + else { + // this CPU is busy or shouldn't run tasklet in this cluster. + // look for available near by CPUs. + // NOTE: Affinity towards owner and not this CPU. Is this right? + affinity = + gsnedf_get_nearest_available_cpu( + &per_cpu(gsnedf_cpu_entries, task_cpu(tasklet->owner))); + } + + targetCPU = affinity; + } +#endif +#endif + + if (targetCPU == NULL) { + targetCPU = lowest_prio_cpu(); + } + + if (edf_higher_prio(tasklet->owner, targetCPU->linked)) { + if (thisCPU == targetCPU->cpu) { + TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__); + runLocal = 1; + runNow = 1; + } + else { + TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__); + runLocal = 0; + runNow = 1; + } + } + else { + runLocal = 0; + runNow = 0; + } + + if(!runLocal) { + // enqueue the tasklet + __add_pai_tasklet(tasklet); + } + + raw_spin_unlock_irqrestore(&gsnedf_lock, flags); + + + if (runLocal /*&& runNow */) { // runNow == 1 is implied + TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__); + __do_lit_tasklet(tasklet, 0ul); + } + else if (runNow /*&& !runLocal */) { // runLocal == 0 is implied + TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu); + preempt(targetCPU); // need to be protected by cedf_lock? + } + else { + TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__); + } + + return(1); // success +} + + +#endif + + + + + + + + + + + + + /* Getting schedule() right is a bit tricky. 
schedule() may not make any * assumptions on the state of the current task since it may be called for a * number of reasons. The reasons include a scheduler_tick() determined that it @@ -592,7 +1010,7 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running) static void gsnedf_task_wake_up(struct task_struct *task) { unsigned long flags; - lt_t now; + //lt_t now; TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); @@ -660,6 +1078,10 @@ static void gsnedf_task_exit(struct task_struct * t) } raw_spin_unlock_irqrestore(&gsnedf_lock, flags); +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD + flush_tasklets(t); +#endif + BUG_ON(!is_realtime(t)); TRACE_TASK(t, "RIP\n"); } @@ -1602,6 +2024,11 @@ static long gsnedf_activate_plugin(void) } #endif } + +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD + gsnedf_pending_tasklets.head = NULL; + gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head); +#endif #ifdef CONFIG_LITMUS_SOFTIRQD spawn_klitirqd(NULL); @@ -1636,7 +2063,10 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { .set_prio_inh_klitirqd = set_priority_inheritance_klitirqd, .clear_prio_inh_klitirqd = clear_priority_inheritance_klitirqd, #endif - +#ifdef CONFIG_LITMUS_PAI_SOFTIRQD + .enqueue_pai_tasklet = enqueue_pai_tasklet, + .run_tasklets = run_tasklets, +#endif }; -- cgit v1.2.2