From 0f6a8e02773f8c23b5b6a3dbfa044e50c9d7d811 Mon Sep 17 00:00:00 2001 From: Glenn Elliott Date: Thu, 31 Mar 2011 10:47:01 -0400 Subject: Improve FMLP queue management. The next owner of a FMLP-protected resource is dequeued from the FMLP FIFO queue by unlock() (when the resource is freed by the previous owner) instead of performing the dequeue by the next owner immediately after it has been woken up. This simplifies the code a little bit and also reduces potential spinlock contention. --- litmus/locking.c | 12 +++++++----- litmus/sched_gsn_edf.c | 4 +--- litmus/sched_psn_edf.c | 6 +----- 3 files changed, 9 insertions(+), 13 deletions(-) (limited to 'litmus') diff --git a/litmus/locking.c b/litmus/locking.c index 728b56835cf7..2693f1aca859 100644 --- a/litmus/locking.c +++ b/litmus/locking.c @@ -107,16 +107,18 @@ asmlinkage long sys_litmus_unlock(int lock_od) return err; } -struct task_struct* waitqueue_first(wait_queue_head_t *wq) +struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq) { - wait_queue_t *q; + wait_queue_t* q; + struct task_struct* t = NULL; if (waitqueue_active(wq)) { q = list_entry(wq->task_list.next, wait_queue_t, task_list); - return (struct task_struct*) q->private; - } else - return NULL; + t = (struct task_struct*) q->private; + __remove_wait_queue(wq, q); + } + return(t); } diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index c5c9600c33d8..08b8847ede97 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c @@ -776,8 +776,6 @@ int gsnedf_fmlp_lock(struct litmus_lock* l) * ->owner. We can thus check it without acquiring the spin * lock. */ BUG_ON(sem->owner != t); - - remove_wait_queue(&sem->wait, &wait); } else { /* it's ours now */ sem->owner = t; @@ -803,7 +801,7 @@ int gsnedf_fmlp_unlock(struct litmus_lock* l) } /* check if there are jobs waiting for this resource */ - next = waitqueue_first(&sem->wait); + next = __waitqueue_remove_first(&sem->wait); if (next) { /* next becomes the resouce holder */ sem->owner = next; diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c index abb06fa53e3a..71c02409efa2 100644 --- a/litmus/sched_psn_edf.c +++ b/litmus/sched_psn_edf.c @@ -442,10 +442,6 @@ int psnedf_fmlp_lock(struct litmus_lock* l) * ->owner. We can thus check it without acquiring the spin * lock. */ BUG_ON(sem->owner != t); - - /* FIXME: could we punt the dequeuing to the previous job, - * which is holding the spinlock anyway? */ - remove_wait_queue(&sem->wait, &wait); } else { /* it's ours now */ sem->owner = t; @@ -478,7 +474,7 @@ int psnedf_fmlp_unlock(struct litmus_lock* l) unboost_priority(t); /* check if there are jobs waiting for this resource */ - next = waitqueue_first(&sem->wait); + next = __waitqueue_remove_first(&sem->wait); if (next) { /* boost next job */ boost_priority(next); -- cgit v1.2.2 From 6d4cc883ec2470500be6c95fd2e7c6944e89c3e8 Mon Sep 17 00:00:00 2001 From: "Bjoern B. Brandenburg" Date: Sat, 12 Feb 2011 16:40:43 -0500 Subject: bugfix: release master CPU must signal task was picked Otherwise, the release master CPU may try to reschedule in an infinite loop. --- litmus/sched_gsn_edf.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'litmus') diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index 08b8847ede97..3092797480f8 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c @@ -374,8 +374,10 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) /* Bail out early if we are the release master. 
* The release master never schedules any real-time tasks. */ - if (gsnedf.release_master == entry->cpu) + if (gsnedf.release_master == entry->cpu) { + sched_state_task_picked(); return NULL; + } #endif raw_spin_lock(&gsnedf_lock); -- cgit v1.2.2 From 7d754596756240fa918b94cd0c3011c77a638987 Mon Sep 17 00:00:00 2001 From: Christopher Kenna Date: Sat, 16 Apr 2011 20:12:00 -0400 Subject: LITMUS Core: Check for valid class in RT-param syscall. --- litmus/litmus.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'litmus') diff --git a/litmus/litmus.c b/litmus/litmus.c index 11ccaafd50de..26938acacafc 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c @@ -110,6 +110,14 @@ asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param) "because wcet > period\n", pid); goto out_unlock; } + if ( tp.cls != RT_CLASS_HARD && + tp.cls != RT_CLASS_SOFT && + tp.cls != RT_CLASS_BEST_EFFORT) + { + printk(KERN_INFO "litmus: real-time task %d rejected " + "because its class is invalid\n"); + goto out_unlock; + } if (tp.budget_policy != NO_ENFORCEMENT && tp.budget_policy != QUANTUM_ENFORCEMENT && tp.budget_policy != PRECISE_ENFORCEMENT) -- cgit v1.2.2 From 56c5c609615322bfbda5adff94ce011eb3d28fef Mon Sep 17 00:00:00 2001 From: Andrea Bastoni Date: Sat, 27 Aug 2011 16:10:06 +0200 Subject: Fix prototype mismatching and synch syscall numbers * Update prototypes for switched_to(), prio_changed(), select_task_rq(). * Fix missing pid field in printk output. * Synchronize syscall numbers for arm and x86. --- litmus/litmus.c | 2 +- litmus/sched_litmus.c | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'litmus') diff --git a/litmus/litmus.c b/litmus/litmus.c index 26938acacafc..bb8c6c7e9dd1 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c @@ -115,7 +115,7 @@ asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param) tp.cls != RT_CLASS_BEST_EFFORT) { printk(KERN_INFO "litmus: real-time task %d rejected " - "because its class is invalid\n"); + "because its class is invalid\n", pid); goto out_unlock; } if (tp.budget_policy != NO_ENFORCEMENT && diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c index cb705e7f0466..5a15ce938984 100644 --- a/litmus/sched_litmus.c +++ b/litmus/sched_litmus.c @@ -252,12 +252,12 @@ static void task_tick_litmus(struct rq *rq, struct task_struct *p, int queued) return; } -static void switched_to_litmus(struct rq *rq, struct task_struct *p, int running) +static void switched_to_litmus(struct rq *rq, struct task_struct *p) { } static void prio_changed_litmus(struct rq *rq, struct task_struct *p, - int oldprio, int running) + int oldprio) { } @@ -283,8 +283,8 @@ static void set_curr_task_litmus(struct rq *rq) * We don't care about the scheduling domain; can gets called from * exec, fork, wakeup. */ -static int select_task_rq_litmus(struct rq *rq, struct task_struct *p, - int sd_flag, int flags) +static int +select_task_rq_litmus(struct task_struct *p, int sd_flag, int flags) { /* preemption is already disabled. * We don't want to change cpu here -- cgit v1.2.2 From 592eaca1409e55407e980f71b2ec604ca3610ba5 Mon Sep 17 00:00:00 2001 From: Glenn Elliott Date: Tue, 21 Jun 2011 01:29:34 -0400 Subject: Avoid needlessly costly migrations. CONFIG_SCHED_CPU_AFFINITY Given a choice between several available CPUs (unlinked) on which to schedule a task, let the scheduler select the CPU closest to where that task was previously scheduled. Hopefully, this will reduce cache migration penalties. 
Notes: SCHED_CPU_AFFINITY is dependent upon x86 (only x86 is supported at this time). Also PFair/PD^2 does not make use of this feature. Signed-off-by: Andrea Bastoni --- litmus/Kconfig | 19 +++++++++++++++++++ litmus/Makefile | 1 + litmus/affinity.c | 44 ++++++++++++++++++++++++++++++++++++++++++++ litmus/litmus.c | 8 ++++++++ litmus/sched_cedf.c | 33 +++++++++++++++++++++++++++++++++ litmus/sched_gsn_edf.c | 41 ++++++++++++++++++++++++++++++++++++++--- 6 files changed, 143 insertions(+), 3 deletions(-) create mode 100644 litmus/affinity.c (limited to 'litmus') diff --git a/litmus/Kconfig b/litmus/Kconfig index ad8dc8308cf0..d7fde6f97e14 100644 --- a/litmus/Kconfig +++ b/litmus/Kconfig @@ -62,6 +62,25 @@ config LITMUS_LOCKING endmenu +menu "Performance Enhancements" + +config SCHED_CPU_AFFINITY + bool "Local Migration Affinity" + depends on X86 + default y + help + Rescheduled tasks prefer CPUs near to their previously used CPU. This + may improve performance through possible preservation of cache affinity. + + Warning: May make bugs harder to find since tasks may migrate less often. + + NOTES: + * Feature is not utilized by PFair/PD^2. + + Say Yes if unsure. + +endmenu + menu "Tracing" config FEATHER_TRACE diff --git a/litmus/Makefile b/litmus/Makefile index ad9936e07b83..7338180f196f 100644 --- a/litmus/Makefile +++ b/litmus/Makefile @@ -21,6 +21,7 @@ obj-y = sched_plugin.o litmus.o \ obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o +obj-$(CONFIG_SCHED_CPU_AFFINITY) += affinity.o obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o diff --git a/litmus/affinity.c b/litmus/affinity.c new file mode 100644 index 000000000000..9adab7a3bcd7 --- /dev/null +++ b/litmus/affinity.c @@ -0,0 +1,44 @@ +#include + +#include + +struct neighborhood neigh_info[NR_CPUS]; + +/* called by _init_litmus() */ +void init_topology(void) { + int cpu; + int i; + int chk; + int depth = num_cache_leaves; + + if (depth > NUM_CACHE_LEVELS) + depth = NUM_CACHE_LEVELS; + + for_each_online_cpu(cpu) { + for (i = 0; i < depth; ++i) { + long unsigned int firstbits; + + chk = get_shared_cpu_map((struct cpumask *)&neigh_info[cpu].neighbors[i], cpu, i); + if (chk) { + /* failed */ + neigh_info[cpu].size[i] = 0; + } else { + /* size = num bits in mask */ + neigh_info[cpu].size[i] = + cpumask_weight((struct cpumask *)&neigh_info[cpu].neighbors[i]); + } + firstbits = *neigh_info[cpu].neighbors[i]->bits; + printk("CPU %d has %d neighbors at level %d. (mask = %lx)\n", + cpu, neigh_info[cpu].size[i], i, firstbits); + } + + /* set data for non-existent levels */ + for (; i < NUM_CACHE_LEVELS; ++i) { + neigh_info[cpu].size[i] = 0; + + printk("CPU %d has %d neighbors at level %d. 
(mask = %lx)\n", + cpu, neigh_info[cpu].size[i], i, 0lu); + } + } +} + diff --git a/litmus/litmus.c b/litmus/litmus.c index bb8c6c7e9dd1..73af6c3010d6 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c @@ -17,6 +17,10 @@ #include #include +#ifdef CONFIG_SCHED_CPU_AFFINITY +#include +#endif + /* Number of RT tasks that exist in the system */ atomic_t rt_task_count = ATOMIC_INIT(0); static DEFINE_RAW_SPINLOCK(task_transition_lock); @@ -540,6 +544,10 @@ static int __init _init_litmus(void) init_litmus_proc(); +#ifdef CONFIG_SCHED_CPU_AFFINITY + init_topology(); +#endif + return 0; } diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index 73fe1c442a0d..e29a9fe2a8e8 100644 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c @@ -43,6 +43,10 @@ #include +#ifdef CONFIG_SCHED_CPU_AFFINITY +#include +#endif + /* to configure the cluster size */ #include #include @@ -257,6 +261,23 @@ static noinline void requeue(struct task_struct* task) } } +#ifdef CONFIG_SCHED_CPU_AFFINITY +static cpu_entry_t* cedf_get_nearest_available_cpu( + cedf_domain_t *cluster, cpu_entry_t* start) +{ + cpu_entry_t* affinity; + + get_nearest_available_cpu(affinity, start, cedf_cpu_entries, -1); + + /* make sure CPU is in our cluster */ + if (affinity && cpu_isset(affinity->cpu, *cluster->cpu_map)) + return(affinity); + else + return(NULL); +} +#endif + + /* check for any necessary preemptions */ static void check_for_preemptions(cedf_domain_t *cluster) { @@ -270,8 +291,20 @@ static void check_for_preemptions(cedf_domain_t *cluster) task = __take_ready(&cluster->domain); TRACE("check_for_preemptions: attempting to link task %d to %d\n", task->pid, last->cpu); +#ifdef CONFIG_SCHED_CPU_AFFINITY + { + cpu_entry_t* affinity = + cedf_get_nearest_available_cpu(cluster, + &per_cpu(cedf_cpu_entries, task_cpu(task))); + if(affinity) + last = affinity; + else if(last->linked) + requeue(last->linked); + } +#else if (last->linked) requeue(last->linked); +#endif link_task_to_cpu(task, last); preempt(last); } diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index 3092797480f8..17926e9fccdc 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c @@ -23,6 +23,10 @@ #include +#ifdef CONFIG_SCHED_CPU_AFFINITY +#include +#endif + #include /* Overview of GSN-EDF operations. 
@@ -253,21 +257,52 @@ static noinline void requeue(struct task_struct* task) } } +#ifdef CONFIG_SCHED_CPU_AFFINITY +static cpu_entry_t* gsnedf_get_nearest_available_cpu(cpu_entry_t* start) +{ + cpu_entry_t* affinity; + + get_nearest_available_cpu(affinity, start, gsnedf_cpu_entries, +#ifdef CONFIG_RELEASE_MASTER + gsnedf.release_master +#else + -1 +#endif + ); + + return(affinity); +} +#endif + /* check for any necessary preemptions */ static void check_for_preemptions(void) { struct task_struct *task; cpu_entry_t* last; - for(last = lowest_prio_cpu(); - edf_preemption_needed(&gsnedf, last->linked); - last = lowest_prio_cpu()) { + for (last = lowest_prio_cpu(); + edf_preemption_needed(&gsnedf, last->linked); + last = lowest_prio_cpu()) { /* preemption necessary */ task = __take_ready(&gsnedf); TRACE("check_for_preemptions: attempting to link task %d to %d\n", task->pid, last->cpu); + +#ifdef CONFIG_SCHED_CPU_AFFINITY + { + cpu_entry_t* affinity = + gsnedf_get_nearest_available_cpu( + &per_cpu(gsnedf_cpu_entries, task_cpu(task))); + if (affinity) + last = affinity; + else if (last->linked) + requeue(last->linked); + } +#else if (last->linked) requeue(last->linked); +#endif + link_task_to_cpu(task, last); preempt(last); } -- cgit v1.2.2 From 17e34f413750b26aa493f1f8307f111bc5d487de Mon Sep 17 00:00:00 2001 From: "Bjoern B. Brandenburg" Date: Thu, 10 Feb 2011 20:05:15 -0500 Subject: PSN-EDF: add release master support We can give up a processor under partitioning, too. --- litmus/sched_psn_edf.c | 36 ++++++++++++++++++++++++++++-------- 1 file changed, 28 insertions(+), 8 deletions(-) (limited to 'litmus') diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c index 71c02409efa2..eb444ecf1288 100644 --- a/litmus/sched_psn_edf.c +++ b/litmus/sched_psn_edf.c @@ -383,12 +383,6 @@ static unsigned int psnedf_get_srp_prio(struct task_struct* t) return get_rt_period(t); } -static long psnedf_activate_plugin(void) -{ - get_srp_prio = psnedf_get_srp_prio; - return 0; -} - /* ******************** FMLP support ********************** */ /* struct for semaphore with priority inheritance */ @@ -577,9 +571,35 @@ static long psnedf_allocate_lock(struct litmus_lock **lock, int type, #endif + +static long psnedf_activate_plugin(void) +{ +#ifdef CONFIG_RELEASE_MASTER + int cpu; + + for_each_online_cpu(cpu) { + remote_edf(cpu)->release_master = atomic_read(&release_master_cpu); + } +#endif + +#ifdef CONFIG_LITMUS_LOCKING + get_srp_prio = psnedf_get_srp_prio; +#endif + + return 0; +} + static long psnedf_admit_task(struct task_struct* tsk) { - return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL; + if (task_cpu(tsk) == tsk->rt_param.task_params.cpu +#ifdef CONFIG_RELEASE_MASTER + /* don't allow tasks on release master CPU */ + && task_cpu(tsk) != remote_edf(task_cpu(tsk))->release_master +#endif + ) + return 0; + else + return -EINVAL; } /* Plugin object */ @@ -593,9 +613,9 @@ static struct sched_plugin psn_edf_plugin __cacheline_aligned_in_smp = { .task_wake_up = psnedf_task_wake_up, .task_block = psnedf_task_block, .admit_task = psnedf_admit_task, + .activate_plugin = psnedf_activate_plugin, #ifdef CONFIG_LITMUS_LOCKING .allocate_lock = psnedf_allocate_lock, - .activate_plugin = psnedf_activate_plugin, #endif }; -- cgit v1.2.2 From b751e4e17e667f11404fc2f290416c0df050e964 Mon Sep 17 00:00:00 2001 From: "Bjoern B. Brandenburg" Date: Thu, 10 Feb 2011 18:41:38 -0500 Subject: C-EDF: add release master support As with GSN-EDF, do not insert release master into CPU heap. 
--- litmus/sched_cedf.c | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) (limited to 'litmus') diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index e29a9fe2a8e8..0707059597d6 100644 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c @@ -411,6 +411,14 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) int out_of_time, sleep, preempt, np, exists, blocks; struct task_struct* next = NULL; +#ifdef CONFIG_RELEASE_MASTER + /* Bail out early if we are the release master. + * The release master never schedules any real-time tasks. + */ + if (cluster->domain.release_master == entry->cpu) + return NULL; +#endif + raw_spin_lock(&cluster->lock); clear_will_schedule(); @@ -546,8 +554,18 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running) entry = &per_cpu(cedf_cpu_entries, task_cpu(t)); BUG_ON(entry->scheduled); - entry->scheduled = t; - tsk_rt(t)->scheduled_on = task_cpu(t); +#ifdef CONFIG_RELEASE_MASTER + if (entry->cpu != cluster->domain.release_master) { +#endif + entry->scheduled = t; + tsk_rt(t)->scheduled_on = task_cpu(t); +#ifdef CONFIG_RELEASE_MASTER + } else { + /* do not schedule on release master */ + preempt(entry); /* force resched */ + tsk_rt(t)->scheduled_on = NO_CPU; + } +#endif } else { t->rt_param.scheduled_on = NO_CPU; } @@ -731,6 +749,9 @@ static long cedf_activate_plugin(void) if(!zalloc_cpumask_var(&cedf[i].cpu_map, GFP_ATOMIC)) return -ENOMEM; +#ifdef CONFIG_RELEASE_MASTER + cedf[i].domain.release_master = atomic_read(&release_master_cpu); +#endif } /* cycle through cluster and add cpus to them */ @@ -773,7 +794,11 @@ static long cedf_activate_plugin(void) entry->linked = NULL; entry->scheduled = NULL; - update_cpu_position(entry); +#ifdef CONFIG_RELEASE_MASTER + /* only add CPUs that should schedule jobs */ + if (entry->cpu != entry->cluster->domain.release_master) +#endif + update_cpu_position(entry); } /* done with this cluster */ break; -- cgit v1.2.2 From b4c52e27caa701a16e120b43a0e70ca6529a58a4 Mon Sep 17 00:00:00 2001 From: Glenn Elliott Date: Wed, 22 Jun 2011 01:30:25 -0400 Subject: C-EDF: Make migration affinity work with Release Master Needed to update C-EDF to handle release master. Also updated get_nearest_available_cpu() to take NO_CPU instead of -1 to indicate that there is no release master. While NO_CPU is 0xffffffff (-1 in two's complement), we still translate this value to -1 in case NO_CPU changes. Signed-off-by: Andrea Bastoni --- litmus/affinity.c | 6 ++---- litmus/sched_cedf.c | 16 +++++++++++----- litmus/sched_gsn_edf.c | 10 +++++----- 3 files changed, 18 insertions(+), 14 deletions(-) (limited to 'litmus') diff --git a/litmus/affinity.c b/litmus/affinity.c index 9adab7a3bcd7..3fa6dd789400 100644 --- a/litmus/affinity.c +++ b/litmus/affinity.c @@ -16,8 +16,6 @@ void init_topology(void) { for_each_online_cpu(cpu) { for (i = 0; i < depth; ++i) { - long unsigned int firstbits; - chk = get_shared_cpu_map((struct cpumask *)&neigh_info[cpu].neighbors[i], cpu, i); if (chk) { /* failed */ @@ -27,9 +25,9 @@ void init_topology(void) { neigh_info[cpu].size[i] = cpumask_weight((struct cpumask *)&neigh_info[cpu].neighbors[i]); } - firstbits = *neigh_info[cpu].neighbors[i]->bits; printk("CPU %d has %d neighbors at level %d. 
(mask = %lx)\n", - cpu, neigh_info[cpu].size[i], i, firstbits); + cpu, neigh_info[cpu].size[i], i, + *cpumask_bits(neigh_info[cpu].neighbors[i])); } /* set data for non-existent levels */ diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index 0707059597d6..690b94dbd686 100644 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c @@ -263,11 +263,17 @@ static noinline void requeue(struct task_struct* task) #ifdef CONFIG_SCHED_CPU_AFFINITY static cpu_entry_t* cedf_get_nearest_available_cpu( - cedf_domain_t *cluster, cpu_entry_t* start) + cedf_domain_t *cluster, cpu_entry_t *start) { - cpu_entry_t* affinity; + cpu_entry_t *affinity; - get_nearest_available_cpu(affinity, start, cedf_cpu_entries, -1); + get_nearest_available_cpu(affinity, start, cedf_cpu_entries, +#ifdef CONFIG_RELEASE_MASTER + cluster->domain.release_master +#else + NO_CPU +#endif + ); /* make sure CPU is in our cluster */ if (affinity && cpu_isset(affinity->cpu, *cluster->cpu_map)) @@ -282,7 +288,7 @@ static cpu_entry_t* cedf_get_nearest_available_cpu( static void check_for_preemptions(cedf_domain_t *cluster) { struct task_struct *task; - cpu_entry_t* last; + cpu_entry_t *last; for(last = lowest_prio_cpu(cluster); edf_preemption_needed(&cluster->domain, last->linked); @@ -293,7 +299,7 @@ static void check_for_preemptions(cedf_domain_t *cluster) task->pid, last->cpu); #ifdef CONFIG_SCHED_CPU_AFFINITY { - cpu_entry_t* affinity = + cpu_entry_t *affinity = cedf_get_nearest_available_cpu(cluster, &per_cpu(cedf_cpu_entries, task_cpu(task))); if(affinity) diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index 17926e9fccdc..467f8b284de4 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c @@ -258,15 +258,15 @@ static noinline void requeue(struct task_struct* task) } #ifdef CONFIG_SCHED_CPU_AFFINITY -static cpu_entry_t* gsnedf_get_nearest_available_cpu(cpu_entry_t* start) +static cpu_entry_t* gsnedf_get_nearest_available_cpu(cpu_entry_t *start) { - cpu_entry_t* affinity; + cpu_entry_t *affinity; get_nearest_available_cpu(affinity, start, gsnedf_cpu_entries, #ifdef CONFIG_RELEASE_MASTER gsnedf.release_master #else - -1 + NO_CPU #endif ); @@ -278,7 +278,7 @@ static cpu_entry_t* gsnedf_get_nearest_available_cpu(cpu_entry_t* start) static void check_for_preemptions(void) { struct task_struct *task; - cpu_entry_t* last; + cpu_entry_t *last; for (last = lowest_prio_cpu(); edf_preemption_needed(&gsnedf, last->linked); @@ -290,7 +290,7 @@ static void check_for_preemptions(void) #ifdef CONFIG_SCHED_CPU_AFFINITY { - cpu_entry_t* affinity = + cpu_entry_t *affinity = gsnedf_get_nearest_available_cpu( &per_cpu(gsnedf_cpu_entries, task_cpu(task))); if (affinity) -- cgit v1.2.2 From 399455c0e529bb07760f17e8fe0fddc342b67bc2 Mon Sep 17 00:00:00 2001 From: "Bjoern B. Brandenburg" Date: Sat, 5 Feb 2011 22:49:52 -0500 Subject: Pfair: add release master support. Merged in release master support for Pfair. Some merge conflicts had to be resolved. 
--- litmus/sched_pfair.c | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) (limited to 'litmus') diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c index 0a64273daa47..43119e5149fa 100644 --- a/litmus/sched_pfair.c +++ b/litmus/sched_pfair.c @@ -445,6 +445,11 @@ static void schedule_subtasks(struct pfair_cluster *cluster, quanta_t time) list_for_each(pos, &cluster->topology.cpus) { cpu_state = from_cluster_list(pos); retry = 1; +#ifdef CONFIG_RELEASE_MASTER + /* skip release master */ + if (cluster->pfair.release_master == cpu_id(cpu_state)) + continue; +#endif while (retry) { if (pfair_higher_prio(__peek_ready(&cluster->pfair), cpu_state->linked)) @@ -615,6 +620,14 @@ static struct task_struct* pfair_schedule(struct task_struct * prev) int blocks; struct task_struct* next = NULL; +#ifdef CONFIG_RELEASE_MASTER + /* Bail out early if we are the release master. + * The release master never schedules any real-time tasks. + */ + if (cpu_cluster(state)->pfair.release_master == cpu_id(state)) + return NULL; +#endif + raw_spin_lock(cpu_lock(state)); blocks = is_realtime(prev) && !is_running(prev); @@ -649,10 +662,16 @@ static void pfair_task_new(struct task_struct * t, int on_rq, int running) cluster = tsk_pfair(t)->cluster; raw_spin_lock_irqsave(cluster_lock(cluster), flags); - if (running) + + if (running +#ifdef CONFIG_RELEASE_MASTER + && (task_cpu(t) != cluster->pfair.release_master) +#endif + ) { t->rt_param.scheduled_on = task_cpu(t); - else + } else { t->rt_param.scheduled_on = NO_CPU; + } prepare_release(t, cluster->pfair_time + 1); pfair_add_release(cluster, t); @@ -936,6 +955,9 @@ static long pfair_activate_plugin(void) pfair_init_cluster(cluster); cluster->pfair_time = now; clust[i] = &cluster->topology; +#ifdef CONFIG_RELEASE_MASTER + cluster->pfair.release_master = atomic_read(&release_master_cpu); +#endif } for (i = 0; i < num_online_cpus(); i++) { -- cgit v1.2.2 From 0720416e5b1bcb825619ba4b212d9056017ffd62 Mon Sep 17 00:00:00 2001 From: "Bjoern B. Brandenburg" Date: Sat, 5 Feb 2011 21:50:36 -0500 Subject: Pfair: add support for true sporadic releases This patch also converts Pfair to implement early releasing such that no timer wheel is required anymore. This removes the need for a maximum period restriction. --- litmus/sched_pfair.c | 137 ++++++++++++++++++--------------------------------- 1 file changed, 49 insertions(+), 88 deletions(-) (limited to 'litmus') diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c index 43119e5149fa..055ac623edb4 100644 --- a/litmus/sched_pfair.c +++ b/litmus/sched_pfair.c @@ -1,7 +1,8 @@ /* * kernel/sched_pfair.c * - * Implementation of the (global) Pfair scheduling algorithm. + * Implementation of the PD^2 pfair scheduling algorithm. This + * implementation realizes "early releasing," i.e., it is work-conserving. * */ @@ -80,30 +81,20 @@ struct pfair_state { lt_t offset; /* stagger offset */ }; -/* Currently, we limit the maximum period of any task to 2000 quanta. - * The reason is that it makes the implementation easier since we do not - * need to reallocate the release wheel on task arrivals. - * In the future - */ -#define PFAIR_MAX_PERIOD 2000 - struct pfair_cluster { struct scheduling_cluster topology; /* The "global" time in this cluster. */ quanta_t pfair_time; /* the "official" PFAIR clock */ - quanta_t merge_time; /* Updated after the release queue has been - * merged. Used by drop_all_references(). - */ /* The ready queue for this cluster. 
*/ rt_domain_t pfair; - /* This is the release queue wheel for this cluster. It is indexed by - * pfair_time % PFAIR_MAX_PERIOD. Each heap is ordered by PFAIR - * priority, so that it can be merged with the ready queue. + /* The set of jobs that should have their release enacted at the next + * quantum boundary. */ - struct bheap release_queue[PFAIR_MAX_PERIOD]; + struct bheap release_queue; + raw_spinlock_t release_lock; }; static inline struct pfair_cluster* cpu_cluster(struct pfair_state* state) @@ -121,6 +112,11 @@ static inline struct pfair_state* from_cluster_list(struct list_head* pos) return list_entry(pos, struct pfair_state, topology.cluster_list); } +static inline struct pfair_cluster* from_domain(rt_domain_t* rt) +{ + return container_of(rt, struct pfair_cluster, pfair); +} + static inline raw_spinlock_t* cluster_lock(struct pfair_cluster* cluster) { /* The ready_lock is used to serialize all scheduling events. */ @@ -161,21 +157,11 @@ static quanta_t cur_deadline(struct task_struct* t) return cur_subtask(t)->deadline + tsk_pfair(t)->release; } - -static quanta_t cur_sub_release(struct task_struct* t) -{ - return cur_subtask(t)->release + tsk_pfair(t)->release; -} - static quanta_t cur_release(struct task_struct* t) { -#ifdef EARLY_RELEASE - /* only the release of the first subtask counts when we early - * release */ + /* This is early releasing: only the release of the first subtask + * counts. */ return tsk_pfair(t)->release; -#else - return cur_sub_release(t); -#endif } static quanta_t cur_overlap(struct task_struct* t) @@ -235,11 +221,16 @@ int pfair_ready_order(struct bheap_node* a, struct bheap_node* b) return pfair_higher_prio(bheap2task(a), bheap2task(b)); } -/* return the proper release queue for time t */ -static struct bheap* relq(struct pfair_cluster* cluster, quanta_t t) +static void pfair_release_jobs(rt_domain_t* rt, struct bheap* tasks) { - struct bheap* rq = cluster->release_queue + (t % PFAIR_MAX_PERIOD); - return rq; + struct pfair_cluster* cluster = from_domain(rt); + unsigned long flags; + + raw_spin_lock_irqsave(&cluster->release_lock, flags); + + bheap_union(pfair_ready_order, &cluster->release_queue, tasks); + + raw_spin_unlock_irqrestore(&cluster->release_lock, flags); } static void prepare_release(struct task_struct* t, quanta_t at) @@ -248,25 +239,12 @@ static void prepare_release(struct task_struct* t, quanta_t at) tsk_pfair(t)->cur = 0; } -static void __pfair_add_release(struct task_struct* t, struct bheap* queue) -{ - bheap_insert(pfair_ready_order, queue, - tsk_rt(t)->heap_node); -} - -static void pfair_add_release(struct pfair_cluster* cluster, - struct task_struct* t) -{ - BUG_ON(bheap_node_in_heap(tsk_rt(t)->heap_node)); - __pfair_add_release(t, relq(cluster, cur_release(t))); -} - /* pull released tasks from the release queue */ -static void poll_releases(struct pfair_cluster* cluster, - quanta_t time) +static void poll_releases(struct pfair_cluster* cluster) { - __merge_ready(&cluster->pfair, relq(cluster, time)); - cluster->merge_time = time; + raw_spin_lock(&cluster->release_lock); + __merge_ready(&cluster->pfair, &cluster->release_queue); + raw_spin_unlock(&cluster->release_lock); } static void check_preempt(struct task_struct* t) @@ -292,16 +270,12 @@ static void drop_all_references(struct task_struct *t) { int cpu; struct pfair_state* s; - struct bheap* q; struct pfair_cluster* cluster; if (bheap_node_in_heap(tsk_rt(t)->heap_node)) { - /* figure out what queue the node is in */ + /* It must be in the ready queue; drop references isn't 
called + * when the job is in a release queue. */ cluster = tsk_pfair(t)->cluster; - if (time_before_eq(cur_release(t), cluster->merge_time)) - q = &cluster->pfair.ready_queue; - else - q = relq(cluster, cur_release(t)); - bheap_delete(pfair_ready_order, q, + bheap_delete(pfair_ready_order, &cluster->pfair.ready_queue, tsk_rt(t)->heap_node); } for (cpu = 0; cpu < num_online_cpus(); cpu++) { @@ -322,11 +296,9 @@ static int advance_subtask(quanta_t time, struct task_struct* t, int cpu) int to_relq; p->cur = (p->cur + 1) % p->quanta; if (!p->cur) { - sched_trace_task_completion(t, 1); if (tsk_rt(t)->present) { /* we start a new job */ prepare_for_next_period(t); - sched_trace_task_release(t); get_rt_flags(t) = RT_F_RUNNING; p->release += p->period; } else { @@ -361,7 +333,8 @@ static void advance_subtasks(struct pfair_cluster *cluster, quanta_t time) p->last_cpu = cpu_id(cpu); if (advance_subtask(time, l, cpu_id(cpu))) { cpu->linked = NULL; - pfair_add_release(cluster, l); + sched_trace_task_release(l); + add_release(&cluster->pfair, l); } } } @@ -476,7 +449,7 @@ static void schedule_next_quantum(struct pfair_cluster *cluster, quanta_t time) sched_trace_quantum_boundary(); advance_subtasks(cluster, time); - poll_releases(cluster, time); + poll_releases(cluster); schedule_subtasks(cluster, time); list_for_each(pos, &cluster->topology.cpus) { @@ -630,6 +603,9 @@ static struct task_struct* pfair_schedule(struct task_struct * prev) raw_spin_lock(cpu_lock(state)); + if (is_realtime(prev) && get_rt_flags(prev) == RT_F_SLEEP) + sched_trace_task_completion(prev, 0); + blocks = is_realtime(prev) && !is_running(prev); if (state->local && safe_to_schedule(state->local, cpu_id(state))) @@ -663,18 +639,18 @@ static void pfair_task_new(struct task_struct * t, int on_rq, int running) raw_spin_lock_irqsave(cluster_lock(cluster), flags); - if (running + prepare_release(t, cluster->pfair_time + 1); + + t->rt_param.scheduled_on = NO_CPU; + + if (running) { #ifdef CONFIG_RELEASE_MASTER - && (task_cpu(t) != cluster->pfair.release_master) + if (task_cpu(t) != cluster->pfair.release_master) #endif - ) { - t->rt_param.scheduled_on = task_cpu(t); - } else { - t->rt_param.scheduled_on = NO_CPU; + t->rt_param.scheduled_on = task_cpu(t); + __add_ready(&cluster->pfair, t); } - prepare_release(t, cluster->pfair_time + 1); - pfair_add_release(cluster, t); check_preempt(t); raw_spin_unlock_irqrestore(cluster_lock(cluster), flags); @@ -703,8 +679,7 @@ static void pfair_task_wake_up(struct task_struct *t) release_at(t, now); prepare_release(t, time2quanta(now, CEIL)); sched_trace_task_release(t); - /* FIXME: race with pfair_time advancing */ - pfair_add_release(cluster, t); + __add_ready(&cluster->pfair, t); } check_preempt(t); @@ -763,15 +738,11 @@ static void pfair_release_at(struct task_struct* task, lt_t start) release_at(task, start); release = time2quanta(start, CEIL); - /* FIXME: support arbitrary offsets. 
*/ - if (release - cluster->pfair_time >= PFAIR_MAX_PERIOD) - release = cluster->pfair_time + PFAIR_MAX_PERIOD; - TRACE_TASK(task, "sys release at %lu\n", release); drop_all_references(task); prepare_release(task, release); - pfair_add_release(cluster, task); + add_release(&cluster->pfair, task); raw_spin_unlock_irqrestore(cluster_lock(cluster), flags); } @@ -853,13 +824,6 @@ static long pfair_admit_task(struct task_struct* t) "The period of %s/%d is not a multiple of %llu.\n", t->comm, t->pid, (unsigned long long) quantum_length); - if (period >= PFAIR_MAX_PERIOD) { - printk(KERN_WARNING - "PFAIR: Rejecting task %s/%d; its period is too long.\n", - t->comm, t->pid); - return -EINVAL; - } - if (quanta == period) { /* special case: task has weight 1.0 */ printk(KERN_INFO @@ -899,12 +863,9 @@ static long pfair_admit_task(struct task_struct* t) static void pfair_init_cluster(struct pfair_cluster* cluster) { - int i; - - /* initialize release queue */ - for (i = 0; i < PFAIR_MAX_PERIOD; i++) - bheap_init(&cluster->release_queue[i]); - rt_domain_init(&cluster->pfair, pfair_ready_order, NULL, NULL); + rt_domain_init(&cluster->pfair, pfair_ready_order, NULL, pfair_release_jobs); + bheap_init(&cluster->release_queue); + raw_spin_lock_init(&cluster->release_lock); INIT_LIST_HEAD(&cluster->topology.cpus); } @@ -918,7 +879,7 @@ static void cleanup_clusters(void) num_pfair_clusters = 0; /* avoid stale pointers */ - for (i = 0; i < NR_CPUS; i++) + for (i = 0; i < num_online_cpus(); i++) pstate[i]->topology.cluster = NULL; } -- cgit v1.2.2 From ec77ede8baa013138fe03ff45dd57f7bac50e5d4 Mon Sep 17 00:00:00 2001 From: "Bjoern B. Brandenburg" Date: Tue, 8 Feb 2011 12:41:10 -0500 Subject: Pfair: various fixes concerning release timers --- litmus/sched_pfair.c | 71 ++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 52 insertions(+), 19 deletions(-) (limited to 'litmus') diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c index 055ac623edb4..e3db82a2bdf8 100644 --- a/litmus/sched_pfair.c +++ b/litmus/sched_pfair.c @@ -77,8 +77,9 @@ struct pfair_state { struct task_struct* local; /* the local copy of linked */ struct task_struct* scheduled; /* what is actually scheduled */ - unsigned long missed_quanta; lt_t offset; /* stagger offset */ + unsigned int missed_updates; + unsigned int missed_quanta; }; struct pfair_cluster { @@ -289,6 +290,15 @@ static void drop_all_references(struct task_struct *t) } } +static void pfair_prepare_next_period(struct task_struct* t) +{ + struct pfair_param* p = tsk_pfair(t); + + prepare_for_next_period(t); + get_rt_flags(t) = RT_F_RUNNING; + p->release += p->period; +} + /* returns 1 if the task needs to go the release queue */ static int advance_subtask(quanta_t time, struct task_struct* t, int cpu) { @@ -297,10 +307,8 @@ static int advance_subtask(quanta_t time, struct task_struct* t, int cpu) p->cur = (p->cur + 1) % p->quanta; if (!p->cur) { if (tsk_rt(t)->present) { - /* we start a new job */ - prepare_for_next_period(t); - get_rt_flags(t) = RT_F_RUNNING; - p->release += p->period; + /* The job overran; we start a new budget allocation. 
*/ + pfair_prepare_next_period(t); } else { /* remove task from system until it wakes */ drop_all_references(t); @@ -310,14 +318,13 @@ static int advance_subtask(quanta_t time, struct task_struct* t, int cpu) } } to_relq = time_after(cur_release(t), time); - TRACE_TASK(t, "on %d advanced to subtask %lu -> to_relq=%d\n", - cpu, p->cur, to_relq); + TRACE_TASK(t, "on %d advanced to subtask %lu -> to_relq=%d (cur_release:%lu time:%lu)\n", + cpu, p->cur, to_relq, cur_release(t), time); return to_relq; } static void advance_subtasks(struct pfair_cluster *cluster, quanta_t time) { - int missed; struct task_struct* l; struct pfair_param* p; struct list_head* pos; @@ -326,15 +333,17 @@ static void advance_subtasks(struct pfair_cluster *cluster, quanta_t time) list_for_each(pos, &cluster->topology.cpus) { cpu = from_cluster_list(pos); l = cpu->linked; - missed = cpu->linked != cpu->local; + cpu->missed_updates += cpu->linked != cpu->local; if (l) { p = tsk_pfair(l); p->last_quantum = time; p->last_cpu = cpu_id(cpu); if (advance_subtask(time, l, cpu_id(cpu))) { - cpu->linked = NULL; - sched_trace_task_release(l); - add_release(&cluster->pfair, l); + //cpu->linked = NULL; + PTRACE_TASK(l, "should go to release queue. " + "scheduled_on=%d present=%d\n", + tsk_rt(l)->scheduled_on, + tsk_rt(l)->present); } } } @@ -455,7 +464,7 @@ static void schedule_next_quantum(struct pfair_cluster *cluster, quanta_t time) list_for_each(pos, &cluster->topology.cpus) { cpu = from_cluster_list(pos); if (cpu->linked) - PTRACE_TASK(pstate[cpu]->linked, + PTRACE_TASK(cpu->linked, " linked on %d.\n", cpu_id(cpu)); else PTRACE("(null) linked on %d.\n", cpu_id(cpu)); @@ -590,23 +599,40 @@ static int safe_to_schedule(struct task_struct* t, int cpu) static struct task_struct* pfair_schedule(struct task_struct * prev) { struct pfair_state* state = &__get_cpu_var(pfair_state); - int blocks; + struct pfair_cluster* cluster = cpu_cluster(state); + int blocks, completion, out_of_time; struct task_struct* next = NULL; #ifdef CONFIG_RELEASE_MASTER /* Bail out early if we are the release master. * The release master never schedules any real-time tasks. 
*/ - if (cpu_cluster(state)->pfair.release_master == cpu_id(state)) + if (unlikely(cluster->pfair.release_master == cpu_id(state))) return NULL; #endif raw_spin_lock(cpu_lock(state)); - if (is_realtime(prev) && get_rt_flags(prev) == RT_F_SLEEP) + blocks = is_realtime(prev) && !is_running(prev); + completion = is_realtime(prev) && get_rt_flags(prev) == RT_F_SLEEP; + out_of_time = is_realtime(prev) && time_after(cur_release(prev), + state->local_tick); + + if (is_realtime(prev)) + PTRACE_TASK(prev, "blocks:%d completion:%d out_of_time:%d\n", + blocks, completion, out_of_time); + + if (completion) { sched_trace_task_completion(prev, 0); + pfair_prepare_next_period(prev); + prepare_release(prev, cur_release(prev)); + } - blocks = is_realtime(prev) && !is_running(prev); + if (!blocks && (completion || out_of_time)) { + drop_all_references(prev); + sched_trace_task_release(prev); + add_release(&cluster->pfair, prev); + } if (state->local && safe_to_schedule(state->local, cpu_id(state))) next = state->local; @@ -679,9 +705,12 @@ static void pfair_task_wake_up(struct task_struct *t) release_at(t, now); prepare_release(t, time2quanta(now, CEIL)); sched_trace_task_release(t); - __add_ready(&cluster->pfair, t); } + /* only add to ready queue if the task isn't still linked somewhere */ + if (tsk_rt(t)->linked_on == NO_CPU) + __add_ready(&cluster->pfair, t); + check_preempt(t); raw_spin_unlock_irqrestore(cluster_lock(cluster), flags); @@ -879,8 +908,11 @@ static void cleanup_clusters(void) num_pfair_clusters = 0; /* avoid stale pointers */ - for (i = 0; i < num_online_cpus(); i++) + for (i = 0; i < num_online_cpus(); i++) { pstate[i]->topology.cluster = NULL; + printk("P%d missed %u updates and %u quanta.\n", cpu_id(pstate[i]), + pstate[i]->missed_updates, pstate[i]->missed_quanta); + } } static long pfair_activate_plugin(void) @@ -926,6 +958,7 @@ static long pfair_activate_plugin(void) state->cur_tick = now; state->local_tick = now; state->missed_quanta = 0; + state->missed_updates = 0; state->offset = cpu_stagger_offset(i); printk(KERN_ERR "cpus[%d] set; %d\n", i, num_online_cpus()); cpus[i] = &state->topology; -- cgit v1.2.2 From 89174d049ea77b127fb3f8b3bbd8bc2996d0a535 Mon Sep 17 00:00:00 2001 From: "Bjoern B. Brandenburg" Date: Sat, 12 Feb 2011 16:40:43 -0500 Subject: bugfix: release master CPU must signal task was picked --- litmus/sched_cedf.c | 4 +++- litmus/sched_pfair.c | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'litmus') diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index 690b94dbd686..4bf61f7dbf3f 100644 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c @@ -421,8 +421,10 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) /* Bail out early if we are the release master. * The release master never schedules any real-time tasks. */ - if (cluster->domain.release_master == entry->cpu) + if (cluster->domain.release_master == entry->cpu) { + sched_state_task_picked(); return NULL; + } #endif raw_spin_lock(&cluster->lock); diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c index e3db82a2bdf8..c95bde87b5d7 100644 --- a/litmus/sched_pfair.c +++ b/litmus/sched_pfair.c @@ -607,8 +607,10 @@ static struct task_struct* pfair_schedule(struct task_struct * prev) /* Bail out early if we are the release master. * The release master never schedules any real-time tasks. 
*/ - if (unlikely(cluster->pfair.release_master == cpu_id(state))) + if (unlikely(cluster->pfair.release_master == cpu_id(state))) { + sched_state_task_picked(); return NULL; + } #endif raw_spin_lock(cpu_lock(state)); -- cgit v1.2.2 From a7a7f71529d9a6aae02ab3cb64451e036ce9d028 Mon Sep 17 00:00:00 2001 From: Glenn Elliott Date: Wed, 2 Nov 2011 11:33:44 -0400 Subject: Add unlikely() to rel master check (match pfair). --- litmus/sched_cedf.c | 2 +- litmus/sched_gsn_edf.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'litmus') diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index 4bf61f7dbf3f..8b3f8a7e2609 100644 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c @@ -421,7 +421,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) /* Bail out early if we are the release master. * The release master never schedules any real-time tasks. */ - if (cluster->domain.release_master == entry->cpu) { + if (unlikely(cluster->domain.release_master == entry->cpu)) { sched_state_task_picked(); return NULL; } diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index 467f8b284de4..7aad7f0ad8f2 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c @@ -409,7 +409,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) /* Bail out early if we are the release master. * The release master never schedules any real-time tasks. */ - if (gsnedf.release_master == entry->cpu) { + if (unlikely(gsnedf.release_master == entry->cpu)) { sched_state_task_picked(); return NULL; } -- cgit v1.2.2 From d1d6e4c300d858c47b834be145f30973bc2921bf Mon Sep 17 00:00:00 2001 From: "Bjoern B. Brandenburg" Date: Thu, 24 Nov 2011 13:42:59 -0500 Subject: Add option to turn off preemption state tracing Preemption state tracing is only useful when debugging preemption- and IPI-related races. Since it creates a lot of clutter in the logs, this patch turns it off unless explicitly requested. --- litmus/Kconfig | 14 ++++++++++++++ litmus/preempt.c | 2 ++ 2 files changed, 16 insertions(+) (limited to 'litmus') diff --git a/litmus/Kconfig b/litmus/Kconfig index d7fde6f97e14..94b48e199577 100644 --- a/litmus/Kconfig +++ b/litmus/Kconfig @@ -199,6 +199,20 @@ config SCHED_DEBUG_TRACE_CALLER If unsure, say No. +config PREEMPT_STATE_TRACE + bool "Trace preemption state machine transitions" + depends on SCHED_DEBUG_TRACE + default n + help + With this option enabled, each CPU will log when it transitions + states in the preemption state machine. This state machine is + used to determine how to react to IPIs (avoid races with in-flight IPIs). + + Warning: this creates a lot of information in the debug trace. Only + recommended when you are debugging preemption-related races. + + If unsure, say No. + endmenu endmenu diff --git a/litmus/preempt.c b/litmus/preempt.c index ebe2e3461895..5704d0bf4c0b 100644 --- a/litmus/preempt.c +++ b/litmus/preempt.c @@ -30,8 +30,10 @@ void sched_state_will_schedule(struct task_struct* tsk) /* Litmus tasks should never be subject to a remote * set_tsk_need_resched(). */ BUG_ON(is_realtime(tsk)); +#ifdef CONFIG_PREEMPT_STATE_TRACE TRACE_TASK(tsk, "set_tsk_need_resched() ret:%p\n", __builtin_return_address(0)); +#endif } /* Called by the IPI handler after another CPU called smp_send_resched(). */ -- cgit v1.2.2 From 2fec12d43b366b7257c602af784b172466d8d4c5 Mon Sep 17 00:00:00 2001 From: "Bjoern B. 
Brandenburg" Date: Thu, 24 Nov 2011 13:59:33 -0500 Subject: Pfair: improve robustness of suspensions This patch fixes two crash or hang bugs related to suspensions in Pfair. 1) When a job was not present at the end of its last subtask, then its linked_on field was not cleared. This confused the scheduler when it later resumed. Fix: clear the field. 2) Just testing for linked_on == NO_CPU is insufficient in the wake_up path to determine whether a task should be added to the ready queue. If the task remained linked and then was "preempted" at a later quantum boundary, then it already is in the ready queue and nothing is required. Fix: encode need to requeue in task_rt(t)->flags. --- litmus/sched_pfair.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'litmus') diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c index c95bde87b5d7..16f1065bbdca 100644 --- a/litmus/sched_pfair.c +++ b/litmus/sched_pfair.c @@ -98,6 +98,8 @@ struct pfair_cluster { raw_spinlock_t release_lock; }; +#define RT_F_REQUEUE 0x2 + static inline struct pfair_cluster* cpu_cluster(struct pfair_state* state) { return container_of(state->topology.cluster, struct pfair_cluster, topology); @@ -288,6 +290,8 @@ static void drop_all_references(struct task_struct *t) if (s->scheduled == t) s->scheduled = NULL; } + /* make sure we don't have a stale linked_on field */ + tsk_rt(t)->linked_on = NO_CPU; } static void pfair_prepare_next_period(struct task_struct* t) @@ -312,6 +316,7 @@ static int advance_subtask(quanta_t time, struct task_struct* t, int cpu) } else { /* remove task from system until it wakes */ drop_all_references(t); + tsk_rt(t)->flags = RT_F_REQUEUE; TRACE_TASK(t, "on %d advanced to subtask %lu (not present)\n", cpu, p->cur); return 0; @@ -688,6 +693,7 @@ static void pfair_task_wake_up(struct task_struct *t) { unsigned long flags; lt_t now; + int requeue = 0; struct pfair_cluster* cluster; cluster = tsk_pfair(t)->cluster; @@ -702,16 +708,21 @@ static void pfair_task_wake_up(struct task_struct *t) * (as if it never blocked at all). Otherwise, we have a * new sporadic job release. */ + requeue = tsk_rt(t)->flags == RT_F_REQUEUE; now = litmus_clock(); if (lt_before(get_deadline(t), now)) { + TRACE_TASK(t, "sporadic release!\n"); release_at(t, now); prepare_release(t, time2quanta(now, CEIL)); sched_trace_task_release(t); } /* only add to ready queue if the task isn't still linked somewhere */ - if (tsk_rt(t)->linked_on == NO_CPU) + if (requeue) { + TRACE_TASK(t, "requeueing required\n"); + tsk_rt(t)->flags = RT_F_RUNNING; __add_ready(&cluster->pfair, t); + } check_preempt(t); -- cgit v1.2.2 From fd6d753fc4e01f91427176ebfcced2c3d3f36c32 Mon Sep 17 00:00:00 2001 From: "Bjoern B. Brandenburg" Date: Tue, 8 Feb 2011 17:33:44 -0500 Subject: bugfix: add processors in order of increasing indices to clusters Pfair expects to look at processors in order of increasing index. Without this patch, Pfair could deadlock in certain situations. 
--- litmus/clustered.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'litmus') diff --git a/litmus/clustered.c b/litmus/clustered.c index 04450a8ad4fe..6fe1b512f628 100644 --- a/litmus/clustered.c +++ b/litmus/clustered.c @@ -102,7 +102,7 @@ int assign_cpus_to_clusters(enum cache_level level, cpus[i]->cluster = cpus[low_cpu]->cluster; } /* enqueue in cpus list */ - list_add(&cpus[i]->cluster_list, &cpus[i]->cluster->cpus); + list_add_tail(&cpus[i]->cluster_list, &cpus[i]->cluster->cpus); printk(KERN_INFO "Assigning CPU%u to cluster %u\n.", i, cpus[i]->cluster->id); } out: -- cgit v1.2.2 From b739b4033c0f55f9194be2793db9e6ace06047db Mon Sep 17 00:00:00 2001 From: "Bjoern B. Brandenburg" Date: Sat, 5 Feb 2011 20:11:30 -0500 Subject: Feather-Trace: start with the largest permissible range MAX_ORDER is 11, but this is about number of records, not number of pages. --- litmus/trace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'litmus') diff --git a/litmus/trace.c b/litmus/trace.c index e7ea1c2ab3e4..1e916aa406b9 100644 --- a/litmus/trace.c +++ b/litmus/trace.c @@ -69,7 +69,7 @@ feather_callback void save_timestamp_cpu(unsigned long event, * should be 8M; it is the max we can ask to buddy system allocator (MAX_ORDER) * and we might not get as much */ -#define NO_TIMESTAMPS (2 << 11) +#define NO_TIMESTAMPS (2 << 16) static int alloc_timestamp_buffer(struct ftdev* ftdev, unsigned int idx) { -- cgit v1.2.2 From 4490f9ecf94e28458069a02e8cfcf4f385390499 Mon Sep 17 00:00:00 2001 From: "Bjoern B. Brandenburg" Date: Sat, 5 Feb 2011 22:57:57 -0500 Subject: Feather-Trace: trace locking-related suspensions --- litmus/sched_gsn_edf.c | 5 +++++ litmus/sched_psn_edf.c | 5 +++++ 2 files changed, 10 insertions(+) (limited to 'litmus') diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index 7aad7f0ad8f2..6ed504f4750e 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c @@ -18,6 +18,7 @@ #include #include #include +#include #include @@ -799,6 +800,8 @@ int gsnedf_fmlp_lock(struct litmus_lock* l) set_priority_inheritance(sem->owner, sem->hp_waiter); } + TS_LOCK_SUSPEND; + /* release lock before sleeping */ spin_unlock_irqrestore(&sem->wait.lock, flags); @@ -809,6 +812,8 @@ int gsnedf_fmlp_lock(struct litmus_lock* l) schedule(); + TS_LOCK_RESUME; + /* Since we hold the lock, no other task will change * ->owner. We can thus check it without acquiring the spin * lock. */ diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c index eb444ecf1288..8e4a22dd8d6a 100644 --- a/litmus/sched_psn_edf.c +++ b/litmus/sched_psn_edf.c @@ -20,6 +20,7 @@ #include #include #include +#include typedef struct { rt_domain_t domain; @@ -422,6 +423,8 @@ int psnedf_fmlp_lock(struct litmus_lock* l) __add_wait_queue_tail_exclusive(&sem->wait, &wait); + TS_LOCK_SUSPEND; + /* release lock before sleeping */ spin_unlock_irqrestore(&sem->wait.lock, flags); @@ -432,6 +435,8 @@ int psnedf_fmlp_lock(struct litmus_lock* l) schedule(); + TS_LOCK_RESUME; + /* Since we hold the lock, no other task will change * ->owner. We can thus check it without acquiring the spin * lock. */ -- cgit v1.2.2 From 1dead199b4ae68ab98eacec4a661fd5ecb5a2704 Mon Sep 17 00:00:00 2001 From: "Bjoern B. 
Brandenburg" Date: Sat, 5 Feb 2011 23:15:09 -0500 Subject: Feather-Trace: keep track of release latency --- litmus/rt_domain.c | 4 +++- litmus/trace.c | 20 ++++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) (limited to 'litmus') diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c index 81a5ac16f164..d405854cd39c 100644 --- a/litmus/rt_domain.c +++ b/litmus/rt_domain.c @@ -55,12 +55,14 @@ static enum hrtimer_restart on_release_timer(struct hrtimer *timer) { unsigned long flags; struct release_heap* rh; + rh = container_of(timer, struct release_heap, timer); + + TS_RELEASE_LATENCY(rh->release_time); VTRACE("on_release_timer(0x%p) starts.\n", timer); TS_RELEASE_START; - rh = container_of(timer, struct release_heap, timer); raw_spin_lock_irqsave(&rh->dom->release_lock, flags); VTRACE("CB has the release_lock 0x%p\n", &rh->dom->release_lock); diff --git a/litmus/trace.c b/litmus/trace.c index 1e916aa406b9..f0eb2c706488 100644 --- a/litmus/trace.c +++ b/litmus/trace.c @@ -61,6 +61,26 @@ feather_callback void save_timestamp_cpu(unsigned long event, __save_timestamp_cpu(event, TSK_UNKNOWN, cpu); } +feather_callback void save_task_latency(unsigned long event, + unsigned long when_ptr) +{ + lt_t now = litmus_clock(); + lt_t *when = (lt_t*) when_ptr; + unsigned int seq_no; + int cpu = raw_smp_processor_id(); + struct timestamp *ts; + + seq_no = fetch_and_inc((int *) &ts_seq_no); + if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { + ts->event = event; + ts->timestamp = now - *when; + ts->seq_no = seq_no; + ts->cpu = cpu; + ts->task_type = TSK_RT; + ft_buffer_finish_write(trace_ts_buf, ts); + } +} + /******************************************************************************/ /* DEVICE FILE DRIVER */ /******************************************************************************/ -- cgit v1.2.2 From 49e5b0c0d7c09bef5b9bfecaaac3f0ea2cf24e43 Mon Sep 17 00:00:00 2001 From: "Bjoern B. Brandenburg" Date: Sat, 23 Jul 2011 23:06:20 -0400 Subject: ftdev: remove event activation hack Instead of doing the hackisch 'write commands to device' thing, let's just use a real ioctl() interface. 
--- litmus/ftdev.c | 59 +++++++++++++++++++++------------------------------------- 1 file changed, 21 insertions(+), 38 deletions(-) (limited to 'litmus') diff --git a/litmus/ftdev.c b/litmus/ftdev.c index 4a4b2e3e56c2..216dc0b4cb94 100644 --- a/litmus/ftdev.c +++ b/litmus/ftdev.c @@ -250,64 +250,47 @@ out: return err; } -typedef uint32_t cmd_t; - -static ssize_t ftdev_write(struct file *filp, const char __user *from, - size_t len, loff_t *f_pos) +static long ftdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { + long err = -ENOIOCTLCMD; struct ftdev_minor* ftdm = filp->private_data; - ssize_t err = -EINVAL; - cmd_t cmd; - cmd_t id; - - if (len % sizeof(cmd) || len < 2 * sizeof(cmd)) - goto out; - - if (copy_from_user(&cmd, from, sizeof(cmd))) { - err = -EFAULT; - goto out; - } - len -= sizeof(cmd); - from += sizeof(cmd); - - if (cmd != FTDEV_ENABLE_CMD && cmd != FTDEV_DISABLE_CMD) - goto out; if (mutex_lock_interruptible(&ftdm->lock)) { err = -ERESTARTSYS; goto out; } - err = sizeof(cmd); - while (len) { - if (copy_from_user(&id, from, sizeof(cmd))) { - err = -EFAULT; - goto out_unlock; - } - /* FIXME: check id against list of acceptable events */ - len -= sizeof(cmd); - from += sizeof(cmd); - if (cmd == FTDEV_DISABLE_CMD) - deactivate(&ftdm->events, id); - else if (activate(&ftdm->events, id) != 0) { + /* FIXME: check id against list of acceptable events */ + + switch (cmd) { + case FTDEV_ENABLE_CMD: + if (activate(&ftdm->events, arg)) err = -ENOMEM; - goto out_unlock; - } - err += sizeof(cmd); - } + else + err = 0; + break; + + case FTDEV_DISABLE_CMD: + deactivate(&ftdm->events, arg); + err = 0; + break; + + default: + printk(KERN_DEBUG "ftdev: strange ioctl (%u, %lu)\n", cmd, arg); + }; -out_unlock: mutex_unlock(&ftdm->lock); out: return err; } + struct file_operations ftdev_fops = { .owner = THIS_MODULE, .open = ftdev_open, .release = ftdev_release, - .write = ftdev_write, .read = ftdev_read, + .unlocked_ioctl = ftdev_ioctl, }; int ftdev_init( struct ftdev* ftdev, struct module* owner, -- cgit v1.2.2 From 12982f31a233250c7a62b17fb4bd13594cb78777 Mon Sep 17 00:00:00 2001 From: "Bjoern B. Brandenburg" Date: Sat, 23 Jul 2011 23:38:57 -0400 Subject: ftdev: let bufffer-specific code handle writes from userspace This allows us to splice in information into logs from events that were recorded in userspace. 
--- litmus/ftdev.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'litmus') diff --git a/litmus/ftdev.c b/litmus/ftdev.c index 216dc0b4cb94..06fcf4cf77dc 100644 --- a/litmus/ftdev.c +++ b/litmus/ftdev.c @@ -114,6 +114,7 @@ static int ftdev_open(struct inode *in, struct file *filp) goto out; ftdm = ftdev->minor + buf_idx; + ftdm->ftdev = ftdev; filp->private_data = ftdm; if (mutex_lock_interruptible(&ftdm->lock)) { @@ -284,11 +285,25 @@ out: return err; } +static ssize_t ftdev_write(struct file *filp, const char __user *from, + size_t len, loff_t *f_pos) +{ + struct ftdev_minor* ftdm = filp->private_data; + ssize_t err = -EINVAL; + struct ftdev* ftdev = ftdm->ftdev; + + /* dispatch write to buffer-specific code, if available */ + if (ftdev->write) + err = ftdev->write(ftdm->buf, len, from); + + return err; +} struct file_operations ftdev_fops = { .owner = THIS_MODULE, .open = ftdev_open, .release = ftdev_release, + .write = ftdev_write, .read = ftdev_read, .unlocked_ioctl = ftdev_ioctl, }; @@ -308,6 +323,7 @@ int ftdev_init( struct ftdev* ftdev, struct module* owner, ftdev->alloc = NULL; ftdev->free = NULL; ftdev->can_open = NULL; + ftdev->write = NULL; ftdev->minor = kcalloc(ftdev->minor_cnt, sizeof(*ftdev->minor), GFP_KERNEL); -- cgit v1.2.2 From e079932a0a1aab6adbc42fedefc6caa2d9a8af2b Mon Sep 17 00:00:00 2001 From: "Bjoern B. Brandenburg" Date: Sat, 23 Jul 2011 23:40:10 -0400 Subject: Feather-trace: let userspace add overhead events This is useful for measuring locking-related overheads that are partially recorded in userspace. --- litmus/trace.c | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) (limited to 'litmus') diff --git a/litmus/trace.c b/litmus/trace.c index f0eb2c706488..5d77806da647 100644 --- a/litmus/trace.c +++ b/litmus/trace.c @@ -1,5 +1,6 @@ #include #include +#include #include #include @@ -31,6 +32,18 @@ static inline void __save_timestamp_cpu(unsigned long event, } } +static void __add_timestamp_user(struct timestamp *pre_recorded) +{ + unsigned int seq_no; + struct timestamp *ts; + seq_no = fetch_and_inc((int *) &ts_seq_no); + if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { + *ts = *pre_recorded; + ts->seq_no = seq_no; + ft_buffer_finish_write(trace_ts_buf, ts); + } +} + static inline void __save_timestamp(unsigned long event, uint8_t type) { @@ -108,6 +121,32 @@ static void free_timestamp_buffer(struct ftdev* ftdev, unsigned int idx) ftdev->minor[idx].buf = NULL; } +static ssize_t write_timestamp_from_user(struct ft_buffer* buf, size_t len, + const char __user *from) +{ + ssize_t consumed = 0; + struct timestamp ts; + + /* don't give us partial timestamps */ + if (len % sizeof(ts)) + return -EINVAL; + + while (len >= sizeof(ts)) { + if (copy_from_user(&ts, from, sizeof(ts))) { + consumed = -EFAULT; + goto out; + } + len -= sizeof(ts); + from += sizeof(ts); + consumed += sizeof(ts); + + __add_timestamp_user(&ts); + } + +out: + return consumed; +} + static int __init init_ft_overhead_trace(void) { int err; @@ -119,6 +158,7 @@ static int __init init_ft_overhead_trace(void) overhead_dev.alloc = alloc_timestamp_buffer; overhead_dev.free = free_timestamp_buffer; + overhead_dev.write = write_timestamp_from_user; err = register_ftdev(&overhead_dev); if (err) -- cgit v1.2.2 From 71083a7604e93e44536edde032706348f3a752ca Mon Sep 17 00:00:00 2001 From: "Bjoern B. 
Brandenburg" Date: Mon, 25 Jul 2011 15:31:55 -0400 Subject: locking: use correct timestamp --- litmus/locking.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'litmus') diff --git a/litmus/locking.c b/litmus/locking.c index 2693f1aca859..0c1aa6aa40b7 100644 --- a/litmus/locking.c +++ b/litmus/locking.c @@ -80,7 +80,7 @@ asmlinkage long sys_litmus_lock(int lock_od) /* Note: task my have been suspended or preempted in between! Take * this into account when computing overheads. */ - TS_UNLOCK_END; + TS_LOCK_END; return err; } -- cgit v1.2.2 From 81b8eb2ae452c241df9b3a1fb2116fa4d5adcb75 Mon Sep 17 00:00:00 2001 From: "Bjoern B. Brandenburg" Date: Tue, 26 Jul 2011 22:03:18 -0400 Subject: C-EDF: rename lock -> cluster_lock The macro lock conflicts with locking protocols... --- litmus/sched_cedf.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'litmus') diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index 8b3f8a7e2609..480c62bc895b 100644 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c @@ -99,7 +99,7 @@ typedef struct clusterdomain { struct bheap_node *heap_node; struct bheap cpu_heap; /* lock for this cluster */ -#define lock domain.ready_lock +#define cluster_lock domain.ready_lock } cedf_domain_t; /* a cedf_domain per cluster; allocation is done at init/activation time */ @@ -331,12 +331,12 @@ static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain); unsigned long flags; - raw_spin_lock_irqsave(&cluster->lock, flags); + raw_spin_lock_irqsave(&cluster->cluster_lock, flags); __merge_ready(&cluster->domain, tasks); check_for_preemptions(cluster); - raw_spin_unlock_irqrestore(&cluster->lock, flags); + raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); } /* caller holds cedf_lock */ @@ -427,7 +427,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) } #endif - raw_spin_lock(&cluster->lock); + raw_spin_lock(&cluster->cluster_lock); clear_will_schedule(); /* sanity checking */ @@ -511,7 +511,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) next = prev; sched_state_task_picked(); - raw_spin_unlock(&cluster->lock); + raw_spin_unlock(&cluster->cluster_lock); #ifdef WANT_ALL_SCHED_EVENTS TRACE("cedf_lock released, next=0x%p\n", next); @@ -553,7 +553,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running) /* the cluster doesn't change even if t is running */ cluster = task_cpu_cluster(t); - raw_spin_lock_irqsave(&cluster->domain.ready_lock, flags); + raw_spin_lock_irqsave(&cluster->cluster_lock, flags); /* setup job params */ release_at(t, litmus_clock()); @@ -580,7 +580,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running) t->rt_param.linked_on = NO_CPU; cedf_job_arrival(t); - raw_spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags); + raw_spin_unlock_irqrestore(&(cluster->cluster_lock), flags); } static void cedf_task_wake_up(struct task_struct *task) @@ -593,7 +593,7 @@ static void cedf_task_wake_up(struct task_struct *task) cluster = task_cpu_cluster(task); - raw_spin_lock_irqsave(&cluster->lock, flags); + raw_spin_lock_irqsave(&cluster->cluster_lock, flags); /* We need to take suspensions because of semaphores into * account! If a job resumes after being suspended due to acquiring * a semaphore, it should never be treated as a new job release. 
@@ -616,7 +616,7 @@ static void cedf_task_wake_up(struct task_struct *task) } } cedf_job_arrival(task); - raw_spin_unlock_irqrestore(&cluster->lock, flags); + raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); } static void cedf_task_block(struct task_struct *t) @@ -629,9 +629,9 @@ static void cedf_task_block(struct task_struct *t) cluster = task_cpu_cluster(t); /* unlink if necessary */ - raw_spin_lock_irqsave(&cluster->lock, flags); + raw_spin_lock_irqsave(&cluster->cluster_lock, flags); unlink(t); - raw_spin_unlock_irqrestore(&cluster->lock, flags); + raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); BUG_ON(!is_realtime(t)); } @@ -643,7 +643,7 @@ static void cedf_task_exit(struct task_struct * t) cedf_domain_t *cluster = task_cpu_cluster(t); /* unlink if necessary */ - raw_spin_lock_irqsave(&cluster->lock, flags); + raw_spin_lock_irqsave(&cluster->cluster_lock, flags); unlink(t); if (tsk_rt(t)->scheduled_on != NO_CPU) { cpu_entry_t *cpu; @@ -651,7 +651,7 @@ static void cedf_task_exit(struct task_struct * t) cpu->scheduled = NULL; tsk_rt(t)->scheduled_on = NO_CPU; } - raw_spin_unlock_irqrestore(&cluster->lock, flags); + raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); BUG_ON(!is_realtime(t)); TRACE_TASK(t, "RIP\n"); -- cgit v1.2.2 From 5bd89a34d89f252619d83fef3c9325e24311389e Mon Sep 17 00:00:00 2001 From: "Bjoern B. Brandenburg" Date: Thu, 28 Jul 2011 01:15:58 -0400 Subject: Litmus core: simplify np-section protocol User a 32-bit word for all non-preemptive section flags. Set the "please yield soon" flag atomically when accessing it on remotely-scheduled tasks. --- litmus/litmus.c | 2 ++ litmus/sched_plugin.c | 23 ++++++----------------- 2 files changed, 8 insertions(+), 17 deletions(-) (limited to 'litmus') diff --git a/litmus/litmus.c b/litmus/litmus.c index 73af6c3010d6..301390148d02 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c @@ -529,6 +529,8 @@ static int __init _init_litmus(void) */ printk("Starting LITMUS^RT kernel\n"); + BUILD_BUG_ON(sizeof(union np_flag) != sizeof(uint32_t)); + register_sched_plugin(&linux_sched_plugin); bheap_node_cache = KMEM_CACHE(bheap_node, SLAB_PANIC); diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c index d54886df1f57..00a1900d6457 100644 --- a/litmus/sched_plugin.c +++ b/litmus/sched_plugin.c @@ -35,29 +35,18 @@ void preempt_if_preemptable(struct task_struct* t, int cpu) /* local CPU case */ /* check if we need to poke userspace */ if (is_user_np(t)) - /* yes, poke it */ + /* Yes, poke it. This doesn't have to be atomic since + * the task is definitely not executing. */ request_exit_np(t); else if (!is_kernel_np(t)) /* only if we are allowed to preempt the * currently-executing task */ reschedule = 1; } else { - /* remote CPU case */ - if (is_user_np(t)) { - /* need to notify user space of delayed - * preemption */ - - /* to avoid a race, set the flag, then test - * again */ - request_exit_np(t); - /* make sure it got written */ - mb(); - } - /* Only send an ipi if remote task might have raced our - * request, i.e., send an IPI to make sure in case it - * exited its critical section. - */ - reschedule = !is_np(t) && !is_kernel_np(t); + /* Remote CPU case. Only notify if it's not a kernel + * NP section and if we didn't set the userspace + * flag. */ + reschedule = !(is_kernel_np(t) || request_exit_np_atomic(t)); } } if (likely(reschedule)) -- cgit v1.2.2 From 83b11ea1c6ad113519c488853cf06e626c95a64d Mon Sep 17 00:00:00 2001 From: "Bjoern B. 
Brandenburg" Date: Tue, 24 Jan 2012 09:36:12 +0100 Subject: Feather-Trace: keep track of interrupt-related interference. Increment a processor-local counter whenever an interrupt is handled. This allows Feather-Trace to include a (truncated) counter and a flag to report interference from interrupts. This could be used to filter samples that were disturbed by interrupts. --- litmus/trace.c | 47 +++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 45 insertions(+), 2 deletions(-) (limited to 'litmus') diff --git a/litmus/trace.c b/litmus/trace.c index 5d77806da647..3c35c527e805 100644 --- a/litmus/trace.c +++ b/litmus/trace.c @@ -16,6 +16,35 @@ static struct ftdev overhead_dev; static unsigned int ts_seq_no = 0; +DEFINE_PER_CPU(atomic_t, irq_fired_count); + +static inline void clear_irq_fired(void) +{ + atomic_set(&__raw_get_cpu_var(irq_fired_count), 0); +} + +static inline unsigned int get_and_clear_irq_fired(void) +{ + /* This is potentially not atomic since we might migrate if + * preemptions are not disabled. As a tradeoff between + * accuracy and tracing overheads, this seems acceptable. + * If it proves to be a problem, then one could add a callback + * from the migration code to invalidate irq_fired_count. + */ + return atomic_xchg(&__raw_get_cpu_var(irq_fired_count), 0); +} + +static inline void __save_irq_flags(struct timestamp *ts) +{ + unsigned int irq_count; + + irq_count = get_and_clear_irq_fired(); + /* Store how many interrupts occurred. */ + ts->irq_count = irq_count; + /* Extra flag because ts->irq_count overflows quickly. */ + ts->irq_flag = irq_count > 0; +} + static inline void __save_timestamp_cpu(unsigned long event, uint8_t type, uint8_t cpu) { @@ -24,10 +53,13 @@ static inline void __save_timestamp_cpu(unsigned long event, seq_no = fetch_and_inc((int *) &ts_seq_no); if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { ts->event = event; - ts->timestamp = ft_timestamp(); ts->seq_no = seq_no; ts->cpu = cpu; ts->task_type = type; + __save_irq_flags(ts); + barrier(); + /* prevent re-ordering of ft_timestamp() */ + ts->timestamp = ft_timestamp(); ft_buffer_finish_write(trace_ts_buf, ts); } } @@ -40,6 +72,7 @@ static void __add_timestamp_user(struct timestamp *pre_recorded) if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { *ts = *pre_recorded; ts->seq_no = seq_no; + __save_irq_flags(ts); ft_buffer_finish_write(trace_ts_buf, ts); } } @@ -90,6 +123,7 @@ feather_callback void save_task_latency(unsigned long event, ts->seq_no = seq_no; ts->cpu = cpu; ts->task_type = TSK_RT; + __save_irq_flags(ts); ft_buffer_finish_write(trace_ts_buf, ts); } } @@ -107,6 +141,10 @@ feather_callback void save_task_latency(unsigned long event, static int alloc_timestamp_buffer(struct ftdev* ftdev, unsigned int idx) { unsigned int count = NO_TIMESTAMPS; + + /* An overhead-tracing timestamp should be exactly 16 bytes long. 
*/ + BUILD_BUG_ON(sizeof(struct timestamp) != 16); + while (count && !trace_ts_buf) { printk("time stamp buffer: trying to allocate %u time stamps.\n", count); ftdev->minor[idx].buf = alloc_ft_buffer(count, sizeof(struct timestamp)); @@ -149,7 +187,7 @@ out: static int __init init_ft_overhead_trace(void) { - int err; + int err, cpu; printk("Initializing Feather-Trace overhead tracing device.\n"); err = ftdev_init(&overhead_dev, THIS_MODULE, 1, "ft_trace"); @@ -164,6 +202,11 @@ static int __init init_ft_overhead_trace(void) if (err) goto err_dealloc; + /* initialize IRQ flags */ + for (cpu = 0; cpu < NR_CPUS; cpu++) { + clear_irq_fired(); + } + return 0; err_dealloc: -- cgit v1.2.2 From 65f96c53bc0db7733ae7908470ddb3f17dc369b4 Mon Sep 17 00:00:00 2001 From: Christopher Kenna Date: Mon, 26 Mar 2012 23:40:04 -0400 Subject: First commit. --- litmus/Makefile | 3 ++- litmus/color.c | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 1 deletion(-) create mode 100644 litmus/color.c (limited to 'litmus') diff --git a/litmus/Makefile b/litmus/Makefile index 7338180f196f..506fdf9b0c51 100644 --- a/litmus/Makefile +++ b/litmus/Makefile @@ -17,7 +17,8 @@ obj-y = sched_plugin.o litmus.o \ bheap.o \ ctrldev.o \ sched_gsn_edf.o \ - sched_psn_edf.o + sched_psn_edf.o \ + color.o obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o diff --git a/litmus/color.c b/litmus/color.c new file mode 100644 index 000000000000..2ff87937f346 --- /dev/null +++ b/litmus/color.c @@ -0,0 +1,38 @@ +#include + +#include +#include +#include + +#include + +#define MPRINT(fmt, args...) \ + printk(KERN_INFO "[%s@%s:%d]: " fmt, \ + __FUNCTION__, __FILE__, __LINE__, ## args) + +#define ALLOC_ORDER (MAX_ORDER - 1) + +struct color_cache_info color_cache_info; + +static int __init init_color(void) +{ + struct page *page; + + MPRINT("ALLOC_ORDER is %d\n", ALLOC_ORDER); + MPRINT("Cache size: %lu line-size: %lu ways: %lu sets: %lu\n", + color_cache_info.size, color_cache_info.line_size, + color_cache_info.ways, color_cache_info.sets); + BUG_ON(color_cache_info.size <= 1048576 || + color_cache_info.ways < 15 || + color_cache_info.line_size != 64 || + color_cache_info.sets < 4096); + + page = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, ALLOC_ORDER); + if (!page) { + MPRINT("could not allocate pages\n"); + BUG(); + } + return 0; +} + +module_init(init_color); -- cgit v1.2.2 From 114cd359b110e24d7fdf303fa96c09bf7fad875e Mon Sep 17 00:00:00 2001 From: Christopher Kenna Date: Wed, 28 Mar 2012 00:02:17 -0400 Subject: masks --- litmus/color.c | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 50 insertions(+), 8 deletions(-) (limited to 'litmus') diff --git a/litmus/color.c b/litmus/color.c index 2ff87937f346..2a942d22a676 100644 --- a/litmus/color.c +++ b/litmus/color.c @@ -11,13 +11,60 @@ __FUNCTION__, __FILE__, __LINE__, ## args) #define ALLOC_ORDER (MAX_ORDER - 1) +#define PAGES_PER_COLOR 10000 struct color_cache_info color_cache_info; +unsigned long color_mask; +unsigned long nr_colors; -static int __init init_color(void) +struct color_group { + spinlock_t lock; + struct list_head list; + int nr_pages; +}; + +struct color_group *colors; + +/* slowest possible way to find a log, but we only do this once on boot */ +static unsigned int slow_log(unsigned int v) +{ + unsigned int r = 0; + while (v >>= 1) + r++; + return r; +} + +static void __init setup_mask(void) { + const unsigned int line_size_log = slow_log(color_cache_info.line_size); + + 
BUG_ON(color_cache_info.size / color_cache_info.line_size / + color_cache_info.ways != color_cache_info.sets); + BUG_ON(PAGE_SIZE >= (color_cache_info.sets << line_size_log)); + color_mask = ((color_cache_info.sets << line_size_log) - 1) ^ + (PAGE_SIZE - 1); + nr_colors = (color_mask >> PAGE_SHIFT) + 1; + MPRINT("color mask: 0x%lx total colors: %lu\n", color_mask, + nr_colors); +} + +static int __init init_color_lists(void) +{ + struct page *page; + + page = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, ALLOC_ORDER); + if (!page) { + MPRINT("could not allocate pages\n"); + BUG(); + } + return 0; +} + +static int __init init_color(void) +{ + MPRINT("ALLOC_ORDER is %d\n", ALLOC_ORDER); MPRINT("Cache size: %lu line-size: %lu ways: %lu sets: %lu\n", color_cache_info.size, color_cache_info.line_size, @@ -26,13 +73,8 @@ static int __init init_color(void) color_cache_info.ways < 15 || color_cache_info.line_size != 64 || color_cache_info.sets < 4096); - - page = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, ALLOC_ORDER); - if (!page) { - MPRINT("could not allocate pages\n"); - BUG(); - } - return 0; + setup_mask(); + return init_color_lists(); } module_init(init_color); -- cgit v1.2.2 From a812f8c03abe7e6952af99ee6f0129e61b91c23f Mon Sep 17 00:00:00 2001 From: Christopher Kenna Date: Wed, 28 Mar 2012 00:29:18 -0400 Subject: don't crash when cache information unavailable --- litmus/color.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'litmus') diff --git a/litmus/color.c b/litmus/color.c index 2a942d22a676..c3e2a1072360 100644 --- a/litmus/color.c +++ b/litmus/color.c @@ -69,10 +69,13 @@ static int __init init_color(void) MPRINT("Cache size: %lu line-size: %lu ways: %lu sets: %lu\n", color_cache_info.size, color_cache_info.line_size, color_cache_info.ways, color_cache_info.sets); + if (!color_cache_info.size){ + printk(KERN_WARNING "No cache information found.\n"); + return -EINVAL; + } BUG_ON(color_cache_info.size <= 1048576 || color_cache_info.ways < 15 || - color_cache_info.line_size != 64 || - color_cache_info.sets < 4096); + color_cache_info.line_size != 64); setup_mask(); return init_color_lists(); } -- cgit v1.2.2 From b227313bc9f87f67001e92b898d5da7918102b20 Mon Sep 17 00:00:00 2001 From: Christopher Kenna Date: Wed, 28 Mar 2012 20:45:43 -0400 Subject: add pages to lists on boot --- litmus/color.c | 99 +++++++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 88 insertions(+), 11 deletions(-) (limited to 'litmus') diff --git a/litmus/color.c b/litmus/color.c index c3e2a1072360..b74b524655c8 100644 --- a/litmus/color.c +++ b/litmus/color.c @@ -3,6 +3,11 @@ #include #include #include +#include +#include + +//#include +//#include #include @@ -11,7 +16,7 @@ __FUNCTION__, __FILE__, __LINE__, ## args) #define ALLOC_ORDER (MAX_ORDER - 1) -#define PAGES_PER_COLOR 10000 +#define PAGES_PER_COLOR 1000 struct color_cache_info color_cache_info; unsigned long color_mask; @@ -20,10 +25,15 @@ unsigned long nr_colors; struct color_group { spinlock_t lock; struct list_head list; - int nr_pages; + unsigned long nr_pages; }; -struct color_group *colors; +struct color_group *color_groups; + +static inline unsigned int page_color(struct page *page) +{ + return ((page_to_phys(page) & color_mask) >> PAGE_SHIFT); +} /* slowest possible way to find a log, but we only do this once on boot */ static unsigned int slow_log(unsigned int v) @@ -48,18 +58,85 @@ static void __init setup_mask(void) nr_colors); } -static int __init init_color_lists(void) +/* don't trust nr_pages returned from 
here. it's just used on startup */ +static void _add_page_to_color_list(struct page *page) { + const unsigned long color = page_color(page); + struct color_group *cgroup = &color_groups[color]; + spin_lock(&cgroup->lock); + list_add_tail(&page->lru, &cgroup->list); + cgroup->nr_pages++; + spin_unlock(&cgroup->lock); +} - struct page *page; +static void add_page_to_color_list(struct page *page) +{ + unsigned long flags; + local_irq_save(flags); + _add_page_to_color_list(page); + local_irq_restore(flags); +} +static unsigned long __init smallest_nr_pages(void) +{ + unsigned long flags, i, min_pages = -1; + struct color_group *cgroup; + local_irq_save(flags); + for (i = 0; i < nr_colors; ++i) { + cgroup = &color_groups[i]; + spin_lock(&cgroup->lock); + if (cgroup->nr_pages < min_pages) + min_pages = cgroup->nr_pages; + spin_unlock(&cgroup->lock); + } + local_irq_restore(flags); + return min_pages; +} + +static int __init init_color_groups(void) +{ + struct color_group *cgroup; + unsigned long i, flags; + int ret = 0; + + color_groups = kmalloc(nr_colors * sizeof(struct color_group), GFP_KERNEL); + if (!color_groups) { + printk(KERN_WARNING "Could not allocate color groups.\n"); + ret = -ENOMEM; + goto out; + } + + for (i = 0; i < nr_colors; ++i) { + cgroup = &color_groups[i]; + cgroup->nr_pages = 0; + INIT_LIST_HEAD(&cgroup->list); + spin_lock_init(&cgroup->lock); + } + + while (smallest_nr_pages() < PAGES_PER_COLOR) { + struct page *page = alloc_pages( + GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT, + ALLOC_ORDER); + if (!page) { + MPRINT("could not allocate pages\n"); + BUG(); + } + local_irq_save(flags); + for (i = 0; i < (1UL << ALLOC_ORDER); ++i) + _add_page_to_color_list(&page[i]); + local_irq_restore(flags); + } - page = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, ALLOC_ORDER); - if (!page) { - MPRINT("could not allocate pages\n"); - BUG(); + local_irq_save(flags); + for (i = 0; i < nr_colors; ++i) { + cgroup = &color_groups[i]; + spin_lock(&cgroup->lock); + MPRINT("color %3lu: %4lu pages\n", i, cgroup->nr_pages); + spin_unlock(&cgroup->lock); } - return 0; + local_irq_restore(flags); +out: + return ret; } static int __init init_color(void) @@ -77,7 +154,7 @@ static int __init init_color(void) color_cache_info.ways < 15 || color_cache_info.line_size != 64); setup_mask(); - return init_color_lists(); + return init_color_groups(); } module_init(init_color); -- cgit v1.2.2 From 606f24606ecf76a62c2e262e260a51bf6f8dc009 Mon Sep 17 00:00:00 2001 From: Christopher Kenna Date: Thu, 29 Mar 2012 16:58:20 -0400 Subject: Allocate only one page at a time. Add proc interface. 
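As an aside on the coloring arithmetic introduced above: setup_mask() derives the color of a page purely from its physical address, the cache geometry, and the page size. Below is a self-contained sketch of the same computation for a hypothetical 2 MiB, 16-way cache with 64-byte lines and 4 KiB pages; the numbers are illustrative only and are not taken from the patches.

/* Mirrors the setup_mask()/page_color() computation above for an assumed
 * cache geometry; builds and runs as an ordinary userspace program. */
#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12

int main(void)
{
	unsigned long size = 2UL * 1024 * 1024;  /* 2 MiB, hypothetical */
	unsigned long ways = 16, line_size = 64;
	unsigned long line_size_log = 6;         /* log2(64) */
	unsigned long sets = size / line_size / ways;              /* 2048 */
	unsigned long color_mask =
		((sets << line_size_log) - 1) ^ (PAGE_SIZE - 1);   /* 0x1f000 */
	unsigned long nr_colors = (color_mask >> PAGE_SHIFT) + 1;  /* 32 */
	unsigned long phys = 0x12345000UL;       /* example physical address */

	printf("mask=0x%lx colors=%lu color(0x%lx)=%lu\n", color_mask,
	       nr_colors, phys, (phys & color_mask) >> PAGE_SHIFT);
	return 0;
}

For this geometry the program prints mask=0x1f000 colors=32 color(0x12345000)=5, i.e. consecutive physical pages cycle through 32 colors.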
--- litmus/Makefile | 3 +- litmus/color.c | 218 ++++++++++++++++++++++++++++++++-------------------- litmus/color_proc.c | 79 +++++++++++++++++++ 3 files changed, 216 insertions(+), 84 deletions(-) create mode 100644 litmus/color_proc.c (limited to 'litmus') diff --git a/litmus/Makefile b/litmus/Makefile index 506fdf9b0c51..e4c937bc2850 100644 --- a/litmus/Makefile +++ b/litmus/Makefile @@ -18,7 +18,8 @@ obj-y = sched_plugin.o litmus.o \ ctrldev.o \ sched_gsn_edf.o \ sched_psn_edf.o \ - color.o + color.o \ + color_proc.o obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o diff --git a/litmus/color.c b/litmus/color.c index b74b524655c8..e7aeab8c6598 100644 --- a/litmus/color.c +++ b/litmus/color.c @@ -5,9 +5,8 @@ #include #include #include - -//#include -//#include +#include +#include #include @@ -15,28 +14,139 @@ printk(KERN_INFO "[%s@%s:%d]: " fmt, \ __FUNCTION__, __FILE__, __LINE__, ## args) -#define ALLOC_ORDER (MAX_ORDER - 1) -#define PAGES_PER_COLOR 1000 - -struct color_cache_info color_cache_info; -unsigned long color_mask; -unsigned long nr_colors; +#define PAGES_PER_COLOR 2000 struct color_group { spinlock_t lock; struct list_head list; - unsigned long nr_pages; + atomic_t nr_pages; }; -struct color_group *color_groups; +static unsigned long color_mask; +static struct color_group *color_groups; + +/* non-static: extern'ed in various files */ +unsigned long nr_colors; +struct color_cache_info color_cache_info; +int color_sysctl_add_pages_data; -static inline unsigned int page_color(struct page *page) +static inline unsigned long page_color(struct page *page) { return ((page_to_phys(page) & color_mask) >> PAGE_SHIFT); } +void add_page_to_color_list(struct page *page) +{ + const unsigned long color = page_color(page); + struct color_group *cgroup = &color_groups[color]; + spin_lock(&cgroup->lock); + list_add_tail(&page->lru, &cgroup->list); + atomic_inc(&cgroup->nr_pages); + spin_unlock(&cgroup->lock); +} + +struct page* get_colored_page(unsigned long color) +{ + struct color_group *cgroup; + struct page *page; + BUG_ON(color >= nr_colors); + cgroup = &color_groups[color]; + spin_lock(&cgroup->lock); + BUG_ON(!atomic_read(&cgroup->nr_pages)); + page = list_first_entry(&cgroup->list, struct page, lru); + list_del(&page->lru); + atomic_dec(&cgroup->nr_pages); + spin_unlock(&cgroup->lock); + return page; + +} + +static unsigned long smallest_nr_pages(void) +{ + unsigned long i, min_pages = -1; + struct color_group *cgroup; + for (i = 0; i < nr_colors; ++i) { + cgroup = &color_groups[i]; + if (atomic_read(&cgroup->nr_pages) < min_pages) + min_pages = atomic_read(&cgroup->nr_pages); + } + return min_pages; +} + +static int do_add_pages(void) +{ + struct page *page, *page_tmp; + struct list_head free_later; + unsigned long color; + int ret = 0; + + INIT_LIST_HEAD(&free_later); + + while (smallest_nr_pages() < PAGES_PER_COLOR) { + page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | + __GFP_MOVABLE); + if (!page) { + MPRINT("could not allocate pages\n"); + BUG(); + } + color = page_color(page); + if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) + add_page_to_color_list(page); + else + list_add_tail(&page->lru, &free_later); + } + list_for_each_entry_safe(page, page_tmp, &free_later, lru) { + list_del(&page->lru); + __free_page(page); + } + return ret; +} + + +/*********************************************************** + * Proc +***********************************************************/ + +int color_add_pages_handler(struct ctl_table *table, int 
write, void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + int ret; + + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + if (ret) + return ret; + if (write && color_sysctl_add_pages_data) + do_add_pages(); + return 0; +} + + +int color_nr_pages_handler(struct ctl_table *table, int write, void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + struct color_group *cgroup; + char *buf; + unsigned long i; + int used = 0; + + if (write) + return -EPERM; + + for (i = 0; i < nr_colors; ++i) { + cgroup = &color_groups[i]; + buf = ((char*)table->data) + used; + used += snprintf(buf, table->maxlen - used, ONE_COLOR_FMT, + i, atomic_read(&cgroup->nr_pages)); + } + return proc_dostring(table, write, buffer, lenp, ppos); +} + +/*********************************************************** + * Initialization +***********************************************************/ + /* slowest possible way to find a log, but we only do this once on boot */ -static unsigned int slow_log(unsigned int v) +static unsigned int __init slow_log(unsigned int v) { unsigned int r = 0; while (v >>= 1) @@ -44,7 +154,7 @@ static unsigned int slow_log(unsigned int v) return r; } -static void __init setup_mask(void) +static void __init init_mask(void) { const unsigned int line_size_log = slow_log(color_cache_info.line_size); @@ -58,103 +168,45 @@ static void __init setup_mask(void) nr_colors); } -/* don't trust nr_pages returned from here. it's just used on startup */ -static void _add_page_to_color_list(struct page *page) -{ - const unsigned long color = page_color(page); - struct color_group *cgroup = &color_groups[color]; - spin_lock(&cgroup->lock); - list_add_tail(&page->lru, &cgroup->list); - cgroup->nr_pages++; - spin_unlock(&cgroup->lock); -} - -static void add_page_to_color_list(struct page *page) -{ - unsigned long flags; - local_irq_save(flags); - _add_page_to_color_list(page); - local_irq_restore(flags); -} - -static unsigned long __init smallest_nr_pages(void) -{ - unsigned long flags, i, min_pages = -1; - struct color_group *cgroup; - local_irq_save(flags); - for (i = 0; i < nr_colors; ++i) { - cgroup = &color_groups[i]; - spin_lock(&cgroup->lock); - if (cgroup->nr_pages < min_pages) - min_pages = cgroup->nr_pages; - spin_unlock(&cgroup->lock); - } - local_irq_restore(flags); - return min_pages; -} - static int __init init_color_groups(void) { struct color_group *cgroup; - unsigned long i, flags; - int ret = 0; + unsigned long i; color_groups = kmalloc(nr_colors * sizeof(struct color_group), GFP_KERNEL); if (!color_groups) { printk(KERN_WARNING "Could not allocate color groups.\n"); - ret = -ENOMEM; - goto out; + return -ENOMEM; } for (i = 0; i < nr_colors; ++i) { cgroup = &color_groups[i]; - cgroup->nr_pages = 0; + atomic_set(&cgroup->nr_pages, 0); INIT_LIST_HEAD(&cgroup->list); spin_lock_init(&cgroup->lock); } - - while (smallest_nr_pages() < PAGES_PER_COLOR) { - struct page *page = alloc_pages( - GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT, - ALLOC_ORDER); - if (!page) { - MPRINT("could not allocate pages\n"); - BUG(); - } - local_irq_save(flags); - for (i = 0; i < (1UL << ALLOC_ORDER); ++i) - _add_page_to_color_list(&page[i]); - local_irq_restore(flags); - } - - local_irq_save(flags); - for (i = 0; i < nr_colors; ++i) { - cgroup = &color_groups[i]; - spin_lock(&cgroup->lock); - MPRINT("color %3lu: %4lu pages\n", i, cgroup->nr_pages); - spin_unlock(&cgroup->lock); - } - local_irq_restore(flags); -out: - return ret; + return 0; } static int __init init_color(void) { + int ret = 0; - 
MPRINT("ALLOC_ORDER is %d\n", ALLOC_ORDER); MPRINT("Cache size: %lu line-size: %lu ways: %lu sets: %lu\n", color_cache_info.size, color_cache_info.line_size, color_cache_info.ways, color_cache_info.sets); if (!color_cache_info.size){ printk(KERN_WARNING "No cache information found.\n"); - return -EINVAL; + ret = -EINVAL; + goto out; } BUG_ON(color_cache_info.size <= 1048576 || color_cache_info.ways < 15 || color_cache_info.line_size != 64); - setup_mask(); - return init_color_groups(); + init_mask(); + ret = init_color_groups(); +out: + return ret; } module_init(init_color); diff --git a/litmus/color_proc.c b/litmus/color_proc.c new file mode 100644 index 000000000000..1eba0740db9d --- /dev/null +++ b/litmus/color_proc.c @@ -0,0 +1,79 @@ +#include +#include +#include + +#include + +extern int color_sysctl_add_pages_data; /* litmus/color.c */ + +static int zero = 0; +static int one = 1; + +#define NR_PAGES_INDEX 1 /* location of nr_pages in the table below */ +static struct ctl_table color_table[] = +{ + { + .procname = "add_pages", + .data = &color_sysctl_add_pages_data, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = color_add_pages_handler, + .extra1 = &zero, + .extra2 = &one, + }, + { + .procname = "nr_pages", + .mode = 0444, + .proc_handler = color_nr_pages_handler, + .data = NULL, /* dynamically later */ + .maxlen = 0, /* also set later */ + }, + { } +}; + +static struct ctl_table litmus_table[] = +{ + { + .procname = "color", + .mode = 0555, + .child = color_table, + }, + { } +}; +static struct ctl_table litmus_dir_table[] = { + { + .procname = "litmus", + .mode = 0555, + .child = litmus_table, + }, + { } +}; + +extern unsigned long nr_colors; /* litmus/color.c */ + +/* must be called AFTER nr_colors is set */ +static int __init init_sysctl_nr_colors(void) +{ + int maxlen = ONE_COLOR_LEN * nr_colors; + color_table[NR_PAGES_INDEX].data = kmalloc(maxlen, GFP_KERNEL); + if (!color_table[NR_PAGES_INDEX].data) { + printk(KERN_WARNING "Could not allocate nr_pages buffer.\n"); + return -ENOMEM; + } + color_table[NR_PAGES_INDEX].maxlen = maxlen; + return 0; +} + +static struct ctl_table_header *litmus_sysctls; + +static int __init litmus_sysctl_init(void) +{ + litmus_sysctls = register_sysctl_table(litmus_dir_table); + if (!litmus_sysctls) { + printk(KERN_WARNING "Could not register LITMUS^RT sysctl.\n"); + return -EFAULT; + } + return init_sysctl_nr_colors(); +} + +module_init(litmus_sysctl_init); -- cgit v1.2.2 From 9ba35c84953f070616b00237975de9206b698afd Mon Sep 17 00:00:00 2001 From: Christopher Kenna Date: Sat, 31 Mar 2012 16:38:06 -0400 Subject: Allow dynamically allocated locks for Lockdep. --- litmus/color.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'litmus') diff --git a/litmus/color.c b/litmus/color.c index e7aeab8c6598..1077473c8ae9 100644 --- a/litmus/color.c +++ b/litmus/color.c @@ -7,6 +7,7 @@ #include #include #include +#include #include @@ -16,8 +17,16 @@ #define PAGES_PER_COLOR 2000 +/* + * This is used only to "trick" lockdep into permitting dynamically allocated + * locks of different classes that are initialized on the same line. 
+ */ +#define LOCKDEP_MAX_NR_COLORS 512 +static struct lock_class_key color_lock_keys[LOCKDEP_MAX_NR_COLORS]; + struct color_group { spinlock_t lock; + char _lock_name[LITMUS_LOCKDEP_NAME_MAX_LEN]; struct list_head list; atomic_t nr_pages; }; @@ -166,6 +175,7 @@ static void __init init_mask(void) nr_colors = (color_mask >> PAGE_SHIFT) + 1; MPRINT("color mask: 0x%lx total colors: %lu\n", color_mask, nr_colors); + BUG_ON(LOCKDEP_MAX_NR_COLORS < nr_colors); } static int __init init_color_groups(void) @@ -184,6 +194,8 @@ static int __init init_color_groups(void) atomic_set(&cgroup->nr_pages, 0); INIT_LIST_HEAD(&cgroup->list); spin_lock_init(&cgroup->lock); + LOCKDEP_DYNAMIC_ALLOC(&cgroup->lock, &color_lock_keys[i], + cgroup->_lock_name, "color%lu", i); } return 0; } -- cgit v1.2.2 From db4d9fcd3dfbda54b351ef42c13d93a00009784f Mon Sep 17 00:00:00 2001 From: Christopher Kenna Date: Sun, 1 Apr 2012 19:24:46 -0400 Subject: minor clean-up --- litmus/color.c | 45 ++++++++++++++++++++++++++++----------------- litmus/color_proc.c | 16 +++++++++++----- 2 files changed, 39 insertions(+), 22 deletions(-) (limited to 'litmus') diff --git a/litmus/color.c b/litmus/color.c index 1077473c8ae9..aefb76e36626 100644 --- a/litmus/color.c +++ b/litmus/color.c @@ -57,17 +57,20 @@ void add_page_to_color_list(struct page *page) struct page* get_colored_page(unsigned long color) { struct color_group *cgroup; - struct page *page; + struct page *page = NULL; BUG_ON(color >= nr_colors); cgroup = &color_groups[color]; spin_lock(&cgroup->lock); - BUG_ON(!atomic_read(&cgroup->nr_pages)); + if (unlikely(!atomic_read(&cgroup->nr_pages))) { + printk(KERN_WARNING "no free %lu colored pages.\n", color); + goto out_unlock; + } page = list_first_entry(&cgroup->list, struct page, lru); list_del(&page->lru); atomic_dec(&cgroup->nr_pages); +out_unlock: spin_unlock(&cgroup->lock); return page; - } static unsigned long smallest_nr_pages(void) @@ -94,9 +97,10 @@ static int do_add_pages(void) while (smallest_nr_pages() < PAGES_PER_COLOR) { page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_MOVABLE); - if (!page) { + if (unlikely(!page)) { MPRINT("could not allocate pages\n"); - BUG(); + ret = -ENOMEM; + goto out; } color = page_color(page); if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) @@ -108,6 +112,7 @@ static int do_add_pages(void) list_del(&page->lru); __free_page(page); } +out: return ret; } @@ -119,14 +124,14 @@ static int do_add_pages(void) int color_add_pages_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - int ret; - + int ret = 0; ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (ret) - return ret; + goto out; if (write && color_sysctl_add_pages_data) - do_add_pages(); - return 0; + ret = do_add_pages(); +out: + return ret; } @@ -136,18 +141,21 @@ int color_nr_pages_handler(struct ctl_table *table, int write, void __user *buff struct color_group *cgroup; char *buf; unsigned long i; - int used = 0; - - if (write) - return -EPERM; + int used = 0, ret = 0; + if (write) { + ret = -EPERM; + goto out; + } for (i = 0; i < nr_colors; ++i) { cgroup = &color_groups[i]; buf = ((char*)table->data) + used; used += snprintf(buf, table->maxlen - used, ONE_COLOR_FMT, i, atomic_read(&cgroup->nr_pages)); } - return proc_dostring(table, write, buffer, lenp, ppos); + ret = proc_dostring(table, write, buffer, lenp, ppos); +out: + return ret; } /*********************************************************** @@ -182,11 +190,13 @@ static int __init 
init_color_groups(void) { struct color_group *cgroup; unsigned long i; + int ret = 0; color_groups = kmalloc(nr_colors * sizeof(struct color_group), GFP_KERNEL); if (!color_groups) { printk(KERN_WARNING "Could not allocate color groups.\n"); - return -ENOMEM; + ret = -ENOMEM; + goto out; } for (i = 0; i < nr_colors; ++i) { @@ -197,7 +207,8 @@ static int __init init_color_groups(void) LOCKDEP_DYNAMIC_ALLOC(&cgroup->lock, &color_lock_keys[i], cgroup->_lock_name, "color%lu", i); } - return 0; +out: + return ret; } static int __init init_color(void) diff --git a/litmus/color_proc.c b/litmus/color_proc.c index 1eba0740db9d..31eec0d728a5 100644 --- a/litmus/color_proc.c +++ b/litmus/color_proc.c @@ -54,26 +54,32 @@ extern unsigned long nr_colors; /* litmus/color.c */ /* must be called AFTER nr_colors is set */ static int __init init_sysctl_nr_colors(void) { - int maxlen = ONE_COLOR_LEN * nr_colors; + int ret = 0, maxlen = ONE_COLOR_LEN * nr_colors; color_table[NR_PAGES_INDEX].data = kmalloc(maxlen, GFP_KERNEL); if (!color_table[NR_PAGES_INDEX].data) { printk(KERN_WARNING "Could not allocate nr_pages buffer.\n"); - return -ENOMEM; + ret = -ENOMEM; + goto out; } color_table[NR_PAGES_INDEX].maxlen = maxlen; - return 0; +out: + return ret; } static struct ctl_table_header *litmus_sysctls; static int __init litmus_sysctl_init(void) { + int ret = 0; litmus_sysctls = register_sysctl_table(litmus_dir_table); if (!litmus_sysctls) { printk(KERN_WARNING "Could not register LITMUS^RT sysctl.\n"); - return -EFAULT; + ret = -EFAULT; + goto out; } - return init_sysctl_nr_colors(); + ret = init_sysctl_nr_colors(); +out: + return ret; } module_init(litmus_sysctl_init); -- cgit v1.2.2 From ae95a4582d707de8a57a8159ea81b16ba7bddd54 Mon Sep 17 00:00:00 2001 From: Christopher Kenna Date: Sun, 1 Apr 2012 19:24:46 -0400 Subject: Page reclaiming, control devices, cleanup. Track allocated pages and add a proc handler to reclaim free pages and add control device for allocating colored memory with mmap. --- litmus/Makefile | 3 +- litmus/color.c | 82 +++++++++++--- litmus/color_dev.c | 312 ++++++++++++++++++++++++++++++++++++++++++++++++++++ litmus/color_proc.c | 24 +++- litmus/litmus.c | 25 ++++- 5 files changed, 421 insertions(+), 25 deletions(-) create mode 100644 litmus/color_dev.c (limited to 'litmus') diff --git a/litmus/Makefile b/litmus/Makefile index e4c937bc2850..2d77d11e905e 100644 --- a/litmus/Makefile +++ b/litmus/Makefile @@ -19,7 +19,8 @@ obj-y = sched_plugin.o litmus.o \ sched_gsn_edf.o \ sched_psn_edf.o \ color.o \ - color_proc.o + color_proc.o \ + color_dev.o obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o diff --git a/litmus/color.c b/litmus/color.c index aefb76e36626..a3cc193418c0 100644 --- a/litmus/color.c +++ b/litmus/color.c @@ -1,15 +1,14 @@ #include -#include #include -#include -#include +#include #include -#include #include #include +#include /* required by litmus.h */ #include +#include /* for in_list(...) */ #define MPRINT(fmt, args...) 
\ printk(KERN_INFO "[%s@%s:%d]: " fmt, \ @@ -31,13 +30,20 @@ struct color_group { atomic_t nr_pages; }; +static struct alloced_pages { + spinlock_t lock; + struct list_head list; +} alloced_pages; + static unsigned long color_mask; static struct color_group *color_groups; + /* non-static: extern'ed in various files */ unsigned long nr_colors; struct color_cache_info color_cache_info; int color_sysctl_add_pages_data; +int color_sysctl_reclaim_pages_data; static inline unsigned long page_color(struct page *page) { @@ -48,12 +54,25 @@ void add_page_to_color_list(struct page *page) { const unsigned long color = page_color(page); struct color_group *cgroup = &color_groups[color]; + BUG_ON(in_list(&page->lru) || PageLRU(page)); + BUG_ON(page_mapped(page) || page_count(page) > 1); spin_lock(&cgroup->lock); list_add_tail(&page->lru, &cgroup->list); atomic_inc(&cgroup->nr_pages); + SetPageLRU(page); spin_unlock(&cgroup->lock); } +void add_page_to_alloced_list(struct page *page) +{ + BUG_ON(in_list(&page->lru) || PageLRU(page)); + BUG_ON(!page_mapped(page) || page_count(page) < 2); + spin_lock(&alloced_pages.lock); + list_add_tail(&page->lru, &alloced_pages.list); + SetPageLRU(page); + spin_unlock(&alloced_pages.lock); +} + struct page* get_colored_page(unsigned long color) { struct color_group *cgroup; @@ -66,8 +85,10 @@ struct page* get_colored_page(unsigned long color) goto out_unlock; } page = list_first_entry(&cgroup->list, struct page, lru); + BUG_ON(page_mapped(page) || page_count(page) > 1); list_del(&page->lru); atomic_dec(&cgroup->nr_pages); + ClearPageLRU(page); out_unlock: spin_unlock(&cgroup->lock); return page; @@ -88,12 +109,10 @@ static unsigned long smallest_nr_pages(void) static int do_add_pages(void) { struct page *page, *page_tmp; - struct list_head free_later; + LIST_HEAD(free_later); unsigned long color; int ret = 0; - INIT_LIST_HEAD(&free_later); - while (smallest_nr_pages() < PAGES_PER_COLOR) { page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_MOVABLE); @@ -116,6 +135,25 @@ out: return ret; } +static int do_reclaim_pages(void) +{ + struct page *page, *page_tmp; + unsigned long nr_reclaimed = 0; + spin_lock(&alloced_pages.lock); + list_for_each_entry_safe(page, page_tmp, &alloced_pages.list, lru) { + if (1 == page_count(page) && !page_mapped(page)) { + list_del(&page->lru); + ClearPageLRU(page); + add_page_to_color_list(page); + nr_reclaimed++; + TRACE_CUR("Reclaimed page (pfn:%lu phys:0x%lx).\n", + page_to_pfn(page), page_to_phys(page)); + } + } + spin_unlock(&alloced_pages.lock); + TRACE_CUR("Reclaimed %lu pages.\n", nr_reclaimed); + return 0; +} /*********************************************************** * Proc @@ -158,6 +196,19 @@ out: return ret; } +int color_reclaim_pages_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int ret = 0; + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + if (ret) + goto out; + if (write && color_sysctl_reclaim_pages_data) + ret = do_reclaim_pages(); +out: + return ret; +} + /*********************************************************** * Initialization ***********************************************************/ @@ -181,7 +232,7 @@ static void __init init_mask(void) color_mask = ((color_cache_info.sets << line_size_log) - 1) ^ (PAGE_SIZE - 1); nr_colors = (color_mask >> PAGE_SHIFT) + 1; - MPRINT("color mask: 0x%lx total colors: %lu\n", color_mask, + printk("Color mask: 0x%lx Total colors: %lu\n", color_mask, nr_colors); BUG_ON(LOCKDEP_MAX_NR_COLORS < nr_colors); } @@ -214,18 
+265,23 @@ out: static int __init init_color(void) { int ret = 0; + printk("Initializing LITMUS^RT cache coloring.\n"); - MPRINT("Cache size: %lu line-size: %lu ways: %lu sets: %lu\n", + BUG_ON(color_cache_info.size <= 1048576 || + color_cache_info.ways < 15 || + color_cache_info.line_size != 64); + INIT_LIST_HEAD(&alloced_pages.list); + spin_lock_init(&alloced_pages.lock); + + printk("Cache size: %lu line-size: %lu ways: %lu sets: %lu\n", color_cache_info.size, color_cache_info.line_size, color_cache_info.ways, color_cache_info.sets); - if (!color_cache_info.size){ + if (!color_cache_info.size) { printk(KERN_WARNING "No cache information found.\n"); ret = -EINVAL; goto out; } - BUG_ON(color_cache_info.size <= 1048576 || - color_cache_info.ways < 15 || - color_cache_info.line_size != 64); + init_mask(); ret = init_color_groups(); out: diff --git a/litmus/color_dev.c b/litmus/color_dev.c new file mode 100644 index 000000000000..b8218b6d1d9c --- /dev/null +++ b/litmus/color_dev.c @@ -0,0 +1,312 @@ +#include +#include +#include +#include +#include +#include + +#include +#include + +#define ALLOC_NAME "litmus/color_alloc" +#define CTRL_NAME "litmus/color_ctrl" + +static struct non_rt_colors { + spinlock_t lock; + unsigned long color; +} non_rt_colors; + +extern unsigned long nr_colors; + +/*********************************************************** + * Control device +***********************************************************/ + +static void litmus_color_ctrl_vm_close(struct vm_area_struct *vma) +{ + TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__, + vma->vm_flags, pgprot_val(vma->vm_page_prot)); + + TRACE_CUR(CTRL_NAME ": %p:%p vma:%p vma->vm_private_data:%p closed.\n", + (void*) vma->vm_start, (void*) vma->vm_end, vma, + vma->vm_private_data); +} + +static int litmus_color_ctrl_vm_fault(struct vm_area_struct *vma, + struct vm_fault *vmf) +{ + /* This function should never be called, since + * all pages should have been mapped by mmap() + * already. */ + TRACE_CUR("%s flags=0x%lx\n", __FUNCTION__, vma->vm_flags); + printk(KERN_WARNING "fault: %s flags=0x%lx\n", __FUNCTION__, + vma->vm_flags); + + /* nope, you only get one page */ + return VM_FAULT_SIGBUS; +} + +static struct vm_operations_struct litmus_color_ctrl_vm_ops = { + .close = litmus_color_ctrl_vm_close, + .fault = litmus_color_ctrl_vm_fault, +}; + +static int mmap_common_checks(struct vm_area_struct *vma) +{ + /* you can only map the "first" page */ + if (vma->vm_pgoff != 0) + return -EINVAL; + + /* you can't share it with anyone */ + if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) + return -EINVAL; + + return 0; +} + +static int alloc_color_ctrl_page(void) +{ + struct task_struct *t; + int err = 0; + + t = current; + /* only allocate if the task doesn't have one yet */ + if (!tsk_rt(t)->color_ctrl_page) { + tsk_rt(t)->color_ctrl_page = (void*) get_zeroed_page(GFP_KERNEL); + if (!tsk_rt(t)->color_ctrl_page) + err = -ENOMEM; + /* will get de-allocated in task teardown */ + TRACE_TASK(t, "%s color_ctrl_page = %p\n", __FUNCTION__, + tsk_rt(t)->color_ctrl_page); + } + return err; +} + +static int map_color_ctrl_page(struct vm_area_struct *vma) +{ + int err; + unsigned long pfn; + struct task_struct *t = current; + struct page *color_ctrl = virt_to_page(tsk_rt(t)->color_ctrl_page); + + t = current; + /* Increase ref count. Is decreased when vma is destroyed. 
*/ + get_page(color_ctrl); + pfn = page_to_pfn(color_ctrl); + + TRACE_CUR(CTRL_NAME + ": mapping %p (pfn:%lx, %lx) to 0x%lx (prot:%lx)\n", + tsk_rt(t)->color_ctrl_page, pfn, page_to_pfn(color_ctrl), + vma->vm_start, vma->vm_page_prot); + + /* Map it into the vma. Make sure to use PAGE_SHARED, otherwise + * userspace actually gets a copy-on-write page. */ + err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, PAGE_SHARED); + TRACE_CUR("page shared: guess:0x1(63)...1??111 actual:0x%lx\n", PAGE_SHARED); + /* present, RW, user, accessed, NX=63 */ + + if (err) + TRACE_CUR(CTRL_NAME ": remap_pfn_range() failed (%d)\n", err); + + return err; +} + +static int litmus_color_ctrl_mmap(struct file *filp, struct vm_area_struct *vma) +{ + int err = 0; + + /* you can only get one page */ + if (vma->vm_end - vma->vm_start != PAGE_SIZE) { + err = -EINVAL; + goto out; + } + + err = mmap_common_checks(vma); + if (err) + goto out; + + vma->vm_ops = &litmus_color_ctrl_vm_ops; + /* this mapping should not be kept across forks, + * and cannot be expanded */ + vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; + + err = alloc_color_ctrl_page(); + if (!err) + err = map_color_ctrl_page(vma); + + TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__, vma->vm_flags, + pgprot_val(vma->vm_page_prot)); +out: + return err; +} + + +/*********************************************************** + * Allocation device +***********************************************************/ + +static int map_colored_pages_non_rt(struct vm_area_struct *vma) +{ + unsigned long color, mapped; + int err; + const unsigned long nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + + spin_lock(&non_rt_colors.lock); + color = non_rt_colors.color; + non_rt_colors.color = (non_rt_colors.color + nr_pages) % nr_colors; + spin_unlock(&non_rt_colors.lock); + + TRACE_CUR(ALLOC_NAME ": allocating %lu pages from color %lu.\n", + nr_pages, color); + + for (mapped = 0; mapped < nr_pages; + mapped++, color = (color + 1) % nr_colors) + { + struct page *page = get_colored_page(color); + const unsigned long addr = vma->vm_start + PAGE_SIZE * mapped; + + if (!page) { + TRACE_CUR(ALLOC_NAME ": Could not get page with " + " color %lu.\n", color); + /* TODO unmap mapped pages */ + break; + } + TRACE_CUR(ALLOC_NAME ": insert page (pa:0x%llx, pfn:%lu, " + "color:%lu count:%d LRU:%d) at 0x%lx " + "(prot: 0x%lx)\n", + page_to_phys(page), page_to_pfn(page), color, + page_count(page), PageLRU(page), addr, + pgprot_val(vma->vm_page_prot)); + err = vm_insert_page(vma, addr, page); + if (err) { + TRACE_CUR(ALLOC_NAME ": vm_insert_page() failed " + "(%d)\n", err); + /* TODO unmap mapped pages */ + break; + } + add_page_to_alloced_list(page); + } + return err; +} + +static int map_colored_pages_rt(struct vm_area_struct *vma) +{ + /* TODO */ + return -EINVAL; +} + +static int map_colored_pages(struct vm_area_struct *vma) +{ + if (likely(is_realtime(current))) + return map_colored_pages_rt(vma); + return map_colored_pages_non_rt(vma); +} + +static void litmus_color_alloc_vm_close(struct vm_area_struct *vma) +{ + TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__, + vma->vm_flags, pgprot_val(vma->vm_page_prot)); + + TRACE_CUR(ALLOC_NAME ": %p:%p vma:%p vma->vm_private_data:%p closed.\n", + (void*) vma->vm_start, (void*) vma->vm_end, vma, + vma->vm_private_data); +} + +static int litmus_color_alloc_vm_fault(struct vm_area_struct *vma, + struct vm_fault *vmf) +{ + /* This function should never be called, since + * all pages should have been mapped by mmap() + * already. 
*/ + TRACE_CUR("%s flags=0x%lx\n", __FUNCTION__, vma->vm_flags); + printk(KERN_WARNING "fault: %s flags=0x%lx\n", __FUNCTION__, + vma->vm_flags); + + /* nope, you only get one page */ + return VM_FAULT_SIGBUS; +} + +static struct vm_operations_struct litmus_color_alloc_vm_ops = { + .close = litmus_color_alloc_vm_close, + .fault = litmus_color_alloc_vm_fault, +}; + +static int litmus_color_alloc_mmap(struct file *filp, struct vm_area_struct *vma) +{ + int err = 0; + + /* you may only request integer multiple of PAGE_SIZE */ + if (offset_in_page(vma->vm_end - vma->vm_start)) { + err = -EINVAL; + goto out; + } + + err = mmap_common_checks(vma); + if (err) + goto out; + + vma->vm_ops = &litmus_color_alloc_vm_ops; + /* this mapping should not be kept across forks, + * and cannot be expanded */ + vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; + + err = map_colored_pages(vma); + + TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__, vma->vm_flags, + pgprot_val(vma->vm_page_prot)); +out: + return err; +} + +/*********************************************************** + * Initilization +***********************************************************/ + +static struct file_operations litmus_color_ctrl_fops = { + .owner = THIS_MODULE, + .mmap = litmus_color_ctrl_mmap, +}; + +static struct miscdevice litmus_color_ctrl_dev = { + .name = CTRL_NAME, + .minor = MISC_DYNAMIC_MINOR, + .fops = &litmus_color_ctrl_fops, +}; + +static struct file_operations litmus_color_alloc_fops = { + .owner = THIS_MODULE, + .mmap = litmus_color_alloc_mmap, +}; + +static struct miscdevice litmus_color_alloc_dev = { + .name = ALLOC_NAME, + .minor = MISC_DYNAMIC_MINOR, + .fops = &litmus_color_alloc_fops, +}; + +static int __init init_dev(const char* name, struct miscdevice *dev) +{ + int err; + err = misc_register(dev); + if (err) + printk(KERN_WARNING "Could not allocate %s device (%d).\n", + name, err); + return err; +} + +static int __init init_color_devices(void) +{ + int err; + spin_lock_init(&non_rt_colors.lock); + non_rt_colors.color = 0; + + printk("Allocating LITMUS^RT color devices.\n"); + err = init_dev(ALLOC_NAME, &litmus_color_alloc_dev); + if (err) + goto out; + err = init_dev(CTRL_NAME, &litmus_color_ctrl_dev); +out: + return err; +} + +module_init(init_color_devices); diff --git a/litmus/color_proc.c b/litmus/color_proc.c index 31eec0d728a5..cac336ac1731 100644 --- a/litmus/color_proc.c +++ b/litmus/color_proc.c @@ -5,13 +5,22 @@ #include extern int color_sysctl_add_pages_data; /* litmus/color.c */ +extern int color_sysctl_reclaim_pages_data; /* litmus/color.c */ static int zero = 0; static int one = 1; -#define NR_PAGES_INDEX 1 /* location of nr_pages in the table below */ +#define NR_PAGES_INDEX 0 /* location of nr_pages in the table below */ static struct ctl_table color_table[] = { + { + /* you MUST update NR_PAGES_INDEX if you move this entry */ + .procname = "nr_pages", + .mode = 0444, + .proc_handler = color_nr_pages_handler, + .data = NULL, /* dynamically set later */ + .maxlen = 0, /* also set later */ + }, { .procname = "add_pages", .data = &color_sysctl_add_pages_data, @@ -22,11 +31,13 @@ static struct ctl_table color_table[] = .extra2 = &one, }, { - .procname = "nr_pages", - .mode = 0444, - .proc_handler = color_nr_pages_handler, - .data = NULL, /* dynamically later */ - .maxlen = 0, /* also set later */ + .procname = "reclaim_pages", + .data = &color_sysctl_reclaim_pages_data, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = color_reclaim_pages_handler, + .extra1 = &zero, + .extra2 = &one, }, 
{ } }; @@ -71,6 +82,7 @@ static struct ctl_table_header *litmus_sysctls; static int __init litmus_sysctl_init(void) { int ret = 0; + printk(KERN_INFO "Registering LITMUS^RT proc sysctl.\n"); litmus_sysctls = register_sysctl_table(litmus_dir_table); if (!litmus_sysctls) { printk(KERN_WARNING "Could not register LITMUS^RT sysctl.\n"); diff --git a/litmus/litmus.c b/litmus/litmus.c index 301390148d02..eaa2070d28ce 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c @@ -291,12 +291,14 @@ static void reinit_litmus_state(struct task_struct* p, int restore) { struct rt_task user_config = {}; void* ctrl_page = NULL; + void* color_ctrl_page = NULL; if (restore) { /* Safe user-space provided configuration data. * and allocated page. */ - user_config = p->rt_param.task_params; - ctrl_page = p->rt_param.ctrl_page; + user_config = p->rt_param.task_params; + ctrl_page = p->rt_param.ctrl_page; + color_ctrl_page = p->rt_param.color_ctrl_page; } /* We probably should not be inheriting any task's priority @@ -309,8 +311,9 @@ static void reinit_litmus_state(struct task_struct* p, int restore) /* Restore preserved fields. */ if (restore) { - p->rt_param.task_params = user_config; - p->rt_param.ctrl_page = ctrl_page; + p->rt_param.task_params = user_config; + p->rt_param.ctrl_page = ctrl_page; + p->rt_param.color_ctrl_page = color_ctrl_page; } } @@ -451,9 +454,11 @@ void litmus_fork(struct task_struct* p) reinit_litmus_state(p, 0); /* Don't let the child be a real-time task. */ p->sched_reset_on_fork = 1; - } else + } else { /* non-rt tasks might have ctrl_page set */ tsk_rt(p)->ctrl_page = NULL; + tsk_rt(p)->color_ctrl_page = NULL; + } /* od tables are never inherited across a fork */ p->od_table = NULL; @@ -473,6 +478,10 @@ void litmus_exec(void) free_page((unsigned long) tsk_rt(p)->ctrl_page); tsk_rt(p)->ctrl_page = NULL; } + if (tsk_rt(p)->color_ctrl_page) { + free_page((unsigned long) tsk_rt(p)->color_ctrl_page); + tsk_rt(p)->color_ctrl_page = NULL; + } } } @@ -490,6 +499,12 @@ void exit_litmus(struct task_struct *dead_tsk) tsk_rt(dead_tsk)->ctrl_page); free_page((unsigned long) tsk_rt(dead_tsk)->ctrl_page); } + if (tsk_rt(dead_tsk)->color_ctrl_page) { + TRACE_TASK(dead_tsk, + "freeing color_ctrl_page %p\n", + tsk_rt(dead_tsk)->color_ctrl_page); + free_page((unsigned long) tsk_rt(dead_tsk)->color_ctrl_page); + } /* main cleanup only for RT tasks */ if (is_realtime(dead_tsk)) -- cgit v1.2.2 From 461e3fdc9cfcc51cfbd6ea9833ff6715b9cb235f Mon Sep 17 00:00:00 2001 From: Christopher Kenna Date: Sun, 8 Apr 2012 17:48:18 -0400 Subject: Allocate colored pages according to color ctrl page. --- litmus/color.c | 15 +++++++------- litmus/color_dev.c | 61 ++++++++++++++++++++++++++++-------------------------- 2 files changed, 39 insertions(+), 37 deletions(-) (limited to 'litmus') diff --git a/litmus/color.c b/litmus/color.c index a3cc193418c0..6f017e388c12 100644 --- a/litmus/color.c +++ b/litmus/color.c @@ -10,10 +10,6 @@ #include #include /* for in_list(...) */ -#define MPRINT(fmt, args...) 
\ - printk(KERN_INFO "[%s@%s:%d]: " fmt, \ - __FUNCTION__, __FILE__, __LINE__, ## args) - #define PAGES_PER_COLOR 2000 /* @@ -77,10 +73,14 @@ struct page* get_colored_page(unsigned long color) { struct color_group *cgroup; struct page *page = NULL; - BUG_ON(color >= nr_colors); + + if (color >= nr_colors) + goto out; + cgroup = &color_groups[color]; spin_lock(&cgroup->lock); if (unlikely(!atomic_read(&cgroup->nr_pages))) { + TRACE_CUR("No free %lu colored pages.\n", color); printk(KERN_WARNING "no free %lu colored pages.\n", color); goto out_unlock; } @@ -91,6 +91,7 @@ struct page* get_colored_page(unsigned long color) ClearPageLRU(page); out_unlock: spin_unlock(&cgroup->lock); +out: return page; } @@ -117,7 +118,7 @@ static int do_add_pages(void) page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_MOVABLE); if (unlikely(!page)) { - MPRINT("could not allocate pages\n"); + printk(KERN_WARNING "Could not allocate pages.\n"); ret = -ENOMEM; goto out; } @@ -146,8 +147,6 @@ static int do_reclaim_pages(void) ClearPageLRU(page); add_page_to_color_list(page); nr_reclaimed++; - TRACE_CUR("Reclaimed page (pfn:%lu phys:0x%lx).\n", - page_to_pfn(page), page_to_phys(page)); } } spin_unlock(&alloced_pages.lock); diff --git a/litmus/color_dev.c b/litmus/color_dev.c index b8218b6d1d9c..6e7ab3a28b87 100644 --- a/litmus/color_dev.c +++ b/litmus/color_dev.c @@ -11,11 +11,6 @@ #define ALLOC_NAME "litmus/color_alloc" #define CTRL_NAME "litmus/color_ctrl" -static struct non_rt_colors { - spinlock_t lock; - unsigned long color; -} non_rt_colors; - extern unsigned long nr_colors; /*********************************************************** @@ -145,25 +140,27 @@ out: * Allocation device ***********************************************************/ -static int map_colored_pages_non_rt(struct vm_area_struct *vma) +#define vma_nr_pages(vma) \ + ({unsigned long v = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); v;}) + +#define next_color(c) ({unsigned long _x = (c + 1) % nr_colors; _x;}) + +static int do_map_colored_pages(struct vm_area_struct *vma) { - unsigned long color, mapped; + const unsigned long nr_pages = vma_nr_pages(vma); + unsigned long mapped; + uint32_t *cur_color; int err; - const unsigned long nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; - spin_lock(&non_rt_colors.lock); - color = non_rt_colors.color; - non_rt_colors.color = (non_rt_colors.color + nr_pages) % nr_colors; - spin_unlock(&non_rt_colors.lock); + TRACE_CUR(ALLOC_NAME ": allocating %lu pages\n", nr_pages); - TRACE_CUR(ALLOC_NAME ": allocating %lu pages from color %lu.\n", - nr_pages, color); - - for (mapped = 0; mapped < nr_pages; - mapped++, color = (color + 1) % nr_colors) + for (mapped = 0, cur_color = tsk_rt(current)->color_ctrl_page->colors; + mapped < nr_pages; + mapped++, cur_color++) { + const unsigned long color = *cur_color; + const unsigned long addr = vma->vm_start + (mapped << PAGE_SHIFT); struct page *page = get_colored_page(color); - const unsigned long addr = vma->vm_start + PAGE_SIZE * mapped; if (!page) { TRACE_CUR(ALLOC_NAME ": Could not get page with " @@ -189,17 +186,25 @@ static int map_colored_pages_non_rt(struct vm_area_struct *vma) return err; } -static int map_colored_pages_rt(struct vm_area_struct *vma) -{ - /* TODO */ - return -EINVAL; -} - static int map_colored_pages(struct vm_area_struct *vma) { - if (likely(is_realtime(current))) - return map_colored_pages_rt(vma); - return map_colored_pages_non_rt(vma); + int err = 0; + + if (!tsk_rt(current)->color_ctrl_page) { + TRACE_CUR("Process has no color 
control page.\n"); + err = -EINVAL; + goto out; + } + + if (COLORS_PER_CONTROL_PAGE < vma_nr_pages(vma)) { + TRACE_CUR("Max page request %lu but want %lu.\n", + COLORS_PER_CONTROL_PAGE, vma_nr_pages(vma)); + err = -EINVAL; + goto out; + } + err = do_map_colored_pages(vma); +out: + return err; } static void litmus_color_alloc_vm_close(struct vm_area_struct *vma) @@ -297,8 +302,6 @@ static int __init init_dev(const char* name, struct miscdevice *dev) static int __init init_color_devices(void) { int err; - spin_lock_init(&non_rt_colors.lock); - non_rt_colors.color = 0; printk("Allocating LITMUS^RT color devices.\n"); err = init_dev(ALLOC_NAME, &litmus_color_alloc_dev); -- cgit v1.2.2 From 221741e9a87b20741ffccbd7d9b3e637ccc276ce Mon Sep 17 00:00:00 2001 From: Christopher Kenna Date: Tue, 10 Apr 2012 22:27:20 -0400 Subject: change color to 16 bits --- litmus/color.c | 2 +- litmus/color_dev.c | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) (limited to 'litmus') diff --git a/litmus/color.c b/litmus/color.c index 6f017e388c12..7b23703f6206 100644 --- a/litmus/color.c +++ b/litmus/color.c @@ -10,7 +10,7 @@ #include #include /* for in_list(...) */ -#define PAGES_PER_COLOR 2000 +#define PAGES_PER_COLOR 3072 /* * This is used only to "trick" lockdep into permitting dynamically allocated diff --git a/litmus/color_dev.c b/litmus/color_dev.c index 6e7ab3a28b87..414f4d7f1082 100644 --- a/litmus/color_dev.c +++ b/litmus/color_dev.c @@ -112,13 +112,16 @@ static int litmus_color_ctrl_mmap(struct file *filp, struct vm_area_struct *vma) /* you can only get one page */ if (vma->vm_end - vma->vm_start != PAGE_SIZE) { + TRACE_CUR(CTRL_NAME ": must allocate a multiple of PAGE_SIZE\n"); err = -EINVAL; goto out; } err = mmap_common_checks(vma); - if (err) + if (err) { + TRACE_CUR(CTRL_NAME ": failed common mmap checks.\n"); goto out; + } vma->vm_ops = &litmus_color_ctrl_vm_ops; /* this mapping should not be kept across forks, @@ -149,7 +152,7 @@ static int do_map_colored_pages(struct vm_area_struct *vma) { const unsigned long nr_pages = vma_nr_pages(vma); unsigned long mapped; - uint32_t *cur_color; + uint16_t *cur_color; int err; TRACE_CUR(ALLOC_NAME ": allocating %lu pages\n", nr_pages); -- cgit v1.2.2 From fc19a148b4b286dfe80bc0e0c2cfc193e1405059 Mon Sep 17 00:00:00 2001 From: Christopher Kenna Date: Thu, 26 Apr 2012 11:09:44 -0400 Subject: Use a typedef for the color number. 
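Note: the color_t typedef itself is declared in a header outside the litmus/ directory, so its definition does not appear in this diff. Given the previous patch's switch to 16-bit color numbers, it is presumably along the lines of:

	typedef uint16_t color_t;	/* hypothetical sketch; the actual header is not part of this series */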
---
 litmus/color_dev.c | 30 +++++++++++++-----------------
 1 file changed, 13 insertions(+), 17 deletions(-)

(limited to 'litmus')

diff --git a/litmus/color_dev.c b/litmus/color_dev.c
index 414f4d7f1082..e1404142c18a 100644
--- a/litmus/color_dev.c
+++ b/litmus/color_dev.c
@@ -146,37 +146,33 @@ out:
 #define vma_nr_pages(vma) \
 	({unsigned long v = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); v;})
 
-#define next_color(c) ({unsigned long _x = (c + 1) % nr_colors; _x;})
-
 static int do_map_colored_pages(struct vm_area_struct *vma)
 {
 	const unsigned long nr_pages = vma_nr_pages(vma);
-	unsigned long mapped;
-	uint16_t *cur_color;
+	unsigned long nr_mapped;
+	color_t *cur_color;
 	int err;
 
 	TRACE_CUR(ALLOC_NAME ": allocating %lu pages\n", nr_pages);
 
-	for (mapped = 0, cur_color = tsk_rt(current)->color_ctrl_page->colors;
-			mapped < nr_pages;
-			mapped++, cur_color++)
+	for (	nr_mapped = 0,
+		cur_color = tsk_rt(current)->color_ctrl_page->colors;
+		nr_mapped < nr_pages;
+		nr_mapped++, cur_color++)
 	{
-		const unsigned long color = *cur_color;
-		const unsigned long addr = vma->vm_start + (mapped << PAGE_SHIFT);
-		struct page *page = get_colored_page(color);
+		const unsigned long this_color = *cur_color;
+		const unsigned long addr = vma->vm_start + (nr_mapped << PAGE_SHIFT);
+		struct page *page = get_colored_page(this_color);
 
 		if (!page) {
 			TRACE_CUR(ALLOC_NAME ": Could not get page with "
-					" color %lu.\n", color);
+					" color %lu.\n", this_color);
 			/* TODO unmap mapped pages */
 			break;
 		}
-		TRACE_CUR(ALLOC_NAME ": insert page (pa:0x%llx, pfn:%lu, "
-				"color:%lu count:%d LRU:%d) at 0x%lx "
-				"(prot: 0x%lx)\n",
-				page_to_phys(page), page_to_pfn(page), color,
-				page_count(page), PageLRU(page), addr,
-				pgprot_val(vma->vm_page_prot));
+		TRACE_CUR(ALLOC_NAME ": insert page (pa:0x%10llx, pfn:%8lu, "
+				"color:%3lu) at 0x%lx\n", page_to_phys(page),
+				page_to_pfn(page), this_color, addr);
 		err = vm_insert_page(vma, addr, page);
 		if (err) {
 			TRACE_CUR(ALLOC_NAME ": vm_insert_page() failed "
--
cgit v1.2.2


From 2f50cc66b3000d1c927990c4cf7a973ee9269014 Mon Sep 17 00:00:00 2001
From: Christopher Kenna
Date: Tue, 1 May 2012 17:59:18 -0400
Subject: Add the beginnings of a SPARC port. Edit page-types to work on SPARC

---
 litmus/color.c        | 78 +++++++++++++++++++++++++++++++++++----------------
 litmus/color_dev.c    |  7 +++++
 litmus/sched_litmus.c |  4 +--
 3 files changed, 63 insertions(+), 26 deletions(-)

(limited to 'litmus')

diff --git a/litmus/color.c b/litmus/color.c
index 7b23703f6206..dad2f07b154a 100644
--- a/litmus/color.c
+++ b/litmus/color.c
@@ -6,6 +6,7 @@
 #include
 #include
 #include /* required by litmus.h */
+#include /* page_to_phys on SPARC */
 #include
 #include /* for in_list(...) */
 
@@ -37,7 +38,9 @@ static struct color_group *color_groups;
 
 /* non-static: extern'ed in various files */
 unsigned long nr_colors;
+#ifdef CONFIG_X86
 struct color_cache_info color_cache_info;
+#endif
 int color_sysctl_add_pages_data;
 int color_sysctl_reclaim_pages_data;
 
@@ -115,8 +118,14 @@ static int do_add_pages(void)
 	int ret = 0;
 
 	while (smallest_nr_pages() < PAGES_PER_COLOR) {
+#if defined(CONFIG_X86)
 		page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_MOVABLE);
+#elif defined(CONFIG_SPARC) /* X86 */
+		page = alloc_page(GFP_HIGHUSER | __GFP_MOVABLE);
+#else
+#error What architecture are you using?
+#endif if (unlikely(!page)) { printk(KERN_WARNING "Could not allocate pages.\n"); ret = -ENOMEM; @@ -147,7 +156,11 @@ static int do_reclaim_pages(void) ClearPageLRU(page); add_page_to_color_list(page); nr_reclaimed++; + TRACE_CUR("reclaiming page (pa:0x%10llx, pfn:%8lu, " + "color:%3lu)\n", page_to_phys(page), + page_to_pfn(page), page_color(page)); } + } spin_unlock(&alloced_pages.lock); TRACE_CUR("Reclaimed %lu pages.\n", nr_reclaimed); @@ -212,6 +225,7 @@ out: * Initialization ***********************************************************/ +#if defined(CONFIG_X86) /* slowest possible way to find a log, but we only do this once on boot */ static unsigned int __init slow_log(unsigned int v) { @@ -221,9 +235,24 @@ static unsigned int __init slow_log(unsigned int v) return r; } -static void __init init_mask(void) +static int __init init_mask(void) { - const unsigned int line_size_log = slow_log(color_cache_info.line_size); + unsigned int line_size_log = slow_log(color_cache_info.line_size); + int err = 0; + + BUG_ON(color_cache_info.size <= 1048576 || + color_cache_info.ways < 15 || + color_cache_info.line_size != 64); + + printk("Cache size: %lu line-size: %lu ways: %lu sets: %lu\n", + color_cache_info.size, color_cache_info.line_size, + color_cache_info.ways, color_cache_info.sets); + if (!color_cache_info.size) { + printk(KERN_WARNING "No cache information found.\n"); + err = -EINVAL; + goto out; + } + BUG_ON(color_cache_info.size / color_cache_info.line_size / color_cache_info.ways != color_cache_info.sets); @@ -231,21 +260,31 @@ static void __init init_mask(void) color_mask = ((color_cache_info.sets << line_size_log) - 1) ^ (PAGE_SIZE - 1); nr_colors = (color_mask >> PAGE_SHIFT) + 1; - printk("Color mask: 0x%lx Total colors: %lu\n", color_mask, - nr_colors); - BUG_ON(LOCKDEP_MAX_NR_COLORS < nr_colors); +out: + return err; +} +#elif defined(CONFIG_SPARC) /* X86 */ +static int __init init_mask(void) +{ + /* bits 17:13 */ + color_mask = 0x3e000UL; + nr_colors = (1 << hweight_long(color_mask)); + return 0; } +#endif /* SPARC/X86 */ + + static int __init init_color_groups(void) { struct color_group *cgroup; unsigned long i; - int ret = 0; + int err = 0; color_groups = kmalloc(nr_colors * sizeof(struct color_group), GFP_KERNEL); if (!color_groups) { printk(KERN_WARNING "Could not allocate color groups.\n"); - ret = -ENOMEM; + err = -ENOMEM; goto out; } @@ -258,33 +297,24 @@ static int __init init_color_groups(void) cgroup->_lock_name, "color%lu", i); } out: - return ret; + return err; } static int __init init_color(void) { - int ret = 0; + int err = 0; printk("Initializing LITMUS^RT cache coloring.\n"); - BUG_ON(color_cache_info.size <= 1048576 || - color_cache_info.ways < 15 || - color_cache_info.line_size != 64); INIT_LIST_HEAD(&alloced_pages.list); spin_lock_init(&alloced_pages.lock); - printk("Cache size: %lu line-size: %lu ways: %lu sets: %lu\n", - color_cache_info.size, color_cache_info.line_size, - color_cache_info.ways, color_cache_info.sets); - if (!color_cache_info.size) { - printk(KERN_WARNING "No cache information found.\n"); - ret = -EINVAL; - goto out; - } + err = init_mask(); + printk("PAGE_SIZE: %lu Color mask: 0x%lx Total colors: %lu\n", + PAGE_SIZE, color_mask, nr_colors); - init_mask(); - ret = init_color_groups(); -out: - return ret; + BUG_ON(LOCKDEP_MAX_NR_COLORS < nr_colors); + err = init_color_groups(); + return err; } module_init(init_color); diff --git a/litmus/color_dev.c b/litmus/color_dev.c index e1404142c18a..10ab1e6b1161 100644 --- a/litmus/color_dev.c +++ 
b/litmus/color_dev.c @@ -4,6 +4,8 @@ #include #include #include +#include +#include /* page_to_phys on SPARC */ #include #include @@ -168,8 +170,12 @@ static int do_map_colored_pages(struct vm_area_struct *vma) TRACE_CUR(ALLOC_NAME ": Could not get page with " " color %lu.\n", this_color); /* TODO unmap mapped pages */ + err = -ENOMEM; break; } +#ifdef CONFIG_SPARC + clear_user_highpage(page, addr); +#endif TRACE_CUR(ALLOC_NAME ": insert page (pa:0x%10llx, pfn:%8lu, " "color:%3lu) at 0x%lx\n", page_to_phys(page), page_to_pfn(page), this_color, addr); @@ -178,6 +184,7 @@ static int do_map_colored_pages(struct vm_area_struct *vma) TRACE_CUR(ALLOC_NAME ": vm_insert_page() failed " "(%d)\n", err); /* TODO unmap mapped pages */ + err = -EINVAL; break; } add_page_to_alloced_list(page); diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c index 5a15ce938984..6553948407de 100644 --- a/litmus/sched_litmus.c +++ b/litmus/sched_litmus.c @@ -102,9 +102,9 @@ litmus_schedule(struct rq *rq, struct task_struct *prev) } } #ifdef __ARCH_WANT_UNLOCKED_CTXSW - if (next->oncpu) + if (next->on_cpu) TRACE_TASK(next, "waiting for !oncpu"); - while (next->oncpu) { + while (next->on_cpu) { cpu_relax(); mb(); } -- cgit v1.2.2 From ba84bc13f622bf1e8dd0b35dae311117cdab1dc6 Mon Sep 17 00:00:00 2001 From: Christopher Kenna Date: Wed, 2 May 2012 11:29:01 -0400 Subject: debugging --- litmus/color.c | 9 ++++++++- litmus/color_dev.c | 6 ++++-- 2 files changed, 12 insertions(+), 3 deletions(-) (limited to 'litmus') diff --git a/litmus/color.c b/litmus/color.c index dad2f07b154a..ae6c3f6b8206 100644 --- a/litmus/color.c +++ b/litmus/color.c @@ -59,13 +59,17 @@ void add_page_to_color_list(struct page *page) list_add_tail(&page->lru, &cgroup->list); atomic_inc(&cgroup->nr_pages); SetPageLRU(page); + SetPageReserved(page); spin_unlock(&cgroup->lock); } void add_page_to_alloced_list(struct page *page) { BUG_ON(in_list(&page->lru) || PageLRU(page)); - BUG_ON(!page_mapped(page) || page_count(page) < 2); + //BUG_ON(!page_mapped(page) || page_count(page) < 2); + TRACE_CUR("pfn:%d page_mapped:%d page_count:%d\n", + page_to_pfn(page), page_mapped(page), + page_count(page)); spin_lock(&alloced_pages.lock); list_add_tail(&page->lru, &alloced_pages.list); SetPageLRU(page); @@ -151,6 +155,9 @@ static int do_reclaim_pages(void) unsigned long nr_reclaimed = 0; spin_lock(&alloced_pages.lock); list_for_each_entry_safe(page, page_tmp, &alloced_pages.list, lru) { + TRACE_CUR("pfn:%8lu page_mapped:%d page_count:%d\n", + page_to_pfn(page), page_mapped(page), + page_count(page)); if (1 == page_count(page) && !page_mapped(page)) { list_del(&page->lru); ClearPageLRU(page); diff --git a/litmus/color_dev.c b/litmus/color_dev.c index 10ab1e6b1161..c6e500722819 100644 --- a/litmus/color_dev.c +++ b/litmus/color_dev.c @@ -179,9 +179,11 @@ static int do_map_colored_pages(struct vm_area_struct *vma) TRACE_CUR(ALLOC_NAME ": insert page (pa:0x%10llx, pfn:%8lu, " "color:%3lu) at 0x%lx\n", page_to_phys(page), page_to_pfn(page), this_color, addr); - err = vm_insert_page(vma, addr, page); + //err = vm_insert_page(vma, addr, page); + err = remap_pfn_range(vma, addr, page_to_pfn(page), + PAGE_SIZE, PAGE_SHARED); if (err) { - TRACE_CUR(ALLOC_NAME ": vm_insert_page() failed " + TRACE_CUR(ALLOC_NAME ": remap_pfn_range() failed " "(%d)\n", err); /* TODO unmap mapped pages */ err = -EINVAL; -- cgit v1.2.2 From bf554059414a34dd17cd08a9c6bc6cfafa9ac717 Mon Sep 17 00:00:00 2001 From: Christopher Kenna Date: Wed, 2 May 2012 18:48:03 -0400 Subject: use remap_pfn_range 
and automatic reclaiming

---
 litmus/color.c      | 111 ++++++++++++++++++++++++++++------------------------
 litmus/color_dev.c  |  23 +++++++----
 litmus/color_proc.c |  10 -----
 3 files changed, 76 insertions(+), 68 deletions(-)

(limited to 'litmus')

diff --git a/litmus/color.c b/litmus/color.c
index ae6c3f6b8206..93d12a718543 100644
--- a/litmus/color.c
+++ b/litmus/color.c
@@ -27,11 +27,6 @@ struct color_group {
 	atomic_t nr_pages;
 };
 
-static struct alloced_pages {
-	spinlock_t lock;
-	struct list_head list;
-} alloced_pages;
-
 static unsigned long color_mask;
 static struct color_group *color_groups;
 
@@ -42,40 +37,31 @@ unsigned long nr_colors;
 struct color_cache_info color_cache_info;
 #endif
 int color_sysctl_add_pages_data;
-int color_sysctl_reclaim_pages_data;
 
 static inline unsigned long page_color(struct page *page)
 {
 	return ((page_to_phys(page) & color_mask) >> PAGE_SHIFT);
 }
 
+/*
+ * Page's count should be one, it should not be on any LRU list.
+ */
 void add_page_to_color_list(struct page *page)
 {
 	const unsigned long color = page_color(page);
 	struct color_group *cgroup = &color_groups[color];
 	BUG_ON(in_list(&page->lru) || PageLRU(page));
-	BUG_ON(page_mapped(page) || page_count(page) > 1);
+	BUG_ON(page_count(page) > 1);
 	spin_lock(&cgroup->lock);
 	list_add_tail(&page->lru, &cgroup->list);
 	atomic_inc(&cgroup->nr_pages);
 	SetPageLRU(page);
-	SetPageReserved(page);
 	spin_unlock(&cgroup->lock);
 }
 
-void add_page_to_alloced_list(struct page *page)
-{
-	BUG_ON(in_list(&page->lru) || PageLRU(page));
-	//BUG_ON(!page_mapped(page) || page_count(page) < 2);
-	TRACE_CUR("pfn:%d page_mapped:%d page_count:%d\n",
-			page_to_pfn(page), page_mapped(page),
-			page_count(page));
-	spin_lock(&alloced_pages.lock);
-	list_add_tail(&page->lru, &alloced_pages.list);
-	SetPageLRU(page);
-	spin_unlock(&alloced_pages.lock);
-}
-
+/*
+ * Increases the page's count to two.
+ */
 struct page* get_colored_page(unsigned long color)
 {
 	struct color_group *cgroup;
@@ -92,7 +78,8 @@ struct page* get_colored_page(unsigned long color)
 		goto out_unlock;
 	}
 	page = list_first_entry(&cgroup->list, struct page, lru);
-	BUG_ON(page_mapped(page) || page_count(page) > 1);
+	BUG_ON(page_count(page) > 1);
+	get_page(page);
 	list_del(&page->lru);
 	atomic_dec(&cgroup->nr_pages);
 	ClearPageLRU(page);
@@ -136,9 +123,10 @@ static int do_add_pages(void)
 			goto out;
 		}
 		color = page_color(page);
-		if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR)
+		if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) {
+			SetPageReserved(page);
 			add_page_to_color_list(page);
-		else
+		} else
 			list_add_tail(&page->lru, &free_later);
 	}
 	list_for_each_entry_safe(page, page_tmp, &free_later, lru) {
@@ -149,29 +137,63 @@ out:
 	return ret;
 }
 
-static int do_reclaim_pages(void)
+static struct alloced_pages {
+	spinlock_t lock;
+	struct list_head list;
+} alloced_pages;
+
+struct alloced_page {
+	struct page *page;
+	struct vm_area_struct *vma;
+	struct list_head list;
+};
+
+static struct alloced_page * new_alloced_page(struct page *page,
+		struct vm_area_struct *vma)
 {
-	struct page *page, *page_tmp;
+	struct alloced_page *ap = kmalloc(sizeof(*ap), GFP_KERNEL);
+	INIT_LIST_HEAD(&ap->list);
+	ap->page = page;
+	ap->vma = vma;
+	return ap;
+}
+
+/*
+ * Page's count should be two or more. It should not be on any LRU list.
+ */ +void add_page_to_alloced_list(struct page *page, struct vm_area_struct *vma) +{ + struct alloced_page *ap; + + BUG_ON(page_count(page) < 2); + ap = new_alloced_page(page, vma); + spin_lock(&alloced_pages.lock); + list_add_tail(&ap->list, &alloced_pages.list); + spin_unlock(&alloced_pages.lock); +} + +/* + * Reclaim pages. + */ +void reclaim_pages(struct vm_area_struct *vma) +{ + struct alloced_page *ap, *ap_tmp; unsigned long nr_reclaimed = 0; spin_lock(&alloced_pages.lock); - list_for_each_entry_safe(page, page_tmp, &alloced_pages.list, lru) { - TRACE_CUR("pfn:%8lu page_mapped:%d page_count:%d\n", - page_to_pfn(page), page_mapped(page), - page_count(page)); - if (1 == page_count(page) && !page_mapped(page)) { - list_del(&page->lru); - ClearPageLRU(page); - add_page_to_color_list(page); + list_for_each_entry_safe(ap, ap_tmp, &alloced_pages.list, list) { + if (vma == ap->vma) { + list_del(&ap->list); + put_page(ap->page); + add_page_to_color_list(ap->page); nr_reclaimed++; TRACE_CUR("reclaiming page (pa:0x%10llx, pfn:%8lu, " - "color:%3lu)\n", page_to_phys(page), - page_to_pfn(page), page_color(page)); + "color:%3lu)\n", page_to_phys(ap->page), + page_to_pfn(ap->page), page_color(ap->page)); + kfree(ap); } - } spin_unlock(&alloced_pages.lock); TRACE_CUR("Reclaimed %lu pages.\n", nr_reclaimed); - return 0; } /*********************************************************** @@ -215,19 +237,6 @@ out: return ret; } -int color_reclaim_pages_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - int ret = 0; - ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); - if (ret) - goto out; - if (write && color_sysctl_reclaim_pages_data) - ret = do_reclaim_pages(); -out: - return ret; -} - /*********************************************************** * Initialization ***********************************************************/ diff --git a/litmus/color_dev.c b/litmus/color_dev.c index c6e500722819..d681f57be01f 100644 --- a/litmus/color_dev.c +++ b/litmus/color_dev.c @@ -54,9 +54,12 @@ static int mmap_common_checks(struct vm_area_struct *vma) if (vma->vm_pgoff != 0) return -EINVAL; +#if 0 /* you can't share it with anyone */ + /* well, maybe you can... */ if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) return -EINVAL; +#endif return 0; } @@ -92,9 +95,9 @@ static int map_color_ctrl_page(struct vm_area_struct *vma) pfn = page_to_pfn(color_ctrl); TRACE_CUR(CTRL_NAME - ": mapping %p (pfn:%lx, %lx) to 0x%lx (prot:%lx)\n", + ": mapping %p (pfn:%lx, %lx) to 0x%lx (flags:%lx prot:%lx)\n", tsk_rt(t)->color_ctrl_page, pfn, page_to_pfn(color_ctrl), - vma->vm_start, vma->vm_page_prot); + vma->vm_start, vma->vm_flags, pgprot_val(vma->vm_page_prot)); /* Map it into the vma. Make sure to use PAGE_SHARED, otherwise * userspace actually gets a copy-on-write page. 
*/ @@ -155,7 +158,8 @@ static int do_map_colored_pages(struct vm_area_struct *vma) color_t *cur_color; int err; - TRACE_CUR(ALLOC_NAME ": allocating %lu pages\n", nr_pages); + TRACE_CUR(ALLOC_NAME ": allocating %lu pages (flags:%lx prot:%lx)\n", + nr_pages, vma->vm_flags, pgprot_val(vma->vm_page_prot)); for ( nr_mapped = 0, cur_color = tsk_rt(current)->color_ctrl_page->colors; @@ -177,19 +181,23 @@ static int do_map_colored_pages(struct vm_area_struct *vma) clear_user_highpage(page, addr); #endif TRACE_CUR(ALLOC_NAME ": insert page (pa:0x%10llx, pfn:%8lu, " - "color:%3lu) at 0x%lx\n", page_to_phys(page), - page_to_pfn(page), this_color, addr); + "color:%3lu) at 0x%lx (flags:%lx prot:%lx\n", + page_to_phys(page), + page_to_pfn(page), this_color, addr, + vma->vm_flags, pgprot_val(vma->vm_page_prot)); //err = vm_insert_page(vma, addr, page); err = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE, PAGE_SHARED); if (err) { TRACE_CUR(ALLOC_NAME ": remap_pfn_range() failed " - "(%d)\n", err); + "(%d) (flags:%lx prot:%lx)\n", err, + vma->vm_flags, + pgprot_val(vma->vm_page_prot)); /* TODO unmap mapped pages */ err = -EINVAL; break; } - add_page_to_alloced_list(page); + add_page_to_alloced_list(page, vma); } return err; } @@ -223,6 +231,7 @@ static void litmus_color_alloc_vm_close(struct vm_area_struct *vma) TRACE_CUR(ALLOC_NAME ": %p:%p vma:%p vma->vm_private_data:%p closed.\n", (void*) vma->vm_start, (void*) vma->vm_end, vma, vma->vm_private_data); + reclaim_pages(vma); } static int litmus_color_alloc_vm_fault(struct vm_area_struct *vma, diff --git a/litmus/color_proc.c b/litmus/color_proc.c index cac336ac1731..4cb6c9ac89bb 100644 --- a/litmus/color_proc.c +++ b/litmus/color_proc.c @@ -5,7 +5,6 @@ #include extern int color_sysctl_add_pages_data; /* litmus/color.c */ -extern int color_sysctl_reclaim_pages_data; /* litmus/color.c */ static int zero = 0; static int one = 1; @@ -30,15 +29,6 @@ static struct ctl_table color_table[] = .extra1 = &zero, .extra2 = &one, }, - { - .procname = "reclaim_pages", - .data = &color_sysctl_reclaim_pages_data, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = color_reclaim_pages_handler, - .extra1 = &zero, - .extra2 = &one, - }, { } }; -- cgit v1.2.2 From bb35f3fc684667598d7ae39fd2d49a16f77beb39 Mon Sep 17 00:00:00 2001 From: Jonathan Herman Date: Thu, 3 May 2012 16:50:32 -0400 Subject: Added color schedule --- litmus/Kconfig | 18 ++ litmus/Makefile | 14 +- litmus/budget.c | 29 +- litmus/color_proc.c | 143 +++++++++ litmus/dgl.c | 248 +++++++++++++++ litmus/fifo_common.c | 58 ++++ litmus/ftdev.c | 2 + litmus/locking.c | 8 +- litmus/rt_server.c | 34 +++ litmus/sched_color.c | 811 ++++++++++++++++++++++++++++++++++++++++++++++++++ litmus/sched_litmus.c | 4 +- litmus/sched_plugin.c | 6 + litmus/sync.c | 3 + 13 files changed, 1357 insertions(+), 21 deletions(-) create mode 100644 litmus/dgl.c create mode 100644 litmus/fifo_common.c create mode 100644 litmus/rt_server.c create mode 100644 litmus/sched_color.c (limited to 'litmus') diff --git a/litmus/Kconfig b/litmus/Kconfig index 94b48e199577..68459d4dca41 100644 --- a/litmus/Kconfig +++ b/litmus/Kconfig @@ -138,6 +138,24 @@ config SCHED_TASK_TRACE_SHIFT 10 => 1k events 8 => 512 events +config SCHED_LITMUS_TRACEPOINT + bool "Enable Event/Tracepoint Tracing for real-time task tracing" + depends on TRACEPOINTS + default n + help + Enable kernel-style events (tracepoint) for Litmus. Litmus events + trace the same functions as the above sched_trace_XXX(), but can + be enabled independently. 
+ Litmus tracepoints can be recorded and analyzed together (single + time reference) with all other kernel tracing events (e.g., + sched:sched_switch, etc.). + + This also enables a quick way to visualize schedule traces using + trace-cmd utility and kernelshark visualizer. + + Say Yes for debugging and visualization purposes. + Say No for overhead tracing. + config SCHED_OVERHEAD_TRACE bool "Record timestamps for overhead measurements" depends on FEATHER_TRACE diff --git a/litmus/Makefile b/litmus/Makefile index 2d77d11e905e..d24e9855a7f9 100644 --- a/litmus/Makefile +++ b/litmus/Makefile @@ -16,16 +16,22 @@ obj-y = sched_plugin.o litmus.o \ srp.o \ bheap.o \ ctrldev.o \ - sched_gsn_edf.o \ - sched_psn_edf.o \ color.o \ color_proc.o \ - color_dev.o + color_dev.o \ + rt_server.o \ + dgl.o \ + fifo_common.o \ + sched_color.o + + # sched_psn_edf.o \ + # sched_gsn_edf.o \ + + obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o obj-$(CONFIG_SCHED_CPU_AFFINITY) += affinity.o - obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o diff --git a/litmus/budget.c b/litmus/budget.c index 310e9a3d4172..84f3f22770b1 100644 --- a/litmus/budget.c +++ b/litmus/budget.c @@ -4,13 +4,8 @@ #include #include - -struct enforcement_timer { - /* The enforcement timer is used to accurately police - * slice budgets. */ - struct hrtimer timer; - int armed; -}; +#include +#include DEFINE_PER_CPU(struct enforcement_timer, budget_timer); @@ -32,7 +27,7 @@ static enum hrtimer_restart on_enforcement_timeout(struct hrtimer *timer) } /* assumes called with IRQs off */ -static void cancel_enforcement_timer(struct enforcement_timer* et) +void cancel_enforcement_timer(struct enforcement_timer* et) { int ret; @@ -54,11 +49,10 @@ static void cancel_enforcement_timer(struct enforcement_timer* et) } /* assumes called with IRQs off */ -static void arm_enforcement_timer(struct enforcement_timer* et, - struct task_struct* t) +void arm_enforcement_timer(struct enforcement_timer* et, + struct task_struct* t) { lt_t when_to_fire; - TRACE_TASK(t, "arming enforcement timer.\n"); /* Calling this when there is no budget left for the task * makes no sense, unless the task is non-preemptive. 
*/ @@ -67,8 +61,11 @@ static void arm_enforcement_timer(struct enforcement_timer* et, /* __hrtimer_start_range_ns() cancels the timer * anyway, so we don't have to check whether it is still armed */ - if (likely(!is_np(t))) { + if (likely(!is_user_np(t))) { when_to_fire = litmus_clock() + budget_remaining(t); + TRACE_TASK(t, "arming enforcement timer for %llu.\n", + when_to_fire); + __hrtimer_start_range_ns(&et->timer, ns_to_ktime(when_to_fire), 0 /* delta */, @@ -94,6 +91,11 @@ void update_enforcement_timer(struct task_struct* t) } } +void init_enforcement_timer(struct enforcement_timer *et) +{ + hrtimer_init(&et->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + et->timer.function = on_enforcement_timeout; +} static int __init init_budget_enforcement(void) { @@ -102,8 +104,7 @@ static int __init init_budget_enforcement(void) for (cpu = 0; cpu < NR_CPUS; cpu++) { et = &per_cpu(budget_timer, cpu); - hrtimer_init(&et->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); - et->timer.function = on_enforcement_timeout; + init_enforcement_timer(et); } return 0; } diff --git a/litmus/color_proc.c b/litmus/color_proc.c index 4cb6c9ac89bb..0ac533f96d3e 100644 --- a/litmus/color_proc.c +++ b/litmus/color_proc.c @@ -2,16 +2,29 @@ #include #include +#include #include +#define SPERIOD_LEN 7 +#define SPERIOD_FILE "period" +#define SWCET_LEN 5 +#define SWCET_FILE "wcet" + extern int color_sysctl_add_pages_data; /* litmus/color.c */ static int zero = 0; static int one = 1; +static unsigned long *server_wcet; +static unsigned long *server_period; + #define NR_PAGES_INDEX 0 /* location of nr_pages in the table below */ static struct ctl_table color_table[] = { + { + .procname = "servers", + .mode = 0555, + }, { /* you MUST update NR_PAGES_INDEX if you move this entry */ .procname = "nr_pages", @@ -41,6 +54,7 @@ static struct ctl_table litmus_table[] = }, { } }; + static struct ctl_table litmus_dir_table[] = { { .procname = "litmus", @@ -50,6 +64,26 @@ static struct ctl_table litmus_dir_table[] = { { } }; +int color_server_params(int cpu, unsigned long *wcet, unsigned long *period) +{ + if (cpu >= num_online_cpus()) { + printk(KERN_WARNING "Cannot access illegal CPU: %d\n", cpu); + return -EFAULT; + } + + if (server_wcet[cpu] == 0 || server_period[cpu] == 0) { + printk(KERN_WARNING "Server %d is uninitialized!\n", cpu); + return -EPERM; + } + + *wcet = server_wcet[cpu]; + *period = server_period[cpu]; + + TRACE("For %d: %lu, %lu\n", cpu, server_wcet[cpu], server_period[cpu]); + + return 0; +} + extern unsigned long nr_colors; /* litmus/color.c */ /* must be called AFTER nr_colors is set */ @@ -67,11 +101,101 @@ out: return ret; } +static void __init init_server_entry(struct ctl_table *entry, + unsigned long *parameter, + char *name) +{ + entry->procname = name; + entry->mode = 0666; + entry->proc_handler = proc_doulongvec_minmax; + entry->data = parameter; + entry->maxlen = sizeof(unsigned long); +} + +static int __init init_cpu_entry(struct ctl_table *cpu_table, int cpu) +{ + char *name; + size_t size; + struct ctl_table *server_table, *entry; + + server_wcet[cpu] = 0; + server_period[cpu] = 0; + + printk(KERN_INFO "Creating cpu %d\n", cpu); + + size = sizeof(ctl_table) * 3; + server_table = kmalloc(size, GFP_ATOMIC); + if (!server_table) { + printk(KERN_WARNING "Could not allocate " + "color server proc for CPU %d.\n", cpu); + return -ENOMEM; + } + memset(server_table, 0, size); + + /* Server WCET */ + name = kmalloc(SWCET_LEN, GFP_ATOMIC); + if (!name) { + return -ENOMEM; + } + strcpy(name, SWCET_FILE); + entry = 
&server_table[0]; + init_server_entry(entry, &server_wcet[cpu], name); + + + /* Server period */ + name = kmalloc(SPERIOD_LEN, GFP_ATOMIC); + if (!name) { + return -ENOMEM; + } + strcpy(name, SPERIOD_FILE); + entry = &server_table[1]; + init_server_entry(entry, &server_period[cpu], name); + + name = kmalloc(3, GFP_ATOMIC); + if (!name) { + return -ENOMEM; + } + snprintf(name, 2, "%d", cpu); + cpu_table->procname = name; + cpu_table->mode = 0555; + cpu_table->child = server_table; + + return 0; +} + +static int __init init_server_entries(struct ctl_table *cpu_tables) +{ + size_t size; + int ret, cpu; + struct ctl_table *cpu_table; + + size = sizeof(unsigned long) * num_online_cpus(); + server_wcet = kmalloc(size, GFP_ATOMIC); + server_period = kmalloc(size, GFP_ATOMIC); + if (!server_wcet || !server_period) { + printk(KERN_WARNING "Could not allocate server parameters.\n"); + return -ENOMEM; + } + + for_each_online_cpu(cpu) { + cpu_table = &cpu_tables[cpu]; + ret = init_cpu_entry(cpu_table, cpu); + if (ret) { + return ret; + } + } + return 0; +} + + static struct ctl_table_header *litmus_sysctls; static int __init litmus_sysctl_init(void) { int ret = 0; + size_t size; + struct ctl_table *cpu_tables; + printk(KERN_INFO "Registering LITMUS^RT proc sysctl.\n"); litmus_sysctls = register_sysctl_table(litmus_dir_table); if (!litmus_sysctls) { @@ -80,6 +204,25 @@ static int __init litmus_sysctl_init(void) goto out; } ret = init_sysctl_nr_colors(); + if (ret) + goto out; + + + size = sizeof(ctl_table) * (num_online_cpus() + 2); + cpu_tables = kmalloc(size, GFP_ATOMIC); + if (!cpu_tables) { + printk(KERN_WARNING "Could not allocate color CPU proc.\n"); + ret = -ENOMEM; + goto out; + } + memset(cpu_tables, 0, size); + + ret = init_server_entries(cpu_tables); + if (ret) + goto out; + + color_table[0].child = cpu_tables; + out: return ret; } diff --git a/litmus/dgl.c b/litmus/dgl.c new file mode 100644 index 000000000000..e09d57cc2672 --- /dev/null +++ b/litmus/dgl.c @@ -0,0 +1,248 @@ +#include +#include +#include +#include + +/* Word, bit -> resource id */ +#define ri(w, b) (w * MASK_SIZE + b) + + /* For loop, where @i iterates over each set bit in @bit_arr */ +#define for_each_resource(bit_arr, w, b, i) \ + for(w = 0; w < MASK_WORDS; ++w) \ + for(b = find_first_bit(&bit_arr[w],MASK_SIZE), i = ri(w, b); \ + b < MASK_SIZE; \ + b = find_next_bit(&bit_arr[w],MASK_SIZE,b+1), i = ri(w, b)) + +/* Return resource id in dgl @d for resource @r */ +#define resource_id(d, r) ((((void*)r) - (void*)(&(d)->resources))/ sizeof(*r)) + +/* Return request group of req @r for resource @i */ +#define req_group(r, i) (container_of(((void*)r) - sizeof(*r)*(i), \ + struct dgl_group_req, requests)) + +/* Resource id -> word, bit */ +static inline void mask_idx(int resource, int *word, int *bit) +{ + *word = resource / MASK_SIZE; + *bit = resource % MASK_SIZE; +} + + +static void print_waiting(struct dgl *dgl, struct dgl_resource *resource) +{ + struct dgl_req *pos; + struct dgl_group_req *greq; + int rid = resource_id(dgl, resource); + unsigned long long last = 0; + + TRACE("List for rid %d\n", resource_id(dgl, resource)); + list_for_each_entry(pos, &resource->waiting, list) { + greq = req_group(pos, rid); + TRACE(" 0x%p with timestamp %llu\n", greq, greq->ts); + BUG_ON(greq->ts < last); + last = greq->ts; + } +} + +void dgl_init(struct dgl *dgl) +{ + int i; + struct dgl_resource *resource; + + for (i = 0; i < NR_CPUS; ++i) + dgl->acquired[i] = NULL; + + for (i = 0; i < NUM_RESOURCES; ++i) { + resource = &dgl->resources[i]; 
+ + INIT_LIST_HEAD(&resource->waiting); + resource->free_replicas = NUM_REPLICAS; + } + + dgl->requests = 0; + dgl->running = 0; + dgl->ts = 0; +} + +void dgl_group_req_init(struct dgl_group_req *greq) +{ + int i; + greq->cpu = NO_CPU; + for (i = 0; i < MASK_WORDS; ++i) { + greq->requested[i] = 0; + greq->waiting[i] = 0; + } +} + +/** + * set_req - create request for @replicas of @resource. + */ +void set_req(struct dgl_group_req *greq, int resource, int replicas) +{ + int word, bit; + struct dgl_req *req; + + BUG_ON(replicas > NUM_REPLICAS); + + mask_idx(resource, &word, &bit); + __set_bit(bit, &greq->requested[word]); + + req = &greq->requests[resource]; + INIT_LIST_HEAD(&req->list); + req->replicas = replicas; +} + +/* + * Attempt to fulfill request @req for @resource. + * Return 1 if successful. If the matching group request has acquired all of + * its needed resources, this will then set that req as dgl->acquired[cpu]. + */ +static unsigned long try_acquire(struct dgl *dgl, struct dgl_resource *resource, + struct dgl_req *req) +{ + int word, bit, rid, head, empty, room; + unsigned long waiting; + struct dgl_group_req *greq; + + rid = resource_id(dgl, resource); + greq = req_group(req, rid); + + head = resource->waiting.next == &req->list; + empty = list_empty(&resource->waiting); + room = resource->free_replicas >= req->replicas; + + if (! (room && (head || empty)) ) { + TRACE("0x%p cannot acquire %d replicas, %d free\n", + greq, req->replicas, resource->free_replicas, + room, head, empty); + return 0; + } + + resource->free_replicas -= req->replicas; + BUG_ON(resource->free_replicas > NUM_REPLICAS); + + TRACE("0x%p acquired %d replicas of rid %d\n", + greq, req->replicas, rid); + + mask_idx(rid, &word, &bit); + clear_bit(bit, &greq->waiting[word]); + + waiting = 0; + for (word = 0; word < MASK_WORDS; word++) { + waiting |= greq->waiting[word]; + if (waiting) + break; + } + + if (!waiting) { + TRACE("0x%p acquired all resources\n", greq); + BUG_ON(dgl->acquired[greq->cpu]); + dgl->acquired[greq->cpu] = greq; + litmus_reschedule(greq->cpu); + dgl->running++; + } + + return 1; +} + +/** + * add_group_req - initiate group request. + */ +void add_group_req(struct dgl *dgl, struct dgl_group_req *greq, int cpu) +{ + int b, w, i, succ, all_succ = 1; + struct dgl_req *req; + struct dgl_resource *resource; + + greq->cpu = cpu; + greq->ts = dgl->ts++; + + TRACE("0x%p group request added for CPU %d\n", greq, cpu); + BUG_ON(dgl->acquired[cpu] == greq); + + ++dgl->requests; + + for_each_resource(greq->requested, w, b, i) { + __set_bit(b, &greq->waiting[w]); + } + + for_each_resource(greq->requested, w, b, i) { + req = &greq->requests[i]; + resource = &dgl->resources[i]; + + succ = try_acquire(dgl, resource, req); + all_succ &= succ; + + if (!succ) { + TRACE("0x%p waiting on rid %d\n", greq, i); + list_add_tail(&req->list, &resource->waiting); + } + } + + /* Grant empty requests */ + if (all_succ && !dgl->acquired[cpu]) { + TRACE("0x%p empty group request acquired cpu %d\n", greq, cpu); + dgl->acquired[cpu] = greq; + ++dgl->running; + } + + BUG_ON(dgl->requests && !dgl->running); +} + +/** + * remove_group_req - abandon group request. + * + * This will also progress the waiting queues of resources acquired by @greq. 
+ */ +void remove_group_req(struct dgl *dgl, struct dgl_group_req *greq) +{ + int b, w, i; + struct dgl_req *req, *next; + struct dgl_resource *resource; + + TRACE("0x%p removing group request for CPU %d\n", greq, greq->cpu); + + --dgl->requests; + + if (dgl->acquired[greq->cpu] == greq) { + TRACE("0x%p no longer acquired on CPU %d\n", greq, greq->cpu); + dgl->acquired[greq->cpu] = NULL; + --dgl->running; + } + + for_each_resource(greq->requested, w, b, i) { + req = &greq->requests[i]; + resource = &dgl->resources[i]; + + if (!list_empty(&req->list)) { + /* Waiting on resource */ + clear_bit(b, &greq->waiting[w]); + list_del_init(&req->list); + TRACE("Quitting 0x%p from rid %d\n", + req, i); + } else { + /* Have resource */ + resource->free_replicas += req->replicas; + BUG_ON(resource->free_replicas > NUM_REPLICAS); + TRACE("0x%p releasing %d of %d replicas, rid %d\n", + greq, req->replicas, resource->free_replicas, i); + + if (!list_empty(&resource->waiting)) { + /* Give it to the next guy */ + next = list_first_entry(&resource->waiting, + struct dgl_req, + list); + + BUG_ON(req_group(next, i)->ts < greq->ts); + + if (try_acquire(dgl, resource, next)) { + list_del_init(&next->list); + print_waiting(dgl, resource); + + } + } + } + } + + BUG_ON(dgl->requests && !dgl->running); +} diff --git a/litmus/fifo_common.c b/litmus/fifo_common.c new file mode 100644 index 000000000000..84ae98e42ae4 --- /dev/null +++ b/litmus/fifo_common.c @@ -0,0 +1,58 @@ +/* + * kernel/edf_common.c + * + * Common functions for EDF based scheduler. + */ + +#include +#include +#include + +#include +#include +#include + +#include + +int fifo_higher_prio(struct task_struct* first, + struct task_struct* second) +{ + /* There is no point in comparing a task to itself. */ + if (first && first == second) { + TRACE_TASK(first, + "WARNING: pointless fifo priority comparison.\n"); + BUG_ON(1); + return 0; + } + + if (!first || !second) + return first && !second; + + /* Tiebreak by PID */ + return (get_release(first) == get_release(second) && + first->pid > second->pid) || + (get_release(first) < get_release(second)); + + +} + +int fifo_ready_order(struct bheap_node* a, struct bheap_node* b) +{ + return fifo_higher_prio(bheap2task(a), bheap2task(b)); +} + +void fifo_domain_init(rt_domain_t* rt, check_resched_needed_t resched, + release_jobs_t release) +{ + rt_domain_init(rt, fifo_ready_order, resched, release); +} + +int fifo_preemption_needed(rt_domain_t* rt, struct task_struct *t) +{ + if (!__jobs_pending(rt)) + return 0; + if (!t) + return 1; + + return !is_realtime(t) || fifo_higher_prio(__next_ready(rt), t); +} diff --git a/litmus/ftdev.c b/litmus/ftdev.c index 06fcf4cf77dc..7ff7f25b56aa 100644 --- a/litmus/ftdev.c +++ b/litmus/ftdev.c @@ -231,7 +231,9 @@ static ssize_t ftdev_read(struct file *filp, * lost if the task is interrupted (e.g., killed). 
*/ set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(50); + if (signal_pending(current)) { if (err == 0) /* nothing read yet, signal problem */ diff --git a/litmus/locking.c b/litmus/locking.c index 0c1aa6aa40b7..4881ca119acf 100644 --- a/litmus/locking.c +++ b/litmus/locking.c @@ -1,3 +1,5 @@ +#include +#include #include #ifdef CONFIG_LITMUS_LOCKING @@ -28,14 +30,18 @@ static inline struct litmus_lock* get_lock(struct od_table_entry* entry) return (struct litmus_lock*) entry->obj->obj; } +atomic_t lock_id = ATOMIC_INIT(0); + static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg) { struct litmus_lock* lock; int err; err = litmus->allocate_lock(&lock, type, arg); - if (err == 0) + if (err == 0) { + lock->id = atomic_add_return(1, &lock_id); *obj_ref = lock; + } return err; } diff --git a/litmus/rt_server.c b/litmus/rt_server.c new file mode 100644 index 000000000000..818588a3d317 --- /dev/null +++ b/litmus/rt_server.c @@ -0,0 +1,34 @@ +#include + + +static struct task_struct* default_server_take(struct rt_server *srv) +{ + return __take_ready(srv->domain); +} + +static void default_server_update(struct rt_server *srv) +{ +} + +void init_rt_server(struct rt_server *server, + int sid, int cpu, rt_domain_t *domain, + need_preempt_t need_preempt, + server_requeue_t requeue, + server_update_t update, + server_take_t take) +{ + if (!need_preempt || !requeue) + BUG_ON(1); + + server->need_preempt = need_preempt; + server->requeue = requeue; + + server->update = (update) ? update : default_server_update; + server->take = (take) ? take : default_server_take; + + server->sid = sid; + server->cpu = cpu; + server->linked = NULL; + server->domain = domain; + server->running = 0; +} diff --git a/litmus/sched_color.c b/litmus/sched_color.c new file mode 100644 index 000000000000..98a46bb1b06f --- /dev/null +++ b/litmus/sched_color.c @@ -0,0 +1,811 @@ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * @rt_server Common server functionality. + * @task Task used to schedule server. + * @timer Budget enforcement for @task + * @start_time If set, time at which server began running. + */ +struct fifo_server { + struct rt_server server; + struct task_struct* task; + struct enforcement_timer timer; + lt_t start_time; +}; + +/** + * @server Common server functionality. + * @edf_domain PEDF domain. + * @scheduled Task physically running on CPU. + * @fifo_server Server partitioned to this CPU. + */ +struct cpu_entry { + struct rt_server server; + rt_domain_t edf_domain; + struct task_struct* scheduled; + struct fifo_server fifo_server; +}; + +DEFINE_PER_CPU(struct cpu_entry, color_cpus); + +static rt_domain_t fifo_domain; +static raw_spinlock_t fifo_lock; + +static struct dgl group_lock; +static raw_spinlock_t dgl_lock; + +#define local_entry (&__get_cpu_var(color_cpus)) +#define remote_entry(cpu) (&per_cpu(color_cpus, cpu)) +#define task_entry(task) remote_entry(get_partition(task)) +#define task_fserver(task) (&task_entry(task)->fifo_server.server) +#define entry_lock(entry) (&entry->edf_domain.ready_lock) + +#define has_resources(t, c) (tsk_rt(t)->req == group_lock.acquired[c]) +#define task_dom(entry, task) (is_be(task) ? &fifo_domain : &entry->edf_domain) +#define task_lock(entry, task) (is_be(task) ? &fifo_lock : entry_lock(entry)) + +/* + * Requeue onto domain's release or ready queue based on task state. 
+ */ +static void requeue(rt_domain_t *dom, struct task_struct* t) +{ + if (is_server(t) && !tsk_rt(t)->present) + /* Remove stopped server from the system */ + return; + + TRACE_TASK(t, "Requeueing\n"); + if (is_queued(t)) { + TRACE_TASK(t, "Already queued!\n"); + return; + } + + set_rt_flags(t, RT_F_RUNNING); + if (is_released(t, litmus_clock())) + __add_ready(dom, t); + else + add_release(dom, t); +} + +/* + * Relinquish resources held by @t (or its children). + */ +static void release_resources(struct task_struct *t) +{ + struct task_struct *sched; + + TRACE_TASK(t, "Releasing resources\n"); + + if (is_server(t)) { + sched = task_fserver(t)->linked; + if (sched) + release_resources(sched); + } else if (is_kernel_np(t)) + remove_group_req(&group_lock, tsk_rt(t)->req); + tsk_rt(t)->kernel_np = 0; +} + +/* + * Put in requests for resources needed by @t. If @t is a server, this will + * set @t's np flag to reflect resources held by @t's children. + */ +static void acquire_resources(struct task_struct *t) +{ + int cpu; + struct rt_server *server; + struct task_struct *sched; + + /* Can't acquire resources if t is not running */ + BUG_ON(!get_task_server(t)); + + if (is_kernel_np(t)) { + TRACE_TASK(t, "Already contending for resources\n"); + return; + } + cpu = get_task_server(t)->cpu; + + if (is_server(t)) { + server = task_fserver(t); + sched = server->linked; + + /* Happens when server is booted off on completion or + * has just completed executing a task. + */ + if (sched && !is_kernel_np(sched)) + acquire_resources(sched); + + /* Become np if there is a running task */ + if (sched && has_resources(sched, cpu)) { + TRACE_TASK(t, "Running task with resource\n"); + tsk_rt(t)->kernel_np = 1; + } else { + TRACE_TASK(t, "Running no resources\n"); + tsk_rt(t)->kernel_np = 0; + } + } else { + TRACE_TASK(t, "Acquiring resources\n"); + if (!has_resources(t, cpu)) + add_group_req(&group_lock, tsk_rt(t)->req, cpu); + tsk_rt(t)->kernel_np = 1; + } +} + +/* + * Stop logically running the currently linked task. + */ +static void unlink(struct rt_server *server) +{ + BUG_ON(!server->linked); + + if (is_server(server->linked)) + task_fserver(server->linked)->running = 0; + + + sched_trace_server_switch_away(server->sid, 0, + server->linked->pid, + get_rt_job(server->linked)); + TRACE_TASK(server->linked, "No longer run by server %d\n", server->sid); + + raw_spin_lock(&dgl_lock); + release_resources(server->linked); + raw_spin_unlock(&dgl_lock); + + get_task_server(server->linked) = NULL; + server->linked = NULL; +} + +static struct task_struct* schedule_server(struct rt_server *server); + +/* + * Logically run @task. + */ +static void link(struct rt_server *server, struct task_struct *task) +{ + struct rt_server *tserv; + + BUG_ON(server->linked); + BUG_ON(!server->running); + BUG_ON(is_kernel_np(task)); + + TRACE_TASK(task, "Run by server %d\n", server->sid); + + if (is_server(task)) { + tserv = task_fserver(task); + tserv->running = 1; + schedule_server(tserv); + } + + server->linked = task; + get_task_server(task) = server; + + sched_trace_server_switch_to(server->sid, 0, + task->pid, get_rt_job(task)); +} + +/* + * Complete job for task linked to @server. 
+ */ +static void job_completion(struct rt_server *server) +{ + struct task_struct *t = server->linked; + + TRACE_TASK(t, "Job completed\n"); + if (is_server(t)) + sched_trace_server_completion(t->pid, get_rt_job(t)); + else + sched_trace_task_completion(t, 0); + + unlink(server); + set_rt_flags(t, RT_F_SLEEP); + prepare_for_next_period(t); + + if (is_server(t)) + sched_trace_server_release(t->pid, get_rt_job(t), + get_release(t), get_deadline(t)); + else + sched_trace_task_release(t); + + if (is_running(t)) + server->requeue(server, t); +} + +/* + * Update @server state to reflect task's state. + */ +static void update_task(struct rt_server *server) +{ + int oot, sleep, block, np; + struct task_struct *t = server->linked; + + block = !is_running(t); + oot = budget_enforced(t) && budget_exhausted(t); + np = is_kernel_np(t); + sleep = get_rt_flags(t) == RT_F_SLEEP; + + TRACE_TASK(t, "Updating task, block: %d, oot: %d, np: %d, sleep: %d\n", + block, oot, np, sleep); + + if (block) + unlink(server); + else if (oot || sleep) + job_completion(server); +} + +/* + * Link next task for @server. + */ +static struct task_struct* schedule_server(struct rt_server *server) +{ + struct task_struct *next; + struct rt_server *lserver; + + TRACE("Scheduling server %d\n", server->sid); + + if (server->linked) { + if (is_server(server->linked)) { + lserver = task_fserver(server->linked); + lserver->update(lserver); + } + update_task(server); + } + + next = server->linked; + if ((!next || !is_np(next)) && + server->need_preempt(server->domain, next)) { + if (next) { + TRACE_TASK(next, "Preempted\n"); + unlink(server); + server->requeue(server, next); + } + next = __take_ready(server->domain); + link(server, next); + } + + return next; +} + +/* + * Dumb requeue for PEDF (CPU) servers. + */ +static void edf_requeue(struct rt_server *server, struct task_struct *t) +{ + BUG_ON(is_be(t)); + requeue(server->domain, t); +} + +/* + * Locking requeue for FIFO servers. + */ +static void fifo_requeue(struct rt_server *server, struct task_struct *t) +{ + BUG_ON(!is_be(t)); + raw_spin_lock(&fifo_lock); + requeue(server->domain, t); + raw_spin_unlock(&fifo_lock); +} + + +/* + * Locking take for FIFO servers. + */ +static struct task_struct* fifo_take(struct rt_server *server) +{ + struct task_struct *ret; + + raw_spin_lock(&fifo_lock); + ret = __take_ready(server->domain); + raw_spin_unlock(&fifo_lock); + + return ret; +} + +/* + * Update server state, including picking next running task and incrementing + * server execution time. + */ +static void fifo_update(struct rt_server *server) +{ + lt_t delta; + struct fifo_server *fserver; + + fserver = container_of(server, struct fifo_server, server); + TRACE_TASK(fserver->task, "Updating FIFO server\n"); + + if (!server->linked || has_resources(server->linked, server->cpu)) { + /* Running here means linked to a parent server */ + BUG_ON(!server->running); + + /* Stop executing */ + if (fserver->start_time) { + delta = litmus_clock() - fserver->start_time; + tsk_rt(fserver->task)->job_params.exec_time += delta; + fserver->start_time = 0; + cancel_enforcement_timer(&fserver->timer); + } else { + /* Server is linked, but not executing */ + BUG_ON(fserver->timer.armed); + } + + /* Calculate next task */ + schedule_server(&fserver->server); + + /* Reserve needed resources */ + raw_spin_lock(&dgl_lock); + acquire_resources(fserver->task); + raw_spin_unlock(&dgl_lock); + } +} + +/* + * Triggers preemption on edf-scheduled "linked" field only. 
+ */ +static void color_edf_release(rt_domain_t *edf, struct bheap *tasks) +{ + unsigned long flags; + struct cpu_entry *entry; + + TRACE_TASK(bheap2task(bheap_peek(edf->order, tasks)), + "Released set of EDF tasks\n"); + + entry = container_of(edf, struct cpu_entry, edf_domain); + raw_spin_lock_irqsave(entry_lock(entry), flags); + + __merge_ready(edf, tasks); + + if (edf_preemption_needed(edf, entry->server.linked) && + (!entry->server.linked || !is_kernel_np(entry->server.linked))) { + litmus_reschedule(entry->server.cpu); + } + + raw_spin_unlock_irqrestore(entry_lock(entry), flags); +} + +/* + * Triggers preemption on first FIFO server which is running NULL. + */ +static void check_for_fifo_preempt(void) +{ + int ret = 0, cpu; + struct cpu_entry *entry; + struct rt_server *cpu_server, *fifo_server; + + TRACE("Checking for FIFO preempt\n"); + + for_each_online_cpu(cpu) { + entry = remote_entry(cpu); + cpu_server = &entry->server; + fifo_server = &entry->fifo_server.server; + + raw_spin_lock(entry_lock(entry)); + raw_spin_lock(&fifo_lock); + + if (cpu_server->linked && is_server(cpu_server->linked) && + !fifo_server->linked) { + litmus_reschedule(cpu); + ret = 1; + } + + raw_spin_unlock(&fifo_lock); + raw_spin_unlock(entry_lock(entry)); + + if (ret) + break; + } +} + +static void color_fifo_release(rt_domain_t *dom, struct bheap *tasks) +{ + unsigned long flags; + + TRACE_TASK(bheap2task(bheap_peek(dom->order, tasks)), + "Released set of FIFO tasks\n"); + local_irq_save(flags); + + raw_spin_lock(&fifo_lock); + __merge_ready(dom, tasks); + raw_spin_unlock(&fifo_lock); + + check_for_fifo_preempt(); + + local_irq_restore(flags); +} + +#define cpu_empty(entry, run) \ + (!(run) || (is_server(run) && !(entry)->fifo_server.server.linked)) + +static struct task_struct* color_schedule(struct task_struct *prev) +{ + unsigned long flags; + int server_running; + struct cpu_entry *entry = local_entry; + struct task_struct *next, *plink = entry->server.linked; + + TRACE("Reschedule on %d at %llu\n", entry->server.cpu, litmus_clock()); + BUG_ON(entry->scheduled && entry->scheduled != prev); + BUG_ON(entry->scheduled && !is_realtime(prev)); + + raw_spin_lock_irqsave(entry_lock(entry), flags); + + if (entry->scheduled && cpu_empty(entry, plink) && is_running(prev)) { + TRACE_TASK(prev, "Snuck in on new!\n"); + requeue(task_dom(entry, prev), prev); + } + + /* Pick next top-level task */ + next = schedule_server(&entry->server); + /* Schedule hierarchically */ + server_running = next && is_server(next); + if (server_running) + next = task_fserver(next)->linked; + + /* Selected tasks must contend for group lock */ + if (next) { + raw_spin_lock(&dgl_lock); + acquire_resources(next); + if (has_resources(next, entry->server.cpu)) { + TRACE_TASK(next, "Has group lock\n"); + sched_trace_task_resume(next, 1); + } else { + TRACE_TASK(next, "Does not have lock, 0x%p does\n", + group_lock.acquired[entry->server.cpu]); + if (next != prev) + sched_trace_task_block(next, 1); + next = NULL; + server_running = 0; + } + raw_spin_unlock(&dgl_lock); + } + + /* Server is blocked if its running task is blocked. Note that if the + * server has no running task, the server will now execute NULL. 
+ */ + if (server_running) { + TRACE_TASK(entry->server.linked, "Server running\n"); + arm_enforcement_timer(&entry->fifo_server.timer, + entry->fifo_server.task); + entry->fifo_server.start_time = litmus_clock(); + } + + if (prev) + tsk_rt(prev)->scheduled_on = NO_CPU; + if (next) + tsk_rt(next)->scheduled_on = entry->server.cpu; + + entry->scheduled = next; + sched_state_task_picked(); + + raw_spin_unlock_irqrestore(entry_lock(entry), flags); + + return entry->scheduled; +} + +static void color_task_new(struct task_struct *t, int on_rq, int running) +{ + unsigned long flags; + int i, replicas; + raw_spinlock_t *lock; + struct cpu_entry *entry; + struct dgl_group_req *req; + + TRACE_TASK(t, "New colored task\n"); + local_irq_save(flags); + + entry = (is_be(t)) ? local_entry : task_entry(t); + lock = task_lock(entry, t); + + release_at(t, litmus_clock()); + + /* Create request for dynamic group locks */ + req = kmalloc(sizeof(*req), GFP_ATOMIC); + dgl_group_req_init(req); + for (i = 0; i < NUM_RESOURCES; i++) { + replicas = get_control_page(t)->requests[i]; + if (replicas) + set_req(req, i, replicas); + } + tsk_rt(t)->req = req; + + /* Join system */ + raw_spin_lock(lock); + if (running) { + TRACE_TASK(t, "Already scheduled on %d\n", entry->server.cpu); + BUG_ON(entry->scheduled); + entry->scheduled = t; + tsk_rt(t)->scheduled_on = entry->server.cpu; + } else + requeue(task_dom(entry, t), t); + raw_spin_unlock(lock); + + /* Trigger preemptions */ + if (is_be(t)) + check_for_fifo_preempt(); + else + litmus_reschedule(entry->server.cpu); + + local_irq_restore(flags); +} + +static void color_task_wake_up(struct task_struct *task) +{ + unsigned long flags; + struct cpu_entry* entry = task_entry(task); + raw_spinlock_t *lock = task_lock(entry, task); + lt_t now = litmus_clock(); + + TRACE_TASK(task, "Wake up at %llu\n", now); + + local_irq_save(flags); + + /* Abuse sporadic model */ + if (is_tardy(task, now)) { + release_at(task, now); + sched_trace_task_release(task); + } + + /* Re-enter system */ + if (entry->scheduled != task) { + raw_spin_lock(lock); + requeue(task_dom(entry, task), task); + raw_spin_unlock(lock); + } else { + TRACE_TASK(task, "Is already scheduled on %d!\n", + entry->scheduled); + } + + /* Trigger preemptions */ + if (is_be(task)) + check_for_fifo_preempt(); + else + litmus_reschedule(entry->server.cpu); + + local_irq_restore(flags); +} + +static void color_task_block(struct task_struct *t) +{ + TRACE_TASK(t, "Block at %llu, state=%d\n", litmus_clock(), t->state); + BUG_ON(!is_realtime(t)); + BUG_ON(is_queued(t)); +} + +static void color_task_exit(struct task_struct * t) +{ + unsigned long flags; + struct cpu_entry *entry = task_entry(t); + raw_spinlock_t *lock = task_lock(entry, t); + + TRACE_TASK(t, "RIP, now reschedule\n"); + + local_irq_save(flags); + + /* Remove from scheduler consideration */ + if (is_queued(t)) { + raw_spin_lock(lock); + remove(task_dom(entry, t), t); + raw_spin_unlock(lock); + } + + /* Stop parent server */ + if (get_task_server(t)) + unlink(get_task_server(t)); + + /* Unschedule running task */ + if (tsk_rt(t)->scheduled_on != NO_CPU) { + entry = remote_entry(tsk_rt(t)->scheduled_on); + + raw_spin_lock(entry_lock(entry)); + + tsk_rt(t)->scheduled_on = NO_CPU; + entry->scheduled = NULL; + litmus_reschedule(entry->server.cpu); + + raw_spin_unlock(entry_lock(entry)); + } + + /* Remove dgl request from system */ + raw_spin_lock(&dgl_lock); + release_resources(t); + raw_spin_unlock(&dgl_lock); + kfree(tsk_rt(t)->req); + + local_irq_restore(flags); +} + 
+/* + * Non-be tasks must have migrated to the right CPU. + */ +static long color_admit_task(struct task_struct* t) +{ + int ret = is_be(t) || task_cpu(t) == get_partition(t) ? 0 : -EINVAL; + if (!ret) { + printk(KERN_WARNING "Task failed to migrate to CPU %d\n", + get_partition(t)); + } + return ret; +} + +/* + * Load server parameters. + */ +static long color_activate_plugin(void) +{ + int cpu, ret = 0; + struct rt_task tp; + struct task_struct *server_task; + struct cpu_entry *entry; + lt_t now = litmus_clock(); + + for_each_online_cpu(cpu) { + entry = remote_entry(cpu); + server_task = entry->fifo_server.task; + + raw_spin_lock(entry_lock(entry)); + + ret = color_server_params(cpu, &tp.exec_cost, + &tp.period); + if (ret) { + printk(KERN_WARNING "Uninitialized server for CPU %d\n", + entry->server.cpu); + goto loop_end; + } + + /* Fill rt parameters */ + tp.phase = 0; + tp.cpu = cpu; + tp.cls = RT_CLASS_SOFT; + tp.budget_policy = PRECISE_ENFORCEMENT; + tsk_rt(server_task)->task_params = tp; + tsk_rt(server_task)->present = 1; + + /* Make runnable */ + release_at(server_task, now); + entry->fifo_server.start_time = 0; + entry->scheduled = NULL; + + if (!is_queued(server_task)) + requeue(&entry->edf_domain, server_task); + + TRACE_TASK(server_task, "Created server with wcet: %llu, " + "period: %llu\n", tp.exec_cost, tp.period); + + loop_end: + raw_spin_unlock(entry_lock(entry)); + } + + return ret; +} + +/* + * Mark servers as unused, making future calls to requeue fail. + */ +static long color_deactivate_plugin(void) +{ + int cpu; + struct cpu_entry *entry; + + for_each_online_cpu(cpu) { + entry = remote_entry(cpu); + if (entry->fifo_server.task) { + tsk_rt(entry->fifo_server.task)->present = 0; + } + } + return 0; +} + +/* + * Dump container and server parameters for tracing. 
+ */ +static void color_release_ts(lt_t time) +{ + int cpu, fifo_cid; + char fifo_name[TASK_COMM_LEN], cpu_name[TASK_COMM_LEN]; + struct cpu_entry *entry; + struct task_struct *stask; + + strcpy(cpu_name, "CPU"); + strcpy(fifo_name, "BE"); + + fifo_cid = num_online_cpus(); + trace_litmus_container_param(fifo_cid, fifo_name); + + for_each_online_cpu(cpu) { + entry = remote_entry(cpu); + trace_litmus_container_param(cpu, cpu_name); + trace_litmus_server_param(entry->server.sid, cpu, 0, 0); + stask = entry->fifo_server.task; + trace_litmus_server_param(stask->pid, fifo_cid, + get_exec_cost(stask), + get_rt_period(stask)); + } +} + +static struct sched_plugin color_plugin __cacheline_aligned_in_smp = { + .plugin_name = "COLOR", + .task_new = color_task_new, + .complete_job = complete_job, + .task_exit = color_task_exit, + .schedule = color_schedule, + .task_wake_up = color_task_wake_up, + .task_block = color_task_block, + .admit_task = color_admit_task, + + .release_ts = color_release_ts, + + .activate_plugin = color_activate_plugin, + .deactivate_plugin = color_deactivate_plugin, +}; + +static int __init init_color(void) +{ + int cpu; + struct cpu_entry *entry; + struct task_struct *server_task; + struct fifo_server *fifo_server; + struct rt_server *cpu_server; + + for_each_online_cpu(cpu) { + entry = remote_entry(cpu); + edf_domain_init(&entry->edf_domain, NULL, color_edf_release); + + entry->scheduled = NULL; + + /* Create FIFO server */ + fifo_server = &entry->fifo_server; + init_rt_server(&fifo_server->server, + cpu + num_online_cpus() + 1, + cpu, + &fifo_domain, + fifo_preemption_needed, + fifo_requeue, fifo_update, fifo_take); + + + /* Create task struct for FIFO server */ + server_task = kmalloc(sizeof(struct task_struct), GFP_ATOMIC); + memset(server_task, 0, sizeof(*server_task)); + server_task->policy = SCHED_LITMUS; + strcpy(server_task->comm, "server"); + server_task->pid = fifo_server->server.sid; + fifo_server->task = server_task; + + /* Create rt_params for FIFO server */ + tsk_rt(server_task)->heap_node = bheap_node_alloc(GFP_ATOMIC); + tsk_rt(server_task)->rel_heap = release_heap_alloc(GFP_ATOMIC); + bheap_node_init(&tsk_rt(server_task)->heap_node, server_task); + tsk_rt(server_task)->is_server = 1; + + /* Create CPU server */ + cpu_server = &entry->server; + init_rt_server(cpu_server, cpu + 1, cpu, + &entry->edf_domain, edf_preemption_needed, + edf_requeue, NULL, NULL); + cpu_server->running = 1; + + init_enforcement_timer(&fifo_server->timer); + } + + fifo_domain_init(&fifo_domain, NULL, color_fifo_release); + raw_spin_lock_init(&fifo_lock); + + dgl_init(&group_lock); + raw_spin_lock_init(&dgl_lock); + + return register_sched_plugin(&color_plugin); +} + +module_init(init_color); diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c index 6553948407de..3de3c8605aae 100644 --- a/litmus/sched_litmus.c +++ b/litmus/sched_litmus.c @@ -160,7 +160,7 @@ static void enqueue_task_litmus(struct rq *rq, struct task_struct *p, int flags) { if (flags & ENQUEUE_WAKEUP) { - sched_trace_task_resume(p); + sched_trace_task_resume(p, 0); tsk_rt(p)->present = 1; /* LITMUS^RT plugins need to update the state * _before_ making it available in global structures. 
@@ -185,7 +185,7 @@ static void dequeue_task_litmus(struct rq *rq, struct task_struct *p, if (flags & DEQUEUE_SLEEP) { litmus->task_block(p); tsk_rt(p)->present = 0; - sched_trace_task_block(p); + sched_trace_task_block(p, 0); rq->litmus.nr_running--; } else diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c index 00a1900d6457..123c7516fb76 100644 --- a/litmus/sched_plugin.c +++ b/litmus/sched_plugin.c @@ -95,6 +95,10 @@ static void litmus_dummy_task_exit(struct task_struct *task) { } +static void litmus_dummy_release_ts(lt_t time) +{ +} + static long litmus_dummy_complete_job(void) { return -ENOSYS; @@ -136,6 +140,7 @@ struct sched_plugin linux_sched_plugin = { .finish_switch = litmus_dummy_finish_switch, .activate_plugin = litmus_dummy_activate_plugin, .deactivate_plugin = litmus_dummy_deactivate_plugin, + .release_ts = litmus_dummy_release_ts, #ifdef CONFIG_LITMUS_LOCKING .allocate_lock = litmus_dummy_allocate_lock, #endif @@ -174,6 +179,7 @@ int register_sched_plugin(struct sched_plugin* plugin) CHECK(complete_job); CHECK(activate_plugin); CHECK(deactivate_plugin); + CHECK(release_ts); #ifdef CONFIG_LITMUS_LOCKING CHECK(allocate_lock); #endif diff --git a/litmus/sync.c b/litmus/sync.c index bf75fde5450b..f3c9262f7022 100644 --- a/litmus/sync.c +++ b/litmus/sync.c @@ -73,6 +73,9 @@ static long do_release_ts(lt_t start) complete_n(&ts_release, task_count); + /* TODO: remove this hack */ + litmus->release_ts(start); + return task_count; } -- cgit v1.2.2 From b881d3cdf01cd463073b016261b9af2cfe3ed417 Mon Sep 17 00:00:00 2001 From: Christopher Kenna Date: Thu, 3 May 2012 18:07:15 -0400 Subject: Fix minor compiler complaints. --- litmus/Kconfig | 7 +++++++ litmus/Makefile | 8 ++++---- litmus/sched_color.c | 4 ++-- 3 files changed, 13 insertions(+), 6 deletions(-) (limited to 'litmus') diff --git a/litmus/Kconfig b/litmus/Kconfig index 68459d4dca41..69150b954dba 100644 --- a/litmus/Kconfig +++ b/litmus/Kconfig @@ -12,6 +12,13 @@ config PLUGIN_CEDF On smaller platforms (e.g., ARM PB11MPCore), using C-EDF makes little sense since there aren't any shared caches. +config PLUGIN_COLOR + bool "Scheduling with Colors" + depends on NP_SECTION + default y + help + Include the scheduling with colors scheduler. 
+ config PLUGIN_PFAIR bool "PFAIR" depends on HIGH_RES_TIMERS && !NO_HZ diff --git a/litmus/Makefile b/litmus/Makefile index d24e9855a7f9..1b9b75aa38c9 100644 --- a/litmus/Makefile +++ b/litmus/Makefile @@ -22,17 +22,17 @@ obj-y = sched_plugin.o litmus.o \ rt_server.o \ dgl.o \ fifo_common.o \ - sched_color.o - - # sched_psn_edf.o \ - # sched_gsn_edf.o \ + sched_psn_edf.o \ + sched_gsn_edf.o obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o +obj-$(CONFIG_PLUGIN_COLOR) += sched_color.o obj-$(CONFIG_SCHED_CPU_AFFINITY) += affinity.o obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o + diff --git a/litmus/sched_color.c b/litmus/sched_color.c index 98a46bb1b06f..c21142ea0698 100644 --- a/litmus/sched_color.c +++ b/litmus/sched_color.c @@ -657,8 +657,8 @@ static long color_activate_plugin(void) raw_spin_lock(entry_lock(entry)); - ret = color_server_params(cpu, &tp.exec_cost, - &tp.period); + ret = color_server_params(cpu, ((unsigned long*)&tp.exec_cost), + ((unsigned long*)&tp.period)); if (ret) { printk(KERN_WARNING "Uninitialized server for CPU %d\n", entry->server.cpu); -- cgit v1.2.2 From 803e44774958312123b0bee0fcffd4e97c7c88aa Mon Sep 17 00:00:00 2001 From: Christopher Kenna Date: Fri, 4 May 2012 12:20:52 -0400 Subject: Move nr_colors into color_cache_info and extern it for everyone. --- litmus/color.c | 34 ++++++++++++++++++++-------------- litmus/color_dev.c | 8 ++++---- 2 files changed, 24 insertions(+), 18 deletions(-) (limited to 'litmus') diff --git a/litmus/color.c b/litmus/color.c index 93d12a718543..21bb0ee81895 100644 --- a/litmus/color.c +++ b/litmus/color.c @@ -32,10 +32,7 @@ static struct color_group *color_groups; /* non-static: extern'ed in various files */ -unsigned long nr_colors; -#ifdef CONFIG_X86 struct color_cache_info color_cache_info; -#endif int color_sysctl_add_pages_data; static inline unsigned long page_color(struct page *page) @@ -67,7 +64,7 @@ struct page* get_colored_page(unsigned long color) struct color_group *cgroup; struct page *page = NULL; - if (color >= nr_colors) + if (color >= color_cache_info.nr_colors) goto out; cgroup = &color_groups[color]; @@ -93,7 +90,7 @@ static unsigned long smallest_nr_pages(void) { unsigned long i, min_pages = -1; struct color_group *cgroup; - for (i = 0; i < nr_colors; ++i) { + for (i = 0; i < color_cache_info.nr_colors; ++i) { cgroup = &color_groups[i]; if (atomic_read(&cgroup->nr_pages) < min_pages) min_pages = atomic_read(&cgroup->nr_pages); @@ -226,7 +223,7 @@ int color_nr_pages_handler(struct ctl_table *table, int write, void __user *buff ret = -EPERM; goto out; } - for (i = 0; i < nr_colors; ++i) { + for (i = 0; i < color_cache_info.nr_colors; ++i) { cgroup = &color_groups[i]; buf = ((char*)table->data) + used; used += snprintf(buf, table->maxlen - used, ONE_COLOR_FMT, @@ -275,16 +272,24 @@ static int __init init_mask(void) BUG_ON(PAGE_SIZE >= (color_cache_info.sets << line_size_log)); color_mask = ((color_cache_info.sets << line_size_log) - 1) ^ (PAGE_SIZE - 1); - nr_colors = (color_mask >> PAGE_SHIFT) + 1; + color_cache_info.nr_colors = (color_mask >> PAGE_SHIFT) + 1; out: return err; } #elif defined(CONFIG_SPARC) /* X86 */ static int __init init_mask(void) { - /* bits 17:13 */ - color_mask = 0x3e000UL; - nr_colors = (1 << hweight_long(color_mask)); + /* + * Static assuming we are using Flare (our Niagara machine). 
+ * This machine has weirdness with cache banks, and I don't want + * to waste time trying to auto-detect this. + */ + color_mask = 0x3e000UL; /* bits 17:13 */ + color_cache_info.size = 3 * 1024 * 1024; /* 3 MB */ + color_cache_info.line_size = 64; + color_cache_info.ways = 12; + color_cache_info.sets = 1024 * 4; + color_cache_info.nr_colors = (1 << hweight_long(color_mask)); return 0; } #endif /* SPARC/X86 */ @@ -297,14 +302,15 @@ static int __init init_color_groups(void) unsigned long i; int err = 0; - color_groups = kmalloc(nr_colors * sizeof(struct color_group), GFP_KERNEL); + color_groups = kmalloc(color_cache_info.nr_colors * + sizeof(struct color_group), GFP_KERNEL); if (!color_groups) { printk(KERN_WARNING "Could not allocate color groups.\n"); err = -ENOMEM; goto out; } - for (i = 0; i < nr_colors; ++i) { + for (i = 0; i < color_cache_info.nr_colors; ++i) { cgroup = &color_groups[i]; atomic_set(&cgroup->nr_pages, 0); INIT_LIST_HEAD(&cgroup->list); @@ -326,9 +332,9 @@ static int __init init_color(void) err = init_mask(); printk("PAGE_SIZE: %lu Color mask: 0x%lx Total colors: %lu\n", - PAGE_SIZE, color_mask, nr_colors); + PAGE_SIZE, color_mask, color_cache_info.nr_colors); - BUG_ON(LOCKDEP_MAX_NR_COLORS < nr_colors); + BUG_ON(LOCKDEP_MAX_NR_COLORS < color_cache_info.nr_colors); err = init_color_groups(); return err; } diff --git a/litmus/color_dev.c b/litmus/color_dev.c index d681f57be01f..7ccdaf03740b 100644 --- a/litmus/color_dev.c +++ b/litmus/color_dev.c @@ -102,8 +102,6 @@ static int map_color_ctrl_page(struct vm_area_struct *vma) /* Map it into the vma. Make sure to use PAGE_SHARED, otherwise * userspace actually gets a copy-on-write page. */ err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, PAGE_SHARED); - TRACE_CUR("page shared: guess:0x1(63)...1??111 actual:0x%lx\n", PAGE_SHARED); - /* present, RW, user, accessed, NX=63 */ if (err) TRACE_CUR(CTRL_NAME ": remap_pfn_range() failed (%d)\n", err); @@ -181,10 +179,12 @@ static int do_map_colored_pages(struct vm_area_struct *vma) clear_user_highpage(page, addr); #endif TRACE_CUR(ALLOC_NAME ": insert page (pa:0x%10llx, pfn:%8lu, " - "color:%3lu) at 0x%lx (flags:%lx prot:%lx\n", + "color:%3lu) at 0x%lx (flags:%16lx prot:%16lx) " + "PAGE_SHARED:0x%16lx\n", page_to_phys(page), page_to_pfn(page), this_color, addr, - vma->vm_flags, pgprot_val(vma->vm_page_prot)); + vma->vm_flags, pgprot_val(vma->vm_page_prot), + PAGE_SHARED); //err = vm_insert_page(vma, addr, page); err = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE, PAGE_SHARED); -- cgit v1.2.2 From 27549ac2327435e299b1a08de626c794a9005be2 Mon Sep 17 00:00:00 2001 From: Christopher Kenna Date: Fri, 4 May 2012 12:24:53 -0400 Subject: Fix a forgotten nr_colors in color_proc.c --- litmus/color_proc.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'litmus') diff --git a/litmus/color_proc.c b/litmus/color_proc.c index 0ac533f96d3e..3ad010f21b02 100644 --- a/litmus/color_proc.c +++ b/litmus/color_proc.c @@ -84,12 +84,10 @@ int color_server_params(int cpu, unsigned long *wcet, unsigned long *period) return 0; } -extern unsigned long nr_colors; /* litmus/color.c */ - /* must be called AFTER nr_colors is set */ static int __init init_sysctl_nr_colors(void) { - int ret = 0, maxlen = ONE_COLOR_LEN * nr_colors; + int ret = 0, maxlen = ONE_COLOR_LEN * color_cache_info.nr_colors; color_table[NR_PAGES_INDEX].data = kmalloc(maxlen, GFP_KERNEL); if (!color_table[NR_PAGES_INDEX].data) { printk(KERN_WARNING "Could not allocate nr_pages buffer.\n"); -- cgit v1.2.2 
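The two commits above fold the global page-color count into color_cache_info. As a point of reference for the rest of the series, a page's color is simply the cache set-index bits that sit above the page offset. A minimal sketch, reusing the color_mask computed by init_mask() above; the helper name page_to_color() is invented here for illustration and is not part of the patches:

	static inline unsigned long page_to_color(struct page *page)
	{
		/* color_mask covers the set-index bits above PAGE_SHIFT,
		 * hence nr_colors == (color_mask >> PAGE_SHIFT) + 1 */
		return (page_to_phys(page) & color_mask) >> PAGE_SHIFT;
	}

Pages of the same color map to the same cache sets, which is why do_add_pages() buckets freshly allocated pages into per-color lists before handing them out through get_colored_page().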
From bc53437f599fa2595385a2088a7bb74e47c4c8bb Mon Sep 17 00:00:00 2001 From: Christopher Kenna Date: Fri, 4 May 2012 15:58:00 -0400 Subject: Fix up color proc CPU servers. --- litmus/color_proc.c | 141 +++++++++++++++++++--------------------------------- 1 file changed, 51 insertions(+), 90 deletions(-) (limited to 'litmus') diff --git a/litmus/color_proc.c b/litmus/color_proc.c index 3ad010f21b02..a8bcd145ccf0 100644 --- a/litmus/color_proc.c +++ b/litmus/color_proc.c @@ -5,26 +5,32 @@ #include #include -#define SPERIOD_LEN 7 -#define SPERIOD_FILE "period" -#define SWCET_LEN 5 -#define SWCET_FILE "wcet" - extern int color_sysctl_add_pages_data; /* litmus/color.c */ static int zero = 0; static int one = 1; +/* used as names for server proc entries */ +char *period_str = "period"; +char *wcet_str = "wcet"; + +/* servers have a WCET and period */ +#define NR_SERVER_PARAMS 2 +#define CPU_NAME_LEN 3 +struct color_cpu_server { + char name[CPU_NAME_LEN]; + unsigned long wcet; + unsigned long period; + /* the + 1 is for the sentinel element */ + struct ctl_table table[NR_SERVER_PARAMS + 1]; +}; +static struct color_cpu_server color_cpu_servers[NR_CPUS]; -static unsigned long *server_wcet; -static unsigned long *server_period; +/* the + 1 is for the sentinel element */ +static struct ctl_table color_cpu_tables[NR_CPUS + 1]; #define NR_PAGES_INDEX 0 /* location of nr_pages in the table below */ static struct ctl_table color_table[] = { - { - .procname = "servers", - .mode = 0555, - }, { /* you MUST update NR_PAGES_INDEX if you move this entry */ .procname = "nr_pages", @@ -33,6 +39,11 @@ static struct ctl_table color_table[] = .data = NULL, /* dynamically set later */ .maxlen = 0, /* also set later */ }, + { + .procname = "servers", + .mode = 0555, + .child = color_cpu_tables, + }, { .procname = "add_pages", .data = &color_sysctl_add_pages_data, @@ -66,20 +77,23 @@ static struct ctl_table litmus_dir_table[] = { int color_server_params(int cpu, unsigned long *wcet, unsigned long *period) { + struct color_cpu_server *svr; + if (cpu >= num_online_cpus()) { printk(KERN_WARNING "Cannot access illegal CPU: %d\n", cpu); return -EFAULT; } - if (server_wcet[cpu] == 0 || server_period[cpu] == 0) { + svr = &color_cpu_servers[cpu]; + if (svr->wcet == 0 || svr->period == 0) { printk(KERN_WARNING "Server %d is uninitialized!\n", cpu); return -EPERM; } - *wcet = server_wcet[cpu]; - *period = server_period[cpu]; + *wcet = svr->wcet; + *period = svr->period; - TRACE("For %d: %lu, %lu\n", cpu, server_wcet[cpu], server_period[cpu]); + TRACE("For %d: %lu, %lu\n", cpu, svr->wcet, svr->period); return 0; } @@ -107,82 +121,44 @@ static void __init init_server_entry(struct ctl_table *entry, entry->mode = 0666; entry->proc_handler = proc_doulongvec_minmax; entry->data = parameter; - entry->maxlen = sizeof(unsigned long); + entry->maxlen = sizeof(*parameter); } -static int __init init_cpu_entry(struct ctl_table *cpu_table, int cpu) +static int __init init_cpu_entry(struct ctl_table *cpu_table, + struct color_cpu_server *svr, int cpu) { - char *name; - size_t size; - struct ctl_table *server_table, *entry; - - server_wcet[cpu] = 0; - server_period[cpu] = 0; + struct ctl_table *entry = svr->table; printk(KERN_INFO "Creating cpu %d\n", cpu); - size = sizeof(ctl_table) * 3; - server_table = kmalloc(size, GFP_ATOMIC); - if (!server_table) { - printk(KERN_WARNING "Could not allocate " - "color server proc for CPU %d.\n", cpu); - return -ENOMEM; - } - memset(server_table, 0, size); - - /* Server WCET */ - name = kmalloc(SWCET_LEN, 
GFP_ATOMIC); - if (!name) { - return -ENOMEM; - } - strcpy(name, SWCET_FILE); - entry = &server_table[0]; - init_server_entry(entry, &server_wcet[cpu], name); - - - /* Server period */ - name = kmalloc(SPERIOD_LEN, GFP_ATOMIC); - if (!name) { - return -ENOMEM; - } - strcpy(name, SPERIOD_FILE); - entry = &server_table[1]; - init_server_entry(entry, &server_period[cpu], name); + init_server_entry(entry, &svr->wcet, wcet_str); + entry++; + init_server_entry(entry, &svr->period, period_str); - name = kmalloc(3, GFP_ATOMIC); - if (!name) { - return -ENOMEM; - } - snprintf(name, 2, "%d", cpu); - cpu_table->procname = name; + /* minus one for the null byte */ + snprintf(svr->name, CPU_NAME_LEN - 1, "%d", cpu); + cpu_table->procname = svr->name; cpu_table->mode = 0555; - cpu_table->child = server_table; + cpu_table->child = svr->table; return 0; } -static int __init init_server_entries(struct ctl_table *cpu_tables) +static int __init init_server_entries(void) { - size_t size; - int ret, cpu; + int cpu, err = 0; struct ctl_table *cpu_table; - - size = sizeof(unsigned long) * num_online_cpus(); - server_wcet = kmalloc(size, GFP_ATOMIC); - server_period = kmalloc(size, GFP_ATOMIC); - if (!server_wcet || !server_period) { - printk(KERN_WARNING "Could not allocate server parameters.\n"); - return -ENOMEM; - } + struct color_cpu_server *svr; for_each_online_cpu(cpu) { - cpu_table = &cpu_tables[cpu]; - ret = init_cpu_entry(cpu_table, cpu); - if (ret) { - return ret; - } + cpu_table = &color_cpu_tables[cpu]; + svr = &color_cpu_servers[cpu]; + err = init_cpu_entry(cpu_table, svr, cpu); + if (err) + goto out; } - return 0; +out: + return err; } @@ -191,8 +167,6 @@ static struct ctl_table_header *litmus_sysctls; static int __init litmus_sysctl_init(void) { int ret = 0; - size_t size; - struct ctl_table *cpu_tables; printk(KERN_INFO "Registering LITMUS^RT proc sysctl.\n"); litmus_sysctls = register_sysctl_table(litmus_dir_table); @@ -205,22 +179,9 @@ static int __init litmus_sysctl_init(void) if (ret) goto out; - - size = sizeof(ctl_table) * (num_online_cpus() + 2); - cpu_tables = kmalloc(size, GFP_ATOMIC); - if (!cpu_tables) { - printk(KERN_WARNING "Could not allocate color CPU proc.\n"); - ret = -ENOMEM; - goto out; - } - memset(cpu_tables, 0, size); - - ret = init_server_entries(cpu_tables); + ret = init_server_entries(); if (ret) goto out; - - color_table[0].child = cpu_tables; - out: return ret; } -- cgit v1.2.2 From 788c87d653beb3c28a7b44d79b09ddcab5f7aa41 Mon Sep 17 00:00:00 2001 From: Christopher Kenna Date: Fri, 4 May 2012 16:01:31 -0400 Subject: add forgotten static --- litmus/color_proc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'litmus') diff --git a/litmus/color_proc.c b/litmus/color_proc.c index a8bcd145ccf0..adf38a80f93a 100644 --- a/litmus/color_proc.c +++ b/litmus/color_proc.c @@ -10,8 +10,8 @@ extern int color_sysctl_add_pages_data; /* litmus/color.c */ static int zero = 0; static int one = 1; /* used as names for server proc entries */ -char *period_str = "period"; -char *wcet_str = "wcet"; +static char *period_str = "period"; +static char *wcet_str = "wcet"; /* servers have a WCET and period */ #define NR_SERVER_PARAMS 2 -- cgit v1.2.2 From 2f421a06c3663fff3e3f0d0238f6d4651a8cb50d Mon Sep 17 00:00:00 2001 From: Jonathan Herman Date: Fri, 4 May 2012 16:52:44 -0400 Subject: Group locks now allocate fields dynamically --- litmus/dgl.c | 92 ++++++++++++++++++++++++++++++++++++++++------------ litmus/sched_color.c | 20 +++++++++--- 2 files changed, 87 insertions(+), 
25 deletions(-) (limited to 'litmus') diff --git a/litmus/dgl.c b/litmus/dgl.c index e09d57cc2672..af710e72b5ef 100644 --- a/litmus/dgl.c +++ b/litmus/dgl.c @@ -1,20 +1,28 @@ #include +#include + #include #include #include +#define MASK_SIZE (sizeof(unsigned long) * 8) + +/* Return number of MASK_SIZE fields needed to store a mask in d */ +#define WP(num, word) (num / word + (num % word != 0)) +#define MASK_WORDS(d) WP(d->num_resources, MASK_SIZE) + /* Word, bit -> resource id */ #define ri(w, b) (w * MASK_SIZE + b) /* For loop, where @i iterates over each set bit in @bit_arr */ -#define for_each_resource(bit_arr, w, b, i) \ - for(w = 0; w < MASK_WORDS; ++w) \ +#define for_each_resource(bit_arr, d, w, b, i) \ + for(w = 0; w < MASK_WORDS(d); ++w) \ for(b = find_first_bit(&bit_arr[w],MASK_SIZE), i = ri(w, b); \ b < MASK_SIZE; \ b = find_next_bit(&bit_arr[w],MASK_SIZE,b+1), i = ri(w, b)) /* Return resource id in dgl @d for resource @r */ -#define resource_id(d, r) ((((void*)r) - (void*)(&(d)->resources))/ sizeof(*r)) +#define resource_id(d, r) ((((void*)r) - (void*)((d)->resources))/ sizeof(*r)) /* Return request group of req @r for resource @i */ #define req_group(r, i) (container_of(((void*)r) - sizeof(*r)*(i), \ @@ -32,31 +40,39 @@ static void print_waiting(struct dgl *dgl, struct dgl_resource *resource) { struct dgl_req *pos; struct dgl_group_req *greq; - int rid = resource_id(dgl, resource); unsigned long long last = 0; TRACE("List for rid %d\n", resource_id(dgl, resource)); list_for_each_entry(pos, &resource->waiting, list) { - greq = req_group(pos, rid); + greq = pos->greq; TRACE(" 0x%p with timestamp %llu\n", greq, greq->ts); BUG_ON(greq->ts < last); last = greq->ts; } } -void dgl_init(struct dgl *dgl) +void dgl_init(struct dgl *dgl, unsigned long num_resources, + unsigned long num_replicas) { int i; struct dgl_resource *resource; - for (i = 0; i < NR_CPUS; ++i) + dgl->num_replicas = num_replicas; + dgl->num_resources = num_resources; + + dgl->resources = kmalloc(sizeof(*dgl->resources) * num_resources, + GFP_ATOMIC); + dgl->acquired = kmalloc(sizeof(*dgl->acquired) * num_online_cpus(), + GFP_ATOMIC); + + for (i = 0; i < num_online_cpus(); ++i) dgl->acquired[i] = NULL; - for (i = 0; i < NUM_RESOURCES; ++i) { + for (i = 0; i < num_resources; i++) { resource = &dgl->resources[i]; INIT_LIST_HEAD(&resource->waiting); - resource->free_replicas = NUM_REPLICAS; + resource->free_replicas = dgl->num_replicas; } dgl->requests = 0; @@ -64,30 +80,57 @@ void dgl_init(struct dgl *dgl) dgl->ts = 0; } -void dgl_group_req_init(struct dgl_group_req *greq) +void dgl_free(struct dgl *dgl) +{ + kfree(dgl->resources); + kfree(dgl->acquired); +} + +void dgl_group_req_init(struct dgl *dgl, struct dgl_group_req *greq) { int i; + + greq->requested = kmalloc(sizeof(*greq->requested) * MASK_WORDS(dgl), + GFP_ATOMIC); + greq->waiting = kmalloc(sizeof(*greq->waiting) * MASK_WORDS(dgl), + GFP_ATOMIC); + greq->requests = kmalloc(sizeof(*greq->requests) * dgl->num_resources, + GFP_ATOMIC); + + BUG_ON(!greq->requested); + BUG_ON(!greq->waiting); + BUG_ON(!greq->requests); + greq->cpu = NO_CPU; - for (i = 0; i < MASK_WORDS; ++i) { + for (i = 0; i < MASK_WORDS(dgl); ++i) { greq->requested[i] = 0; greq->waiting[i] = 0; } } +void dgl_group_req_free(struct dgl_group_req *greq) +{ + kfree(greq->requested); + kfree(greq->waiting); + kfree(greq->requests); +} + /** * set_req - create request for @replicas of @resource. 
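 *
 * (Illustrative aside, not in the original source: in the color plugin each
 * cache color is one DGL resource and each way of that color is one replica,
 * so a group request built from a task's color control page asks for up to
 * 'ways' replicas of every color the task touches.)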
*/ -void set_req(struct dgl_group_req *greq, int resource, int replicas) +void set_req(struct dgl *dgl, struct dgl_group_req *greq, + int resource, int replicas) { int word, bit; struct dgl_req *req; - BUG_ON(replicas > NUM_REPLICAS); + BUG_ON(replicas > dgl->num_replicas); mask_idx(resource, &word, &bit); __set_bit(bit, &greq->requested[word]); req = &greq->requests[resource]; + req->greq = greq; INIT_LIST_HEAD(&req->list); req->replicas = replicas; } @@ -105,7 +148,9 @@ static unsigned long try_acquire(struct dgl *dgl, struct dgl_resource *resource, struct dgl_group_req *greq; rid = resource_id(dgl, resource); - greq = req_group(req, rid); + greq = req->greq; + + TRACE("0x%p greq\n", greq); head = resource->waiting.next == &req->list; empty = list_empty(&resource->waiting); @@ -119,16 +164,21 @@ static unsigned long try_acquire(struct dgl *dgl, struct dgl_resource *resource, } resource->free_replicas -= req->replicas; - BUG_ON(resource->free_replicas > NUM_REPLICAS); + BUG_ON(resource->free_replicas > dgl->num_replicas); TRACE("0x%p acquired %d replicas of rid %d\n", greq, req->replicas, rid); mask_idx(rid, &word, &bit); + + + TRACE("0x%p, %lu, 0x%p\n", greq->waiting, greq->waiting[word], + &greq->waiting[word]); + clear_bit(bit, &greq->waiting[word]); waiting = 0; - for (word = 0; word < MASK_WORDS; word++) { + for (word = 0; word < MASK_WORDS(dgl); word++) { waiting |= greq->waiting[word]; if (waiting) break; @@ -162,11 +212,11 @@ void add_group_req(struct dgl *dgl, struct dgl_group_req *greq, int cpu) ++dgl->requests; - for_each_resource(greq->requested, w, b, i) { + for_each_resource(greq->requested, dgl, w, b, i) { __set_bit(b, &greq->waiting[w]); } - for_each_resource(greq->requested, w, b, i) { + for_each_resource(greq->requested, dgl, w, b, i) { req = &greq->requests[i]; resource = &dgl->resources[i]; @@ -210,7 +260,7 @@ void remove_group_req(struct dgl *dgl, struct dgl_group_req *greq) --dgl->running; } - for_each_resource(greq->requested, w, b, i) { + for_each_resource(greq->requested, dgl, w, b, i) { req = &greq->requests[i]; resource = &dgl->resources[i]; @@ -223,7 +273,7 @@ void remove_group_req(struct dgl *dgl, struct dgl_group_req *greq) } else { /* Have resource */ resource->free_replicas += req->replicas; - BUG_ON(resource->free_replicas > NUM_REPLICAS); + BUG_ON(resource->free_replicas > dgl->num_replicas); TRACE("0x%p releasing %d of %d replicas, rid %d\n", greq, req->replicas, resource->free_replicas, i); @@ -233,7 +283,7 @@ void remove_group_req(struct dgl *dgl, struct dgl_group_req *greq) struct dgl_req, list); - BUG_ON(req_group(next, i)->ts < greq->ts); + BUG_ON(next->greq->ts < greq->ts); if (try_acquire(dgl, resource, next)) { list_del_init(&next->list); diff --git a/litmus/sched_color.c b/litmus/sched_color.c index c21142ea0698..b0a92caeae91 100644 --- a/litmus/sched_color.c +++ b/litmus/sched_color.c @@ -188,7 +188,9 @@ static void link(struct rt_server *server, struct task_struct *task) if (is_server(task)) { tserv = task_fserver(task); tserv->running = 1; + raw_spin_lock(&fifo_lock); schedule_server(tserv); + raw_spin_unlock(&fifo_lock); } server->linked = task; @@ -303,6 +305,7 @@ static void fifo_requeue(struct rt_server *server, struct task_struct *t) /* * Locking take for FIFO servers. + * TODO: no longer necessary. 
*/ static struct task_struct* fifo_take(struct rt_server *server) { @@ -513,11 +516,11 @@ static void color_task_new(struct task_struct *t, int on_rq, int running) /* Create request for dynamic group locks */ req = kmalloc(sizeof(*req), GFP_ATOMIC); - dgl_group_req_init(req); - for (i = 0; i < NUM_RESOURCES; i++) { + dgl_group_req_init(&group_lock, req); + for (i = 0; i < group_lock.num_resources; i++) { replicas = get_control_page(t)->requests[i]; if (replicas) - set_req(req, i, replicas); + set_req(&group_lock, req, i, replicas); } tsk_rt(t)->req = req; @@ -622,6 +625,8 @@ static void color_task_exit(struct task_struct * t) raw_spin_lock(&dgl_lock); release_resources(t); raw_spin_unlock(&dgl_lock); + + dgl_group_req_free(tsk_rt(t)->req); kfree(tsk_rt(t)->req); local_irq_restore(flags); @@ -802,10 +807,17 @@ static int __init init_color(void) fifo_domain_init(&fifo_domain, NULL, color_fifo_release); raw_spin_lock_init(&fifo_lock); - dgl_init(&group_lock); + dgl_init(&group_lock, color_cache_info.nr_colors, + color_cache_info.ways); raw_spin_lock_init(&dgl_lock); return register_sched_plugin(&color_plugin); } +static void exit_color(void) +{ + dgl_free(&group_lock); +} + module_init(init_color); +module_exit(exit_color); -- cgit v1.2.2 From 10bc618f3663fd34d6ffaf8adcaa369db8668eda Mon Sep 17 00:00:00 2001 From: Jonathan Herman Date: Sat, 5 May 2012 22:32:30 -0400 Subject: Tasks specify colors and pages when allocating pages --- litmus/Makefile | 1 + litmus/color_dev.c | 86 ++++++++++++++++++++++++++++++---------------------- litmus/dgl.c | 2 ++ litmus/sched_color.c | 59 +++++++++++++++++++---------------- 4 files changed, 85 insertions(+), 63 deletions(-) (limited to 'litmus') diff --git a/litmus/Makefile b/litmus/Makefile index 1b9b75aa38c9..d490cedbd7bb 100644 --- a/litmus/Makefile +++ b/litmus/Makefile @@ -22,6 +22,7 @@ obj-y = sched_plugin.o litmus.o \ rt_server.o \ dgl.o \ fifo_common.o \ + rm_common.o \ sched_psn_edf.o \ sched_gsn_edf.o diff --git a/litmus/color_dev.c b/litmus/color_dev.c index 7ccdaf03740b..d8480d7fd543 100644 --- a/litmus/color_dev.c +++ b/litmus/color_dev.c @@ -152,53 +152,65 @@ out: static int do_map_colored_pages(struct vm_area_struct *vma) { const unsigned long nr_pages = vma_nr_pages(vma); + struct color_ctrl_page *color_ctrl = tsk_rt(current)->color_ctrl_page; unsigned long nr_mapped; - color_t *cur_color; - int err; + int i, err = 0; TRACE_CUR(ALLOC_NAME ": allocating %lu pages (flags:%lx prot:%lx)\n", nr_pages, vma->vm_flags, pgprot_val(vma->vm_page_prot)); - for ( nr_mapped = 0, - cur_color = tsk_rt(current)->color_ctrl_page->colors; - nr_mapped < nr_pages; - nr_mapped++, cur_color++) - { - const unsigned long this_color = *cur_color; - const unsigned long addr = vma->vm_start + (nr_mapped << PAGE_SHIFT); - struct page *page = get_colored_page(this_color); - - if (!page) { - TRACE_CUR(ALLOC_NAME ": Could not get page with " - " color %lu.\n", this_color); - /* TODO unmap mapped pages */ - err = -ENOMEM; - break; - } + for (i = 0, nr_mapped = 0; nr_mapped < nr_pages; ++i) { + const unsigned long color_no = color_ctrl->colors[i]; + unsigned int page_no = 0; + + for (; page_no < color_ctrl->pages[i]; ++page_no, ++nr_mapped) { + const unsigned long addr = vma->vm_start + + (nr_mapped << PAGE_SHIFT); + struct page *page = get_colored_page(color_no); + + if (!page) { + TRACE_CUR(ALLOC_NAME ": Could not get page with" + " color %lu.\n", color_no); + /* TODO unmap mapped pages */ + err = -ENOMEM; + goto out; + } + #ifdef CONFIG_SPARC - clear_user_highpage(page, 
addr); + clear_user_highpage(page, addr); #endif - TRACE_CUR(ALLOC_NAME ": insert page (pa:0x%10llx, pfn:%8lu, " - "color:%3lu) at 0x%lx (flags:%16lx prot:%16lx) " - "PAGE_SHARED:0x%16lx\n", - page_to_phys(page), - page_to_pfn(page), this_color, addr, - vma->vm_flags, pgprot_val(vma->vm_page_prot), - PAGE_SHARED); - //err = vm_insert_page(vma, addr, page); - err = remap_pfn_range(vma, addr, page_to_pfn(page), - PAGE_SIZE, PAGE_SHARED); - if (err) { - TRACE_CUR(ALLOC_NAME ": remap_pfn_range() failed " - "(%d) (flags:%lx prot:%lx)\n", err, - vma->vm_flags, - pgprot_val(vma->vm_page_prot)); - /* TODO unmap mapped pages */ + + TRACE_CUR(ALLOC_NAME ": insert page (pa:0x%10llx, " + "pfn:%8lu, color:%3lu) at 0x%lx (flags:%16lx " + "prot:%16lx) PAGE_SHARED:0x%16lx\n", + page_to_phys(page), + page_to_pfn(page), color_no, addr, + vma->vm_flags, pgprot_val(vma->vm_page_prot), + PAGE_SHARED); + + err = remap_pfn_range(vma, addr, page_to_pfn(page), + PAGE_SIZE, PAGE_SHARED); + + if (err) { + TRACE_CUR(ALLOC_NAME ": remap_pfn_range() fail " + "(%d) (flags:%lx prot:%lx)\n", err, + vma->vm_flags, + pgprot_val(vma->vm_page_prot)); + /* TODO unmap mapped pages */ + err = -EINVAL; + goto out; + } + add_page_to_alloced_list(page, vma); + } + + if (!page_no) { + TRACE_CUR(ALLOC_NAME ": 0 pages given for color %lu\n", + color_no); err = -EINVAL; - break; + goto out; } - add_page_to_alloced_list(page, vma); } + out: return err; } diff --git a/litmus/dgl.c b/litmus/dgl.c index af710e72b5ef..6c1267839123 100644 --- a/litmus/dgl.c +++ b/litmus/dgl.c @@ -129,6 +129,8 @@ void set_req(struct dgl *dgl, struct dgl_group_req *greq, mask_idx(resource, &word, &bit); __set_bit(bit, &greq->requested[word]); + TRACE("0x%p requesting %d of %d\n", greq, replicas, resource); + req = &greq->requests[resource]; req->greq = greq; INIT_LIST_HEAD(&req->list); diff --git a/litmus/sched_color.c b/litmus/sched_color.c index b0a92caeae91..f6115e552cf8 100644 --- a/litmus/sched_color.c +++ b/litmus/sched_color.c @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include #include #include @@ -32,13 +32,13 @@ struct fifo_server { /** * @server Common server functionality. - * @edf_domain PEDF domain. + * @rm_domain PRM domain. * @scheduled Task physically running on CPU. * @fifo_server Server partitioned to this CPU. */ struct cpu_entry { struct rt_server server; - rt_domain_t edf_domain; + rt_domain_t rm_domain; struct task_struct* scheduled; struct fifo_server fifo_server; }; @@ -55,10 +55,10 @@ static raw_spinlock_t dgl_lock; #define remote_entry(cpu) (&per_cpu(color_cpus, cpu)) #define task_entry(task) remote_entry(get_partition(task)) #define task_fserver(task) (&task_entry(task)->fifo_server.server) -#define entry_lock(entry) (&entry->edf_domain.ready_lock) +#define entry_lock(entry) (&entry->rm_domain.ready_lock) #define has_resources(t, c) (tsk_rt(t)->req == group_lock.acquired[c]) -#define task_dom(entry, task) (is_be(task) ? &fifo_domain : &entry->edf_domain) +#define task_dom(entry, task) (is_be(task) ? &fifo_domain : &entry->rm_domain) #define task_lock(entry, task) (is_be(task) ? &fifo_lock : entry_lock(entry)) /* @@ -283,9 +283,9 @@ static struct task_struct* schedule_server(struct rt_server *server) } /* - * Dumb requeue for PEDF (CPU) servers. + * Dumb requeue for PRM (CPU) servers. 
*/ -static void edf_requeue(struct rt_server *server, struct task_struct *t) +static void rm_requeue(struct rt_server *server, struct task_struct *t) { BUG_ON(is_be(t)); requeue(server->domain, t); @@ -346,7 +346,9 @@ static void fifo_update(struct rt_server *server) } /* Calculate next task */ + raw_spin_lock(&fifo_lock); schedule_server(&fserver->server); + raw_spin_unlock(&fifo_lock); /* Reserve needed resources */ raw_spin_lock(&dgl_lock); @@ -356,22 +358,22 @@ static void fifo_update(struct rt_server *server) } /* - * Triggers preemption on edf-scheduled "linked" field only. + * Triggers preemption on rm-scheduled "linked" field only. */ -static void color_edf_release(rt_domain_t *edf, struct bheap *tasks) +static void color_rm_release(rt_domain_t *rm, struct bheap *tasks) { unsigned long flags; struct cpu_entry *entry; - TRACE_TASK(bheap2task(bheap_peek(edf->order, tasks)), - "Released set of EDF tasks\n"); + TRACE_TASK(bheap2task(bheap_peek(rm->order, tasks)), + "Released set of RM tasks\n"); - entry = container_of(edf, struct cpu_entry, edf_domain); + entry = container_of(rm, struct cpu_entry, rm_domain); raw_spin_lock_irqsave(entry_lock(entry), flags); - __merge_ready(edf, tasks); + __merge_ready(rm, tasks); - if (edf_preemption_needed(edf, entry->server.linked) && + if (rm_preemption_needed(rm, entry->server.linked) && (!entry->server.linked || !is_kernel_np(entry->server.linked))) { litmus_reschedule(entry->server.cpu); } @@ -501,10 +503,11 @@ static struct task_struct* color_schedule(struct task_struct *prev) static void color_task_new(struct task_struct *t, int on_rq, int running) { unsigned long flags; - int i, replicas; + int i; raw_spinlock_t *lock; struct cpu_entry *entry; struct dgl_group_req *req; + color_t *colors, *pages; TRACE_TASK(t, "New colored task\n"); local_irq_save(flags); @@ -514,16 +517,18 @@ static void color_task_new(struct task_struct *t, int on_rq, int running) release_at(t, litmus_clock()); - /* Create request for dynamic group locks */ req = kmalloc(sizeof(*req), GFP_ATOMIC); dgl_group_req_init(&group_lock, req); - for (i = 0; i < group_lock.num_resources; i++) { - replicas = get_control_page(t)->requests[i]; - if (replicas) - set_req(&group_lock, req, i, replicas); - } tsk_rt(t)->req = req; + /* Fill request */ + if (tsk_rt(t)->color_ctrl_page) { + colors = tsk_rt(t)->color_ctrl_page->colors; + pages = tsk_rt(t)->color_ctrl_page->pages; + for (i = 0; pages[i]; i++) + set_req(&group_lock, req, colors[i], pages[i]); + } + /* Join system */ raw_spin_lock(lock); if (running) { @@ -535,7 +540,7 @@ static void color_task_new(struct task_struct *t, int on_rq, int running) requeue(task_dom(entry, t), t); raw_spin_unlock(lock); - /* Trigger preemptions */ + /* Trigger necessary preemptions */ if (is_be(t)) check_for_fifo_preempt(); else @@ -683,8 +688,10 @@ static long color_activate_plugin(void) entry->fifo_server.start_time = 0; entry->scheduled = NULL; + cancel_enforcement_timer(&entry->fifo_server.timer); + if (!is_queued(server_task)) - requeue(&entry->edf_domain, server_task); + requeue(&entry->rm_domain, server_task); TRACE_TASK(server_task, "Created server with wcet: %llu, " "period: %llu\n", tp.exec_cost, tp.period); @@ -766,7 +773,7 @@ static int __init init_color(void) for_each_online_cpu(cpu) { entry = remote_entry(cpu); - edf_domain_init(&entry->edf_domain, NULL, color_edf_release); + rm_domain_init(&entry->rm_domain, NULL, color_rm_release); entry->scheduled = NULL; @@ -797,8 +804,8 @@ static int __init init_color(void) /* Create CPU server */ 
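	/* (Illustrative comment, not in the original source: server IDs are
	 * laid out so that CPU servers take sids 1..num_online_cpus(), while
	 * the per-CPU FIFO servers created above take num_online_cpus()+1 and
	 * up; the is_fifo_server() check introduced later in the series relies
	 * on exactly this split.) */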
cpu_server = &entry->server; init_rt_server(cpu_server, cpu + 1, cpu, - &entry->edf_domain, edf_preemption_needed, - edf_requeue, NULL, NULL); + &entry->rm_domain, rm_preemption_needed, + rm_requeue, NULL, NULL); cpu_server->running = 1; init_enforcement_timer(&fifo_server->timer); -- cgit v1.2.2 From 8acf04cf81854a05d933a8236c2019b7b6d18bae Mon Sep 17 00:00:00 2001 From: Jonathan Herman Date: Sun, 6 May 2012 14:45:29 -0400 Subject: Add proc entry to display cache info --- litmus/color_proc.c | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) (limited to 'litmus') diff --git a/litmus/color_proc.c b/litmus/color_proc.c index adf38a80f93a..1d46496542c9 100644 --- a/litmus/color_proc.c +++ b/litmus/color_proc.c @@ -28,6 +28,9 @@ static struct color_cpu_server color_cpu_servers[NR_CPUS]; /* the + 1 is for the sentinel element */ static struct ctl_table color_cpu_tables[NR_CPUS + 1]; +#define INFO_BUFFER_SIZE 100 +static char info_buffer[100]; + #define NR_PAGES_INDEX 0 /* location of nr_pages in the table below */ static struct ctl_table color_table[] = { @@ -53,6 +56,13 @@ static struct ctl_table color_table[] = .extra1 = &zero, .extra2 = &one, }, + { + .procname = "cache_info", + .mode = 0444, + .proc_handler = proc_dostring, + .data = info_buffer, + .maxlen = INFO_BUFFER_SIZE, + }, { } }; @@ -69,7 +79,7 @@ static struct ctl_table litmus_table[] = static struct ctl_table litmus_dir_table[] = { { .procname = "litmus", - .mode = 0555, + .mode = 0555, .child = litmus_table, }, { } @@ -182,6 +192,17 @@ static int __init litmus_sysctl_init(void) ret = init_server_entries(); if (ret) goto out; + + snprintf(info_buffer, INFO_BUFFER_SIZE, + "Cache size\t: %lu B\n" + "Line size\t: %lu B\n" + "Ways\t\t: %lu\n" + "Sets\t\t: %lu\n" + "Colors\t\t: %lu", + color_cache_info.size, color_cache_info.line_size, + color_cache_info.ways, color_cache_info.sets, + color_cache_info.nr_colors); + out: return ret; } -- cgit v1.2.2 From aab1d2517ef6e9a8a4586d2d93718d1d8e717b95 Mon Sep 17 00:00:00 2001 From: Jonathan Herman Date: Sun, 6 May 2012 16:16:09 -0400 Subject: Added rm_common --- litmus/rm_common.c | 91 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 91 insertions(+) create mode 100644 litmus/rm_common.c (limited to 'litmus') diff --git a/litmus/rm_common.c b/litmus/rm_common.c new file mode 100644 index 000000000000..f608a084d3b8 --- /dev/null +++ b/litmus/rm_common.c @@ -0,0 +1,91 @@ +/* + * kernel/rm_common.c + * + * Common functions for RM based scheduler. + */ + +#include +#include +#include + +#include +#include +#include + +#include + +/* rm_higher_prio - returns true if first has a higher RM priority + * than second. Deadline ties are broken by PID. + * + * both first and second may be NULL + */ +int rm_higher_prio(struct task_struct* first, + struct task_struct* second) +{ + struct task_struct *first_task = first; + struct task_struct *second_task = second; + + /* There is no point in comparing a task to itself. */ + if (first && first == second) { + TRACE_TASK(first, + "WARNING: pointless rm priority comparison.\n"); + return 0; + } + + + /* check for NULL tasks */ + if (!first || !second) + return first && !second; + + return !is_realtime(second_task) || + + /* is the deadline of the first task earlier? + * Then it has higher priority. + */ + lt_before(get_rt_period(first_task), get_rt_period(second_task)) || + + /* Do we have a deadline tie? + * Then break by PID. 
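 * (Illustrative aside, not in the original file: although the comments above
 * speak of deadlines, rm_higher_prio() compares periods, since under
 * rate-monotonic scheduling the task with the shorter period has the higher
 * priority; the 'deadline' wording appears to be carried over from the
 * corresponding EDF helper.)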
+ */ + (get_rt_period(first_task) == get_rt_period(second_task) && + (first_task->pid < second_task->pid || + + /* If the PIDs are the same then the task with the inherited + * priority wins. + */ + (first_task->pid == second_task->pid && + !second->rt_param.inh_task))); +} + +int rm_ready_order(struct bheap_node* a, struct bheap_node* b) +{ + return rm_higher_prio(bheap2task(a), bheap2task(b)); +} + +void rm_domain_init(rt_domain_t* rt, check_resched_needed_t resched, + release_jobs_t release) +{ + rt_domain_init(rt, rm_ready_order, resched, release); +} + +/* need_to_preempt - check whether the task t needs to be preempted + * call only with irqs disabled and with ready_lock acquired + * THIS DOES NOT TAKE NON-PREEMPTIVE SECTIONS INTO ACCOUNT! + */ +int rm_preemption_needed(rt_domain_t* rt, struct task_struct *t) +{ + /* we need the read lock for rm_ready_queue */ + /* no need to preempt if there is nothing pending */ + if (!__jobs_pending(rt)) + return 0; + /* we need to reschedule if t doesn't exist */ + if (!t) + return 1; + + /* NOTE: We cannot check for non-preemptibility since we + * don't know what address space we're currently in. + */ + + /* make sure to get non-rt stuff out of the way */ + return !is_realtime(t) || rm_higher_prio(__next_ready(rt), t); +} -- cgit v1.2.2 From 44b10b376140b33e03ad54c927564f5f720b61ca Mon Sep 17 00:00:00 2001 From: Jonathan Herman Date: Sun, 6 May 2012 16:49:35 -0400 Subject: Exported page size via color proc interface --- litmus/color_proc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'litmus') diff --git a/litmus/color_proc.c b/litmus/color_proc.c index 1d46496542c9..76c540adad37 100644 --- a/litmus/color_proc.c +++ b/litmus/color_proc.c @@ -196,10 +196,11 @@ static int __init litmus_sysctl_init(void) snprintf(info_buffer, INFO_BUFFER_SIZE, "Cache size\t: %lu B\n" "Line size\t: %lu B\n" + "Page size\t: %lu B\n" "Ways\t\t: %lu\n" "Sets\t\t: %lu\n" "Colors\t\t: %lu", - color_cache_info.size, color_cache_info.line_size, + color_cache_info.size, color_cache_info.line_size, PAGE_SIZE, color_cache_info.ways, color_cache_info.sets, color_cache_info.nr_colors); -- cgit v1.2.2 From 41bba723584060d4045eaa133d6cde5f83c10f3a Mon Sep 17 00:00:00 2001 From: Jonathan Herman Date: Sun, 6 May 2012 20:50:08 -0400 Subject: Finer-grained fifo locking --- litmus/sched_color.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'litmus') diff --git a/litmus/sched_color.c b/litmus/sched_color.c index f6115e552cf8..29516e948749 100644 --- a/litmus/sched_color.c +++ b/litmus/sched_color.c @@ -60,6 +60,7 @@ static raw_spinlock_t dgl_lock; #define has_resources(t, c) (tsk_rt(t)->req == group_lock.acquired[c]) #define task_dom(entry, task) (is_be(task) ? &fifo_domain : &entry->rm_domain) #define task_lock(entry, task) (is_be(task) ? &fifo_lock : entry_lock(entry)) +#define is_fifo_server(s) (s->sid > num_online_cpus()) /* * Requeue onto domain's release or ready queue based on task state. 
@@ -188,9 +189,7 @@ static void link(struct rt_server *server, struct task_struct *task) if (is_server(task)) { tserv = task_fserver(task); tserv->running = 1; - raw_spin_lock(&fifo_lock); schedule_server(tserv); - raw_spin_unlock(&fifo_lock); } server->linked = task; @@ -256,6 +255,7 @@ static struct task_struct* schedule_server(struct rt_server *server) { struct task_struct *next; struct rt_server *lserver; + int is_fifo = is_fifo_server(server); TRACE("Scheduling server %d\n", server->sid); @@ -268,16 +268,20 @@ static struct task_struct* schedule_server(struct rt_server *server) } next = server->linked; + if (is_fifo) + raw_spin_lock(&fifo_lock); if ((!next || !is_np(next)) && server->need_preempt(server->domain, next)) { if (next) { TRACE_TASK(next, "Preempted\n"); unlink(server); - server->requeue(server, next); + requeue(server->domain, next); } next = __take_ready(server->domain); link(server, next); } + if (is_fifo) + raw_spin_unlock(&fifo_lock); return next; } @@ -346,9 +350,7 @@ static void fifo_update(struct rt_server *server) } /* Calculate next task */ - raw_spin_lock(&fifo_lock); schedule_server(&fserver->server); - raw_spin_unlock(&fifo_lock); /* Reserve needed resources */ raw_spin_lock(&dgl_lock); @@ -449,7 +451,9 @@ static struct task_struct* color_schedule(struct task_struct *prev) if (entry->scheduled && cpu_empty(entry, plink) && is_running(prev)) { TRACE_TASK(prev, "Snuck in on new!\n"); + raw_spin_lock(&fifo_lock); requeue(task_dom(entry, prev), prev); + raw_spin_unlock(&fifo_lock); } /* Pick next top-level task */ @@ -693,6 +697,7 @@ static long color_activate_plugin(void) if (!is_queued(server_task)) requeue(&entry->rm_domain, server_task); + TRACE_TASK(server_task, "Created server with wcet: %llu, " "period: %llu\n", tp.exec_cost, tp.period); -- cgit v1.2.2 From db7b8b961c72c9ab079728dea0beb682ba78d56b Mon Sep 17 00:00:00 2001 From: Christopher Kenna Date: Sun, 6 May 2012 21:18:33 -0400 Subject: Add task_exit event that records max exec time under color plugin. 
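Illustrative summary, not part of the original commit message: the plugin keeps a running per-task maximum of observed job execution times and reports it in a new ST_TASK_EXIT trace record. The bookkeeping at job completion amounts to the following sketch, using the tsk_rt(), get_exec_time() and get_rt_job() accessors already used elsewhere in this series:

	lt_t et = get_exec_time(t);
	/* releases happen at the second job, so the first job is ignored */
	if (get_rt_job(t) > 1 && et > tsk_rt(t)->max_exec_time)
		tsk_rt(t)->max_exec_time = et;
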
--- litmus/sched_color.c | 10 ++++++++++ litmus/sched_task_trace.c | 16 ++++++++++++++++ 2 files changed, 26 insertions(+) (limited to 'litmus') diff --git a/litmus/sched_color.c b/litmus/sched_color.c index 29516e948749..0448983e13ce 100644 --- a/litmus/sched_color.c +++ b/litmus/sched_color.c @@ -205,6 +205,7 @@ static void link(struct rt_server *server, struct task_struct *task) static void job_completion(struct rt_server *server) { struct task_struct *t = server->linked; + lt_t et; TRACE_TASK(t, "Job completed\n"); if (is_server(t)) @@ -212,6 +213,13 @@ static void job_completion(struct rt_server *server) else sched_trace_task_completion(t, 0); + if (1 < get_rt_job(t)) { + /* our releases happen at the second job */ + et = get_exec_time(t); + if (et > tsk_rt(t)->max_exec_time) + tsk_rt(t)->max_exec_time = et; + } + unlink(server); set_rt_flags(t, RT_F_SLEEP); prepare_for_next_period(t); @@ -525,6 +533,8 @@ static void color_task_new(struct task_struct *t, int on_rq, int running) dgl_group_req_init(&group_lock, req); tsk_rt(t)->req = req; + tsk_rt(t)->max_exec_time = 0; + /* Fill request */ if (tsk_rt(t)->color_ctrl_page) { colors = tsk_rt(t)->color_ctrl_page->colors; diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c index 5ef8d09ab41f..636ea038f403 100644 --- a/litmus/sched_task_trace.c +++ b/litmus/sched_task_trace.c @@ -226,6 +226,22 @@ feather_callback void do_sched_trace_sys_release(unsigned long id, } } +feather_callback void do_sched_trace_task_exit(unsigned long id, + unsigned long _task) +{ + struct task_struct *t = (struct task_struct*) _task; +#ifdef CONFIG_PLUGIN_COLOR + const lt_t max_exec_time = tsk_rt(t)->max_exec_time; +#else + const lt_t max_exec_time = 0; +#endif + struct st_event_record *rec = get_record(ST_TASK_EXIT, t); + if (rec) { + rec->data.task_exit.max_exec_time = max_exec_time; + put_record(rec); + } +} + feather_callback void do_sched_trace_action(unsigned long id, unsigned long _task, unsigned long action) -- cgit v1.2.2 From 5648730ef6d76d0167a9cb0d657c3dd6c4a174f0 Mon Sep 17 00:00:00 2001 From: Jonathan Herman Date: Sun, 6 May 2012 21:26:16 -0400 Subject: Fixed compile bug --- litmus/color.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'litmus') diff --git a/litmus/color.c b/litmus/color.c index 21bb0ee81895..0b99523bcf66 100644 --- a/litmus/color.c +++ b/litmus/color.c @@ -11,7 +11,7 @@ #include #include /* for in_list(...) 
*/ -#define PAGES_PER_COLOR 3072 +#define PAGES_PER_COLOR 300 /* * This is used only to "trick" lockdep into permitting dynamically allocated -- cgit v1.2.2 From 13bd12bff94d253c4425f274ce55f8dd79e177ab Mon Sep 17 00:00:00 2001 From: Christopher Kenna Date: Sun, 6 May 2012 21:37:20 -0400 Subject: Add the time to the task_exit event --- litmus/sched_task_trace.c | 1 + 1 file changed, 1 insertion(+) (limited to 'litmus') diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c index 636ea038f403..afe2333eb723 100644 --- a/litmus/sched_task_trace.c +++ b/litmus/sched_task_trace.c @@ -237,6 +237,7 @@ feather_callback void do_sched_trace_task_exit(unsigned long id, #endif struct st_event_record *rec = get_record(ST_TASK_EXIT, t); if (rec) { + rec->data.task_exit.when = now(); rec->data.task_exit.max_exec_time = max_exec_time; put_record(rec); } -- cgit v1.2.2 From c0d2e33a54c3f99e062d9545c4d43b5e22345611 Mon Sep 17 00:00:00 2001 From: Christopher Kenna Date: Mon, 7 May 2012 22:15:24 -0400 Subject: sched_trace: log task exit --- litmus/sched_color.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'litmus') diff --git a/litmus/sched_color.c b/litmus/sched_color.c index 0448983e13ce..d80bbe8ccdbb 100644 --- a/litmus/sched_color.c +++ b/litmus/sched_color.c @@ -616,6 +616,8 @@ static void color_task_exit(struct task_struct * t) local_irq_save(flags); + sched_trace_task_exit(t); + /* Remove from scheduler consideration */ if (is_queued(t)) { raw_spin_lock(lock); -- cgit v1.2.2 From 5fae0af221f5e914f945859e6a33df3e6206ac46 Mon Sep 17 00:00:00 2001 From: Christopher Kenna Date: Mon, 7 May 2012 21:34:37 -0400 Subject: Make colored memory optionally un-cachable. --- litmus/Kconfig | 7 +++++++ litmus/color.c | 15 +++++++++++++++ litmus/color_dev.c | 27 +++++++++++++++++---------- 3 files changed, 39 insertions(+), 10 deletions(-) (limited to 'litmus') diff --git a/litmus/Kconfig b/litmus/Kconfig index 69150b954dba..272b64c30ba3 100644 --- a/litmus/Kconfig +++ b/litmus/Kconfig @@ -19,6 +19,13 @@ config PLUGIN_COLOR help Include the scheduling with colors scheduler. +config PLUGIN_COLOR_UNCACHABLE + bool "Colored memory is not cachable" + depends on PLUGIN_COLOR && X86_PAT + default n + help + Any memory allocated to the color plugin is not CPU cached. + config PLUGIN_PFAIR bool "PFAIR" depends on HIGH_RES_TIMERS && !NO_HZ diff --git a/litmus/color.c b/litmus/color.c index 0b99523bcf66..8a9f68544e8f 100644 --- a/litmus/color.c +++ b/litmus/color.c @@ -8,6 +8,10 @@ #include /* required by litmus.h */ #include /* page_to_phys on SPARC */ +#ifdef CONFIG_PLUGIN_COLOR_UNCACHABLE +#include /* set_memory_uc */ +#endif + #include #include /* for in_list(...) 
*/ @@ -106,6 +110,10 @@ static int do_add_pages(void) int ret = 0; while (smallest_nr_pages() < PAGES_PER_COLOR) { +#ifdef CONFIG_PLUGIN_COLOR_UNCACHABLE + unsigned long vaddr; +#endif + #if defined(CONFIG_X86) page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_MOVABLE); @@ -122,6 +130,13 @@ static int do_add_pages(void) color = page_color(page); if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) { SetPageReserved(page); +#ifdef CONFIG_PLUGIN_COLOR_UNCACHABLE + vaddr = (unsigned long) pfn_to_kaddr(page_to_pfn(page)); + if (set_memory_uc(vaddr, 1)) { + printk(KERN_ALERT "Could not set_memory_uc\n"); + BUG(); + } +#endif add_page_to_color_list(page); } else list_add_tail(&page->lru, &free_later); diff --git a/litmus/color_dev.c b/litmus/color_dev.c index d8480d7fd543..51760328418e 100644 --- a/litmus/color_dev.c +++ b/litmus/color_dev.c @@ -159,6 +159,10 @@ static int do_map_colored_pages(struct vm_area_struct *vma) TRACE_CUR(ALLOC_NAME ": allocating %lu pages (flags:%lx prot:%lx)\n", nr_pages, vma->vm_flags, pgprot_val(vma->vm_page_prot)); +#ifdef CONFIG_PLUGIN_COLOR_UNCACHABLE + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); +#endif + for (i = 0, nr_mapped = 0; nr_mapped < nr_pages; ++i) { const unsigned long color_no = color_ctrl->colors[i]; unsigned int page_no = 0; @@ -167,6 +171,11 @@ static int do_map_colored_pages(struct vm_area_struct *vma) const unsigned long addr = vma->vm_start + (nr_mapped << PAGE_SHIFT); struct page *page = get_colored_page(color_no); +#ifdef CONFIG_PLUGIN_COLOR_UNCACHABLE + const pgprot_t ins_prot = pgprot_noncached(PAGE_SHARED); +#else + const pgprot_t ins_prot = PAGE_SHARED; +#endif if (!page) { TRACE_CUR(ALLOC_NAME ": Could not get page with" @@ -181,21 +190,19 @@ static int do_map_colored_pages(struct vm_area_struct *vma) #endif TRACE_CUR(ALLOC_NAME ": insert page (pa:0x%10llx, " - "pfn:%8lu, color:%3lu) at 0x%lx (flags:%16lx " - "prot:%16lx) PAGE_SHARED:0x%16lx\n", + "pfn:%8lu, color:%3lu, prot:%lx) at 0x%lx " + "vma:(flags:%16lx prot:%16lx)\n", page_to_phys(page), - page_to_pfn(page), color_no, addr, - vma->vm_flags, pgprot_val(vma->vm_page_prot), - PAGE_SHARED); + page_to_pfn(page), color_no, + pgprot_val(ins_prot), addr, + vma->vm_flags, + pgprot_val(vma->vm_page_prot)); err = remap_pfn_range(vma, addr, page_to_pfn(page), - PAGE_SIZE, PAGE_SHARED); - + PAGE_SIZE, ins_prot); if (err) { TRACE_CUR(ALLOC_NAME ": remap_pfn_range() fail " - "(%d) (flags:%lx prot:%lx)\n", err, - vma->vm_flags, - pgprot_val(vma->vm_page_prot)); + "(%d)\n", err); /* TODO unmap mapped pages */ err = -EINVAL; goto out; -- cgit v1.2.2 From 94984e6987547efb7fc161965d7023fcec88d7da Mon Sep 17 00:00:00 2001 From: Jonathan Herman Date: Tue, 8 May 2012 18:19:03 -0400 Subject: Removed unnecessary space --- litmus/sched_color.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'litmus') diff --git a/litmus/sched_color.c b/litmus/sched_color.c index d80bbe8ccdbb..22c146014045 100644 --- a/litmus/sched_color.c +++ b/litmus/sched_color.c @@ -32,7 +32,7 @@ struct fifo_server { /** * @server Common server functionality. - * @rm_domain PRM domain. + * @rm_domain PRM domain. * @scheduled Task physically running on CPU. * @fifo_server Server partitioned to this CPU. 
*/ -- cgit v1.2.2 From a80dba56e2bc970b300b32ca3a8f4e911c18bbad Mon Sep 17 00:00:00 2001 From: Jonathan Herman Date: Wed, 9 May 2012 16:34:38 -0400 Subject: BE servers now synchronize when task sets are released --- litmus/dgl.c | 1 - litmus/sched_color.c | 19 +++++++++---------- 2 files changed, 9 insertions(+), 11 deletions(-) (limited to 'litmus') diff --git a/litmus/dgl.c b/litmus/dgl.c index 6c1267839123..2df27b48fcdf 100644 --- a/litmus/dgl.c +++ b/litmus/dgl.c @@ -166,7 +166,6 @@ static unsigned long try_acquire(struct dgl *dgl, struct dgl_resource *resource, } resource->free_replicas -= req->replicas; - BUG_ON(resource->free_replicas > dgl->num_replicas); TRACE("0x%p acquired %d replicas of rid %d\n", greq, req->replicas, rid); diff --git a/litmus/sched_color.c b/litmus/sched_color.c index 22c146014045..6c278f5d26cf 100644 --- a/litmus/sched_color.c +++ b/litmus/sched_color.c @@ -675,7 +675,6 @@ static long color_activate_plugin(void) struct rt_task tp; struct task_struct *server_task; struct cpu_entry *entry; - lt_t now = litmus_clock(); for_each_online_cpu(cpu) { entry = remote_entry(cpu); @@ -699,17 +698,8 @@ static long color_activate_plugin(void) tsk_rt(server_task)->task_params = tp; tsk_rt(server_task)->present = 1; - /* Make runnable */ - release_at(server_task, now); - entry->fifo_server.start_time = 0; entry->scheduled = NULL; - cancel_enforcement_timer(&entry->fifo_server.timer); - - if (!is_queued(server_task)) - requeue(&entry->rm_domain, server_task); - - TRACE_TASK(server_task, "Created server with wcet: %llu, " "period: %llu\n", tp.exec_cost, tp.period); @@ -761,6 +751,15 @@ static void color_release_ts(lt_t time) trace_litmus_server_param(stask->pid, fifo_cid, get_exec_cost(stask), get_rt_period(stask)); + + /* Make runnable */ + release_at(stask, time); + entry->fifo_server.start_time = 0; + + cancel_enforcement_timer(&entry->fifo_server.timer); + + if (!is_queued(stask)) + requeue(&entry->rm_domain, stask); } } -- cgit v1.2.2 From 1fde4dd4de048d7fbfe3e1418f4a76c62423ad95 Mon Sep 17 00:00:00 2001 From: Jonathan Herman Date: Wed, 9 May 2012 17:11:56 -0400 Subject: Track tasks average execution time, drop in TASK_EXIT record --- litmus/jobs.c | 13 ++++++++----- litmus/sched_color.c | 1 + litmus/sched_task_trace.c | 4 +++- 3 files changed, 12 insertions(+), 6 deletions(-) (limited to 'litmus') diff --git a/litmus/jobs.c b/litmus/jobs.c index 36e314625d86..4da53a8c4c43 100644 --- a/litmus/jobs.c +++ b/litmus/jobs.c @@ -9,12 +9,15 @@ void prepare_for_next_period(struct task_struct *t) { BUG_ON(!t); +#ifdef CONFIG_PLUGIN_COLOR + tsk_rt(t)->tot_exec_time += tsk_rt(t)->job_params.exec_time; +#endif /* prepare next release */ - t->rt_param.job_params.release = t->rt_param.job_params.deadline; - t->rt_param.job_params.deadline += get_rt_period(t); - t->rt_param.job_params.exec_time = 0; + tsk_rt(t)->job_params.release = tsk_rt(t)->job_params.deadline; + tsk_rt(t)->job_params.deadline += get_rt_period(t); + tsk_rt(t)->job_params.exec_time = 0; /* update job sequence number */ - t->rt_param.job_params.job_no++; + tsk_rt(t)->job_params.job_no++; /* don't confuse Linux */ t->rt.time_slice = 1; @@ -22,7 +25,7 @@ void prepare_for_next_period(struct task_struct *t) void release_at(struct task_struct *t, lt_t start) { - t->rt_param.job_params.deadline = start; + tsk_rt(t)->job_params.deadline = start; prepare_for_next_period(t); set_rt_flags(t, RT_F_RUNNING); } diff --git a/litmus/sched_color.c b/litmus/sched_color.c index 6c278f5d26cf..29b2be97da52 100644 --- 
a/litmus/sched_color.c +++ b/litmus/sched_color.c @@ -534,6 +534,7 @@ static void color_task_new(struct task_struct *t, int on_rq, int running) tsk_rt(t)->req = req; tsk_rt(t)->max_exec_time = 0; + tsk_rt(t)->tot_exec_time = 0; /* Fill request */ if (tsk_rt(t)->color_ctrl_page) { diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c index afe2333eb723..cbfcecbfe93a 100644 --- a/litmus/sched_task_trace.c +++ b/litmus/sched_task_trace.c @@ -232,12 +232,14 @@ feather_callback void do_sched_trace_task_exit(unsigned long id, struct task_struct *t = (struct task_struct*) _task; #ifdef CONFIG_PLUGIN_COLOR const lt_t max_exec_time = tsk_rt(t)->max_exec_time; + const lt_t avg_exec_time = tsk_rt(t)->tot_exec_time / (get_rt_job(t) - 1); #else const lt_t max_exec_time = 0; + const lt_t avg_exec_time = 0; #endif struct st_event_record *rec = get_record(ST_TASK_EXIT, t); if (rec) { - rec->data.task_exit.when = now(); + rec->data.task_exit.avg_exec_time = avg_exec_time; rec->data.task_exit.max_exec_time = max_exec_time; put_record(rec); } -- cgit v1.2.2 From 2d8f8176515f2516b9a0b85642a7b842eb53552b Mon Sep 17 00:00:00 2001 From: Jonathan Herman Date: Sun, 13 May 2012 16:15:33 -0400 Subject: Can disable nonpreemptivity --- litmus/Kconfig | 2 -- litmus/dgl.c | 4 ++++ litmus/sched_color.c | 22 +++++++++++++++++----- 3 files changed, 21 insertions(+), 7 deletions(-) (limited to 'litmus') diff --git a/litmus/Kconfig b/litmus/Kconfig index 272b64c30ba3..f0c48a6a3efb 100644 --- a/litmus/Kconfig +++ b/litmus/Kconfig @@ -14,7 +14,6 @@ config PLUGIN_CEDF config PLUGIN_COLOR bool "Scheduling with Colors" - depends on NP_SECTION default y help Include the scheduling with colors scheduler. @@ -65,7 +64,6 @@ config NP_SECTION config LITMUS_LOCKING bool "Support for real-time locking protocols" - depends on NP_SECTION default n help Enable LITMUS^RT's deterministic multiprocessor real-time diff --git a/litmus/dgl.c b/litmus/dgl.c index 2df27b48fcdf..0c1ce73868e3 100644 --- a/litmus/dgl.c +++ b/litmus/dgl.c @@ -126,6 +126,10 @@ void set_req(struct dgl *dgl, struct dgl_group_req *greq, BUG_ON(replicas > dgl->num_replicas); +#ifndef CONFIG_NP_SECTION + BUG_ON(1); +#endif + mask_idx(resource, &word, &bit); __set_bit(bit, &greq->requested[word]); diff --git a/litmus/sched_color.c b/litmus/sched_color.c index 29b2be97da52..8554fde49c0b 100644 --- a/litmus/sched_color.c +++ b/litmus/sched_color.c @@ -57,11 +57,19 @@ static raw_spinlock_t dgl_lock; #define task_fserver(task) (&task_entry(task)->fifo_server.server) #define entry_lock(entry) (&entry->rm_domain.ready_lock) -#define has_resources(t, c) (tsk_rt(t)->req == group_lock.acquired[c]) + #define task_dom(entry, task) (is_be(task) ? &fifo_domain : &entry->rm_domain) #define task_lock(entry, task) (is_be(task) ? &fifo_lock : entry_lock(entry)) #define is_fifo_server(s) (s->sid > num_online_cpus()) +#ifdef CONFIG_NP_SECTION +#define has_resources(t, c) (tsk_rt(t)->req == group_lock.acquired[c]) +#else +#define has_resources(t, c) (1) +#endif + + + /* * Requeue onto domain's release or ready queue based on task state. 
*/ @@ -90,6 +98,7 @@ static void requeue(rt_domain_t *dom, struct task_struct* t) static void release_resources(struct task_struct *t) { struct task_struct *sched; +#ifdef CONFIG_NP_SECTION TRACE_TASK(t, "Releasing resources\n"); @@ -99,7 +108,8 @@ static void release_resources(struct task_struct *t) release_resources(sched); } else if (is_kernel_np(t)) remove_group_req(&group_lock, tsk_rt(t)->req); - tsk_rt(t)->kernel_np = 0; + take_np(t); +#endif } /* @@ -112,6 +122,7 @@ static void acquire_resources(struct task_struct *t) struct rt_server *server; struct task_struct *sched; +#ifdef CONFIG_NP_SECTION /* Can't acquire resources if t is not running */ BUG_ON(!get_task_server(t)); @@ -134,17 +145,18 @@ static void acquire_resources(struct task_struct *t) /* Become np if there is a running task */ if (sched && has_resources(sched, cpu)) { TRACE_TASK(t, "Running task with resource\n"); - tsk_rt(t)->kernel_np = 1; + make_np(t); } else { TRACE_TASK(t, "Running no resources\n"); - tsk_rt(t)->kernel_np = 0; + take_np(t); } } else { TRACE_TASK(t, "Acquiring resources\n"); if (!has_resources(t, cpu)) add_group_req(&group_lock, tsk_rt(t)->req, cpu); - tsk_rt(t)->kernel_np = 1; + make_np(t); } +#endif } /* -- cgit v1.2.2 From 4b1126d2e422253999f071adcc05299c0172b45a Mon Sep 17 00:00:00 2001 From: Jonathan Herman Date: Sun, 13 May 2012 16:42:29 -0400 Subject: Allow self conflicts --- litmus/dgl.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'litmus') diff --git a/litmus/dgl.c b/litmus/dgl.c index 0c1ce73868e3..dd2a42cc9ca6 100644 --- a/litmus/dgl.c +++ b/litmus/dgl.c @@ -124,11 +124,8 @@ void set_req(struct dgl *dgl, struct dgl_group_req *greq, int word, bit; struct dgl_req *req; - BUG_ON(replicas > dgl->num_replicas); - -#ifndef CONFIG_NP_SECTION - BUG_ON(1); -#endif + if (replicas > dgl->num_replicas) + replicas = dgl->num_replicas; mask_idx(resource, &word, &bit); __set_bit(bit, &greq->requested[word]); -- cgit v1.2.2 From c7a09a0c9c97bb12bec367b1c6180f870ee32de9 Mon Sep 17 00:00:00 2001 From: Jonathan Herman Date: Sun, 13 May 2012 20:48:34 -0400 Subject: Store exec time in completion record --- litmus/sched_task_trace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'litmus') diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c index cbfcecbfe93a..48124b756be7 100644 --- a/litmus/sched_task_trace.c +++ b/litmus/sched_task_trace.c @@ -186,7 +186,7 @@ feather_callback void do_sched_trace_task_completion(unsigned long id, struct task_struct *t = (struct task_struct*) _task; struct st_event_record* rec = get_record(ST_COMPLETION, t); if (rec) { - rec->data.completion.when = now(); + rec->data.completion.when = get_exec_time(t); rec->data.completion.forced = forced; put_record(rec); } -- cgit v1.2.2 From 4bb1dab9fe43ad707f8c1b28f3e8bd5d47f09994 Mon Sep 17 00:00:00 2001 From: Jonathan Herman Date: Mon, 14 May 2012 20:07:03 -0400 Subject: Per job blocking --- litmus/color.c | 2 +- litmus/rt_server.c | 15 +--- litmus/sched_color.c | 215 ++++++++++++++++++++++----------------------------- 3 files changed, 96 insertions(+), 136 deletions(-) (limited to 'litmus') diff --git a/litmus/color.c b/litmus/color.c index 8a9f68544e8f..ecc191137137 100644 --- a/litmus/color.c +++ b/litmus/color.c @@ -15,7 +15,7 @@ #include #include /* for in_list(...) 
*/ -#define PAGES_PER_COLOR 300 +#define PAGES_PER_COLOR 3072 /* * This is used only to "trick" lockdep into permitting dynamically allocated diff --git a/litmus/rt_server.c b/litmus/rt_server.c index 818588a3d317..74d7c7b0f81a 100644 --- a/litmus/rt_server.c +++ b/litmus/rt_server.c @@ -1,11 +1,5 @@ #include - -static struct task_struct* default_server_take(struct rt_server *srv) -{ - return __take_ready(srv->domain); -} - static void default_server_update(struct rt_server *srv) { } @@ -13,18 +7,13 @@ static void default_server_update(struct rt_server *srv) void init_rt_server(struct rt_server *server, int sid, int cpu, rt_domain_t *domain, need_preempt_t need_preempt, - server_requeue_t requeue, - server_update_t update, - server_take_t take) + server_update_t update) { - if (!need_preempt || !requeue) + if (!need_preempt) BUG_ON(1); server->need_preempt = need_preempt; - server->requeue = requeue; - server->update = (update) ? update : default_server_update; - server->take = (take) ? take : default_server_take; server->sid = sid; server->cpu = cpu; diff --git a/litmus/sched_color.c b/litmus/sched_color.c index 8554fde49c0b..f095b302ddd6 100644 --- a/litmus/sched_color.c +++ b/litmus/sched_color.c @@ -55,15 +55,17 @@ static raw_spinlock_t dgl_lock; #define remote_entry(cpu) (&per_cpu(color_cpus, cpu)) #define task_entry(task) remote_entry(get_partition(task)) #define task_fserver(task) (&task_entry(task)->fifo_server.server) -#define entry_lock(entry) (&entry->rm_domain.ready_lock) +#define entry_lock(entry) (&(entry)->rm_domain.ready_lock) #define task_dom(entry, task) (is_be(task) ? &fifo_domain : &entry->rm_domain) #define task_lock(entry, task) (is_be(task) ? &fifo_lock : entry_lock(entry)) -#define is_fifo_server(s) (s->sid > num_online_cpus()) +#define is_fifo_server(s) ((s)->sid > num_online_cpus()) +#define lock_if(lock, cond) do { if (cond) raw_spin_lock(lock);} while(0) +#define unlock_if(lock, cond) do { if (cond) raw_spin_unlock(lock);} while(0) #ifdef CONFIG_NP_SECTION -#define has_resources(t, c) (tsk_rt(t)->req == group_lock.acquired[c]) +#define has_resources(t, c) (tsk_rt(t)->req == group_lock.acquired[c]) #else #define has_resources(t, c) (1) #endif @@ -211,6 +213,65 @@ static void link(struct rt_server *server, struct task_struct *task) task->pid, get_rt_job(task)); } +/* + * Triggers preemption on first FIFO server which is running NULL. + */ +static void check_for_fifo_preempt(void) +{ + int ret = 0, cpu; + struct cpu_entry *entry; + struct rt_server *cpu_server, *fifo_server; + + TRACE("Checking for FIFO preempt\n"); + + for_each_online_cpu(cpu) { + entry = remote_entry(cpu); + cpu_server = &entry->server; + fifo_server = &entry->fifo_server.server; + + raw_spin_lock(entry_lock(entry)); + raw_spin_lock(&fifo_lock); + + if (cpu_server->linked && is_server(cpu_server->linked) && + !fifo_server->linked) { + litmus_reschedule(cpu); + ret = 1; + } + + raw_spin_unlock(&fifo_lock); + raw_spin_unlock(entry_lock(entry)); + + if (ret) + break; + } +} + +/* + * Rejoin a task into the system. 
+ */ +static void job_arrival(struct task_struct *t) +{ + int i; + rt_domain_t *dom = task_dom(task_entry(t), t); + struct dgl_group_req *gr = tsk_rt(t)->req; + struct control_page *cp = tsk_rt(t)->ctrl_page; + struct color_ctrl_page *ccp = tsk_rt(t)->color_ctrl_page; + + /* Fill request */ + if (cp && ccp && cp->colors_updated) { + cp->colors_updated = 0; + dgl_group_req_init(&group_lock, gr); + for (i = 0; ccp->pages[i]; ++i) + set_req(&group_lock, gr, ccp->colors[i], ccp->pages[i]); + } else { + TRACE("Oh noz: %p %p %d\n", cp, ccp, ((cp) ? cp->colors_updated : -1)); + } + + lock_if(&fifo_lock, is_be(t)); + requeue(dom, t); + unlock_if(&fifo_lock, is_be(t)); +} + /* * Complete job for task linked to @server. */ @@ -243,7 +304,7 @@ static void job_completion(struct rt_server *server) sched_trace_task_release(t); if (is_running(t)) - server->requeue(server, t); + job_arrival(t); } /* @@ -275,7 +336,6 @@ static struct task_struct* schedule_server(struct rt_server *server) { struct task_struct *next; struct rt_server *lserver; - int is_fifo = is_fifo_server(server); TRACE("Scheduling server %d\n", server->sid); @@ -288,8 +348,7 @@ static struct task_struct* schedule_server(struct rt_server *server) } next = server->linked; - if (is_fifo) - raw_spin_lock(&fifo_lock); + lock_if(&fifo_lock, is_fifo_server(server)); if ((!next || !is_np(next)) && server->need_preempt(server->domain, next)) { if (next) { @@ -300,48 +359,11 @@ static struct task_struct* schedule_server(struct rt_server *server) next = __take_ready(server->domain); link(server, next); } - if (is_fifo) - raw_spin_unlock(&fifo_lock); + unlock_if(&fifo_lock, is_fifo_server(server)); return next; } -/* - * Dumb requeue for PRM (CPU) servers. - */ -static void rm_requeue(struct rt_server *server, struct task_struct *t) -{ - BUG_ON(is_be(t)); - requeue(server->domain, t); -} - -/* - * Locking requeue for FIFO servers. - */ -static void fifo_requeue(struct rt_server *server, struct task_struct *t) -{ - BUG_ON(!is_be(t)); - raw_spin_lock(&fifo_lock); - requeue(server->domain, t); - raw_spin_unlock(&fifo_lock); -} - - -/* - * Locking take for FIFO servers. - * TODO: no longer necessary. - */ -static struct task_struct* fifo_take(struct rt_server *server) -{ - struct task_struct *ret; - - raw_spin_lock(&fifo_lock); - ret = __take_ready(server->domain); - raw_spin_unlock(&fifo_lock); - - return ret; -} - /* * Update server state, including picking next running task and incrementing * server execution time. @@ -403,39 +425,6 @@ static void color_rm_release(rt_domain_t *rm, struct bheap *tasks) raw_spin_unlock_irqrestore(entry_lock(entry), flags); } -/* - * Triggers preemption on first FIFO server which is running NULL. 
- */ -static void check_for_fifo_preempt(void) -{ - int ret = 0, cpu; - struct cpu_entry *entry; - struct rt_server *cpu_server, *fifo_server; - - TRACE("Checking for FIFO preempt\n"); - - for_each_online_cpu(cpu) { - entry = remote_entry(cpu); - cpu_server = &entry->server; - fifo_server = &entry->fifo_server.server; - - raw_spin_lock(entry_lock(entry)); - raw_spin_lock(&fifo_lock); - - if (cpu_server->linked && is_server(cpu_server->linked) && - !fifo_server->linked) { - litmus_reschedule(cpu); - ret = 1; - } - - raw_spin_unlock(&fifo_lock); - raw_spin_unlock(entry_lock(entry)); - - if (ret) - break; - } -} - static void color_fifo_release(rt_domain_t *dom, struct bheap *tasks) { unsigned long flags; @@ -471,9 +460,7 @@ static struct task_struct* color_schedule(struct task_struct *prev) if (entry->scheduled && cpu_empty(entry, plink) && is_running(prev)) { TRACE_TASK(prev, "Snuck in on new!\n"); - raw_spin_lock(&fifo_lock); - requeue(task_dom(entry, prev), prev); - raw_spin_unlock(&fifo_lock); + job_arrival(entry->scheduled); } /* Pick next top-level task */ @@ -527,51 +514,38 @@ static struct task_struct* color_schedule(struct task_struct *prev) static void color_task_new(struct task_struct *t, int on_rq, int running) { unsigned long flags; - int i; - raw_spinlock_t *lock; struct cpu_entry *entry; struct dgl_group_req *req; - color_t *colors, *pages; TRACE_TASK(t, "New colored task\n"); - local_irq_save(flags); - entry = (is_be(t)) ? local_entry : task_entry(t); - lock = task_lock(entry, t); - release_at(t, litmus_clock()); + raw_spin_lock_irqsave(entry_lock(entry), flags); req = kmalloc(sizeof(*req), GFP_ATOMIC); - dgl_group_req_init(&group_lock, req); tsk_rt(t)->req = req; - - tsk_rt(t)->max_exec_time = 0; tsk_rt(t)->tot_exec_time = 0; + tsk_rt(t)->max_exec_time = 0; + tsk_rt(t)->ctrl_page->colors_updated = 1; - /* Fill request */ - if (tsk_rt(t)->color_ctrl_page) { - colors = tsk_rt(t)->color_ctrl_page->colors; - pages = tsk_rt(t)->color_ctrl_page->pages; - for (i = 0; pages[i]; i++) - set_req(&group_lock, req, colors[i], pages[i]); - } + release_at(t, litmus_clock()); - /* Join system */ - raw_spin_lock(lock); if (running) { + /* No need to lock with irqs disabled */ TRACE_TASK(t, "Already scheduled on %d\n", entry->server.cpu); BUG_ON(entry->scheduled); entry->scheduled = t; tsk_rt(t)->scheduled_on = entry->server.cpu; - } else - requeue(task_dom(entry, t), t); - raw_spin_unlock(lock); + } else { + job_arrival(t); + } + + raw_spin_unlock(entry_lock(entry)); - /* Trigger necessary preemptions */ if (is_be(t)) check_for_fifo_preempt(); else - litmus_reschedule(entry->server.cpu); + litmus_reschedule_local(); local_irq_restore(flags); } @@ -579,13 +553,13 @@ static void color_task_new(struct task_struct *t, int on_rq, int running) static void color_task_wake_up(struct task_struct *task) { unsigned long flags; - struct cpu_entry* entry = task_entry(task); - raw_spinlock_t *lock = task_lock(entry, task); + struct cpu_entry* entry = local_entry; + int sched; lt_t now = litmus_clock(); TRACE_TASK(task, "Wake up at %llu\n", now); - local_irq_save(flags); + raw_spin_lock_irqsave(entry_lock(entry), flags); /* Abuse sporadic model */ if (is_tardy(task, now)) { @@ -593,21 +567,20 @@ static void color_task_wake_up(struct task_struct *task) sched_trace_task_release(task); } - /* Re-enter system */ - if (entry->scheduled != task) { - raw_spin_lock(lock); - requeue(task_dom(entry, task), task); - raw_spin_unlock(lock); - } else { + sched = (entry->scheduled == task); + + if (!sched) + 
job_arrival(task); + else TRACE_TASK(task, "Is already scheduled on %d!\n", entry->scheduled); - } - /* Trigger preemptions */ + raw_spin_unlock(entry_lock(entry)); if (is_be(task)) check_for_fifo_preempt(); else - litmus_reschedule(entry->server.cpu); + litmus_reschedule_local(); + local_irq_restore(flags); } @@ -619,7 +592,7 @@ static void color_task_block(struct task_struct *t) BUG_ON(is_queued(t)); } -static void color_task_exit(struct task_struct * t) +static void color_task_exit(struct task_struct *t) { unsigned long flags; struct cpu_entry *entry = task_entry(t); @@ -812,8 +785,7 @@ static int __init init_color(void) cpu + num_online_cpus() + 1, cpu, &fifo_domain, - fifo_preemption_needed, - fifo_requeue, fifo_update, fifo_take); + fifo_preemption_needed, fifo_update); /* Create task struct for FIFO server */ @@ -833,8 +805,7 @@ static int __init init_color(void) /* Create CPU server */ cpu_server = &entry->server; init_rt_server(cpu_server, cpu + 1, cpu, - &entry->rm_domain, rm_preemption_needed, - rm_requeue, NULL, NULL); + &entry->rm_domain, rm_preemption_needed, NULL); cpu_server->running = 1; init_enforcement_timer(&fifo_server->timer); -- cgit v1.2.2 From 480a2018363a3ef87e4b2cc93c83d007fef565e1 Mon Sep 17 00:00:00 2001 From: Jonathan Herman Date: Mon, 14 May 2012 21:24:35 -0400 Subject: Commented out two rare bugs which the scheduler can recover from --- litmus/sched_color.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'litmus') diff --git a/litmus/sched_color.c b/litmus/sched_color.c index f095b302ddd6..e67681fb2d24 100644 --- a/litmus/sched_color.c +++ b/litmus/sched_color.c @@ -378,7 +378,7 @@ static void fifo_update(struct rt_server *server) if (!server->linked || has_resources(server->linked, server->cpu)) { /* Running here means linked to a parent server */ - BUG_ON(!server->running); + /* BUG_ON(!server->running); */ /* Stop executing */ if (fserver->start_time) { @@ -388,7 +388,7 @@ static void fifo_update(struct rt_server *server) cancel_enforcement_timer(&fserver->timer); } else { /* Server is linked, but not executing */ - BUG_ON(fserver->timer.armed); + /* BUG_ON(fserver->timer.armed); */ } /* Calculate next task */ -- cgit v1.2.2 From 5b2b006507f91f9beeb9538698018fb61d36caf0 Mon Sep 17 00:00:00 2001 From: Jonathan Herman Date: Tue, 15 May 2012 12:19:02 -0400 Subject: Chunking for locking protocol --- litmus/budget.c | 2 +- litmus/color_proc.c | 9 ++++++++ litmus/sched_color.c | 59 +++++++++++++++++++++++++++++++++++++++++++++------- 3 files changed, 62 insertions(+), 8 deletions(-) (limited to 'litmus') diff --git a/litmus/budget.c b/litmus/budget.c index 84f3f22770b1..b1c0a4b84c02 100644 --- a/litmus/budget.c +++ b/litmus/budget.c @@ -9,7 +9,7 @@ DEFINE_PER_CPU(struct enforcement_timer, budget_timer); -static enum hrtimer_restart on_enforcement_timeout(struct hrtimer *timer) +enum hrtimer_restart on_enforcement_timeout(struct hrtimer *timer) { struct enforcement_timer* et = container_of(timer, struct enforcement_timer, diff --git a/litmus/color_proc.c b/litmus/color_proc.c index 76c540adad37..d770123c5f02 100644 --- a/litmus/color_proc.c +++ b/litmus/color_proc.c @@ -28,6 +28,8 @@ static struct color_cpu_server color_cpu_servers[NR_CPUS]; /* the + 1 is for the sentinel element */ static struct ctl_table color_cpu_tables[NR_CPUS + 1]; +unsigned long color_chunk; + #define INFO_BUFFER_SIZE 100 static char info_buffer[100]; @@ -63,6 +65,13 @@ static struct ctl_table color_table[] = .data = info_buffer, .maxlen = INFO_BUFFER_SIZE, }, + { + 
.procname = "chunk_size", + .mode = 0666, + .proc_handler = proc_doulongvec_minmax, + .data = &color_chunk, + .maxlen = sizeof(color_chunk), + }, { } }; diff --git a/litmus/sched_color.c b/litmus/sched_color.c index e67681fb2d24..e72fdc3bb7d1 100644 --- a/litmus/sched_color.c +++ b/litmus/sched_color.c @@ -41,6 +41,7 @@ struct cpu_entry { rt_domain_t rm_domain; struct task_struct* scheduled; struct fifo_server fifo_server; + struct hrtimer chunk_timer; }; DEFINE_PER_CPU(struct cpu_entry, color_cpus); @@ -70,8 +71,6 @@ static raw_spinlock_t dgl_lock; #define has_resources(t, c) (1) #endif - - /* * Requeue onto domain's release or ready queue based on task state. */ @@ -94,6 +93,34 @@ static void requeue(rt_domain_t *dom, struct task_struct* t) add_release(dom, t); } +enum hrtimer_restart chunk_fire(struct hrtimer *timer) +{ + unsigned long flags; + local_irq_save(flags); + TRACE("Chunk timer fired.\n"); + litmus_reschedule_local(); + local_irq_restore(flags); + return HRTIMER_NORESTART; +} + +void chunk_arm(struct cpu_entry *entry) +{ + unsigned long fire; + if (color_chunk) { + fire = litmus_clock() + color_chunk; + TRACE("Arming chunk timer for %llu\n", fire); + __hrtimer_start_range_ns(&entry->chunk_timer, + ns_to_ktime(fire), 0, + HRTIMER_MODE_ABS_PINNED, 0); + } +} + +void chunk_cancel(struct cpu_entry *entry) +{ + TRACE("Cancelling chunk timer\n"); + hrtimer_try_to_cancel(&entry->chunk_timer); +} + /* * Relinquish resources held by @t (or its children). */ @@ -312,21 +339,28 @@ static void job_completion(struct rt_server *server) */ static void update_task(struct rt_server *server) { - int oot, sleep, block, np; + int oot, sleep, block, np, chunked; struct task_struct *t = server->linked; + lt_t last = tsk_rt(t)->last_exec_time; block = !is_running(t); oot = budget_enforced(t) && budget_exhausted(t); np = is_kernel_np(t); sleep = get_rt_flags(t) == RT_F_SLEEP; - TRACE_TASK(t, "Updating task, block: %d, oot: %d, np: %d, sleep: %d\n", - block, oot, np, sleep); + chunked = color_chunk && last && (lt_after(litmus_clock() - last, color_chunk)); + + TRACE_TASK(t, "Updating task, block: %d, oot: %d, np: %d, sleep: %d, chunk: %d\n", + block, oot, np, sleep, chunked); if (block) unlink(server); else if (oot || sleep) job_completion(server); + else if (chunked) { + unlink(server); + job_arrival(t); + } } /* @@ -498,10 +532,16 @@ static struct task_struct* color_schedule(struct task_struct *prev) entry->fifo_server.start_time = litmus_clock(); } - if (prev) + if (prev) { tsk_rt(prev)->scheduled_on = NO_CPU; - if (next) + tsk_rt(prev)->last_exec_time = 0; + chunk_cancel(entry); + } + if (next) { tsk_rt(next)->scheduled_on = entry->server.cpu; + tsk_rt(next)->last_exec_time = litmus_clock(); + chunk_arm(entry); + } entry->scheduled = next; sched_state_task_picked(); @@ -527,6 +567,7 @@ static void color_task_new(struct task_struct *t, int on_rq, int running) tsk_rt(t)->tot_exec_time = 0; tsk_rt(t)->max_exec_time = 0; tsk_rt(t)->ctrl_page->colors_updated = 1; + tsk_rt(t)->last_exec_time = 0; release_at(t, litmus_clock()); @@ -662,6 +703,8 @@ static long color_activate_plugin(void) struct task_struct *server_task; struct cpu_entry *entry; + color_chunk = 0; + for_each_online_cpu(cpu) { entry = remote_entry(cpu); server_task = entry->fifo_server.task; @@ -809,6 +852,8 @@ static int __init init_color(void) cpu_server->running = 1; init_enforcement_timer(&fifo_server->timer); + hrtimer_init(&entry->chunk_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + entry->chunk_timer.function = chunk_fire; } 
fifo_domain_init(&fifo_domain, NULL, color_fifo_release); -- cgit v1.2.2 From 1cb90226816c7af7808be4c0de866c54da17ecc9 Mon Sep 17 00:00:00 2001 From: Jonathan Herman Date: Tue, 15 May 2012 13:39:52 -0400 Subject: Summarize schedulability with final record --- litmus/sched_color.c | 15 ++++++++++++++- litmus/sched_task_trace.c | 15 ++++++++++++++- 2 files changed, 28 insertions(+), 2 deletions(-) (limited to 'litmus') diff --git a/litmus/sched_color.c b/litmus/sched_color.c index e72fdc3bb7d1..44327d60aaa5 100644 --- a/litmus/sched_color.c +++ b/litmus/sched_color.c @@ -305,7 +305,7 @@ static void job_arrival(struct task_struct *t) static void job_completion(struct rt_server *server) { struct task_struct *t = server->linked; - lt_t et; + lt_t et, now = litmus_clock(); TRACE_TASK(t, "Job completed\n"); if (is_server(t)) @@ -320,6 +320,15 @@ static void job_completion(struct rt_server *server) tsk_rt(t)->max_exec_time = et; } + if (is_tardy(t, now)) { + lt_t miss = now - get_deadline(t); + ++tsk_rt(t)->missed; + tsk_rt(t)->total_tardy += miss; + if (lt_before(tsk_rt(t)->max_tardy, miss)) { + tsk_rt(t)->max_tardy = miss; + } + } + unlink(server); set_rt_flags(t, RT_F_SLEEP); prepare_for_next_period(t); @@ -566,6 +575,9 @@ static void color_task_new(struct task_struct *t, int on_rq, int running) tsk_rt(t)->req = req; tsk_rt(t)->tot_exec_time = 0; tsk_rt(t)->max_exec_time = 0; + tsk_rt(t)->max_tardy = 0; + tsk_rt(t)->missed = 0; + tsk_rt(t)->total_tardy = 0; tsk_rt(t)->ctrl_page->colors_updated = 1; tsk_rt(t)->last_exec_time = 0; @@ -644,6 +656,7 @@ static void color_task_exit(struct task_struct *t) local_irq_save(flags); sched_trace_task_exit(t); + sched_trace_task_tardy(t); /* Remove from scheduler consideration */ if (is_queued(t)) { diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c index 48124b756be7..d4fedaa15744 100644 --- a/litmus/sched_task_trace.c +++ b/litmus/sched_task_trace.c @@ -227,7 +227,7 @@ feather_callback void do_sched_trace_sys_release(unsigned long id, } feather_callback void do_sched_trace_task_exit(unsigned long id, - unsigned long _task) + unsigned long _task) { struct task_struct *t = (struct task_struct*) _task; #ifdef CONFIG_PLUGIN_COLOR @@ -245,6 +245,19 @@ feather_callback void do_sched_trace_task_exit(unsigned long id, } } +feather_callback void do_sched_trace_task_tardy(unsigned long id, + unsigned long _task) +{ + struct task_struct *t = (struct task_struct*) _task; + struct st_event_record *rec = get_record(ST_TASK_TARDY, t); + if (rec) { + rec->data.task_tardy.max_tardy = tsk_rt(t)->max_tardy; + rec->data.task_tardy.total_tardy = tsk_rt(t)->total_tardy; + rec->data.task_tardy.missed = tsk_rt(t)->missed; + put_record(rec); + } +} + feather_callback void do_sched_trace_action(unsigned long id, unsigned long _task, unsigned long action) -- cgit v1.2.2
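Editor's note: the "Chunking for locking protocol" patch above is easiest to follow as a scheduling idea rather than as timer plumbing. A job that is running (and possibly holding replicas under the locking protocol) is kicked back to the tail of its FIFO once it has executed for color_chunk nanoseconds, so the blocking any one job can impose on its competitors is bounded by one chunk instead of by its whole execution. The following is a minimal userspace sketch of that effect; it is not LITMUS^RT code, and every identifier in it (fifo, chunk, remaining, and so on) is invented for illustration.

/*
 * Sketch only: jobs sharing one FIFO run in slices of at most `chunk'
 * time units; an unfinished job is requeued at the tail after each slice.
 */
#include <stdio.h>

#define NJOBS 3

int main(void)
{
	long remaining[NJOBS] = { 5, 2, 4 };	/* outstanding execution per job */
	const long chunk = 2;			/* analogue of the color_chunk sysctl */
	int fifo[NJOBS];			/* circular FIFO of job ids */
	int head = 0, tail = 0, live = NJOBS;
	long now = 0;
	int i;

	for (i = 0; i < NJOBS; i++)
		fifo[tail++ % NJOBS] = i;

	while (live > 0) {
		int job = fifo[head++ % NJOBS];
		long slice = remaining[job] < chunk ? remaining[job] : chunk;

		printf("t=%2ld: job %d runs for %ld\n", now, job, slice);
		now += slice;
		remaining[job] -= slice;

		if (remaining[job] > 0)
			fifo[tail++ % NJOBS] = job;	/* chunk used up: back to the tail */
		else
			live--;				/* job finished */
	}
	return 0;
}

In the patch itself the same effect is obtained in-kernel: color_schedule() records last_exec_time and arms a per-CPU hrtimer (chunk_arm) when a task is picked, chunk_fire() forces a local reschedule when the chunk expires, and update_task() then unlinks the over-running task and requeues it through job_arrival().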
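The execution-time and tardiness patches in this series ("Track tasks average execution time", "Store exec time in completion record", "Summarize schedulability with final record") together change what is accounted per job and what is reported when a task exits. The sketch below condenses that bookkeeping into one runnable userspace program; again, it is not LITMUS^RT code, and all names (task_stats, complete_job, task_exit) are illustrative.

/*
 * Sketch only: per-job execution times are summed and the maximum kept;
 * tardy completions update a miss count, total tardiness and maximum
 * tardiness; one summary is emitted at task exit.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t lt_t;			/* time in nanoseconds */

struct task_stats {
	unsigned int jobs;		/* completed jobs */
	lt_t tot_exec, max_exec;	/* execution-time statistics */
	unsigned int missed;		/* deadline misses */
	lt_t total_tardy, max_tardy;	/* tardiness statistics */
};

/* Mirrors the accounting performed at job completion. */
static void complete_job(struct task_stats *s, lt_t exec_time,
			 lt_t completion, lt_t deadline)
{
	s->jobs++;
	s->tot_exec += exec_time;
	if (exec_time > s->max_exec)
		s->max_exec = exec_time;

	if (completion > deadline) {	/* job was tardy */
		lt_t miss = completion - deadline;
		s->missed++;
		s->total_tardy += miss;
		if (miss > s->max_tardy)
			s->max_tardy = miss;
	}
}

/* Mirrors the TASK_EXIT / TASK_TARDY trace records written at task exit. */
static void task_exit(const struct task_stats *s)
{
	lt_t avg = s->jobs ? s->tot_exec / s->jobs : 0;

	printf("avg_exec=%llu max_exec=%llu missed=%u total_tardy=%llu max_tardy=%llu\n",
	       (unsigned long long)avg, (unsigned long long)s->max_exec,
	       s->missed, (unsigned long long)s->total_tardy,
	       (unsigned long long)s->max_tardy);
}

int main(void)
{
	struct task_stats s = { 0 };

	complete_job(&s, 3000000, 10000000, 12000000);	/* on time */
	complete_job(&s, 5000000, 24000000, 22000000);	/* 2 ms tardy */
	task_exit(&s);
	return 0;
}

One difference from the sketch: the in-kernel version divides tot_exec_time by get_rt_job(t) - 1, presumably because prepare_for_next_period() has already advanced the job counter to the next, not-yet-completed job by the time the exit record is written.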