From 592eaca1409e55407e980f71b2ec604ca3610ba5 Mon Sep 17 00:00:00 2001
From: Glenn Elliott
Date: Tue, 21 Jun 2011 01:29:34 -0400
Subject: Avoid needlessly costly migrations. CONFIG_SCHED_CPU_AFFINITY

Given a choice between several available CPUs (unlinked) on which to
schedule a task, let the scheduler select the CPU closest to where that
task was previously scheduled. This should reduce cache migration
penalties.

Notes: SCHED_CPU_AFFINITY depends on x86 (only x86 is supported at this
time). Also, PFair/PD^2 does not make use of this feature.

Signed-off-by: Andrea Bastoni
---
 litmus/Kconfig         | 19 +++++++++++++++++++
 litmus/Makefile        |  1 +
 litmus/affinity.c      | 44 ++++++++++++++++++++++++++++++++++++++++++++
 litmus/litmus.c        |  8 ++++++++
 litmus/sched_cedf.c    | 33 +++++++++++++++++++++++++++++++++
 litmus/sched_gsn_edf.c | 41 ++++++++++++++++++++++++++++++++++++++---
 6 files changed, 143 insertions(+), 3 deletions(-)
 create mode 100644 litmus/affinity.c

diff --git a/litmus/Kconfig b/litmus/Kconfig
index ad8dc8308cf0..d7fde6f97e14 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -62,6 +62,25 @@ config LITMUS_LOCKING
 
 endmenu
 
+menu "Performance Enhancements"
+
+config SCHED_CPU_AFFINITY
+	bool "Local Migration Affinity"
+	depends on X86
+	default y
+	help
+	  Rescheduled tasks prefer CPUs near their previously used CPU. This
+	  may improve performance by preserving cache affinity.
+
+	  Warning: May make bugs harder to find since tasks may migrate less often.
+
+	  NOTES:
+	  * Feature is not utilized by PFair/PD^2.
+
+	  Say Yes if unsure.
+
+endmenu
+
 menu "Tracing"
 
 config FEATHER_TRACE
diff --git a/litmus/Makefile b/litmus/Makefile
index ad9936e07b83..7338180f196f 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -21,6 +21,7 @@ obj-y     = sched_plugin.o litmus.o \
 
 obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
 obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
+obj-$(CONFIG_SCHED_CPU_AFFINITY) += affinity.o
 
 obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
 obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
diff --git a/litmus/affinity.c b/litmus/affinity.c
new file mode 100644
index 000000000000..9adab7a3bcd7
--- /dev/null
+++ b/litmus/affinity.c
@@ -0,0 +1,44 @@
+#include <linux/cpu.h>
+
+#include <litmus/affinity.h>
+
+struct neighborhood neigh_info[NR_CPUS];
+
+/* called by _init_litmus() */
+void init_topology(void) {
+	int cpu;
+	int i;
+	int chk;
+	int depth = num_cache_leaves;
+
+	if (depth > NUM_CACHE_LEVELS)
+		depth = NUM_CACHE_LEVELS;
+
+	for_each_online_cpu(cpu) {
+		for (i = 0; i < depth; ++i) {
+			unsigned long firstbits;
+
+			chk = get_shared_cpu_map((struct cpumask *)&neigh_info[cpu].neighbors[i], cpu, i);
+			if (chk) {
+				/* failed */
+				neigh_info[cpu].size[i] = 0;
+			} else {
+				/* size = num bits in mask */
+				neigh_info[cpu].size[i] =
+					cpumask_weight((struct cpumask *)&neigh_info[cpu].neighbors[i]);
+			}
+			firstbits = *neigh_info[cpu].neighbors[i]->bits;
+			printk("CPU %d has %d neighbors at level %d. (mask = %lx)\n",
+				cpu, neigh_info[cpu].size[i], i, firstbits);
+		}
+
+		/* set data for non-existent levels */
+		for (; i < NUM_CACHE_LEVELS; ++i) {
+			neigh_info[cpu].size[i] = 0;
+
+			printk("CPU %d has %d neighbors at level %d. (mask = %lx)\n",
+				cpu, neigh_info[cpu].size[i], i, 0lu);
+		}
+	}
+}
diff --git a/litmus/litmus.c b/litmus/litmus.c
index bb8c6c7e9dd1..73af6c3010d6 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -17,6 +17,10 @@
 #include <litmus/litmus_proc.h>
 #include <litmus/sched_trace.h>
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+#include <litmus/affinity.h>
+#endif
+
 /* Number of RT tasks that exist in the system */
 atomic_t rt_task_count = ATOMIC_INIT(0);
 static DEFINE_RAW_SPINLOCK(task_transition_lock);
@@ -540,6 +544,10 @@ static int __init _init_litmus(void)
 
 	init_litmus_proc();
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+	init_topology();
+#endif
+
 	return 0;
 }
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 73fe1c442a0d..e29a9fe2a8e8 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -43,6 +43,10 @@
 
 #include <litmus/bheap.h>
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+#include <litmus/affinity.h>
+#endif
+
 /* to configure the cluster size */
 #include <litmus/litmus_proc.h>
 #include <linux/uaccess.h>
@@ -257,6 +261,23 @@ static noinline void requeue(struct task_struct* task)
 	}
 }
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+static cpu_entry_t* cedf_get_nearest_available_cpu(
+				cedf_domain_t *cluster, cpu_entry_t* start)
+{
+	cpu_entry_t* affinity;
+
+	get_nearest_available_cpu(affinity, start, cedf_cpu_entries, -1);
+
+	/* make sure CPU is in our cluster */
+	if (affinity && cpu_isset(affinity->cpu, *cluster->cpu_map))
+		return(affinity);
+	else
+		return(NULL);
+}
+#endif
+
+
 /* check for any necessary preemptions */
 static void check_for_preemptions(cedf_domain_t *cluster)
 {
@@ -270,8 +291,20 @@ static void check_for_preemptions(cedf_domain_t *cluster)
 		task = __take_ready(&cluster->domain);
 		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
 		      task->pid, last->cpu);
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+		{
+			cpu_entry_t* affinity =
+				cedf_get_nearest_available_cpu(cluster,
+					&per_cpu(cedf_cpu_entries, task_cpu(task)));
+			if (affinity)
+				last = affinity;
+			else if (last->linked)
+				requeue(last->linked);
+		}
+#else
 		if (last->linked)
 			requeue(last->linked);
+#endif
 		link_task_to_cpu(task, last);
 		preempt(last);
 	}
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 3092797480f8..17926e9fccdc 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -23,6 +23,10 @@
 
 #include <litmus/bheap.h>
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+#include <litmus/affinity.h>
+#endif
+
 #include <linux/module.h>
 
 /* Overview of GSN-EDF operations.
@@ -253,21 +257,52 @@ static noinline void requeue(struct task_struct* task)
 	}
 }
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+static cpu_entry_t* gsnedf_get_nearest_available_cpu(cpu_entry_t* start)
+{
+	cpu_entry_t* affinity;
+
+	get_nearest_available_cpu(affinity, start, gsnedf_cpu_entries,
+#ifdef CONFIG_RELEASE_MASTER
+			gsnedf.release_master
+#else
+			-1
+#endif
+			);
+
+	return(affinity);
+}
+#endif
+
 /* check for any necessary preemptions */
 static void check_for_preemptions(void)
 {
 	struct task_struct *task;
 	cpu_entry_t* last;
 
-	for(last = lowest_prio_cpu();
-	    edf_preemption_needed(&gsnedf, last->linked);
-	    last = lowest_prio_cpu()) {
+	for (last = lowest_prio_cpu();
+	     edf_preemption_needed(&gsnedf, last->linked);
+	     last = lowest_prio_cpu()) {
 		/* preemption necessary */
 		task = __take_ready(&gsnedf);
 		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
 		      task->pid, last->cpu);
+
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+		{
+			cpu_entry_t* affinity =
+				gsnedf_get_nearest_available_cpu(
+					&per_cpu(gsnedf_cpu_entries, task_cpu(task)));
+			if (affinity)
+				last = affinity;
+			else if (last->linked)
+				requeue(last->linked);
+		}
+#else
 		if (last->linked)
 			requeue(last->linked);
+#endif
+
 		link_task_to_cpu(task, last);
 		preempt(last);
 	}
--
cgit v1.2.2
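
The get_nearest_available_cpu() helper invoked by both plugins lives in
litmus/affinity.h, which is not part of this diff, so its body is not shown
here. As a rough sketch of the idea only -- scan the neighbor masks recorded
by init_topology() from the innermost cache level outward and take the first
free CPU -- here is a self-contained user-space approximation. The toy
topology table, the available[] array, and find_nearest_available() are all
hypothetical names for illustration, not LITMUS^RT code:

/*
 * Illustrative user-space sketch only -- not the litmus/affinity.h
 * implementation. Assumed names: NUM_LEVELS, neighbors[], available[],
 * find_nearest_available().
 */
#include <stdio.h>

#define NUM_CPUS   4
#define NUM_LEVELS 2	/* e.g. level 0 = shared L2, level 1 = shared L3 */

/* neighbors[cpu][level]: mask of CPUs sharing that cache level with cpu */
static const unsigned long neighbors[NUM_CPUS][NUM_LEVELS] = {
	/* CPUs 0+1 share an L2, CPUs 2+3 share an L2, all four share the L3 */
	{ 0x3, 0xf }, { 0x3, 0xf }, { 0xc, 0xf }, { 0xc, 0xf },
};

/* walk outward through the cache levels; return the first free CPU found */
static int find_nearest_available(int last_cpu, const int available[])
{
	int level, cpu;

	for (level = 0; level < NUM_LEVELS; ++level)
		for (cpu = 0; cpu < NUM_CPUS; ++cpu)
			if ((neighbors[last_cpu][level] & (1ul << cpu)) &&
			    available[cpu])
				return cpu;
	return -1;	/* no free CPU: caller falls back to its default */
}

int main(void)
{
	int available[NUM_CPUS] = { 0, 1, 0, 1 };	/* CPUs 1 and 3 idle */

	/* task last ran on CPU 0: expect CPU 1 (shared L2 beats shared L3) */
	printf("nearest available to CPU 0: %d\n",
	       find_nearest_available(0, available));
	return 0;
}

Because the levels are scanned in order, a CPU sharing an L2 with the task's
previous CPU always wins over one that shares only the L3, which is the
preference the patch gives check_for_preemptions() in both GSN-EDF and C-EDF:
if a nearby CPU is unlinked, the task is linked there instead of the globally
lowest-priority CPU, and only otherwise does the original requeue path run.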