author    Glenn Elliott <gelliott@cs.unc.edu>    2011-06-21 01:29:34 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>   2011-08-27 11:58:39 -0400
commit    592eaca1409e55407e980f71b2ec604ca3610ba5 (patch)
tree      43cadd8d3f9cc150a7b108696bfabcddcff55650 /litmus
parent    fb8d6602af1cbc09115544056b872b976c6349c3 (diff)
Avoid needlessly costly migrations. CONFIG_SCHED_CPU_AFFINITY

Given a choice between several available (unlinked) CPUs on which to
schedule a task, let the scheduler select the CPU closest to where that
task was previously scheduled, in the hope of reducing cache migration
penalties.

Notes: SCHED_CPU_AFFINITY depends on x86 (only x86 is supported at this
time). Also, PFair/PD^2 does not make use of this feature.

Signed-off-by: Andrea Bastoni <bastoni@cs.unc.edu>
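The nearest-CPU search itself is provided by get_nearest_available_cpu() from litmus/affinity.h, whose header lives outside this diff's 'litmus/' path filter. As a rough sketch only (find_nearest_cpu() and is_available() are invented names for illustration; the real helper is a macro over per-plugin CPU state, see the plugin hunks below), the intended level-by-level walk over the cache hierarchy is:

#include <linux/cpumask.h>
#include <litmus/affinity.h>	/* neigh_info[], NUM_CACHE_LEVELS */

/* Illustrative sketch, not part of this commit: walk outward through
 * the cache levels recorded in neigh_info[] (built by init_topology()
 * in litmus/affinity.c below) and take the first acceptable neighbor
 * of the CPU the task last ran on. */
static int find_nearest_cpu(int prev_cpu, bool (*is_available)(int cpu))
{
	int level, cpu;

	for (level = 0; level < NUM_CACHE_LEVELS; ++level) {
		/* neighbors[0] = CPUs sharing L1, [1] = L2, ... */
		for_each_cpu(cpu, (struct cpumask *)
				&neigh_info[prev_cpu].neighbors[level]) {
			if (is_available(cpu))
				return cpu;
		}
	}
	return -1;	/* nothing nearby; caller falls back to its default */
}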
Diffstat (limited to 'litmus')
-rw-r--r--  litmus/Kconfig          | 19
-rw-r--r--  litmus/Makefile         |  1
-rw-r--r--  litmus/affinity.c       | 44
-rw-r--r--  litmus/litmus.c         |  8
-rw-r--r--  litmus/sched_cedf.c     | 33
-rw-r--r--  litmus/sched_gsn_edf.c  | 41
6 files changed, 143 insertions(+), 3 deletions(-)
diff --git a/litmus/Kconfig b/litmus/Kconfig
index ad8dc8308cf0..d7fde6f97e14 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -62,6 +62,25 @@ config LITMUS_LOCKING
 
 endmenu
 
+menu "Performance Enhancements"
+
+config SCHED_CPU_AFFINITY
+	bool "Local Migration Affinity"
+	depends on X86
+	default y
+	help
+	  Rescheduled tasks prefer CPUs near to their previously used CPU. This
+	  may improve performance through possible preservation of cache affinity.
+
+	  Warning: May make bugs harder to find since tasks may migrate less often.
+
+	  NOTES:
+	  	* Feature is not utilized by PFair/PD^2.
+
+	  Say Yes if unsure.
+
+endmenu
+
 menu "Tracing"
 
 config FEATHER_TRACE
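With the option enabled (the default on x86 builds), the configuration simply gains the line below, which the Makefile hunk that follows keys off:

CONFIG_SCHED_CPU_AFFINITY=y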
diff --git a/litmus/Makefile b/litmus/Makefile
index ad9936e07b83..7338180f196f 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -21,6 +21,7 @@ obj-y = sched_plugin.o litmus.o \
 
 obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
 obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
+obj-$(CONFIG_SCHED_CPU_AFFINITY) += affinity.o
 
 obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
 obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
diff --git a/litmus/affinity.c b/litmus/affinity.c
new file mode 100644
index 000000000000..9adab7a3bcd7
--- /dev/null
+++ b/litmus/affinity.c
@@ -0,0 +1,44 @@
+#include <linux/cpu.h>
+
+#include <litmus/affinity.h>
+
+struct neighborhood neigh_info[NR_CPUS];
+
+/* called by _init_litmus() */
+void init_topology(void) {
+	int cpu;
+	int i;
+	int chk;
+	int depth = num_cache_leaves;
+
+	if (depth > NUM_CACHE_LEVELS)
+		depth = NUM_CACHE_LEVELS;
+
+	for_each_online_cpu(cpu) {
+		for (i = 0; i < depth; ++i) {
+			long unsigned int firstbits;
+
+			chk = get_shared_cpu_map((struct cpumask *)&neigh_info[cpu].neighbors[i], cpu, i);
+			if (chk) {
+				/* failed */
+				neigh_info[cpu].size[i] = 0;
+			} else {
+				/* size = num bits in mask */
+				neigh_info[cpu].size[i] =
+					cpumask_weight((struct cpumask *)&neigh_info[cpu].neighbors[i]);
+			}
+			firstbits = *neigh_info[cpu].neighbors[i]->bits;
+			printk("CPU %d has %d neighbors at level %d. (mask = %lx)\n",
+				cpu, neigh_info[cpu].size[i], i, firstbits);
+		}
+
+		/* set data for non-existent levels */
+		for (; i < NUM_CACHE_LEVELS; ++i) {
+			neigh_info[cpu].size[i] = 0;
+
+			printk("CPU %d has %d neighbors at level %d. (mask = %lx)\n",
+				cpu, neigh_info[cpu].size[i], i, 0lu);
+		}
+	}
+}
+
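init_topology() records, per CPU and per cache level, the mask of CPUs sharing that cache and the mask's weight. The structure it fills is declared in include/litmus/affinity.h, again outside this diff's path filter; a declaration consistent with the accesses above would look like this (a sketch; the value of NUM_CACHE_LEVELS is an assumption):

/* Sketch of the layout implied by litmus/affinity.c above; the real
 * declaration lives in include/litmus/affinity.h. */
#define NUM_CACHE_LEVELS 4	/* assumed bound on tracked cache levels */

struct neighborhood {
	/* number of CPUs sharing this CPU's level-i cache */
	unsigned int size[NUM_CACHE_LEVELS];
	/* one mask of those CPUs per cache level */
	cpumask_var_t neighbors[NUM_CACHE_LEVELS];
};

extern struct neighborhood neigh_info[NR_CPUS];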
diff --git a/litmus/litmus.c b/litmus/litmus.c
index bb8c6c7e9dd1..73af6c3010d6 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -17,6 +17,10 @@
 #include <litmus/litmus_proc.h>
 #include <litmus/sched_trace.h>
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+#include <litmus/affinity.h>
+#endif
+
 /* Number of RT tasks that exist in the system */
 atomic_t rt_task_count = ATOMIC_INIT(0);
 static DEFINE_RAW_SPINLOCK(task_transition_lock);
@@ -540,6 +544,10 @@ static int __init _init_litmus(void)
 
 	init_litmus_proc();
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+	init_topology();
+#endif
+
 	return 0;
 }
 
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 73fe1c442a0d..e29a9fe2a8e8 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -43,6 +43,10 @@
 
 #include <litmus/bheap.h>
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+#include <litmus/affinity.h>
+#endif
+
 /* to configure the cluster size */
 #include <litmus/litmus_proc.h>
 #include <linux/uaccess.h>
@@ -257,6 +261,23 @@ static noinline void requeue(struct task_struct* task)
 	}
 }
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+static cpu_entry_t* cedf_get_nearest_available_cpu(
+				cedf_domain_t *cluster, cpu_entry_t* start)
+{
+	cpu_entry_t* affinity;
+
+	get_nearest_available_cpu(affinity, start, cedf_cpu_entries, -1);
+
+	/* make sure CPU is in our cluster */
+	if (affinity && cpu_isset(affinity->cpu, *cluster->cpu_map))
+		return(affinity);
+	else
+		return(NULL);
+}
+#endif
+
+
 /* check for any necessary preemptions */
 static void check_for_preemptions(cedf_domain_t *cluster)
 {
@@ -270,8 +291,20 @@ static void check_for_preemptions(cedf_domain_t *cluster)
 		task = __take_ready(&cluster->domain);
 		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
 		      task->pid, last->cpu);
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+		{
+			cpu_entry_t* affinity =
+					cedf_get_nearest_available_cpu(cluster,
+						&per_cpu(cedf_cpu_entries, task_cpu(task)));
+			if(affinity)
+				last = affinity;
+			else if(last->linked)
+				requeue(last->linked);
+		}
+#else
 		if (last->linked)
 			requeue(last->linked);
+#endif
 		link_task_to_cpu(task, last);
 		preempt(last);
 	}
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 3092797480f8..17926e9fccdc 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -23,6 +23,10 @@
 
 #include <litmus/bheap.h>
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+#include <litmus/affinity.h>
+#endif
+
 #include <linux/module.h>
 
 /* Overview of GSN-EDF operations.
@@ -253,21 +257,52 @@ static noinline void requeue(struct task_struct* task)
 	}
 }
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+static cpu_entry_t* gsnedf_get_nearest_available_cpu(cpu_entry_t* start)
+{
+	cpu_entry_t* affinity;
+
+	get_nearest_available_cpu(affinity, start, gsnedf_cpu_entries,
+#ifdef CONFIG_RELEASE_MASTER
+			gsnedf.release_master
+#else
+			-1
+#endif
+			);
+
+	return(affinity);
+}
+#endif
+
 /* check for any necessary preemptions */
 static void check_for_preemptions(void)
 {
 	struct task_struct *task;
 	cpu_entry_t* last;
 
-	for(last = lowest_prio_cpu();
-	    edf_preemption_needed(&gsnedf, last->linked);
-	    last = lowest_prio_cpu()) {
+	for (last = lowest_prio_cpu();
+	     edf_preemption_needed(&gsnedf, last->linked);
+	     last = lowest_prio_cpu()) {
 		/* preemption necessary */
 		task = __take_ready(&gsnedf);
 		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
 		      task->pid, last->cpu);
+
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+		{
+			cpu_entry_t* affinity =
+					gsnedf_get_nearest_available_cpu(
+						&per_cpu(gsnedf_cpu_entries, task_cpu(task)));
+			if (affinity)
+				last = affinity;
+			else if (last->linked)
+				requeue(last->linked);
+		}
+#else
 		if (last->linked)
 			requeue(last->linked);
+#endif
+
 		link_task_to_cpu(task, last);
 		preempt(last);
 	}
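Both plugins invoke the helper as a statement-like macro that deposits its result in its first argument. Its contract, as implied by the call sites above (a reading of the usage, not the actual header):

/* get_nearest_available_cpu(nearest, start, entries, release_master)
 *
 * nearest        - out: cpu_entry_t* of the closest CPU to 'start'
 *                  without a linked task, or NULL if none was found
 * start          - per-CPU entry of the CPU the task last ran on
 * entries        - name of the plugin's per-CPU cpu_entry_t variable,
 *                  passed by name (presumably why this is a macro:
 *                  GSN-EDF and C-EDF each define their own array)
 * release_master - CPU the search must skip, or -1 when unused
 */

Note that C-EDF additionally filters the result against its cluster's cpu_map: neigh_info[] describes the whole machine's cache topology, while a C-EDF task may only be linked within its own cluster.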