 include/litmus/affinity.h | 72
 litmus/Kconfig            | 19
 litmus/Makefile           |  1
 litmus/affinity.c         | 49
 litmus/litmus.c           | 10
 litmus/sched_cedf.c       | 33
 litmus/sched_gsn_edf.c    | 34
 7 files changed, 217 insertions(+), 1 deletion(-)
diff --git a/include/litmus/affinity.h b/include/litmus/affinity.h
new file mode 100644
index 000000000000..2bcb09c70046
--- /dev/null
+++ b/include/litmus/affinity.h
@@ -0,0 +1,72 @@
+#ifndef __LITMUS_AFFINITY_H
+#define __LITMUS_AFFINITY_H
+
+#include <linux/cpumask.h>
+
+#define MAX_CACHE_DEPTH 3
+
+struct neighborhood
+{
+	unsigned int size[MAX_CACHE_DEPTH];
+	cpumask_var_t neighbors[MAX_CACHE_DEPTH];
+};
+
+/* topology info is stored redundantly in a big array for fast lookups */
+extern struct neighborhood neigh_info[NR_CPUS];
+
+void init_topology(void); /* called by Litmus module's _init_litmus() */
+
+/* Works like:
+void get_nearest_available_cpu(cpu_entry_t* nearest, cpu_entry_t* start, cpu_entry_t* entries, int release_master)
+
+Set release_master = -1 if there is no release master.
+ */
+#define get_nearest_available_cpu(nearest, start, entries, release_master) \
+{ \
+	if(!(start)->linked) \
+	{ \
+		(nearest) = (start); \
+	} \
+	else \
+	{ \
+		int __level; \
+		int __cpu; \
+		struct neighborhood* __neighbors = &neigh_info[(start)->cpu]; \
+		(nearest) = NULL; \
+		\
+		for(__level = 0; (__level < MAX_CACHE_DEPTH) && !(nearest); ++__level) \
+		{ \
+			if(__neighbors->size[__level] > 1) \
+			{ \
+				for_each_cpu(__cpu, __neighbors->neighbors[__level]) \
+				{ \
+					if(__cpu != (release_master)) \
+					{ \
+						cpu_entry_t* __entry = &per_cpu((entries), __cpu); \
+						if(!__entry->linked) \
+						{ \
+							(nearest) = __entry; \
+							break; \
+						} \
+					} \
+				} \
+			} \
+			else if(__neighbors->size[__level] == 0) \
+			{ \
+				break; \
+			} \
+		} \
+	} \
+	\
+	if((nearest)) \
+	{ \
+		TRACE("P%d is closest available CPU to P%d\n", (nearest)->cpu, (start)->cpu); \
+	} \
+	else \
+	{ \
+		TRACE("Could not find an available CPU close to P%d\n", \
+			(start)->cpu); \
+	} \
+}
+
+#endif
diff --git a/litmus/Kconfig b/litmus/Kconfig
index ad8dc8308cf0..65c881b6ba12 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -62,6 +62,25 @@ config LITMUS_LOCKING
 
 endmenu
 
+menu "Performance Enhancements"
+
+config SCHED_CPU_AFFINITY
+	bool "Local Migration Affinity"
+	default y
+	help
+	  Rescheduled tasks prefer CPUs near their previously used CPU. This
+	  may improve performance by preserving cache affinity.
+
+	  Warning: May make bugs harder to find since tasks may migrate less often.
+
+	  NOTES:
+		* Pfair/PD^2 does not support this option.
+		* Only x86 is currently supported.
+
+	  Say Yes if unsure.
+
+endmenu
+
 menu "Tracing"
 
 config FEATHER_TRACE
diff --git a/litmus/Makefile b/litmus/Makefile
index ad9936e07b83..7338180f196f 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -21,6 +21,7 @@ obj-y = sched_plugin.o litmus.o \
 
 obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
 obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
+obj-$(CONFIG_SCHED_CPU_AFFINITY) += affinity.o
 
 obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
 obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
diff --git a/litmus/affinity.c b/litmus/affinity.c
new file mode 100644
index 000000000000..9d5405cf05fd
--- /dev/null
+++ b/litmus/affinity.c
@@ -0,0 +1,49 @@
+#include <linux/cpu.h>
+
+#include <litmus/affinity.h>
+
+struct neighborhood neigh_info[NR_CPUS];
+
+/* called by _init_litmus() */
+void init_topology(void)
+{
+	int cpu;
+	int i;
+	int chk;
+	int depth = num_cache_leaves;
+
+	if(depth > MAX_CACHE_DEPTH) /* L4 and greater?? */
+		depth = MAX_CACHE_DEPTH;
+
+	for_each_online_cpu(cpu)
+	{
+		for(i = 0; i < (int)depth; ++i)
+		{
+			long unsigned int firstbits;
+
+			chk = get_shared_cpu_map((struct cpumask *)&neigh_info[cpu].neighbors[i], cpu, i);
+			if(chk) /* failed */
+			{
+				neigh_info[cpu].size[i] = 0;
+			}
+			else
+			{
+				/* size = num bits in mask */
+				neigh_info[cpu].size[i] = cpumask_weight((struct cpumask *)&neigh_info[cpu].neighbors[i]);
+			}
+			firstbits = *neigh_info[cpu].neighbors[i]->bits;
+			printk("CPU %d has %d neighbors at level %d. (mask = %lx)\n",
+				cpu, neigh_info[cpu].size[i], i, firstbits);
+		}
+
+		/* set data for non-existent levels */
+		for(; i < MAX_CACHE_DEPTH; ++i)
+		{
+			neigh_info[cpu].size[i] = 0;
+
+			printk("CPU %d has %d neighbors at level %d. (mask = %lx)\n",
+				cpu, neigh_info[cpu].size[i], i, 0lu);
+		}
+	}
+}
+
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 26938acacafc..73af6c3010d6 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -17,6 +17,10 @@
 #include <litmus/litmus_proc.h>
 #include <litmus/sched_trace.h>
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+#include <litmus/affinity.h>
+#endif
+
 /* Number of RT tasks that exist in the system */
 atomic_t rt_task_count = ATOMIC_INIT(0);
 static DEFINE_RAW_SPINLOCK(task_transition_lock);
@@ -115,7 +119,7 @@ asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param)
 	    tp.cls != RT_CLASS_BEST_EFFORT)
 	{
 		printk(KERN_INFO "litmus: real-time task %d rejected "
-		       "because its class is invalid\n");
+		       "because its class is invalid\n", pid);
 		goto out_unlock;
 	}
 	if (tp.budget_policy != NO_ENFORCEMENT &&
@@ -540,6 +544,10 @@ static int __init _init_litmus(void)
 
 	init_litmus_proc();
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+	init_topology();
+#endif
+
 	return 0;
 }
 
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 73fe1c442a0d..1538695c1c1d 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -43,6 +43,10 @@
 
 #include <litmus/bheap.h>
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+#include <litmus/affinity.h>
+#endif
+
 /* to configure the cluster size */
 #include <litmus/litmus_proc.h>
 #include <linux/uaccess.h>
@@ -257,6 +261,23 @@ static noinline void requeue(struct task_struct* task)
 	}
 }
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+static cpu_entry_t* cedf_get_nearest_available_cpu(
+	cedf_domain_t *cluster, cpu_entry_t* start)
+{
+	cpu_entry_t* affinity;
+
+	get_nearest_available_cpu(affinity, start, cedf_cpu_entries, -1);
+
+	/* make sure CPU is in our cluster */
+	if(cpu_isset(affinity->cpu, *cluster->cpu_map))
+		return(affinity);
+	else
+		return(NULL);
+}
+#endif
+
+
 /* check for any necessary preemptions */
 static void check_for_preemptions(cedf_domain_t *cluster)
 {
@@ -270,8 +291,20 @@ static void check_for_preemptions(cedf_domain_t *cluster)
 		task = __take_ready(&cluster->domain);
 		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
 		      task->pid, last->cpu);
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+		{
+			cpu_entry_t* affinity =
+				cedf_get_nearest_available_cpu(cluster,
+					&per_cpu(cedf_cpu_entries, task_cpu(task)));
+			if(affinity)
+				last = affinity;
+			else if(last->linked)
+				requeue(last->linked);
+		}
+#else
 		if (last->linked)
 			requeue(last->linked);
+#endif
 		link_task_to_cpu(task, last);
 		preempt(last);
 	}
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 3092797480f8..c22bba91b319 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -23,6 +23,10 @@
 
 #include <litmus/bheap.h>
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+#include <litmus/affinity.h>
+#endif
+
 #include <linux/module.h>
 
 /* Overview of GSN-EDF operations.
@@ -253,6 +257,23 @@ static noinline void requeue(struct task_struct* task)
 	}
 }
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+static cpu_entry_t* gsnedf_get_nearest_available_cpu(cpu_entry_t* start)
+{
+	cpu_entry_t* affinity;
+
+	get_nearest_available_cpu(affinity, start, gsnedf_cpu_entries,
+#ifdef CONFIG_RELEASE_MASTER
+		gsnedf.release_master
+#else
+		-1
+#endif
+		);
+
+	return(affinity);
+}
+#endif
+
 /* check for any necessary preemptions */
 static void check_for_preemptions(void)
 {
@@ -266,8 +287,21 @@ static void check_for_preemptions(void)
 		task = __take_ready(&gsnedf);
 		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
 		      task->pid, last->cpu);
+
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+		{
+			cpu_entry_t* affinity = gsnedf_get_nearest_available_cpu(
+				&per_cpu(gsnedf_cpu_entries, task_cpu(task)));
+			if(affinity)
+				last = affinity;
+			else if(last->linked)
+				requeue(last->linked);
+		}
+#else
 		if (last->linked)
 			requeue(last->linked);
+#endif
+
 		link_task_to_cpu(task, last);
 		preempt(last);
 	}