 litmus/Kconfig         | 18 ++++++++++++++++++
 litmus/sched_cedf.c    | 24 +++++++++++++++++++++---
 litmus/sched_gsn_edf.c | 28 +++++++++++++++++++++++++---
 3 files changed, 64 insertions(+), 6 deletions(-)
diff --git a/litmus/Kconfig b/litmus/Kconfig
index ad8dc8308cf0..651b66a77c65 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -36,6 +36,24 @@ config RELEASE_MASTER
 
 endmenu
 
+menu "Performance Hacks"
+
+config CPU_SCHED_BIAS
+	bool "Tasks prefer previous CPU(s)."
+	default n
+	help
+	  Allow tasks to be rescheduled on their previously used CPU(s) if
+	  that CPU is not currently executing real-time work. This may
+	  improve performance by preserving cache affinity.
+
+	  Warning: May make bugs harder to find, as tasks will migrate less
+	  frequently.
+
+	  Only a bias towards the previously used CPU is currently
+	  implemented. Future work: bias based on CPU topology.
+
+endmenu
+
 menu "Real-Time Synchronization"
 
 config NP_SECTION
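
Note that the option defaults to n: with it disabled, both plugins below compile only the unmodified path, since every change in the scheduler files is guarded by #ifdef CONFIG_CPU_SCHED_BIAS. Existing configurations are therefore unaffected unless CONFIG_CPU_SCHED_BIAS is explicitly enabled.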
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 73fe1c442a0d..851de716de0a 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -261,17 +261,35 @@ static noinline void requeue(struct task_struct* task)
 static void check_for_preemptions(cedf_domain_t *cluster)
 {
 	struct task_struct *task;
-	cpu_entry_t* last;
+	cpu_entry_t *last;
+
+#ifdef CONFIG_CPU_SCHED_BIAS
+	cpu_entry_t *pref;
+#endif
 
 	for(last = lowest_prio_cpu(cluster);
 	    edf_preemption_needed(&cluster->domain, last->linked);
 	    last = lowest_prio_cpu(cluster)) {
 		/* preemption necessary */
 		task = __take_ready(&cluster->domain);
-		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
-		      task->pid, last->cpu);
+
+#ifdef CONFIG_CPU_SCHED_BIAS
+		/* where was task last scheduled? */
+		pref = &per_cpu(cedf_cpu_entries, task_cpu(task));
+
+		if (!pref->linked) {
+			last = pref;
+		}
+		else if (last->linked) {
+			requeue(last->linked);
+		}
+#else /* end CONFIG_CPU_SCHED_BIAS */
 		if (last->linked)
 			requeue(last->linked);
+#endif
+
+		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
+		      task->pid, last->cpu);
 		link_task_to_cpu(task, last);
 		preempt(last);
 	}
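
To make the hunk easier to follow, here is the bias decision in isolation: if the CPU where the task last ran has no linked real-time task, the task is linked there; otherwise the code falls back to preempting the lowest-priority CPU, requeueing its previously linked task as before. This is a minimal, self-contained sketch; struct task, struct cpu_entry, and pick_target are hypothetical stand-ins for the LITMUS^RT types, not the kernel definitions.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins for task_struct and cpu_entry_t; only the
 * fields the bias logic touches are modeled. */
struct task { int pid; int last_cpu; };
struct cpu_entry { int cpu; struct task *linked; };

/* Decide which CPU a ready task should be linked to. 'entries' indexes
 * CPU entries by CPU id (mirroring per_cpu()); 'lowest' is the
 * lowest-priority CPU, the default preemption victim. */
static struct cpu_entry *pick_target(struct task *t,
                                     struct cpu_entry *entries,
                                     struct cpu_entry *lowest)
{
        struct cpu_entry *pref = &entries[t->last_cpu];

        /* Bias: if the task's previous CPU has nothing linked, reuse it
         * and keep whatever cache state survives. */
        if (!pref->linked)
                return pref;

        /* Fall back: preempt the lowest-priority CPU. The caller must
         * requeue lowest->linked, as check_for_preemptions() does. */
        return lowest;
}

int main(void)
{
        struct cpu_entry cpus[2] = { { 0, NULL }, { 1, NULL } };
        struct task t = { .pid = 42, .last_cpu = 1 };
        struct cpu_entry *target = pick_target(&t, cpus, &cpus[0]);

        printf("task %d -> CPU %d\n", t.pid, target->cpu); /* task 42 -> CPU 1 */
        return 0;
}

Note the asymmetry in the patched loop: when the preferred CPU is free, last is simply redirected to it and nothing is requeued, because no task is being preempted in that case.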
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index c5c9600c33d8..dd670f5219c1 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -257,17 +257,39 @@ static noinline void requeue(struct task_struct* task)
 static void check_for_preemptions(void)
 {
 	struct task_struct *task;
-	cpu_entry_t* last;
+	cpu_entry_t *last;
+
+#ifdef CONFIG_CPU_SCHED_BIAS
+	cpu_entry_t *pref;
+#endif
 
 	for(last = lowest_prio_cpu();
 	    edf_preemption_needed(&gsnedf, last->linked);
 	    last = lowest_prio_cpu()) {
 		/* preemption necessary */
 		task = __take_ready(&gsnedf);
-		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
-		      task->pid, last->cpu);
+
+#ifdef CONFIG_CPU_SCHED_BIAS
+		/* where was task last scheduled? */
+		pref = &per_cpu(gsnedf_cpu_entries, task_cpu(task));
+
+		if (!pref->linked
+#ifdef CONFIG_RELEASE_MASTER
+		    && (pref->cpu != gsnedf.release_master)
+#endif
+		   ) {
+			last = pref;
+		}
+		else if (last->linked) {
+			requeue(last->linked);
+		}
+#else /* end CONFIG_CPU_SCHED_BIAS */
 		if (last->linked)
 			requeue(last->linked);
+#endif
+
+		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
+		      task->pid, last->cpu);
 		link_task_to_cpu(task, last);
 		preempt(last);
 	}
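
The global (GSN-EDF) variant applies the same bias with one extra guard: under CONFIG_RELEASE_MASTER, the preferred CPU is rejected if it is the release master, i.e. the CPU set aside to process job releases rather than to run real-time tasks. Extending the sketch above (reusing its struct task and struct cpu_entry; release_master is a hypothetical parameter standing in for gsnedf.release_master, with -1 meaning no release master is configured):

/* Reuses struct task and struct cpu_entry from the sketch above. */
static struct cpu_entry *pick_target_global(struct task *t,
                                            struct cpu_entry *entries,
                                            struct cpu_entry *lowest,
                                            int release_master)
{
        struct cpu_entry *pref = &entries[t->last_cpu];

        /* The bias applies only if the previous CPU is idle from the
         * scheduler's point of view and is not the release master. */
        if (!pref->linked && pref->cpu != release_master)
                return pref;

        /* Otherwise preempt the lowest-priority CPU, exactly as in the
         * unbiased path. */
        return lowest;
}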