author     Jonathan Herman <hermanjl@cs.unc.edu>   2011-09-23 13:29:37 -0400
committer  Jonathan Herman <hermanjl@cs.unc.edu>   2011-09-23 13:29:37 -0400
commit     2a4e168d2932ff470b898a57794cd87ee1a3d2a4 (patch)
tree       26cb8a3f2d9822d486e41b2fdea69fb5de2c7f92
parent     334e08aeedcab61faca6be333e065c33098a8cda (diff)
transferring machines
-rw-r--r--  include/litmus/sched_plugin.h |  2
-rw-r--r--  litmus/sched_mc.c             | 99
2 files changed, 67 insertions, 34 deletions
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index 6e7cabdddae8..32c23974e45a 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -11,6 +11,8 @@
 #include <litmus/locking.h>
 #endif
 
+struct litmus_lock;
+
 /************************ setup/tear down ********************/
 
 typedef long (*activate_plugin_t) (void);
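The header hunk above adds only a forward declaration: later prototypes in sched_plugin.h can then mention pointers to struct litmus_lock even when <litmus/locking.h> is not pulled in (its include is guarded by a config #ifdef just above the hunk). A minimal sketch of the pattern; the callback typedef below is illustrative, not necessarily the one in the header:

/* An incomplete type is enough wherever only pointers to it appear. */
struct litmus_lock;

/* Hypothetical plugin callback: compiles without the full definition. */
typedef long (*allocate_lock_t)(struct litmus_lock **lock, int type);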
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 6ece904ff257..3e3aaa126d3f 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -260,7 +260,59 @@ static void job_arrival(struct task_struct *task)
         }
 }
 
-static void
+/**
+ * low_prio_arrival() - If CONFIG_PLUGIN_MC_REDIRECT is enabled, will
+ * redirect lower priority job_arrival work to the interrupt_cpu.
+ */
+static void low_prio_arrival(struct task_struct *task)
+{
+        cpu_entry_t *entry;
+
+#ifdef CONFIG_PLUGIN_MC_REDIRECT
+#ifndef CONFIG_PLUGIN_MC_REDIRECT_ALL
+        if (!is_global_task(task))
+                goto arrive;
+#endif
+        if (smp_processor_id() != interrupt_cpu) {
+                entry = cpus[smp_processor_id()];
+                raw_spin_lock(&entry->redir_lock);
+                list_add(&tsk_rt(task)->list, &entry->redir);
+                raw_spin_unlock(&entry->redir_lock);
+                litmus_reschedule(interrupt_cpu);
+        } else
+#endif
+        {
+arrive:
+                job_arrival(task);
+        }
+}
+
+#ifdef CONFIG_PLUGIN_MC_REDIRECT
+/**
+ * fix_global_levels() - Execute redirected job arrivals on this cpu.
+ */
+static void fix_global_levels(void)
+{
+        int c;
+        cpu_entry_t *e;
+        struct list_head *pos, *safe;
+        struct task_struct *t;
+
+        TRACE("Fixing global levels\n");
+        for_each_online_cpu(c) {
+                e = cpus[c];
+                raw_spin_lock(&e->redir_lock);
+                list_for_each_safe(pos, safe, &e->redir) {
+                        t = list_entry(pos, struct task_struct, rt_param.list);
+                        TRACE_TASK(t, "Arriving yo");
+                        BUG_ON(is_queued(t));
+                        list_del_init(pos);
+                        job_arrival(t);
+                }
+                raw_spin_unlock(&e->redir_lock);
+        }
+}
+#endif
 
 /**
  * link_task_to_cpu() - Logically run a task on a CPU.
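The two functions added above form a producer/consumer pair: CPUs other than interrupt_cpu push deferred arrivals onto their own lock-protected redir list and poke interrupt_cpu, which later walks every CPU's list and performs the arrivals. A standalone sketch of that redirect-and-drain pattern, with pthreads standing in for the kernel's raw spinlocks and the litmus_reschedule() IPI (all names below are hypothetical):

#include <pthread.h>
#include <stdio.h>

struct redir_node {                  /* stands in for tsk_rt(task)->list */
        int task_id;
        struct redir_node *next;
};

struct redir_queue {                 /* one per CPU, like cpu_entry_t */
        pthread_mutex_t lock;        /* entry->redir_lock */
        struct redir_node *head;     /* entry->redir */
};

/* Producer side (low_prio_arrival): defer instead of arriving locally. */
static void redirect_arrival(struct redir_queue *q, struct redir_node *n)
{
        pthread_mutex_lock(&q->lock);
        n->next = q->head;
        q->head = n;
        pthread_mutex_unlock(&q->lock);
        /* the kernel would now call litmus_reschedule(interrupt_cpu) */
}

/* Consumer side (fix_global_levels): drain one queue under its lock. */
static void drain_arrivals(struct redir_queue *q)
{
        pthread_mutex_lock(&q->lock);
        while (q->head) {
                struct redir_node *n = q->head;
                q->head = n->next;
                printf("arrival for task %d\n", n->task_id); /* job_arrival() */
        }
        pthread_mutex_unlock(&q->lock);
}

Note that the kernel version calls job_arrival() while still holding the per-CPU redir_lock, which presumes job_arrival() never takes that lock itself.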
@@ -312,33 +364,6 @@ static void preempt(domain_t *dom, crit_entry_t *ce)
         }
 }
 
-#ifdef CONFIG_PLUGIN_MC_REDIRECT
-/**
- * fix_global_levels() - Execute redirected job arrivals on this cpu.
- */
-static void fix_global_levels(void)
-{
-        int c;
-        cpu_entry_t *e;
-        struct list_head *pos, *safe;
-        struct task_struct *t;
-
-        TRACE("Fixing global levels\n");
-        for_each_online_cpu(c) {
-                e = cpus[c];
-                raw_spin_lock(&e->redir_lock);
-                list_for_each_safe(pos, safe, &e->redir) {
-                        t = list_entry(pos, struct task_struct, rt_param.list);
-                        TRACE_TASK(t, "Arriving yo");
-                        BUG_ON(is_queued(t));
-                        list_del_init(pos);
-                        job_arrival(t);
-                }
-                raw_spin_unlock(&e->redir_lock);
-        }
-}
-#endif
-
 /**
  * update_crit_levels() - Update criticality entries for the new cpu state.
  * This should be called after a new task has been linked to @entry.
@@ -366,8 +391,8 @@ static void update_crit_levels(cpu_entry_t *entry)
                 for (i = level + 1; i < NUM_CRIT_LEVELS; i++) {
                         ce = &entry->crit_entries[i];
                         TRACE("Checking %s\n", ce->domain->name);
-                        if (!tasks[i]) continue;
-                        job_arrival(tasks[i]);
+                        if (tasks[i])
+                                low_prio_arrival(tasks[i]);
                 }
         }
 
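This hunk gives the new path its main caller: when update_crit_levels() strips tasks from the criticality levels below a newly linked task, their requeues now go through low_prio_arrival() instead of job_arrival(), so on a redirect-enabled kernel that work can migrate to interrupt_cpu. A condensed, self-contained view of the loop; the stand-in types below are hypothetical, only the control flow mirrors the hunk:

/* Hypothetical stand-ins so the fragment compiles on its own. */
enum { NUM_CRIT_LEVELS = 3 };
struct task;                                   /* opaque stand-in */
extern void low_prio_arrival(struct task *t);  /* from the hunk above */

/* After linking a task at 'level', requeue everything that was
 * unlinked at the lower-priority levels below it. */
static void requeue_lower_levels(int level, struct task *tasks[])
{
        int i;
        for (i = level + 1; i < NUM_CRIT_LEVELS; i++)
                if (tasks[i])
                        low_prio_arrival(tasks[i]);
}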
@@ -664,7 +689,7 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
         domain_t *dom;
         crit_entry_t *ce;
         cpu_entry_t* entry = cpus[smp_processor_id()];
-        int i, out_of_time, sleep, preempt, exists, blocks, global;
+        int i, out_of_time, sleep, preempt, exists, blocks, global, lower;
         struct task_struct *dtask = NULL, *ready_task = NULL, *next = NULL;
 
         local_irq_save(flags);
@@ -682,6 +707,8 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
         sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
         global = exists && is_global_task(entry->scheduled);
         preempt = entry->scheduled != entry->linked;
+        lower = preempt && entry->linked &&
+                tsk_mc_crit(entry->scheduled) < tsk_mc_crit(entry->linked);
 
         if (exists) {
                 entry->scheduled->rt_param.scheduled_on = NO_CPU;
@@ -705,10 +732,14 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
         if ((out_of_time || sleep) && !blocks && !preempt)
                 job_completion(entry->scheduled, !sleep);
         /* Global scheduled tasks must wait for a deschedule before they
-         * can rejoin a global domain. Requeue them here.
+         * can rejoin the global state. Rejoin them here.
         */
-        else if (global && preempt && !blocks)
-                job_arrival(entry->scheduled);
+        else if (global && preempt && !blocks) {
+                if (lower)
+                        low_prio_arrival(entry->scheduled);
+                else
+                        job_arrival(entry->scheduled);
+        }
 
         /* Pick next task if none is linked */
         raw_spin_lock(&entry->lock);
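Tying the pieces together: when a preempted global task must be requeued, mc_schedule() now consults the new lower flag, which is set when the descheduled task's criticality level enumerates below the newly linked task's under tsk_mc_crit() ordering; only then is the requeue pushed through the deferrable path. A hedged, self-contained sketch of that decision; the struct and helper signatures below are stand-ins, only the comparison mirrors the hunk:

/* Hypothetical stand-ins so the fragment compiles on its own. */
struct task { int crit; };                /* tsk_mc_crit() stand-in */
extern void low_prio_arrival(struct task *t);
extern void job_arrival(struct task *t);

static void requeue_descheduled(struct task *scheduled, struct task *linked)
{
        /* 'lower': the descheduled task's level enumerates below the
         * newly linked task's, so its requeue may be redirected. */
        int lower = linked && scheduled->crit < linked->crit;

        if (lower)
                low_prio_arrival(scheduled);  /* may defer to interrupt_cpu */
        else
                job_arrival(scheduled);       /* requeue on this CPU */
}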