author		Jeremy Erickson <jerickso@cs.unc.edu>	2010-10-09 21:13:50 -0400
committer	Jeremy Erickson <jerickso@cs.unc.edu>	2010-10-09 21:13:50 -0400
commit		b9c7f11ccc4cf49c02bff214ec0de82221f11711 (patch)
tree		3dccc623df56171c83085147fc222e8d5c550d3e
parent		71c4f1c19357f2562168663a0bf57c4dfbb9302b (diff)
weak level A support
-rw-r--r--	litmus/sched_mc.c	52
1 file changed, 45 insertions, 7 deletions
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index c780ff6bac43..8e201a48db24 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -123,6 +123,10 @@ DEFINE_PER_CPU(rt_domain_t, crit_b);
 #define remote_b_queue(cpu) (&per_cpu(crit_b, cpu))
 #define local_b_queue (&__get_cpu_var(crit_b))
 
+DEFINE_PER_CPU(rt_domain_t, crit_a);
+#define remote_a_queue(cpu) (&per_cpu(crit_a, cpu))
+#define local_a_queue (&__get_cpu_var(crit_a))
+
 static rt_domain_t crit_c_d;
 #define crit_c_d_lock (crit_c_d.ready_lock)
 
@@ -188,10 +192,10 @@ static rt_domain_t *proper_domain(struct task_struct* task)
 	switch (task->rt_param.task_params.crit)
 	{
 	case 0:
-		/*TODO: implement level A*/
+		return remote_a_queue(get_partition(task));
+		break;
 	case 1:
-		return remote_b_queue(
-				task->rt_param.task_params.cpu);
+		return remote_b_queue(get_partition(task));
 		break;
 	default:
 		/*Assume G-EDF*/
@@ -274,7 +278,7 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 	 * We can only safely swap if neither task is
 	 * partitioned.
 	 */
-	if (0 && entry != sched &&
+	if (entry != sched &&
 	    linked->rt_param.task_params.crit > 1 &&
 	    (!sched->linked ||
 	    sched->linked->rt_param.task_params.crit > 1)){
@@ -344,9 +348,12 @@ static void preempt(cpu_entry_t *entry)
  */
 static noinline void requeue(struct task_struct* task)
 {
-	BUG_ON(!task);
+	BUG_ON(!task || !is_realtime(task));
 	/* sanity check before insertion */
 	BUG_ON(is_queued(task));
+	/* Should never call on a level A task.*/
+	/*Allowing this for now*/
+	/* BUG_ON(task->rt_param.task_params.crit == 0);*/
 
 	if (is_released(task, litmus_clock()))
 		__add_ready(proper_domain(task), task);
@@ -376,6 +383,24 @@ static void check_for_gedf_preemptions(void)
 	}
 }
 
+/* Check for level A preemption*/
+static void check_for_a_preemption(cpu_entry_t* entry)
+{
+	struct task_struct *task;
+	rt_domain_t *queue;
+	queue = remote_a_queue(entry->cpu);
+
+	if (__peek_ready(queue)){
+		task = __take_ready(queue);
+		TRACE("check_for_a_preemption: attempting to link task %d to %d\n",
+				task->pid, entry->cpu);
+		if (entry->linked)
+			requeue(entry->linked);
+		link_task_to_cpu(task, entry);
+		preempt(entry);
+	}
+}
+
 /* Check for level B preemption*/
 static void check_for_pedf_preemption(cpu_entry_t* entry)
 {
@@ -399,13 +424,17 @@ static void check_for_pedf_preemption(cpu_entry_t* entry)
 /* mc_job_arrival: task is either resumed or released */
 static noinline void mc_job_arrival(struct task_struct* task)
 {
+	cpu_entry_t *entry;
 	BUG_ON(!task);
 
 	TRACE("mc_job_arrival triggered\n");
 	requeue(task);
+	if (task->rt_param.task_params.crit == 0){
+		check_for_a_preemption(remote_cpu_entry(get_partition(task)));
+	}
 	if (task->rt_param.task_params.crit == 1){
 		check_for_pedf_preemption(remote_cpu_entry(
 				get_partition(task)));
 	}
 	check_for_gedf_preemptions();
 }
@@ -424,6 +453,9 @@ static void mc_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 		if (rt == remote_b_queue(i)){
 			check_for_pedf_preemption(remote_cpu_entry(i));
 		}
+		else if (rt == remote_a_queue(i)){
+			check_for_a_preemption(remote_cpu_entry(i));
+		}
 	}
 	if (rt == &crit_c_d){
 		check_for_gedf_preemptions();
@@ -577,7 +609,9 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 	/* Link pending task if we became unlinked.
 	 */
 	if (!entry->linked){
-		ready_task = __take_ready(local_b_queue);
+		ready_task = __take_ready(local_a_queue);
+		if (!ready_task)
+			ready_task = __take_ready(local_b_queue);
 		if (!ready_task)
 			ready_task = __take_ready(&crit_c_d);
 		link_task_to_cpu(ready_task, entry);
@@ -813,6 +847,10 @@ static int __init init_mc(void)
 		mc_edf_domain_init(remote_b_queue(i), NULL,
 				mc_release_jobs);
 	}
+	for (i = 0; i < NR_CPUS; i++){
+		mc_edf_domain_init(remote_a_queue(i), NULL,
+				mc_release_jobs);
+	}
 	return register_sched_plugin(&mc_plugin);
 }
 
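For readers unfamiliar with the plugin, here is a minimal userspace C sketch (not part of the commit) of the two behaviors this patch adds: the per-CPU level-A queue is consulted before the level-B and shared C/D queues when an unlinked CPU picks a task, and a pending level-A job always displaces whatever is linked to its partition's CPU. struct cpu_state, pick_next(), and the integer "queues" are invented stand-ins; the real plugin operates on rt_domain_t ready queues via __peek_ready()/__take_ready() and links tasks with link_task_to_cpu().

#include <stdio.h>

/* Toy stand-ins for the plugin's per-criticality ready queues.
 * 0 means "queue empty"; otherwise the value is a task pid. */
struct cpu_state {
	int cpu;
	int queue_a;   /* per-CPU level-A queue (crit_a in the patch) */
	int queue_b;   /* per-CPU level-B queue (crit_b)              */
	int linked;    /* task currently linked to this CPU, 0 = none */
};

static int queue_cd;   /* shared level-C/D queue (crit_c_d) */

/* Analog of the new linking order in mc_schedule(): level A first,
 * then level B, then the global C/D queue. */
static void pick_next(struct cpu_state *e)
{
	if (e->linked)
		return;
	if (e->queue_a)      { e->linked = e->queue_a; e->queue_a = 0; }
	else if (e->queue_b) { e->linked = e->queue_b; e->queue_b = 0; }
	else if (queue_cd)   { e->linked = queue_cd;   queue_cd   = 0; }
}

/* Analog of check_for_a_preemption(): a pending level-A task always
 * displaces whatever is linked to its partition's CPU. */
static void check_for_a_preemption(struct cpu_state *e)
{
	if (!e->queue_a)
		return;
	if (e->linked)
		e->queue_b = e->linked; /* the real code calls requeue() */
	e->linked = e->queue_a;
	e->queue_a = 0;
	printf("cpu %d: level-A task %d preempts\n", e->cpu, e->linked);
}

int main(void)
{
	struct cpu_state e = { .cpu = 0, .queue_b = 42 };
	queue_cd = 7;

	pick_next(&e);                 /* level A empty: links B task 42 */
	printf("cpu 0 linked: %d\n", e.linked);

	e.queue_a = 11;                /* a level-A job arrives */
	check_for_a_preemption(&e);    /* task 11 displaces task 42 */
	printf("cpu 0 linked: %d\n", e.linked);
	return 0;
}

Compiled and run, the sketch links the level-B task (pid 42) first, then shows the level-A arrival (pid 11) preempting it on its partition, which mirrors the ordering the patched mc_schedule() and check_for_a_preemption() enforce.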