| author | Jeremy Erickson <jerickso@cs.unc.edu> | 2010-11-01 12:38:28 -0400 |
|---|---|---|
| committer | Jeremy Erickson <jerickso@cs.unc.edu> | 2010-11-01 12:38:28 -0400 |
| commit | 52d270a12afacad23b829da2f0e3534b61345712 | |
| tree | 7264b8aa381af5b57ab3b8731de5385b69ee8c2c | |
| parent | b9c7f11ccc4cf49c02bff214ec0de82221f11711 | |
Split C/D queues (branch: wip-mc-jerickso)
-rw-r--r--  litmus/sched_mc.c | 95
1 file changed, 65 insertions(+), 30 deletions(-)
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 8e201a48db24..636fa5750bd3 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -127,8 +127,9 @@ DEFINE_PER_CPU(rt_domain_t, crit_a);
 #define remote_a_queue(cpu) (&per_cpu(crit_a, cpu))
 #define local_a_queue (&__get_cpu_var(crit_a))
 
-static rt_domain_t crit_c_d;
-#define crit_c_d_lock (crit_c_d.ready_lock)
+static rt_domain_t crit_c;
+static rt_domain_t crit_d;
+#define crit_c_lock (crit_c.ready_lock)
 
 /* BEGIN clone of edf_common.c to allow shared C/D run queue*/
 
@@ -197,10 +198,13 @@ static rt_domain_t *proper_domain(struct task_struct* task)
 	case 1:
 		return remote_b_queue(get_partition(task));
 		break;
+	case 2:
+		return &crit_c;
+	case 3:
+		return &crit_d;
 	default:
-		/*Assume G-EDF*/
-		return &crit_c_d;
-		break;
+		/*Should never get here*/
+		BUG();
 	}
 }
 
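For context: criticality levels 0 (A) and 1 (B) use partitioned, per-CPU domains, while levels 2 (C) and 3 (D), which previously shared one `crit_c_d` run queue, now each get their own global domain. A minimal userspace model of this dispatch (illustrative only; `domain_t` and the array sizes below are hypothetical stand-ins, not LITMUS^RT code):

```c
#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's rt_domain_t. */
typedef struct { const char *name; } domain_t;

static domain_t crit_a[4];               /* per-CPU level-A domains */
static domain_t crit_b[4];               /* per-CPU level-B domains */
static domain_t crit_c = { "crit_c" };   /* global level-C domain   */
static domain_t crit_d = { "crit_d" };   /* global level-D domain   */

/* Mirrors proper_domain(): levels 0/1 are partitioned, 2/3 global. */
static domain_t *proper_domain(int crit, int cpu)
{
	switch (crit) {
	case 0: return &crit_a[cpu];
	case 1: return &crit_b[cpu];
	case 2: return &crit_c;
	case 3: return &crit_d;
	default: assert(0 && "invalid criticality level"); return 0;
	}
}

int main(void)
{
	assert(proper_domain(2, 0) == &crit_c);
	assert(proper_domain(3, 1) == &crit_d);
	assert(proper_domain(0, 2) == &crit_a[2]);
	printf("dispatch model OK\n");
	return 0;
}
```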
@@ -363,18 +367,38 @@ static noinline void requeue(struct task_struct* task)
 	}
 }
 
-/* check for any necessary level C and D preemptions */
-static void check_for_gedf_preemptions(void)
+/* check for any necessary level C preemptions */
+static void check_for_c_preemptions(void)
+{
+	struct task_struct *task;
+	cpu_entry_t* last;
+
+	for(last = lowest_prio_cpu();
+	    mc_edf_preemption_needed(&crit_c, last->linked);
+	    last = lowest_prio_cpu()) {
+		/* preemption necessary */
+		task = __take_ready(&crit_c);
+		TRACE("check_for_c_preemptions: attempting to link task %d to %d\n",
+		      task->pid, last->cpu);
+		if (last->linked)
+			requeue(last->linked);
+		link_task_to_cpu(task, last);
+		preempt(last);
+	}
+}
+
+/* check for any necessary level D preemptions */
+static void check_for_d_preemptions(void)
 {
 	struct task_struct *task;
 	cpu_entry_t* last;
 
 	for(last = lowest_prio_cpu();
-	    mc_edf_preemption_needed(&crit_c_d, last->linked);
+	    mc_edf_preemption_needed(&crit_d, last->linked);
 	    last = lowest_prio_cpu()) {
 		/* preemption necessary */
-		task = __take_ready(&crit_c_d);
-		TRACE("check_for_gedf_preemptions: attempting to link task %d to %d\n",
+		task = __take_ready(&crit_d);
+		TRACE("check_for_d_preemptions: attempting to link task %d to %d\n",
 		      task->pid, last->cpu);
 		if (last->linked)
 			requeue(last->linked);
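Note that `check_for_c_preemptions()` and `check_for_d_preemptions()` are line-for-line clones differing only in which domain they drain. A possible consolidation (a sketch, not part of this patch; every identifier below appears in the surrounding file) would parameterize the domain:

```c
/* Hypothetical shared helper (not in this patch): drain 'dom' while
 * its highest-priority ready task outranks some CPU's linked task,
 * exactly as the two cloned functions do.
 */
static void check_for_global_preemptions(rt_domain_t *dom, const char *tag)
{
	struct task_struct *task;
	cpu_entry_t* last;

	for (last = lowest_prio_cpu();
	     mc_edf_preemption_needed(dom, last->linked);
	     last = lowest_prio_cpu()) {
		/* preemption necessary */
		task = __take_ready(dom);
		TRACE("%s: attempting to link task %d to %d\n",
		      tag, task->pid, last->cpu);
		if (last->linked)
			requeue(last->linked);
		link_task_to_cpu(task, last);
		preempt(last);
	}
}

/* Callers would then become one-liners:
 *   check_for_global_preemptions(&crit_c, "check_for_c_preemptions");
 *   check_for_global_preemptions(&crit_d, "check_for_d_preemptions");
 */
```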
@@ -432,11 +456,16 @@ static noinline void mc_job_arrival(struct task_struct* task)
 	if (task->rt_param.task_params.crit == 0){
 		check_for_a_preemption(remote_cpu_entry(get_partition(task)));
 	}
-	if (task->rt_param.task_params.crit == 1){
+	else if (task->rt_param.task_params.crit == 1){
 		check_for_pedf_preemption(remote_cpu_entry(
 				get_partition(task)));
 	}
-	check_for_gedf_preemptions();
+	else if (task->rt_param.task_params.crit == 2){
+		check_for_c_preemptions();
+	}
+	else if (task->rt_param.task_params.crit == 3){
+		check_for_d_preemptions();
+	}
 }
 
 static void mc_release_jobs(rt_domain_t* rt, struct bheap* tasks)
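The patch dispatches on the `crit` field with an if/else chain; since `crit` takes exactly one of four values, an equivalent `switch` (a hypothetical rewrite, not in this patch) would make the four-level dispatch explicit:

```c
/* Hypothetical equivalent of the dispatch in mc_job_arrival()
 * (not in this patch); behavior is identical to the else-if chain.
 */
switch (task->rt_param.task_params.crit) {
case 0: /* level A: partitioned, per-CPU */
	check_for_a_preemption(remote_cpu_entry(get_partition(task)));
	break;
case 1: /* level B: partitioned EDF */
	check_for_pedf_preemption(remote_cpu_entry(get_partition(task)));
	break;
case 2: /* level C: global queue */
	check_for_c_preemptions();
	break;
case 3: /* level D: global queue */
	check_for_d_preemptions();
	break;
}
```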
@@ -444,7 +473,7 @@ static void mc_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 	unsigned long flags;
 	int i;
 
-	raw_spin_lock_irqsave(&crit_c_d_lock, flags);
+	raw_spin_lock_irqsave(&crit_c_lock, flags);
 	TRACE("mc_release_jobs triggered\n");
 
 	__merge_ready(rt, tasks);
@@ -457,14 +486,17 @@ static void mc_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 			check_for_a_preemption(remote_cpu_entry(i));
 		}
 	}
-	if (rt == &crit_c_d){
-		check_for_gedf_preemptions();
+	if (rt == &crit_c){
+		check_for_c_preemptions();
+	}
+	else if (rt == &crit_d){
+		check_for_d_preemptions();
 	}
 
-	raw_spin_unlock_irqrestore(&crit_c_d_lock, flags);
+	raw_spin_unlock_irqrestore(&crit_c_lock, flags);
 }
 
-/* caller holds crit_c_d_lock */
+/* caller holds crit_c_lock */
 static noinline void job_completion(struct task_struct *t, int forced)
 {
 	BUG_ON(!t);
@@ -550,7 +582,7 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 		return NULL;
 #endif
 
-	raw_spin_lock(&crit_c_d_lock);
+	raw_spin_lock(&crit_c_lock);
 	clear_will_schedule();
 
 	/* sanity checking */
@@ -613,7 +645,9 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 		if (!ready_task)
 			ready_task = __take_ready(local_b_queue);
 		if (!ready_task)
-			ready_task = __take_ready(&crit_c_d);
+			ready_task = __take_ready(&crit_c);
+		if (!ready_task)
+			ready_task = __take_ready(&crit_d);
 		link_task_to_cpu(ready_task, entry);
 		if (ready_task)
 			TRACE_TASK(ready_task, "Linked task inside scheduler\n");
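With the split, `mc_schedule()` now pulls from four ready queues in strict criticality order: local A, local B, global C, then global D, so a level-C task always outranks a level-D task. A tiny userspace model of this fixed-priority cascade (all names below are hypothetical, illustrative only):

```c
#include <stdio.h>
#include <stddef.h>

/* Hypothetical model: each queue yields a task id, or 0 if empty. */
typedef int (*take_ready_fn)(void);

static int take_a(void) { return 0;  }  /* level-A queue empty      */
static int take_b(void) { return 0;  }  /* level-B queue empty      */
static int take_c(void) { return 42; }  /* level-C has a ready task */
static int take_d(void) { return 99; }  /* never reached here       */

int main(void)
{
	/* Strict order mirrors the __take_ready() cascade in mc_schedule(). */
	take_ready_fn queues[] = { take_a, take_b, take_c, take_d };
	int task = 0;
	size_t i;

	for (i = 0; i < sizeof(queues)/sizeof(queues[0]) && !task; i++)
		task = queues[i]();

	printf("picked task %d (expect 42: C outranks D)\n", task);
	return 0;
}
```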
@@ -642,10 +676,10 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 		next = prev;
 
 	/*TODO: Update name of locking, reflect that we're locking all queues*/
-	raw_spin_unlock(&crit_c_d_lock);
+	raw_spin_unlock(&crit_c_lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
-	TRACE("crit_c_d_lock released, next=0x%p\n", next);
+	TRACE("crit_c_lock released, next=0x%p\n", next);
 
 	if (next)
 		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
@@ -680,7 +714,7 @@ static void mc_task_new(struct task_struct * t, int on_rq, int running)
 
 	TRACE("mixed crit: task new %d\n", t->pid);
 
-	raw_spin_lock_irqsave(&crit_c_d_lock, flags);
+	raw_spin_lock_irqsave(&crit_c_lock, flags);
 
 	/* setup job params */
 	release_at(t, litmus_clock());
@@ -707,7 +741,7 @@ static void mc_task_new(struct task_struct * t, int on_rq, int running)
 	t->rt_param.linked_on = NO_CPU;
 
 	mc_job_arrival(t);
-	raw_spin_unlock_irqrestore(&crit_c_d_lock, flags);
+	raw_spin_unlock_irqrestore(&crit_c_lock, flags);
 }
 
 static void mc_task_wake_up(struct task_struct *task)
@@ -717,7 +751,7 @@ static void mc_task_wake_up(struct task_struct *task)
 
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
 
-	raw_spin_lock_irqsave(&crit_c_d_lock, flags);
+	raw_spin_lock_irqsave(&crit_c_lock, flags);
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
 	 * a semaphore, it should never be treated as a new job release.
@@ -740,7 +774,7 @@ static void mc_task_wake_up(struct task_struct *task)
 		}
 	}
 	mc_job_arrival(task);
-	raw_spin_unlock_irqrestore(&crit_c_d_lock, flags);
+	raw_spin_unlock_irqrestore(&crit_c_lock, flags);
 }
 
 static void mc_task_block(struct task_struct *t)
@@ -750,9 +784,9 @@ static void mc_task_block(struct task_struct *t)
 	TRACE_TASK(t, "block at %llu\n", litmus_clock());
 
 	/* unlink if necessary */
-	raw_spin_lock_irqsave(&crit_c_d_lock, flags);
+	raw_spin_lock_irqsave(&crit_c_lock, flags);
 	unlink(t);
-	raw_spin_unlock_irqrestore(&crit_c_d_lock, flags);
+	raw_spin_unlock_irqrestore(&crit_c_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 }
@@ -763,13 +797,13 @@ static void mc_task_exit(struct task_struct * t)
 	unsigned long flags;
 
 	/* unlink if necessary */
-	raw_spin_lock_irqsave(&crit_c_d_lock, flags);
+	raw_spin_lock_irqsave(&crit_c_lock, flags);
 	unlink(t);
 	if (tsk_rt(t)->scheduled_on != NO_CPU) {
 		mc_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
 		tsk_rt(t)->scheduled_on = NO_CPU;
 	}
-	raw_spin_unlock_irqrestore(&crit_c_d_lock, flags);
+	raw_spin_unlock_irqrestore(&crit_c_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
@@ -842,7 +876,8 @@ static int __init init_mc(void)
 		entry->hn = &mc_heap_node[cpu];
 		bheap_node_init(&entry->hn, entry);
 	}
-	mc_edf_domain_init(&crit_c_d, NULL, mc_release_jobs);
+	mc_edf_domain_init(&crit_c, NULL, mc_release_jobs);
+	mc_edf_domain_init(&crit_d, NULL, mc_release_jobs);
 	for (i = 0; i < NR_CPUS; i++){
 		mc_edf_domain_init(remote_b_queue(i), NULL,
 				mc_release_jobs);
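The two global domains are initialized with back-to-back calls. A table-driven setup (a sketch, not part of this patch; `global_domains` and `init_global_domains` are hypothetical names, while `ARRAY_SIZE` is the standard kernel macro) would keep the init one line per added level:

```c
/* Hypothetical consolidation (not in this patch): drive the global
 * domain setup from a table, mirroring the per-CPU init loop above.
 */
static rt_domain_t *global_domains[] = { &crit_c, &crit_d };

static void init_global_domains(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(global_domains); i++)
		mc_edf_domain_init(global_domains[i], NULL, mc_release_jobs);
}
```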
