path: root/litmus/sched_mc.c
author    Jonathan Herman <hermanjl@cs.unc.edu> 2012-10-10 00:48:19 -0400
committer Jonathan Herman <hermanjl@cs.unc.edu> 2012-10-10 00:48:19 -0400
commit    0a84a160ea79d9479d17b02b5332413159f52be2 (patch)
tree      5bdb8e596ddaca392ad1bf2587148d7b8b90ee98 /litmus/sched_mc.c
parent    1b6e7d3a78db7ea7eb0e20f1bca5b49079739a7c (diff)
Server tasks stay linked when client tasks release.
Diffstat (limited to 'litmus/sched_mc.c')
-rw-r--r--	litmus/sched_mc.c	| 103
1 file changed, 66 insertions(+), 37 deletions(-)
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 3fb223255792..33adc860b6ba 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -653,10 +653,7 @@ static void remove_from_all(struct task_struct* task)
 		} else {
 			TRACE_MC_TASK(task, "Unlinked before we got lock!\n");
 		}
-		if (update)
-			update_crit_levels(entry);
-		else
-			raw_spin_unlock(&entry->lock);
+		raw_spin_unlock(&entry->lock);
 	} else {
 		TRACE_MC_TASK(task, "Not linked to anything\n");
 	}
@@ -676,46 +673,64 @@ static void job_completion(struct task_struct *task, int forced)
 {
 	lt_t now;
 	int release_server;
-	TRACE_MC_TASK(task, "Completed\n");
+	struct cpu_entry *entry;
+	struct crit_entry *ce;
 
-	/* Logically stop the task execution */
-	remove_from_all(task);
-	set_rt_flags(task, RT_F_SLEEP);
+	TRACE_MC_TASK(task, "Completed\n");
 
 	if (!forced) {
 		/* Userspace signaled job completion */
 		sched_trace_task_completion(current, 0);
 		setup_user_release(current, get_user_deadline(current));
-
 	}
 
-	release_server = budget_exhausted(task);
 #ifndef CONFIG_PLUGIN_MC_LINUX_SLACK_STEALING
 	/* Release lowest-criticality task's servers with their userspace tasks,
-	 * preventing them from turning into ghost tasks and forcing idleness
+	 * preventing them from turning into idle ghost tasks
 	 */
 	if (tsk_mc_crit(task) == NUM_CRIT_LEVELS - 1)
-		release_server |= is_ghost(task);
+		release_server = 1;
+	else
 #endif
+	release_server = budget_exhausted(task);
 
-	/* If server has run out of budget, wait until next release
-	 * TODO: Level A does this independently and should not.
-	 */
-	if (release_server && CRIT_LEVEL_A != tsk_mc_crit(task)) {
-		sched_trace_server_completion(-task->pid, get_rt_job(task));
-		prepare_for_next_period(task);
+	if (release_server || forced) {
+		/* Only unlink (and release resources) if the current server job
+		 * must stop logically running
+		 */
+		remove_from_all(task);
 	}
 
-	now = litmus_clock();
-	if (lt_before(get_user_release(task), now)) {
+	if (lt_before(get_user_release(task), litmus_clock())) {
 		TRACE_TASK(task, "Executable task going back to running\n");
 		set_rt_flags(task, RT_F_RUNNING);
 	}
 
+	if (release_server || forced) {
+		/* TODO: Level A does this independently and should not */
+		if (release_server && CRIT_LEVEL_A != tsk_mc_crit(task)) {
+			sched_trace_server_completion(-task->pid,
+						      get_rt_job(task));
+			prepare_for_next_period(task);
+		}
 
-	if (is_running(task)) {
 		/* Requeue non-blocking tasks */
-		job_arrival(task);
+		if (is_running(task)) {
+			job_arrival(task);
+		}
+	} else if (is_ghost(task)) {
+		entry = &per_cpu(cpus, tsk_rt(task)->linked_on);
+		ce = &entry->crit_entries[tsk_mc_crit(task)];
+
+		raw_spin_lock(&entry->lock);
+		if (ce->linked == task && is_ghost(task)) {
+			/* The task went ghost while it was linked to a CPU */
+			link_task_to_cpu(entry, NULL);
+			sched_trace_server_switch_away(ce_sid(ce), 0, -task->pid,
+						       get_rt_job(task));
+			start_crit(ce);
+		}
+		raw_spin_unlock(&entry->lock);
 	}
 }
 
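The rework above separates two decisions: whether the server job must be released, and what happens to the task afterwards. Per the commit message, a server whose client releases while budget remains now stays linked and drains as a ghost instead of being torn down. Below is a minimal userspace sketch of that control flow; struct mc_task, complete(), and the slack_stealing flag are illustrative stand-ins for the LITMUS^RT originals, reduced to plain booleans, not the kernel API.

#include <stdio.h>
#include <stdbool.h>

struct mc_task {
	bool lowest_crit;      /* tsk_mc_crit() == NUM_CRIT_LEVELS - 1 */
	bool budget_exhausted; /* server budget fully consumed */
	bool ghost;            /* client job done, server budget remains */
	bool running;          /* task is not blocked */
};

static void complete(struct mc_task *t, bool forced, bool slack_stealing)
{
	bool release_server;

	/* Without slack stealing, lowest-criticality servers release
	 * with their clients; otherwise only on budget exhaustion. */
	if (!slack_stealing && t->lowest_crit)
		release_server = true;
	else
		release_server = t->budget_exhausted;

	if (release_server || forced) {
		/* Server job stops logically running: unlink it,
		 * complete the server, and requeue a runnable task. */
		printf("release server%s\n", t->running ? ", requeue" : "");
	} else if (t->ghost) {
		/* Client released but the server keeps its budget: the
		 * server stays linked at its criticality entry and only
		 * gives up the CPU, draining the rest as a ghost. */
		printf("stay linked, run as ghost\n");
	}
}

int main(void)
{
	struct mc_task t = { .ghost = true, .running = false };
	complete(&t, false, true); /* prints "stay linked, run as ghost" */
	return 0;
}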
@@ -730,10 +745,26 @@ static void mc_ghost_exhausted(struct rt_event *e)
 static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer)
 {
 	struct crit_entry *ce = container_of(timer, struct crit_entry, timer);
+	struct cpu_entry *entry = crit_cpu(ce);
+	struct task_struct *tmp = NULL;
 #endif
 	TRACE_CRIT_ENTRY(ce, "Firing here at %llu\n", litmus_clock());
 
-	litmus_reschedule(crit_cpu(ce)->cpu);
+	raw_spin_lock(&entry->lock);
+
+	if (is_ghost(ce->linked)) {
+		update_server_time(ce->linked);
+		if (budget_exhausted(ce->linked)) {
+			tmp = ce->linked;
+		}
+	} else {
+		litmus_reschedule(crit_cpu(ce)->cpu);
+	}
+
+	raw_spin_unlock(&entry->lock);
+
+	if (tmp)
+		job_completion(tmp, 1);
 
 #ifndef CONFIG_MERGE_TIMERS
 	return HRTIMER_NORESTART;
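The timer handler now inspects the linked ghost task under the entry lock but defers job_completion() until after the unlock, which matters because job_completion() itself takes entry and domain locks. A small pthread sketch of that decide-then-act pattern; timer_fired() and the budget field are illustrative stand-ins under that assumption, not the kernel API.

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

struct task { int budget; };

static pthread_mutex_t entry_lock = PTHREAD_MUTEX_INITIALIZER;

static void job_completion(struct task *t, int forced)
{
	/* In the real plugin this re-acquires entry/domain locks. */
	printf("completing task (forced=%d)\n", forced);
}

static void timer_fired(struct task *linked)
{
	struct task *done = NULL;

	pthread_mutex_lock(&entry_lock);
	if (linked && linked->budget <= 0)
		done = linked;           /* record the decision only */
	pthread_mutex_unlock(&entry_lock);

	if (done)
		job_completion(done, 1); /* act with the lock dropped */
}

int main(void)
{
	struct task t = { .budget = 0 };
	timer_fired(&t);
	return 0;
}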
@@ -775,8 +806,8 @@ static lt_t __ce_timer_function(struct ce_dom_data *ce_data)
 		job_completion(old_link, 1);
 	} else {
 		STRACE("old_link was null, so will call check for preempt\n");
-		raw_spin_unlock(dom->lock);
 		check_for_preempt(dom);
+		raw_spin_unlock(dom->lock);
 	}
 	return next_timer_abs;
 }
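Together with the hunk at line 1155 below, this moves check_for_preempt() inside the domain lock at its call sites; the convention after the patch appears to be that the caller holds dom->lock across the call. A minimal sketch of the post-patch call pattern, assuming that convention; struct domain and signal_handler() are hypothetical names, not the plugin's types.

#include <pthread.h>
#include <stdio.h>

struct domain { pthread_mutex_t lock; };

/* Assumed convention: caller holds dom->lock. */
static void check_for_preempt(struct domain *dom)
{
	(void)dom;
	printf("checking domain for preemption under its lock\n");
}

static void signal_handler(struct domain *dom)
{
	pthread_mutex_lock(&dom->lock);
	check_for_preempt(dom);      /* previously called after the unlock */
	pthread_mutex_unlock(&dom->lock);
}

int main(void)
{
	struct domain d = { .lock = PTHREAD_MUTEX_INITIALIZER };
	signal_handler(&d);
	return 0;
}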
@@ -863,18 +894,17 @@ static void mc_task_new(struct task_struct *t, int on_rq, int running)
 	t->rt_param._domain = entry->crit_entries[level].domain;
 
 	/* Userspace and kernelspace view of task state may differ.
-	 * Model kernel state as an additional container
+	 * Model kernel state as a budget enforced container
 	 */
 	sched_trace_container_param(t->pid, t->comm);
 	sched_trace_server_param(-t->pid, t->pid,
 				 get_exec_cost(t), get_rt_period(t));
+	tsk_rt(t)->task_params.budget_policy = PRECISE_ENFORCEMENT;
 
 	/* Apply chunking */
-	if (level == CRIT_LEVEL_B) {
-		tsk_rt(t)->task_params.budget_policy = PRECISE_ENFORCEMENT;
-		if (color_chunk && level == CRIT_LEVEL_B &&
-		    lt_after(get_exec_cost(t), color_chunk))
-			tsk_rt(t)->orig_cost = get_exec_cost(t);
+	if (level == CRIT_LEVEL_B && color_chunk &&
+	    lt_after(get_exec_cost(t), color_chunk)) {
+		tsk_rt(t)->orig_cost = get_exec_cost(t);
 	}
 
 	/* Setup color request */
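After this hunk, budget enforcement is set unconditionally for every task, and chunking reduces to a single test: a level-B task is chunked only when color_chunk is configured and its execution cost exceeds the chunk length. A worked example of that test with hypothetical numbers (2 ms chunks, a 5 ms job); lt_t and lt_after() are re-declared here just to make the sketch self-contained.

#include <stdio.h>

typedef unsigned long long lt_t;     /* LITMUS^RT time type, in ns */
#define lt_after(a, b) ((a) > (b))

int main(void)
{
	lt_t color_chunk = 2000000ULL;   /* 2 ms chunk size (assumed) */
	lt_t exec_cost   = 5000000ULL;   /* 5 ms per-job execution cost */
	int  level_b     = 1;

	if (level_b && color_chunk && lt_after(exec_cost, color_chunk))
		/* orig_cost is saved so the job can run in chunks */
		printf("chunking applies: orig_cost = %llu ns\n", exec_cost);
	return 0;
}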
@@ -1010,7 +1040,7 @@ void pick_next_task(struct cpu_entry *entry)
 		dom = ce->domain;
 
 		/* Swap locks. We cannot acquire a domain lock while
-		 * holding an entry lock or deadlocks will happen.
+		 * holding an entry lock or deadlocks will happen
 		 */
 		raw_spin_unlock(&entry->lock);
 		raw_spin_lock(dom->lock);
@@ -1060,7 +1090,7 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 
 	/* Litmus gave up because it couldn't access the stack of the CPU
 	 * on which will_schedule was migrating from. Requeue it.
-	 * This really only happens in VMs.
+	 * This really only happens in VMs
 	 */
 	if (entry->will_schedule && entry->will_schedule != prev) {
 		entry->will_schedule->rt_param.scheduled_on = NO_CPU;
@@ -1106,7 +1136,7 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 	 * period completes unless its execution has been forcibly stopped
 	 */
 	else if (out_of_time || sleep)/* && !preempt)*/
-		job_completion(entry->scheduled, !sleep);
+		job_completion(entry->scheduled, !sleep || preempt);
 	/* Global scheduled tasks must wait for a deschedule before they
 	 * can rejoin the global state. Rejoin them here
 	 */
@@ -1125,7 +1155,9 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 	/* A remote processor unblocked one of our crit levels */
 	if (entry->crit_signal != NUM_CRIT_LEVELS) {
 		ce = &entry->crit_entries[entry->crit_signal];
+		raw_spin_lock(&ce->domain->lock);
 		check_for_preempt(ce->domain);
+		raw_spin_unlock(&ce->domain->lock);
 		entry->crit_signal = NUM_CRIT_LEVELS;
 	}
 
@@ -1145,12 +1177,9 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 
 	if (next) {
 		tsk_rt(next)->last_exec_time = litmus_clock();
-		BUG_ON(!is_released(next, tsk_rt(next)->last_exec_time));
-		BUG_ON(is_ghost(next));
 		TRACE_MC_TASK(next, "Picked this task\n");
 	} else if (exists && !next)
-		STRACE("CPU %d becomes idle at %llu\n",
-		       entry->cpu, litmus_clock());
+		STRACE("CPU %d idles at %llu\n", entry->cpu, litmus_clock());
 
 	return next;
 }