author     Joshua Bakita <jbakita@cs.unc.edu>    2020-05-17 15:45:44 -0400
committer  Joshua Bakita <jbakita@cs.unc.edu>    2020-05-17 15:45:44 -0400
commit     c537a10b8b94bef12cbc52294bdcf81d9725ea29
tree       4255e16b476ccf0166ff241cd944a70c179736d0
parent     d0d2fa7abca1106c7bef5e0ee27d4911dd2b1509
Remove usage of `scheduled_on`
This optional portion of `rt_param` was maintained incorrectly, and is
actually completely unneeded. It was only here because we errantly
inherited it from GSN-EDF.
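
For context, a minimal sketch of why the field is redundant: the Linux
scheduler core already tracks which CPU a running task occupies, so the
plugin's per-CPU state can be reached through task_cpu() instead of a
hand-maintained mirror. The cpu_entry_t layout below is a simplified
stand-in for this file's real structure, and the edfsc_entry_of() helper
is illustrative, not part of the commit.

/* Illustrative sketch, not part of the commit: scheduled_on was a
 * redundant mirror of state the core scheduler already maintains,
 * and it had to be updated by hand on every (de)schedule. */

#include <linux/percpu.h>	/* per_cpu(), DEFINE_PER_CPU() */
#include <linux/sched.h>	/* struct task_struct, task_cpu() */

/* Simplified stand-in for this plugin's real per-CPU bookkeeping. */
typedef struct {
	int			cpu;
	struct task_struct	*scheduled;	/* what this CPU is running */
	struct task_struct	*linked;	/* what it should be running */
} cpu_entry_t;

DEFINE_PER_CPU(cpu_entry_t, edfsc_cpu_entries);

/* Look up the entry for the CPU a task runs on: task_cpu() is kept
 * current by the core scheduler, so it cannot drift out of sync the
 * way a hand-maintained rt_param field can. */
static inline cpu_entry_t *edfsc_entry_of(struct task_struct *t)
{
	return &per_cpu(edfsc_cpu_entries, task_cpu(t));
}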
 litmus/sched_edfsc.c | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)
diff --git a/litmus/sched_edfsc.c b/litmus/sched_edfsc.c
index 7ab164cab3e1..0a7d8131ab54 100644
--- a/litmus/sched_edfsc.c
+++ b/litmus/sched_edfsc.c
@@ -546,7 +546,6 @@ static void g_finish_switch(struct task_struct *prev)
 	// but because it's not a real task, a non-rt task is current instead
 	else if (is_container(entry->linked)) {
 		entry->scheduled = entry->linked;
-		entry->scheduled->rt_param.scheduled_on = entry->cpu;
 	}
 
 	// This handles requeuing when a container is descheduled
@@ -694,7 +693,7 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev)
 	// jobs of fixed tasks.
 	BUG_ON(entry->scheduled && !is_container(entry->scheduled) && !is_realtime(prev));
 	// Bug if we didn't think anything was scheduled, but a realtime task was running on our CPU
-	BUG_ON(is_realtime(prev) && !entry->scheduled && entry->cpu == tsk_rt(prev)->scheduled_on);
+	BUG_ON(is_realtime(prev) && tsk_rt(prev)->linked_on != NO_CPU && !entry->scheduled);
 
 	if (is_container(entry->scheduled)) {
 		lt_t now = litmus_clock();
@@ -794,14 +793,12 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev)
 	if ((!np || blocks) && entry->linked != entry->scheduled) {
 		/* Schedule a linked job? */
 		if (entry->linked) {
-			entry->linked->rt_param.scheduled_on = entry->cpu;
 			next = entry->linked;
-			TRACE_TASK(next, "scheduled_on = P%d\n", smp_processor_id());
+			TRACE_TASK(next, "scheduled on P%d\n", smp_processor_id());
 		}
+		// Note what was running before
 		if (entry->scheduled) {
-			/* not gonna be scheduled soon */
-			entry->scheduled->rt_param.scheduled_on = NO_CPU;
-			TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
+			TRACE_TASK(entry->scheduled, "descheduled\n");
 		}
 	} else if (entry->scheduled) {
 		// If we've been running a container, make sure that it has nothing new to schedule
@@ -1066,7 +1063,6 @@ static void edfsc_task_new(struct task_struct* t, int on_rq, int is_scheduled)
 	preempt(entry);
 
 	// Since `t` is not going to run again until we schedule, harmonize state
-	t->rt_param.scheduled_on = NO_CPU;
 	t->rt_param.linked_on = NO_CPU;
 	raw_spin_unlock_irqrestore(&g_lock, flags);
 
@@ -1123,9 +1119,8 @@ static void edfsc_task_exit(struct task_struct* t)
 	} else {
 		// We're in the global domain and not on the ready queues, so we must be running
 		BUG_ON(t != current);
-		BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU);
 		list_del(&t->edfsc_qnode);
-		entry = &per_cpu(edfsc_cpu_entries, tsk_rt(t)->scheduled_on);
+		entry = &per_cpu(edfsc_cpu_entries, task_cpu(t));
 		BUG_ON(entry->scheduled != t);
 		entry->scheduled = NULL;
 	}
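
A usage note on the last hunk: dropping the BUG_ON(tsk_rt(t)->scheduled_on
== NO_CPU) check is safe because this branch is only reached when t is the
currently running task, where the core scheduler's task_cpu() and the
plugin's per-CPU scheduled pointer must already agree. Sketched below as a
standalone assertion, reusing the hypothetical edfsc_entry_of() helper
from the sketch above and assuming g_lock is held as in edfsc_task_exit().

/* Sketch only: the invariant the rewritten exit path relies on.
 * Assumes g_lock is held, so entry->scheduled cannot change under us. */
static void edfsc_assert_running_here(struct task_struct *t)
{
	cpu_entry_t *entry = edfsc_entry_of(t);

	BUG_ON(t != current);		/* only meaningful for the running task */
	BUG_ON(entry->scheduled != t);	/* core and plugin views must agree */
}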