about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorZelin Tong <ztong@ludwig.cs.unc.edu>2020-10-23 00:11:54 -0400
committerZelin Tong <ztong@ludwig.cs.unc.edu>2020-10-23 00:11:54 -0400
commita33d3ad7eb411e0a250ca0d73a99b67313c4e28f (patch)
tree659ca1c3c0e1ec6df7b6bab9384ff7fddcb53d31
parent6bb080adebb67a15e5b9206fc390dc9338589836 (diff)
Fixed Occasional Deadlock when scheduling (branch: forbidden-zones)
Problem: When multiple scheduling events occur simultaneously, a situation could happen where:
- one core schedules a task that is linked to it
- a check_for_preemption occurs due to another scheduling event
- that task is now linked to another core
- the other core tries to schedule it but can't grab the task from the runqueue of the other core

Solution: Added in the scheduled_on field in gedf_reservation. This enables us in link_task_to_cpu to check for this occurring and swap the linking so the above situation doesn't happen. The code is taken and modified from link_task_to_cpu in GSN-EDF.
-rw-r--r--include/litmus/reservations/gedf_reservation.h1
-rw-r--r--litmus/reservations/gedf_reservation.c44
2 files changed, 36 insertions, 9 deletions
diff --git a/include/litmus/reservations/gedf_reservation.h b/include/litmus/reservations/gedf_reservation.h
index d9694fa3ac64..578b9cad5838 100644
--- a/include/litmus/reservations/gedf_reservation.h
+++ b/include/litmus/reservations/gedf_reservation.h
@@ -11,6 +11,7 @@
11struct gedf_reservation { 11struct gedf_reservation {
12 struct reservation res; 12 struct reservation res;
13 struct gedf_cpu_entry* linked_on; 13 struct gedf_cpu_entry* linked_on;
14 struct gedf_cpu_entry* scheduled_on;
14 lt_t cur_budget; 15 lt_t cur_budget;
15 int will_remove; 16 int will_remove;
16 int blocked; 17 int blocked;
diff --git a/litmus/reservations/gedf_reservation.c b/litmus/reservations/gedf_reservation.c
index e08d3f9799e8..9f02617821a1 100644
--- a/litmus/reservations/gedf_reservation.c
+++ b/litmus/reservations/gedf_reservation.c
@@ -103,13 +103,28 @@ static void link_task_to_cpu(
103 struct gedf_reservation* linked, 103 struct gedf_reservation* linked,
104 struct gedf_cpu_entry* entry) 104 struct gedf_cpu_entry* entry)
105{ 105{
106 struct gedf_cpu_entry* on_cpu;
107 struct gedf_reservation* tmp;
106 108
107 if (entry->linked) 109 if (entry->linked)
108 entry->linked->linked_on = NULL; 110 entry->linked->linked_on = NULL;
109 111
110 if (linked) 112 if (linked) {
111 linked->linked_on = entry; 113 on_cpu = linked->scheduled_on;
112 114 if (on_cpu) {
115 BUG_ON(on_cpu->linked == linked);
116
117 if (entry != on_cpu) {
118 tmp = on_cpu->linked;
119 linked->linked_on = on_cpu;
120 on_cpu->linked = linked;
121 update_cpu_position(on_cpu, &gedf_env->cpu_heap);
122 linked = tmp;
123 }
124 }
125 if (linked)
126 linked->linked_on = entry;
127 }
113 entry->linked = linked; 128 entry->linked = linked;
114 update_cpu_position(entry, &gedf_env->cpu_heap); 129 update_cpu_position(entry, &gedf_env->cpu_heap);
115} 130}
@@ -519,8 +534,12 @@ static void gedf_env_suspend(
519 unlink(gedf_env, entry->linked); 534 unlink(gedf_env, entry->linked);
520 requeue(gedf_env, tmp); 535 requeue(gedf_env, tmp);
521 } 536 }
522 if (entry->scheduled && entry->scheduled->res.ops->on_preempt) 537 if (entry->scheduled) {
523 entry->scheduled->res.ops->on_preempt(&entry->scheduled->res, cpu); 538 if (entry->scheduled->res.ops->on_preempt)
539 entry->scheduled->res.ops->on_preempt(&entry->scheduled->res, cpu);
540 if (entry->scheduled->scheduled_on == entry)
541 entry->scheduled->scheduled_on = NULL;
542 }
524 entry->scheduled = NULL; 543 entry->scheduled = NULL;
525 544
526 /* this essentially removes the cpu from scheduling consideration */ 545 /* this essentially removes the cpu from scheduling consideration */
@@ -641,10 +660,17 @@ static struct task_struct* gedf_env_dispatch(
641 660
642 /* if linked and scheduled differ, preempt and schedule accordingly */ 661 /* if linked and scheduled differ, preempt and schedule accordingly */
643 if (!np && entry->scheduled != entry->linked) { 662 if (!np && entry->scheduled != entry->linked) {
644 if (entry->scheduled && entry->scheduled->res.ops->on_preempt) 663 if (entry->scheduled) {
645 entry->scheduled->res.ops->on_preempt(&entry->scheduled->res, cpu); 664 if (entry->scheduled->res.ops->on_preempt)
646 if (entry->linked && entry->linked->res.ops->on_schedule) 665 entry->scheduled->res.ops->on_preempt(&entry->scheduled->res, cpu);
647 entry->linked->res.ops->on_schedule(&entry->linked->res, cpu); 666 if (entry->scheduled->scheduled_on == entry)
667 entry->scheduled->scheduled_on = NULL;
668 }
669 if (entry->linked) {
670 entry->linked->scheduled_on = entry;
671 if (entry->linked->res.ops->on_schedule)
672 entry->linked->res.ops->on_schedule(&entry->linked->res, cpu);
673 }
648 entry->scheduled = entry->linked; 674 entry->scheduled = entry->linked;
649 } 675 }
650 raw_spin_unlock_irqrestore(&gedf_env->domain.ready_lock, flags); 676 raw_spin_unlock_irqrestore(&gedf_env->domain.ready_lock, flags);