author	Joshua Bakita <jbakita@cs.unc.edu>	2020-05-14 23:12:02 -0400
committer	Joshua Bakita <jbakita@cs.unc.edu>	2020-05-14 23:12:02 -0400
commit	a7a2cf94018a239e11d4637b870e4e073e60a998 (patch)
tree	47ee7f2e7af7140c8b61f7c546e6644d7776ef38
parent	86fca2548ed619530c5f8a5ebe48d150ee71630f (diff)
Require `g_lock` in g_finish_switch()
While Linux will probably never run g_finish_switch() while we're in edfsc_gschedule() on the same core, g_finish_switch() may run simultaneously with an edfsc_gschedule() on a remote core. This may not be safe, as requeue() modifies global data structures and is currently called from g_finish_switch(). Make certain this is safe by obtaining `g_lock` for the whole function. If future refactors remove the requeue() call from this function, it is likely safe to revert this commit.
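For readers skimming the patch, the locking pattern it introduces looks roughly like the condensed sketch of g_finish_switch() below; the body between lock and unlock is abbreviated to a comment, so refer to the diff for the actual code.

static void g_finish_switch(struct task_struct *prev)
{
	cpu_entry_t* entry = this_cpu_ptr(&edfsc_cpu_entries);
	unsigned long flags;

	// Hold g_lock across the whole function so a concurrent
	// edfsc_gschedule() or requeue() on another core cannot change
	// entry->linked between our checks and our write to entry->scheduled.
	raw_spin_lock_irqsave(&g_lock, flags);
	// ... update entry->scheduled and, if a container was scheduled,
	//     requeue() its previously scheduled task (see diff below) ...
	raw_spin_unlock_irqrestore(&g_lock, flags);
}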
-rw-r--r--	litmus/sched_edfsc.c	7
1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/litmus/sched_edfsc.c b/litmus/sched_edfsc.c
index ada8ff936422..91c3d40529d3 100644
--- a/litmus/sched_edfsc.c
+++ b/litmus/sched_edfsc.c
@@ -557,8 +557,14 @@ static void g_finish_switch(struct task_struct *prev)
 {
 	cpu_entry_t* entry = this_cpu_ptr(&edfsc_cpu_entries);
 	struct task_struct* container = &container_tasks[entry->cpu];
+	unsigned long flags;
 	BUG_ON(is_realtime(current) && tsk_rt(current)->domain == NULL);
 
+	// FIXME: It's really expensive to put a lock in here, but since we touch
+	// members of entry multiple times, we have to lock. Otherwise we
+	// may make an if branch based off entry->linked, and then have it
+	// change before we can set entry->scheduled.
+	raw_spin_lock_irqsave(&g_lock, flags);
 	entry->scheduled = is_realtime(current) ? current : NULL;
 	// If we're scheduling a task in a container, set entry->scheduled to the container
 	if (entry->scheduled) {
@@ -578,6 +584,7 @@ static void g_finish_switch(struct task_struct *prev)
 		requeue(tsk_rt(container)->edfsc_params.domain->scheduled);
 		tsk_rt(container)->edfsc_params.domain->scheduled = NULL;
 	}
+	raw_spin_unlock_irqrestore(&g_lock, flags);
 #ifdef WANT_ALL_SCHED_EVENTS
 	TRACE_TASK(prev, "switched away from\n");
 #endif