about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    ztong <ztong@cs.unc.edu>  2021-02-17 17:59:26 -0500
committer ztong <ztong@cs.unc.edu>  2021-02-17 17:59:26 -0500
commit b36bd06a9d3b8f5b2b1de2d768266b3968052160 (patch)
tree   0f79ae48f9b2d4d9d80c3d451c6450ad66ca79d2
parent 5c81e639780c2bdf2fb4ad847b7a5729a717e536 (diff)
omlp_fz_police bug fix (branch: update_litmus_2019)
-rw-r--r--kernel/sched/litmus.c2
-rw-r--r--kernel/sched/rt.c2
-rw-r--r--kernel/sched/sched.h2
-rw-r--r--litmus/reservations/gedf_reservation.c21
4 files changed, 14 insertions, 13 deletions
diff --git a/kernel/sched/litmus.c b/kernel/sched/litmus.c
index a28aa5ab28f7..2e3ca79d39a9 100644
--- a/kernel/sched/litmus.c
+++ b/kernel/sched/litmus.c
@@ -362,7 +362,7 @@ const struct sched_class litmus_sched_class = {
362 * cpu-hotplug or cpu throttling. Allows Litmus to use up to 1.0 362 * cpu-hotplug or cpu throttling. Allows Litmus to use up to 1.0
363 * CPU capacity. 363 * CPU capacity.
364 */ 364 */
365 .next = &stop_sched_class, 365 .next = &fair_sched_class,
366 .enqueue_task = enqueue_task_litmus, 366 .enqueue_task = enqueue_task_litmus,
367 .dequeue_task = dequeue_task_litmus, 367 .dequeue_task = dequeue_task_litmus,
368 .yield_task = yield_task_litmus, 368 .yield_task = yield_task_litmus,
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index babfdd90c6a5..12d40dfd715f 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2363,7 +2363,7 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2363} 2363}
2364 2364
2365const struct sched_class rt_sched_class = { 2365const struct sched_class rt_sched_class = {
2366 .next = &fair_sched_class, 2366 .next = &litmus_sched_class,
2367 .enqueue_task = enqueue_task_rt, 2367 .enqueue_task = enqueue_task_rt,
2368 .dequeue_task = dequeue_task_rt, 2368 .dequeue_task = dequeue_task_rt,
2369 .yield_task = yield_task_rt, 2369 .yield_task = yield_task_rt,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c4f7afbe90c0..ebbd4c05289b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1798,7 +1798,7 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next)
1798 * class, but existing plugins (that predate the stop-machine class) depend on 1798 * class, but existing plugins (that predate the stop-machine class) depend on
1799 * the assumption that LITMUS^RT plugins are the top scheduling class. 1799 * the assumption that LITMUS^RT plugins are the top scheduling class.
1800 */ 1800 */
1801#define sched_class_highest (&litmus_sched_class) 1801#define sched_class_highest (&stop_sched_class)
1802 1802
1803/* 1803/*
1804#ifdef CONFIG_SMP 1804#ifdef CONFIG_SMP
diff --git a/litmus/reservations/gedf_reservation.c b/litmus/reservations/gedf_reservation.c
index c5dd7032ab42..395852076773 100644
--- a/litmus/reservations/gedf_reservation.c
+++ b/litmus/reservations/gedf_reservation.c
@@ -1165,6 +1165,9 @@ static int gedf_env_omlp_access_fz_check(struct litmus_lock* l, lt_t fz_len, lt_
1165 spin_unlock_irqrestore(&gedf_env->fz_waiters[0].lock, flags); 1165 spin_unlock_irqrestore(&gedf_env->fz_waiters[0].lock, flags);
1166 spin_lock_irqsave(&gedf_env->fz_waiters[cpu].lock, flags); 1166 spin_lock_irqsave(&gedf_env->fz_waiters[cpu].lock, flags);
1167 1167
1168 BUG_ON(!tsk_rt(t)->ctrl_page);
1169 tsk_rt(t)->ctrl_page->fz_progress = FZ_DONE;
1170
1168 /* cancel fz police timer */ 1171 /* cancel fz police timer */
1169 hrtimer_try_to_cancel(&sem->police_timer); 1172 hrtimer_try_to_cancel(&sem->police_timer);
1170 1173
@@ -1173,8 +1176,6 @@ static int gedf_env_omlp_access_fz_check(struct litmus_lock* l, lt_t fz_len, lt_
1173 remaining_component_budget = timeslice_end - litmus_clock(); 1176 remaining_component_budget = timeslice_end - litmus_clock();
1174 1177
1175 if (remaining_component_budget < fz_len) { 1178 if (remaining_component_budget < fz_len) {
1176 if (likely(tsk_rt(t)->ctrl_page))
1177 tsk_rt(t)->ctrl_page->fz_progress = 0;
1178 /* go on a wait queue to be woken up when the parent reservation 1179 /* go on a wait queue to be woken up when the parent reservation
1179 * is next scheduled */ 1180 * is next scheduled */
1180 init_waitqueue_entry(&wait, t); 1181 init_waitqueue_entry(&wait, t);
@@ -1193,8 +1194,6 @@ static int gedf_env_omlp_access_fz_check(struct litmus_lock* l, lt_t fz_len, lt_
1193 } else 1194 } else
1194 spin_unlock_irqrestore(&gedf_env->fz_waiters[cpu].lock, flags); 1195 spin_unlock_irqrestore(&gedf_env->fz_waiters[cpu].lock, flags);
1195 1196
1196 BUG_ON(!tsk_rt(t)->ctrl_page);
1197
1198 tsk_rt(t)->ctrl_page->sched.np.flag = 1; 1197 tsk_rt(t)->ctrl_page->sched.np.flag = 1;
1199 tsk_rt(t)->ctrl_page->fz_progress = FZ_PRE_GPU_LAUNCH; 1198 tsk_rt(t)->ctrl_page->fz_progress = FZ_PRE_GPU_LAUNCH;
1200 hrtimer_start(&sem->police_timer, 1199 hrtimer_start(&sem->police_timer,
@@ -1293,13 +1292,16 @@ static enum hrtimer_restart omlp_fz_police(struct hrtimer *timer)
1293 unsigned long flags; 1292 unsigned long flags;
1294 struct kernel_siginfo info; 1293 struct kernel_siginfo info;
1295 1294
1295 spin_lock_irqsave(&sem->fifo_wait.lock, flags);
1296 if (t != sem->owner) {
1297 spin_unlock_irqrestore(&sem->fifo_wait.lock, flags);
1298 return HRTIMER_NORESTART;
1299 }
1300
1296 BUG_ON(!tsk_rt(t)->ctrl_page); 1301 BUG_ON(!tsk_rt(t)->ctrl_page);
1297 1302
1298 if (tsk_rt(t)->ctrl_page->fz_progress) { 1303 if (tsk_rt(t)->ctrl_page->fz_progress) {
1299 /* we try to unlock for the task that violated fz */ 1304 /* we try to unlock for the task that violated fz */
1300 spin_lock_irqsave(&sem->fifo_wait.lock, flags);
1301
1302 BUG_ON(sem->owner != t);
1303 1305
1304 __gedf_env_omlp_unlock(sem, t); 1306 __gedf_env_omlp_unlock(sem, t);
1305 1307
@@ -1310,11 +1312,10 @@ static enum hrtimer_restart omlp_fz_police(struct hrtimer *timer)
1310 info.si_int = -(int)tsk_rt(t)->ctrl_page->fz_progress; 1312 info.si_int = -(int)tsk_rt(t)->ctrl_page->fz_progress;
1311 send_sig_info(SIGTERM, &info, t); 1313 send_sig_info(SIGTERM, &info, t);
1312 1314
1313 tsk_rt(t)->ctrl_page->fz_progress = FZ_DONE; 1315 //tsk_rt(t)->ctrl_page->fz_progress = FZ_DONE;
1314 tsk_rt(t)->ctrl_page->sched.np.flag = 0; 1316 tsk_rt(t)->ctrl_page->sched.np.flag = 0;
1315
1316 spin_unlock_irqrestore(&sem->fifo_wait.lock, flags);
1317 } 1317 }
1318 spin_unlock_irqrestore(&sem->fifo_wait.lock, flags);
1318 1319
1319 return HRTIMER_NORESTART; 1320 return HRTIMER_NORESTART;
1320} 1321}