 litmus/sched_cedf.c    | 85 ++++++++++++++++++++++++++++++++++++++++++++---
 litmus/sched_gsn_edf.c |  6 ++---
 2 files changed, 84 insertions(+), 7 deletions(-)
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index f030f027b486..f5c9807090a1 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -58,6 +58,10 @@
 #include <litmus/affinity.h>
 #endif
 
+#ifdef CONFIG_REALTIME_AUX_TASKS
+#include <litmus/aux_tasks.h>
+#endif
+
 /* to configure the cluster size */
 #include <litmus/litmus_proc.h>
 
@@ -313,7 +317,15 @@ static noinline void requeue(struct task_struct* task)
 	BUG_ON(is_queued(task));
 
 	if (is_released(task, litmus_clock()))
-		__add_ready(&cluster->domain, task);
+#ifdef CONFIG_REALTIME_AUX_TASKS
+		if (unlikely(tsk_rt(task)->is_aux_task && !is_running(task))) {
+			/* aux_task probably transitioned to real-time while it was blocked */
+			TRACE_CUR("aux task %s/%d is not ready!\n", task->comm, task->pid);
+			unlink(task); /* really needed? */
+		}
+		else
+#endif
+		__add_ready(&cluster->domain, task);
 	else {
 		/* it has got to wait */
 		add_release(&cluster->domain, task);
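
For readability, the patched requeue() logic flattens to the sketch below once the preprocessor conditionals are resolved. This assumes CONFIG_REALTIME_AUX_TASKS is enabled; the helpers (is_released(), is_running(), tsk_rt(), unlink(), __add_ready()) are the surrounding LITMUS^RT primitives, so treat this as an illustration of the intended control flow, not the committed code:

/* Sketch: patched requeue() with CONFIG_REALTIME_AUX_TASKS assumed enabled. */
static noinline void requeue(struct task_struct *task)
{
	cedf_domain_t *cluster = task_cpu_cluster(task);

	BUG_ON(is_queued(task));

	if (!is_released(task, litmus_clock())) {
		/* not released yet: it has got to wait */
		add_release(&cluster->domain, task);
	} else if (unlikely(tsk_rt(task)->is_aux_task && !is_running(task))) {
		/* aux task probably became real-time while blocked;
		 * keep it out of the ready queue until it actually runs */
		TRACE_CUR("aux task %s/%d is not ready!\n", task->comm, task->pid);
		unlink(task); /* really needed? */
	} else {
		__add_ready(&cluster->domain, task);
	}
}

Note the dangling-else hazard the committed form narrowly avoids: the #ifdef block injects an if/else between the outer if (is_released(...)) and its else, which still parses correctly only because the inner chain ends in the original __add_ready() statement.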
@@ -1019,9 +1031,14 @@ static void cedf_task_wake_up(struct task_struct *task)
 	set_rt_flags(task, RT_F_RUNNING); // periodic model
 #endif
 
-	if(tsk_rt(task)->linked_on == NO_CPU)
-		cedf_job_arrival(task);
+#ifdef CONFIG_REALTIME_AUX_TASKS
+	if (tsk_rt(task)->has_aux_tasks) {
+		TRACE_CUR("%s/%d is ready so aux tasks may not inherit.\n", task->comm, task->pid);
+		disable_aux_task_owner(task);
+	}
+#endif
 
+	cedf_job_arrival(task);
 	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 }
 
@@ -1036,7 +1053,17 @@ static void cedf_task_block(struct task_struct *t)
 
 	/* unlink if necessary */
 	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
+
 	unlink(t);
+
+#ifdef CONFIG_REALTIME_AUX_TASKS
+	if (tsk_rt(t)->has_aux_tasks) {
+
+		TRACE_CUR("%s/%d is blocked so aux tasks may inherit.\n", t->comm, t->pid);
+		enable_aux_task_owner(t);
+	}
+#endif
+
 	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 
 	BUG_ON(!is_realtime(t));
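
The block and wake-up hooks are symmetric: while the owner is blocked its aux tasks may inherit its priority (enable_aux_task_owner()), and once it is runnable again they must not (disable_aux_task_owner()). Both helpers live in litmus/aux_tasks.c and are not part of this diff; the sketch below is a hypothetical reading of the contract only, in which the aux_task_list field, the aux_task_list_entry link, and the plugin's increase_prio/decrease_prio hooks are assumptions for illustration, not the committed implementation:

/* HYPOTHETICAL sketch of the enable/disable contract; the real code is
 * in litmus/aux_tasks.c.  List, field, and hook names are assumptions. */
static void enable_aux_task_owner(struct task_struct *owner)
{
	struct task_struct *aux;

	/* owner blocked: aux threads inherit the owner's effective priority */
	list_for_each_entry(aux, &tsk_rt(owner)->aux_task_list,
			    rt_param.aux_task_list_entry)
		litmus->increase_prio(aux, effective_priority(owner));
}

static void disable_aux_task_owner(struct task_struct *owner)
{
	struct task_struct *aux;

	/* owner runnable again: aux threads drop back to base priority */
	list_for_each_entry(aux, &tsk_rt(owner)->aux_task_list,
			    rt_param.aux_task_list_entry)
		litmus->decrease_prio(aux, NULL);
}

Both call sites hold cluster->cluster_lock, which serializes these updates against link/unlink operations on the same cluster.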
@@ -1052,8 +1079,22 @@ static void cedf_task_exit(struct task_struct * t)
 	cedf_change_prio_pai_tasklet(t, NULL);
 #endif
 
+#ifdef CONFIG_REALTIME_AUX_TASKS
+	if (tsk_rt(t)->is_aux_task) {
+		exit_aux_task(t); /* cannot be called with cluster_lock held */
+	}
+#endif
+
 	/* unlink if necessary */
 	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
+
+#ifdef CONFIG_REALTIME_AUX_TASKS
+	/* make sure we clean up on our way out */
+	if (tsk_rt(t)->has_aux_tasks) {
+		disable_aux_task_owner(t); /* must be called with cluster_lock held */
+	}
+#endif
+
 	unlink(t);
 	if (tsk_rt(t)->scheduled_on != NO_CPU) {
 		cpu_entry_t *cpu;
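
Note the lock discipline this hook establishes: exit_aux_task() must run before cluster_lock is taken (per its comment it cannot be called with the scheduler lock held, presumably because it triggers priority updates that acquire that lock itself), whereas disable_aux_task_owner() must run with the lock held, like every other operation that touches link state.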
@@ -1092,8 +1133,16 @@ static int __increase_priority_inheritance(struct task_struct* t,
 	int success = 1;
 	int linked_on;
 	int check_preempt = 0;
+	cedf_domain_t* cluster;
 
-	cedf_domain_t* cluster = task_cpu_cluster(t);
+	if (prio_inh && prio_inh == effective_priority(t)) {
+		/* relationship already established. */
+		TRACE_TASK(t, "already has effective priority of %s/%d\n",
+			   prio_inh->comm, prio_inh->pid);
+		goto out;
+	}
+
+	cluster = task_cpu_cluster(t);
 
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	/* this sanity check allows for weaker locking in protocols */
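
The early-out depends on effective_priority(), which the diff uses but does not define. Its conventional LITMUS^RT meaning is "the inheritance donor if one is set, otherwise the task itself"; a minimal sketch under that assumption (not code from this patch):

/* Assumed definition of effective_priority(): a task's effective
 * priority is that of its inheritance donor, if any. */
static inline struct task_struct *effective_priority(struct task_struct *t)
{
	return tsk_rt(t)->inh_task ? tsk_rt(t)->inh_task : t;
}

Under that reading, the guard prio_inh && prio_inh == effective_priority(t) turns a repeated inheritance request into a cheap no-op, which presumably also keeps the new owner-to-aux propagation added below from re-triggering work that has already been done.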
@@ -1155,6 +1204,13 @@ static int __increase_priority_inheritance(struct task_struct* t,
 					   &cluster->domain.ready_queue);
 			check_for_preemptions(cluster);
 		}
+
+#ifdef CONFIG_REALTIME_AUX_TASKS
+		/* propagate to aux tasks */
+		if (tsk_rt(t)->has_aux_tasks) {
+			aux_task_owner_increase_priority(t);
+		}
+#endif
 	}
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	}
@@ -1170,6 +1226,8 @@ static int __increase_priority_inheritance(struct task_struct* t,
 		success = 0;
 	}
 #endif
+
+out:
 	return success;
 }
 
@@ -1211,6 +1269,15 @@ static int __decrease_priority_inheritance(struct task_struct* t,
 					      struct task_struct* prio_inh)
 {
 	int success = 1;
+
+	if (prio_inh == tsk_rt(t)->inh_task) {
+		/* relationship already established. */
+		TRACE_TASK(t, "already inherits priority from %s/%d\n",
+			   (prio_inh) ? prio_inh->comm : "(nil)",
+			   (prio_inh) ? prio_inh->pid : 0);
+		goto out;
+	}
+
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) {
 #endif
@@ -1248,6 +1315,14 @@ static int __decrease_priority_inheritance(struct task_struct* t,
 		}
 		raw_spin_unlock(&cluster->domain.release_lock);
 	}
+
+#ifdef CONFIG_REALTIME_AUX_TASKS
+	/* propagate to aux tasks */
+	if (tsk_rt(t)->has_aux_tasks) {
+		aux_task_owner_decrease_priority(t);
+	}
+#endif
+
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	}
 	else {
@@ -1261,6 +1336,8 @@ static int __decrease_priority_inheritance(struct task_struct* t,
 		success = 0;
 	}
 #endif
+
+out:
 	return success;
 }
 
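
__decrease_priority_inheritance() gets the mirror-image treatment: its guard compares prio_inh against tsk_rt(t)->inh_task directly and tolerates a NULL prio_inh (the "(nil)" case in the trace message), since decreasing to NULL presumably means returning to base priority, and its propagation block calls aux_task_owner_decrease_priority() instead. Both functions now funnel every path through the new out: label, so the early-outs and the normal exit share a single return.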
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 5fc330f14a0e..ed9b4697a5a2 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -170,7 +170,6 @@ struct tasklet_head gsnedf_pending_tasklets;
  * TRACE() log.
 #define WANT_ALL_SCHED_EVENTS
  */
-//#define WANT_ALL_SCHED_EVENTS
 
 static int cpu_lower_prio(struct binheap_node *_a, struct binheap_node *_b)
 {
@@ -370,8 +369,7 @@ static void check_for_preemptions(void)
 				&per_cpu(gsnedf_cpu_entries, task_cpu(task)));
 		if (affinity)
 			last = affinity;
-
-		if (requeue_preempted_job(last->linked))
+		else if (requeue_preempted_job(last->linked))
 			requeue(last->linked);
 	}
 #else
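
The affinity fix turns the unconditional requeue check into an else if: the preempted job on last is requeued only when no affinity-preferred CPU was found. This presumably relies on the affinity helper returning only available CPUs, i.e. CPUs with no linked job, in which case there is nothing to requeue on the affinity path.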
@@ -467,9 +465,11 @@ static void gsnedf_tick(struct task_struct* t)
 		}
 	}
 
+	/*
 	if(is_realtime(t)) {
 		TRACE_TASK(t, "tick %llu\n", litmus_clock());
 	}
+	*/
 }
 
 