author	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2011-07-26 00:36:56 -0400
committer	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2011-07-26 00:36:56 -0400
commit	b696cbf148cdcc0a48c4e730c80f40c8e29a55f8 (patch)
tree	516fae8defe1d488a6bfde25d989f802307dc6f2
parent	3512dc22c9036157603a27c90cd683d1a583f1cd (diff)
P-FP: avoid cross-migrations
The first attempt at DPCP migrations caused deadlocks if two processors needed to exchange jobs. We now avoid this by performing the context switch first, before enqueuing the job in the remote runqueue.
-rw-r--r--	litmus/sched_pfp.c	40
1 file changed, 30 insertions(+), 10 deletions(-)
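To see why the old ordering can deadlock, consider two processors that exchange jobs: each enqueues its outgoing job on the peer's runqueue before context-switching away from it, then must wait in schedule() until the incoming job has been switched away from by the peer. Each peer only completes that switch after its own wait succeeds, so the wait is circular. Below is a minimal userspace sketch of both orderings (plain C with pthreads, not LITMUS^RT code; all names such as cpu, still_scheduled, and enqueued are made up for illustration):

/*
 * Hypothetical model of the deadlock this patch fixes -- NOT
 * LITMUS^RT code. Pre-patch ordering: enqueue the outgoing job on the
 * peer's runqueue *before* switching away from it, then spin until the
 * incoming job has been switched away from by the peer. Patched
 * ordering: switch away first (finish_switch), then enqueue remotely.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool still_scheduled[2]; /* CPU i still runs its old job */
static atomic_bool enqueued[2];        /* incoming job is on CPU i's queue */
static bool broken_order;              /* true = pre-patch ordering */

static void *cpu(void *arg)
{
	int me = (int)(long)arg, peer = 1 - me;
	long spins = 0;

	atomic_store(&still_scheduled[me], true); /* old job is running */

	if (broken_order) {
		/* pre-patch: enqueue on the peer before switching away */
		atomic_store(&enqueued[peer], true);
	} else {
		/* patched: complete the context switch first ... */
		atomic_store(&still_scheduled[me], false);
		/* ... then enqueue in the remote runqueue (finish_switch) */
		atomic_store(&enqueued[peer], true);
	}

	/* schedule(): the incoming job may only run once the peer has
	 * switched away from it */
	while (!atomic_load(&enqueued[me]) ||
	       atomic_load(&still_scheduled[peer])) {
		if (++spins > 50000000L) {
			printf("CPU%d: deadlock detected\n", me);
			return NULL;
		}
	}
	/* the pre-patch ordering only switches away after the wait above */
	atomic_store(&still_scheduled[me], false);
	printf("CPU%d: migration complete\n", me);
	return NULL;
}

int main(void)
{
	pthread_t t[2];

	for (int mode = 1; mode >= 0; mode--) {
		broken_order = mode;
		atomic_store(&enqueued[0], false);
		atomic_store(&enqueued[1], false);
		printf("--- %s ordering ---\n",
		       broken_order ? "pre-patch" : "patched");
		for (long i = 0; i < 2; i++)
			pthread_create(&t[i], NULL, cpu, (void *)i);
		for (int i = 0; i < 2; i++)
			pthread_join(t[i], NULL);
	}
	return 0;
}

With the pre-patch ordering both threads report a deadlock; with the patched ordering both migrations complete. Moving the remote enqueue after the context switch, as the pfp_finish_switch() callback below does, breaks the circular wait in the same way.
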
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
index a86f7df04f85..4b14a3eedb83 100644
--- a/litmus/sched_pfp.c
+++ b/litmus/sched_pfp.c
@@ -237,6 +237,34 @@ static struct task_struct* pfp_schedule(struct task_struct * prev)
 	return next;
 }
 
+#ifdef CONFIG_LITMUS_LOCKING
+
+/* prev is no longer scheduled --- see if it needs to migrate */
+static void pfp_finish_switch(struct task_struct *prev)
+{
+	pfp_domain_t *to;
+
+	if (is_realtime(prev) &&
+	    is_running(prev) &&
+	    get_partition(prev) != smp_processor_id()) {
+		TRACE_TASK(prev, "needs to migrate from P%d to P%d\n",
+			   smp_processor_id(), get_partition(prev));
+
+		to = task_pfp(prev);
+
+		raw_spin_lock(&to->slock);
+
+		TRACE_TASK(prev, "adding to queue on P%d\n", to->cpu);
+		requeue(prev, to);
+		if (fp_preemption_needed(&to->ready_queue, to->scheduled))
+			preempt(to);
+
+		raw_spin_unlock(&to->slock);
+
+	}
+}
+
+#endif
 
 /* Prepare a task for running in RT mode
  */
@@ -1172,7 +1200,7 @@ static inline struct dpcp_semaphore* dpcp_from_lock(struct litmus_lock* lock)
 static void pfp_migrate_to(int target_cpu)
 {
 	struct task_struct* t = current;
-	pfp_domain_t *from, *to;
+	pfp_domain_t *from;
 
 	if (get_partition(t) == target_cpu)
 		return;
@@ -1187,7 +1215,6 @@ static void pfp_migrate_to(int target_cpu)
 
 	/* lock both pfp domains in order of address */
 	from = task_pfp(t);
-	to = remote_pfp(target_cpu);
 
 	raw_spin_lock(&from->slock);
 
@@ -1196,14 +1223,6 @@ static void pfp_migrate_to(int target_cpu)
 
 	raw_spin_unlock(&from->slock);
 
-	raw_spin_lock(&to->slock);
-
-	requeue(t, to);
-	if (fp_preemption_needed(&to->ready_queue, to->scheduled))
-		preempt(to);
-
-	raw_spin_unlock(&to->slock);
-
 	local_irq_enable();
 	preempt_enable_no_resched();
 
@@ -1472,6 +1491,7 @@ static struct sched_plugin pfp_plugin __cacheline_aligned_in_smp = {
 	.activate_plugin	= pfp_activate_plugin,
 #ifdef CONFIG_LITMUS_LOCKING
 	.allocate_lock		= pfp_allocate_lock,
+	.finish_switch		= pfp_finish_switch,
 #endif
 };
 