author    Bjoern B. Brandenburg <bbb@cs.unc.edu>  2010-02-03 19:56:21 -0500
committer Andrea Bastoni <bastoni@cs.unc.edu>     2010-05-29 17:26:27 -0400
commit    9039e5f731ca5f9a0c69f8523ccfee044111d2e3
tree      e4f10bbda2a85cfe96e5d8975953bc4bd5b1c981 /litmus
parent    f3a6cb9af5cdb01f29ad32b01aa56a14f0da144e
Use generic preemption function in GSN- and PSN-EDF.
This patch updates non-preemptive section support in GSN- and PSN-EDF.
Diffstat (limited to 'litmus')
-rw-r--r--  litmus/sched_gsn_edf.c  24
-rw-r--r--  litmus/sched_psn_edf.c  52
2 files changed, 30 insertions, 46 deletions
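
Both plugins previously open-coded the same local-vs-remote preemption logic, which this commit replaces with a single call to preempt_if_preemptable(). The helper's body is not part of this diff; the following is only a minimal sketch of what such a generic function could look like, reconstructed from the per-plugin code removed below (the actual implementation elsewhere in the LITMUS^RT tree may differ, e.g. in how it avoids redundant IPIs):

/* Sketch only: reconstructed from the removed per-plugin logic, not the
 * actual LITMUS^RT implementation. t is the real-time task currently
 * scheduled on 'cpu' (may be NULL). */
static void preempt_if_preemptable(struct task_struct *t, int cpu)
{
	if (smp_processor_id() == cpu) {
		/* Local CPU: we may inspect the task directly. If it is in a
		 * non-preemptive section, ask it to leave; otherwise force a
		 * reschedule. */
		if (t && is_np(t))
			request_exit_np(t);
		else
			set_tsk_need_resched(current);
	} else {
		/* Remote CPU: request_exit_np() cannot be issued on behalf of
		 * another CPU, so send an IPI and let schedule() on that CPU
		 * deal with any non-preemptive section. */
		smp_send_reschedule(cpu);
	}
}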
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 2a0ee50d26a2..b9310dd6f75c 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -242,27 +242,9 @@ static noinline void unlink(struct task_struct* t)
 
 /* preempt - force a CPU to reschedule
  */
-static noinline void preempt(cpu_entry_t *entry)
+static void preempt(cpu_entry_t *entry)
 {
-	/* We cannot make the is_np() decision here if it is a remote CPU
-	 * because requesting exit_np() requires that we currently use the
-	 * address space of the task. Thus, in the remote case we just send
-	 * the IPI and let schedule() handle the problem.
-	 */
-
-	if (smp_processor_id() == entry->cpu) {
-		if (entry->scheduled && is_np(entry->scheduled))
-			request_exit_np(entry->scheduled);
-		else
-			set_tsk_need_resched(current);
-	} else
-		/* in case that it is a remote CPU we have to defer the
-		 * the decision to the remote CPU
-		 * FIXME: We could save a few IPI's here if we leave the flag
-		 * set when we are waiting for a np_exit().
-		 */
-		if (!test_will_schedule(entry->cpu))
-			smp_send_reschedule(entry->cpu);
+	preempt_if_preemptable(entry->scheduled, entry->cpu);
 }
 
 /* requeue - Put an unlinked task into gsn-edf domain.
@@ -364,7 +346,7 @@ static void gsnedf_tick(struct task_struct* t)
 			TRACE("gsnedf_scheduler_tick: "
 			      "%d is preemptable "
 			      " => FORCE_RESCHED\n", t->pid);
-		} else {
+		} else if (is_user_np(t)) {
 			TRACE("gsnedf_scheduler_tick: "
 			      "%d is non-preemptable, "
 			      "preemption delayed.\n", t->pid);
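
In both plugins the tick handler now singles out user-controlled non-preemptive sections: only is_user_np() tasks have their preemption delayed, rather than any task that reports is_np(). A simplified sketch of the resulting budget-enforcement path follows; is_user_np(), is_np(), request_exit_np() and set_tsk_need_resched() appear in this diff, while is_realtime() and budget_exhausted() are assumed LITMUS^RT helpers:

/* Sketch of the tick-time budget check after this change; not the
 * literal plugin code. */
static void budget_check_sketch(struct task_struct *t)
{
	if (is_realtime(t) && budget_exhausted(t)) {
		if (!is_np(t)) {
			/* preemptable => force a reschedule right away */
			set_tsk_need_resched(t);
		} else if (is_user_np(t)) {
			/* user-controlled non-preemptive section: delay the
			 * preemption and ask the task to leave the section */
			request_exit_np(t);
		}
		/* kernel-controlled non-preemptive sections are left alone;
		 * they trigger a preemption check when they end */
	}
}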
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index f0ab8ebc5111..3a93124e24f6 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -68,16 +68,7 @@ static void requeue(struct task_struct* t, rt_domain_t *edf)
 /* we assume the lock is being held */
 static void preempt(psnedf_domain_t *pedf)
 {
-	if (smp_processor_id() == pedf->cpu) {
-		if (pedf->scheduled && is_np(pedf->scheduled))
-			request_exit_np(pedf->scheduled);
-		else
-			set_tsk_need_resched(current);
-	} else
-		/* in case that it is a remote CPU we have to defer the
-		 * the decision to the remote CPU
-		 */
-		smp_send_reschedule(pedf->cpu);
+	preempt_if_preemptable(pedf->scheduled, pedf->cpu);
 }
 
 /* This check is trivial in partioned systems as we only have to consider
@@ -86,16 +77,15 @@ static void preempt(psnedf_domain_t *pedf)
 static int psnedf_check_resched(rt_domain_t *edf)
 {
 	psnedf_domain_t *pedf = container_of(edf, psnedf_domain_t, domain);
-	int ret = 0;
 
 	/* because this is a callback from rt_domain_t we already hold
 	 * the necessary lock for the ready queue
 	 */
 	if (edf_preemption_needed(edf, pedf->scheduled)) {
 		preempt(pedf);
-		ret = 1;
-	}
-	return ret;
+		return 1;
+	} else
+		return 0;
 }
 
 static void job_completion(struct task_struct* t)
@@ -121,7 +111,7 @@ static void psnedf_tick(struct task_struct *t)
 			TRACE("psnedf_scheduler_tick: "
 			      "%d is preemptable "
 			      " => FORCE_RESCHED\n", t->pid);
-		} else {
+		} else if (is_user_np(t)) {
 			TRACE("psnedf_scheduler_tick: "
 			      "%d is non-preemptable, "
 			      "preemption delayed.\n", t->pid);
@@ -394,6 +384,7 @@ static long psnedf_return_priority(struct pi_semaphore *sem)
 	rt_domain_t* edf = task_edf(t);
 	int ret = 0;
 	int cpu = get_partition(current);
+	int still_np;
 
 
 	/* Find new highest-priority semaphore task
@@ -404,23 +395,34 @@ static long psnedf_return_priority(struct pi_semaphore *sem)
 	if (t == sem->hp.cpu_task[cpu])
 		edf_set_hp_cpu_task(sem, cpu);
 
-	take_np(t);
+	still_np = take_np(current);
+
+	/* Since we don't nest resources, this
+	 * should always be zero */
+	BUG_ON(still_np);
+
 	if (current->rt_param.inh_task) {
 		TRACE_CUR("return priority of %s/%d\n",
 			  current->rt_param.inh_task->comm,
 			  current->rt_param.inh_task->pid);
-		spin_lock(&pedf->slock);
+	} else
+		TRACE_CUR(" no priority to return %p\n", sem);
 
-		/* Reset inh_task to NULL. */
-		current->rt_param.inh_task = NULL;
 
-		/* check if we need to reschedule */
-		if (edf_preemption_needed(edf, current))
-			preempt(pedf);
+	/* Always check for delayed preemptions that might have become
+	 * necessary due to non-preemptive execution.
+	 */
+	spin_lock(&pedf->slock);
+
+	/* Reset inh_task to NULL. */
+	current->rt_param.inh_task = NULL;
+
+	/* check if we need to reschedule */
+	if (edf_preemption_needed(edf, current))
+		preempt(pedf);
+
+	spin_unlock(&pedf->slock);
 
-		spin_unlock(&pedf->slock);
-	} else
-		TRACE_CUR(" no priority to return %p\n", sem);
 
 	return ret;
 }
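
The last hunk restructures psnedf_return_priority() so that the check for a delayed preemption runs on every priority return, under pedf->slock, rather than only when a priority had actually been inherited. Distilled into a standalone sketch (declarations and tracing elided; all helpers are the ones used in the diff above):

/* Sketch of the restructured tail of psnedf_return_priority(): always
 * check for a preemption that was delayed by a non-preemptive section,
 * even if no priority was inherited. */
static void return_priority_tail_sketch(psnedf_domain_t *pedf, rt_domain_t *edf)
{
	int still_np = take_np(current);

	/* resources are not nested, so the np count must now be zero */
	BUG_ON(still_np);

	spin_lock(&pedf->slock);

	/* drop any inherited priority */
	current->rt_param.inh_task = NULL;

	/* the preemption check is no longer conditional on inh_task */
	if (edf_preemption_needed(edf, current))
		preempt(pedf);

	spin_unlock(&pedf->slock);
}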