Diffstat (limited to 'litmus/sched_psn_edf.c')
-rw-r--r--  litmus/sched_psn_edf.c | 126
1 file changed, 0 insertions(+), 126 deletions(-)
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index c1e27960576b..fc64c1722ae9 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -309,132 +309,6 @@ static void psnedf_task_exit(struct task_struct * t)
 	raw_spin_unlock_irqrestore(&pedf->slock, flags);
 }
 
-#if 0
-static long psnedf_pi_block(struct pi_semaphore *sem,
-			    struct task_struct *new_waiter)
-{
-	psnedf_domain_t* pedf;
-	rt_domain_t* edf;
-	struct task_struct* t;
-	int cpu = get_partition(new_waiter);
-
-	BUG_ON(!new_waiter);
-
-	if (edf_higher_prio(new_waiter, sem->hp.cpu_task[cpu])) {
-		TRACE_TASK(new_waiter, " boosts priority\n");
-		pedf = task_pedf(new_waiter);
-		edf = task_edf(new_waiter);
-
-		/* interrupts already disabled */
-		raw_spin_lock(&pedf->slock);
-
-		/* store new highest-priority task */
-		sem->hp.cpu_task[cpu] = new_waiter;
-		if (sem->holder &&
-		    get_partition(sem->holder) == get_partition(new_waiter)) {
-			/* let holder inherit */
-			sem->holder->rt_param.inh_task = new_waiter;
-			t = sem->holder;
-			if (is_queued(t)) {
-				/* queued in domain*/
-				remove(edf, t);
-				/* readd to make priority change take place */
-				/* FIXME: this looks outdated */
-				if (is_released(t, litmus_clock()))
-					__add_ready(edf, t);
-				else
-					add_release(edf, t);
-			}
-		}
-
-		/* check if we need to reschedule */
-		if (edf_preemption_needed(edf, current))
-			preempt(pedf);
-
-		raw_spin_unlock(&pedf->slock);
-	}
-
-	return 0;
-}
-
-static long psnedf_inherit_priority(struct pi_semaphore *sem,
-				    struct task_struct *new_owner)
-{
-	int cpu = get_partition(new_owner);
-
-	new_owner->rt_param.inh_task = sem->hp.cpu_task[cpu];
-	if (sem->hp.cpu_task[cpu] && new_owner != sem->hp.cpu_task[cpu]) {
-		TRACE_TASK(new_owner,
-			   "inherited priority from %s/%d\n",
-			   sem->hp.cpu_task[cpu]->comm,
-			   sem->hp.cpu_task[cpu]->pid);
-	} else
-		TRACE_TASK(new_owner,
-			   "cannot inherit priority: "
-			   "no higher priority job waits on this CPU!\n");
-	/* make new owner non-preemptable as required by FMLP under
-	 * PSN-EDF.
-	 */
-	make_np(new_owner);
-	return 0;
-}
-
-
-/* This function is called on a semaphore release, and assumes that
- * the current task is also the semaphore holder.
- */
-static long psnedf_return_priority(struct pi_semaphore *sem)
-{
-	struct task_struct* t = current;
-	psnedf_domain_t* pedf = task_pedf(t);
-	rt_domain_t* edf = task_edf(t);
-	int ret = 0;
-	int cpu = get_partition(current);
-	int still_np;
-
-
-	/* Find new highest-priority semaphore task
-	 * if holder task is the current hp.cpu_task[cpu].
-	 *
-	 * Calling function holds sem->wait.lock.
-	 */
-	if (t == sem->hp.cpu_task[cpu])
-		edf_set_hp_cpu_task(sem, cpu);
-
-	still_np = take_np(current);
-
-	/* Since we don't nest resources, this
-	 * should always be zero */
-	BUG_ON(still_np);
-
-	if (current->rt_param.inh_task) {
-		TRACE_CUR("return priority of %s/%d\n",
-			  current->rt_param.inh_task->comm,
-			  current->rt_param.inh_task->pid);
-	} else
-		TRACE_CUR(" no priority to return %p\n", sem);
-
-
-	/* Always check for delayed preemptions that might have become
-	 * necessary due to non-preemptive execution.
-	 */
-	raw_spin_lock(&pedf->slock);
-
-	/* Reset inh_task to NULL. */
-	current->rt_param.inh_task = NULL;
-
-	/* check if we need to reschedule */
-	if (edf_preemption_needed(edf, current))
-		preempt(pedf);
-
-	raw_spin_unlock(&pedf->slock);
-
-
-	return ret;
-}
-
-#endif
-
 #ifdef CONFIG_LITMUS_LOCKING
 
 #include <litmus/fdso.h>