Diffstat (limited to 'litmus/sched_psn_edf.c')
-rw-r--r--  litmus/sched_psn_edf.c | 52
1 file changed, 27 insertions(+), 25 deletions(-)
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index f0ab8ebc5111..3a93124e24f6 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -68,16 +68,7 @@ static void requeue(struct task_struct* t, rt_domain_t *edf)
 /* we assume the lock is being held */
 static void preempt(psnedf_domain_t *pedf)
 {
-        if (smp_processor_id() == pedf->cpu) {
-                if (pedf->scheduled && is_np(pedf->scheduled))
-                        request_exit_np(pedf->scheduled);
-                else
-                        set_tsk_need_resched(current);
-        } else
-                /* in case that it is a remote CPU we have to defer the
-                 * the decision to the remote CPU
-                 */
-                smp_send_reschedule(pedf->cpu);
+        preempt_if_preemptable(pedf->scheduled, pedf->cpu);
 }
 
 /* This check is trivial in partioned systems as we only have to consider
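
Note on the hunk above: the open-coded local/remote preemption decision is collapsed into a single preempt_if_preemptable() helper. Reconstructed from the deleted lines, the logic that helper plausibly centralizes looks like the sketch below; the real definition lives elsewhere in the LITMUS^RT tree and may differ in detail (the _sketch name is ours).

/* Sketch of the logic preempt_if_preemptable() replaces here, reconstructed
 * from the deleted lines above. */
static void preempt_if_preemptable_sketch(struct task_struct *scheduled,
                                          int cpu)
{
        if (smp_processor_id() == cpu) {
                /* Local CPU: a non-preemptive task is asked to exit its
                 * non-preemptive section; otherwise reschedule directly. */
                if (scheduled && is_np(scheduled))
                        request_exit_np(scheduled);
                else
                        set_tsk_need_resched(current);
        } else
                /* Remote CPU: defer the decision to that CPU via an IPI. */
                smp_send_reschedule(cpu);
}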
@@ -86,16 +77,15 @@ static void preempt(psnedf_domain_t *pedf)
 static int psnedf_check_resched(rt_domain_t *edf)
 {
         psnedf_domain_t *pedf = container_of(edf, psnedf_domain_t, domain);
-        int ret = 0;
 
         /* because this is a callback from rt_domain_t we already hold
          * the necessary lock for the ready queue
          */
         if (edf_preemption_needed(edf, pedf->scheduled)) {
                 preempt(pedf);
-                ret = 1;
-        }
-        return ret;
+                return 1;
+        } else
+                return 0;
 }
 
 static void job_completion(struct task_struct* t)
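
Note on the hunk above: psnedf_check_resched() is a callback installed into the rt_domain_t, which is why the ready-queue lock is already held when it runs; the change itself only drops the ret temporary. A sketch of how such a callback is typically wired up, assuming the usual LITMUS^RT edf_domain_init() signature (the actual initialization routine in this file may differ, and the _sketch name is ours):

static void psnedf_domain_init_sketch(psnedf_domain_t *pedf, int cpu)
{
        /* edf_domain_init() stores the callback in the rt_domain_t; the
         * domain code then invokes psnedf_check_resched(), with the
         * ready-queue lock held, whenever jobs are released or requeued. */
        edf_domain_init(&pedf->domain, psnedf_check_resched, NULL);
        pedf->cpu       = cpu;
        pedf->scheduled = NULL;
}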
@@ -121,7 +111,7 @@ static void psnedf_tick(struct task_struct *t)
                 TRACE("psnedf_scheduler_tick: "
                       "%d is preemptable "
                       " => FORCE_RESCHED\n", t->pid);
-        } else {
+        } else if (is_user_np(t)) {
                 TRACE("psnedf_scheduler_tick: "
                       "%d is non-preemptable, "
                       "preemption delayed.\n", t->pid);
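
Note on the hunk above: the tick handler previously delayed preemption for every non-preemptable task; after this change only tasks in a user-signaled non-preemptive section (is_user_np()) have their preemption deferred. A sketch of the budget-check control flow under that reading; the helpers are LITMUS^RT's, but the exact body of psnedf_tick() may differ from this reconstruction (the _sketch name is ours):

static void psnedf_tick_sketch(struct task_struct *t)
{
        if (is_realtime(t) && budget_exhausted(t)) {
                if (!is_np(t)) {
                        /* fully preemptable: force a reschedule now */
                        set_tsk_need_resched(t);
                } else if (is_user_np(t)) {
                        /* user-space non-preemptive section: delay the
                         * preemption and ask the task to leave the section */
                        request_exit_np(t);
                }
                /* a kernel-level non-preemptive section is left alone; the
                 * preemption is picked up when the task exits the section */
        }
}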
@@ -394,6 +384,7 @@ static long psnedf_return_priority(struct pi_semaphore *sem)
         rt_domain_t* edf = task_edf(t);
         int ret = 0;
         int cpu = get_partition(current);
+        int still_np;
 
 
         /* Find new highest-priority semaphore task
@@ -404,23 +395,34 @@ static long psnedf_return_priority(struct pi_semaphore *sem)
         if (t == sem->hp.cpu_task[cpu])
                 edf_set_hp_cpu_task(sem, cpu);
 
-        take_np(t);
+        still_np = take_np(current);
+
+        /* Since we don't nest resources, this
+         * should always be zero */
+        BUG_ON(still_np);
+
         if (current->rt_param.inh_task) {
                 TRACE_CUR("return priority of %s/%d\n",
                           current->rt_param.inh_task->comm,
                           current->rt_param.inh_task->pid);
-                spin_lock(&pedf->slock);
+        } else
+                TRACE_CUR(" no priority to return %p\n", sem);
 
-                /* Reset inh_task to NULL. */
-                current->rt_param.inh_task = NULL;
 
-                /* check if we need to reschedule */
-                if (edf_preemption_needed(edf, current))
-                        preempt(pedf);
+        /* Always check for delayed preemptions that might have become
+         * necessary due to non-preemptive execution.
+         */
+        spin_lock(&pedf->slock);
+
+        /* Reset inh_task to NULL. */
+        current->rt_param.inh_task = NULL;
+
+        /* check if we need to reschedule */
+        if (edf_preemption_needed(edf, current))
+                preempt(pedf);
+
+        spin_unlock(&pedf->slock);
 
-                spin_unlock(&pedf->slock);
-        } else
-                TRACE_CUR(" no priority to return %p\n", sem);
 
         return ret;
 }
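
Note on the hunk above: take_np() now operates on current and returns the remaining non-preemptive nesting depth, which is asserted to be zero because PSN-EDF semaphores do not nest; in addition, the lock acquisition and preemption check are hoisted out of the inh_task branch so that preemptions delayed during non-preemptive execution are always honored, even when no priority was inherited. The return-value semantics implied for take_np() might look like the hypothetical sketch below; the np_depth field is an assumption for illustration, not the real rt_param layout.

static inline int take_np_sketch(struct task_struct *t)
{
        /* leave one level of non-preemptive execution; np_depth is a
         * hypothetical stand-in for the real rt_param bookkeeping */
        t->rt_param.np_depth--;
        /* return the remaining depth: zero means the task is preemptable
         * again, which the caller above enforces with BUG_ON() */
        return t->rt_param.np_depth;
}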