Diffstat (limited to 'litmus/sched_psn_edf.c')
 litmus/sched_psn_edf.c | 22 +++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index eaaec38f43da..4e117be9546b 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -17,6 +17,7 @@
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
 #include <litmus/preempt.h>
+#include <litmus/budget.h>
 #include <litmus/sched_plugin.h>
 #include <litmus/edf_common.h>
 #include <litmus/sched_trace.h>
@@ -132,6 +133,15 @@ static void unboost_priority(struct task_struct* t)
 
 #endif
 
+static int psnedf_preempt_check(psnedf_domain_t *pedf)
+{
+	if (edf_preemption_needed(&pedf->domain, pedf->scheduled)) {
+		preempt(pedf);
+		return 1;
+	} else
+		return 0;
+}
+
 /* This check is trivial in partioned systems as we only have to consider
  * the CPU of the partition.
  */
@@ -142,11 +152,7 @@ static int psnedf_check_resched(rt_domain_t *edf)
 	/* because this is a callback from rt_domain_t we already hold
 	 * the necessary lock for the ready queue
 	 */
-	if (edf_preemption_needed(edf, pedf->scheduled)) {
-		preempt(pedf);
-		return 1;
-	} else
-		return 0;
+	return psnedf_preempt_check(pedf);
 }
 
 static void job_completion(struct task_struct* t, int forced)
@@ -301,7 +307,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running)
 	} else {
 		requeue(t, edf);
 		/* maybe we have to reschedule */
-		preempt(pedf);
+		psnedf_preempt_check(pedf);
 	}
 	raw_spin_unlock_irqrestore(&pedf->slock, flags);
 }
@@ -337,8 +343,10 @@ static void psnedf_task_wake_up(struct task_struct *task)
 	 * de-scheduling the task, i.e., wake_up() raced with schedule()
 	 * and won.
 	 */
-	if (pedf->scheduled != task)
+	if (pedf->scheduled != task) {
 		requeue(task, edf);
+		psnedf_preempt_check(pedf);
+	}
 
 	raw_spin_unlock_irqrestore(&pedf->slock, flags);
 	TRACE_TASK(task, "wake up done\n");
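
What the patch does, in short: the inline preemption test in psnedf_check_resched() is factored out into psnedf_preempt_check(); the new helper replaces the unconditional preempt() in psnedf_task_new(), and a preemption check is added to the wake-up path, which previously requeued the woken job without checking whether it should preempt the currently scheduled one. Below is a minimal standalone sketch of that pattern; struct fake_task, the ready_head field, and the stub bodies of edf_preemption_needed() and preempt() are simplified stand-ins for illustration, not the LITMUS^RT definitions.

/* Minimal standalone sketch of the pattern the patch consolidates.
 * None of these types or helpers are the real LITMUS^RT definitions;
 * they are stand-ins so the example compiles on its own. */
#include <stdio.h>

struct fake_task {
	long long deadline;              /* absolute deadline of the job */
};

typedef struct {
	struct fake_task *scheduled;     /* job currently running on this partition */
	struct fake_task *ready_head;    /* earliest-deadline job in the ready queue */
} psnedf_domain_t;

/* Stand-in for edf_preemption_needed(): preempt when the ready queue holds
 * a job with an earlier deadline than the currently scheduled one (or when
 * something is ready and nothing is scheduled). */
static int edf_preemption_needed(psnedf_domain_t *pedf, struct fake_task *sched)
{
	if (!pedf->ready_head)
		return 0;
	if (!sched)
		return 1;
	return pedf->ready_head->deadline < sched->deadline;
}

/* Stand-in for preempt(): the real helper flags the partition's CPU so that
 * the scheduler runs again; here we just log the request. */
static void preempt(psnedf_domain_t *pedf)
{
	printf("reschedule requested (scheduled deadline %lld)\n",
	       pedf->scheduled ? pedf->scheduled->deadline : -1);
}

/* The factored-out check: test once, trigger a preemption if needed, and
 * report whether one was triggered.  In the patch this is called from the
 * check_resched callback, from task arrival, and now also from wake-up. */
static int psnedf_preempt_check(psnedf_domain_t *pedf)
{
	if (edf_preemption_needed(pedf, pedf->scheduled)) {
		preempt(pedf);
		return 1;
	}
	return 0;
}

int main(void)
{
	struct fake_task running = { .deadline = 100 };
	struct fake_task woken   = { .deadline = 50 };
	psnedf_domain_t pedf = { .scheduled = &running, .ready_head = &woken };

	/* Wake-up path after the fix: the woken job has been requeued,
	 * so check immediately whether it should preempt. */
	printf("preempted: %d\n", psnedf_preempt_check(&pedf));
	return 0;
}

In the sketch the woken job's deadline (50) beats the running job's (100), so psnedf_preempt_check() requests a reschedule and returns 1 — the behavior the wake-up path previously only got at the next scheduling event.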