about summary refs log tree commit diff stats
path: root/litmus/sched_cedf.c
diff options
context:
space:
mode:
Diffstat (limited to 'litmus/sched_cedf.c')
-rw-r--r--  litmus/sched_cedf.c  |  10
1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index a729d97535e9..111e4fb1c62b 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -34,6 +34,7 @@
34 34
35#include <litmus/litmus.h> 35#include <litmus/litmus.h>
36#include <litmus/jobs.h> 36#include <litmus/jobs.h>
37#include <litmus/preempt.h>
37#include <litmus/sched_plugin.h> 38#include <litmus/sched_plugin.h>
38#include <litmus/edf_common.h> 39#include <litmus/edf_common.h>
39#include <litmus/sched_trace.h> 40#include <litmus/sched_trace.h>
@@ -209,12 +210,6 @@ static noinline void unlink(struct task_struct* t)
209{ 210{
210 cpu_entry_t *entry; 211 cpu_entry_t *entry;
211 212
212 if (unlikely(!t)) {
213 TRACE_BUG_ON(!t);
214 return;
215 }
216
217
218 if (t->rt_param.linked_on != NO_CPU) { 213 if (t->rt_param.linked_on != NO_CPU) {
219 /* unlink */ 214 /* unlink */
220 entry = &per_cpu(cedf_cpu_entries, t->rt_param.linked_on); 215 entry = &per_cpu(cedf_cpu_entries, t->rt_param.linked_on);
@@ -341,7 +336,7 @@ static void cedf_tick(struct task_struct* t)
341 /* np tasks will be preempted when they become 336 /* np tasks will be preempted when they become
342 * preemptable again 337 * preemptable again
343 */ 338 */
344 set_tsk_need_resched(t); 339 litmus_reschedule_local();
345 set_will_schedule(); 340 set_will_schedule();
346 TRACE("cedf_scheduler_tick: " 341 TRACE("cedf_scheduler_tick: "
347 "%d is preemptable " 342 "%d is preemptable "
@@ -466,6 +461,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
466 if (exists) 461 if (exists)
467 next = prev; 462 next = prev;
468 463
464 sched_state_task_picked();
469 raw_spin_unlock(&cluster->lock); 465 raw_spin_unlock(&cluster->lock);
470 466
471#ifdef WANT_ALL_SCHED_EVENTS 467#ifdef WANT_ALL_SCHED_EVENTS