author     Bjoern B. Brandenburg <bbb@cs.unc.edu>   2009-04-29 12:48:24 -0400
committer  Bjoern B. Brandenburg <bbb@cs.unc.edu>   2009-04-29 12:48:24 -0400
commit     671c26683c0ec93b5cd092101363f0741cc4c9c1
tree       79813d21222dbb85be47c1eb993de35829dcdb36
parent     623d6fe767d67a3975c6acbf46808ac1b7d2346b
release master support: fix new job code
-rw-r--r--  litmus/sched_gedf.c     | 20
-rw-r--r--  litmus/sched_ghq_edf.c  |  3
2 files changed, 15 insertions, 8 deletions
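The fix is the same in both plugins: a task that is created while already running on the dedicated release-master CPU cannot rely on schedule() to queue its first job, so *_task_new() must call *_job_arrival() explicitly in that case, and the job parameters must be set up before that decision is made. Below is a minimal sketch of the resulting gedf_task_new() flow, paraphrased from the hunks that follow; the release-master branch inside the running case is inferred from context the hunks do not show, so treat this as an illustration under that assumption, not the verbatim file contents.

/* Sketch only -- paraphrased from this commit's hunks, not the exact file. */
static void gedf_task_new(struct task_struct *t, int on_rq, int running)
{
	unsigned long flags;
	cpu_entry_t *entry = NULL;

	spin_lock_irqsave(&gedf_cpu_lock, flags);

	/* Set up job parameters first, before any queueing decision. */
	release_at(t, litmus_clock());

	if (running) {
		entry = &per_cpu(gedf_cpu_entries, task_cpu(t));
		BUG_ON(entry->scheduled);
		if (entry->cpu != gedf.release_master) {
			/* Ordinary CPU: record the already-running task. */
			entry->scheduled = t;
			tsk_rt(t)->scheduled_on = task_cpu(t);
		} else {
			/* Release master: do not treat it as scheduled there. */
			tsk_rt(t)->scheduled_on = NO_CPU;
		}
	} else {
		tsk_rt(t)->scheduled_on = NO_CPU;
	}
	tsk_rt(t)->linked_on = NO_CPU;

	spin_unlock_irqrestore(&gedf_cpu_lock, flags);

	/* schedule() only picks the job up on an ordinary CPU where the task
	 * is already running; otherwise announce the arrival ourselves.
	 * gedf_job_arrival() must be called without gedf_cpu_lock held. */
	if (!running || entry->cpu == gedf.release_master)
		gedf_job_arrival(t);
}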
diff --git a/litmus/sched_gedf.c b/litmus/sched_gedf.c
index 753bb0418b..3e544bb159 100644
--- a/litmus/sched_gedf.c
+++ b/litmus/sched_gedf.c
@@ -1,3 +1,4 @@
+
 #include <linux/spinlock.h>
 #include <linux/percpu.h>
 #include <linux/sched.h>
@@ -217,7 +218,7 @@ static void check_for_preemptions(void)
 	struct task_struct *task, *unlinked;
 	cpu_entry_t* last;
 
 
 	local_irq_save(flags);
 	while (!done) {
 		unlinked = NULL;
@@ -258,7 +259,7 @@ static void check_for_preemptions(void)
 	local_irq_restore(flags);
 }
 
 /* gedf_job_arrival: task is either resumed or released
  * call only unlocked!
  */
 static noinline void gedf_job_arrival(struct task_struct* task)
@@ -371,7 +372,7 @@ static struct task_struct* gedf_schedule(struct task_struct * prev)
 	if (preempt && entry->linked)
 		TRACE_TASK(prev, "will be preempted by %s/%d\n",
 			   entry->linked->comm, entry->linked->pid);
 
 	/* If a task blocks we have no choice but to reschedule.
 	 */
 	if (blocks)
@@ -471,6 +472,10 @@ static void gedf_task_new(struct task_struct * t, int on_rq, int running)
 	TRACE("gedf: task new %d\n", t->pid);
 
 	spin_lock_irqsave(&gedf_cpu_lock, flags);
+
+	/* setup job params */
+	release_at(t, litmus_clock());
+
 	if (running) {
 		entry = &per_cpu(gedf_cpu_entries, task_cpu(t));
 		BUG_ON(entry->scheduled);
@@ -479,15 +484,16 @@ static void gedf_task_new(struct task_struct * t, int on_rq, int running)
 			t->rt_param.scheduled_on = task_cpu(t);
 		} else
 			tsk_rt(t)->scheduled_on = NO_CPU;
-	} else
+	} else {
 		tsk_rt(t)->scheduled_on = NO_CPU;
+	}
 	tsk_rt(t)->linked_on = NO_CPU;
 
-	/* setup job params */
-	release_at(t, litmus_clock());
 	spin_unlock_irqrestore(&gedf_cpu_lock, flags);
 
-	gedf_job_arrival(t);
+	if (!running || entry->cpu == gedf.release_master)
+		/* schedule() will not insert task into ready_queue */
+		gedf_job_arrival(t);
 }
 
 static void gedf_task_wake_up(struct task_struct *task)
diff --git a/litmus/sched_ghq_edf.c b/litmus/sched_ghq_edf.c
index 9018ca5903..621d515ccb 100644
--- a/litmus/sched_ghq_edf.c
+++ b/litmus/sched_ghq_edf.c
@@ -593,12 +593,13 @@ static void ghqedf_task_new(struct task_struct * t, int on_rq, int running)
 			tsk_rt(t)->scheduled_on = NO_CPU;
 	} else {
 		tsk_rt(t)->scheduled_on = NO_CPU;
-		ghqedf_job_arrival(t);
 	}
 	tsk_rt(t)->linked_on = NO_CPU;
 
 	spin_unlock_irqrestore(&ghqedf_cpu_lock, flags);
 
+	if (!running || entry->cpu == ghqedf.release_master)
+		ghqedf_job_arrival(t);
 }
 
 static void ghqedf_task_wake_up(struct task_struct *task)
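Taken together, the guard added at the end of both *_task_new() implementations makes the job-queueing responsibility explicit, and in sched_ghq_edf.c it also moves the arrival call out of the locked region (the gedf arrival path is documented "call only unlocked!", and presumably the same constraint applies to ghqedf_job_arrival()). The case breakdown below is derived from the guard in the diff; the rationale that the release master never schedules real-time tasks is the usual LITMUS^RT dedicated-release-master behaviour, assumed here rather than shown in this commit.

/* Who queues the new job after *_task_new()?
 * (derived from "if (!running || entry->cpu == *.release_master)")
 *
 *   running?  on release master?   *_job_arrival() called here?
 *   --------  ------------------   ----------------------------
 *   no        (n/a)                yes -- nobody else will queue the job
 *   yes       no                   no  -- schedule() on that CPU handles it
 *   yes       yes                  yes -- the release master does not
 *                                         schedule real-time tasks
 */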