about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorBjoern B. Brandenburg <bbb@cs.unc.edu>2007-04-18 15:33:00 -0400
committerBjoern B. Brandenburg <bbb@cs.unc.edu>2007-04-18 15:33:00 -0400
commit491d9fb9c2bd8c9ab6078e5f27fcdb4766e0f899 (patch)
tree81c3c1ec946a8a1ed6e9ced5854a8f951481570b /kernel
parentba670d7f621e394d65da84f9a1fbec6719845f13 (diff)
Force rescheduling if a task that leaves a np section is not linked.
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched_gsn_edf.c20
1 files changed, 15 insertions, 5 deletions
diff --git a/kernel/sched_gsn_edf.c b/kernel/sched_gsn_edf.c
index c1e3b7b8d0..b8cc48e584 100644
--- a/kernel/sched_gsn_edf.c
+++ b/kernel/sched_gsn_edf.c
@@ -341,7 +341,8 @@ static long gsnedf_prepare_task(struct task_struct * t)
341 if (t->state == TASK_STOPPED) { 341 if (t->state == TASK_STOPPED) {
342 __setscheduler(t, SCHED_FIFO, MAX_RT_PRIO - 1); 342 __setscheduler(t, SCHED_FIFO, MAX_RT_PRIO - 1);
343 343
344 t->rt_param.scheduled_on = NO_CPU; 344 t->rt_param.scheduled_on = NO_CPU;
345 t->rt_param.is_non_preemptable = 0;
345 if (get_rt_mode() == MODE_RT_RUN) 346 if (get_rt_mode() == MODE_RT_RUN)
346 /* The action is already on. 347 /* The action is already on.
347 * Prepare immediate release 348 * Prepare immediate release
@@ -377,7 +378,9 @@ static void gsnedf_wake_up_task(struct task_struct *task)
377 task->state = TASK_RUNNING; 378 task->state = TASK_RUNNING;
378 379
379 /* FIXME: We need to take suspensions because of semaphores into 380 /* FIXME: We need to take suspensions because of semaphores into
380 * account! */ 381 * account! If a job resumes after being suspended due to acquiring
382 * a semaphore, it should never be treated as a new job release.
383 */
381 if (is_tardy(task)) { 384 if (is_tardy(task)) {
382 /* new sporadic release */ 385 /* new sporadic release */
383 prepare_new_release(task); 386 prepare_new_release(task);
@@ -431,11 +434,18 @@ static long gsnedf_exit_np(struct task_struct * t)
431{ 434{
432 unsigned long flags; 435 unsigned long flags;
433 int ret = 0; 436 int ret = 0;
437 cpu_entry_t *entry;
434 438
435 queue_lock_irqsave(&gsnedf_lock, flags); 439 queue_lock_irqsave(&gsnedf_lock, flags);
436 if (is_np(t)) 440 if (is_np(t)) {
437 t->rt_param.is_non_preemptable--; 441 t->rt_param.is_non_preemptable--;
438 else 442 entry = &__get_cpu_var(gsnedf_cpu_entries);
443 if (!is_np(t) && entry->linked != t) {
444 /* t is now preemptable and not linked */
445 BUG_ON(t != entry->scheduled);
446 set_tsk_need_resched(current);
447 }
448 } else
439 ret = -EPERM; 449 ret = -EPERM;
440 queue_unlock_irqrestore(&gsnedf_lock, flags); 450 queue_unlock_irqrestore(&gsnedf_lock, flags);
441 return ret; 451 return ret;
@@ -499,7 +509,7 @@ static sched_plugin_t s_plugin __cacheline_aligned_in_smp = {
499 .task_blocks = gsnedf_task_blocks, \ 509 .task_blocks = gsnedf_task_blocks, \
500 .enter_np = gsnedf_enter_np, \ 510 .enter_np = gsnedf_enter_np, \
501 .exit_np = gsnedf_exit_np \ 511 .exit_np = gsnedf_exit_np \
502 } 512}
503 513
504 514
505sched_plugin_t *__init init_gsn_edf_plugin(void) 515sched_plugin_t *__init init_gsn_edf_plugin(void)