author		Bjoern B. Brandenburg <bbb@cs.unc.edu>	2007-10-04 17:16:10 -0400
committer	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2007-10-04 17:16:10 -0400
commit		d6ce337cf4c70c6b07fb3b4986482fb1f277394a (patch)
tree		ba8500f4346830868ceb03db427a472fdc85226b /kernel
parent		11f62e3feaf3282d1983d0a40019a0d5e9222141 (diff)
adaptive: allow jobs to overrun
Do not forcefully preempt tasks.
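
With this change the scheduler tick only accounts consumed execution time; a job that exceeds its provisioned execution cost keeps running, and the overrun only becomes visible as the "delta WCET" logged when the job completes. Below is a minimal userspace sketch of that accounting scheme (an illustrative simplification, not the kernel code; "struct job" and its helpers are hypothetical, and the exec_time/exec_cost/job_no fields merely mirror the rt_param members touched in the diff):

/*
 * Sketch of tick-based accounting without budget enforcement (assumed
 * simplification of the behavior introduced by this patch): each tick
 * counts execution time, nothing is preempted when the budget runs out,
 * and the overrun relative to exec_cost is reported only at completion.
 */
#include <stdio.h>

struct job {
	unsigned int exec_time;   /* ticks consumed so far      */
	unsigned int exec_cost;   /* provisioned WCET in ticks  */
	int          job_no;
};

/* Called once per tick while the job runs: account, never force a reschedule. */
static void tick(struct job *j)
{
	j->exec_time++;
}

/* Called when the job signals completion: report how far it overran. */
static void job_completion(struct job *j)
{
	printf("job %d completes, delta WCET = %d\n",
	       j->job_no, (int)(j->exec_time - j->exec_cost));
	j->job_no++;
	j->exec_time = 0;
}

int main(void)
{
	struct job j = { .exec_time = 0, .exec_cost = 10, .job_no = 1 };
	int t;

	/* Let the job run for 13 ticks: it overruns its budget by 3 but is
	 * not preempted; the overrun shows up only at completion. */
	for (t = 0; t < 13; t++)
		tick(&j);
	job_completion(&j);
	return 0;
}

Running the sketch prints "job 1 completes, delta WCET = 3", the userspace analogue of the new TRACE_TASK() message added to job_completion() below.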
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched_adaptive.c	36
1 file changed, 11 insertions(+), 25 deletions(-)
diff --git a/kernel/sched_adaptive.c b/kernel/sched_adaptive.c
index 1942422452..f31450eaaa 100644
--- a/kernel/sched_adaptive.c
+++ b/kernel/sched_adaptive.c
@@ -333,28 +333,12 @@ static reschedule_check_t adaptive_scheduler_tick(void)
 	struct task_struct* t = current;
 	reschedule_check_t want_resched = NO_RESCHED;
 
-	/* expire tasks even if not in real-time mode
-	 * this makes sure that at the end of real-time mode
-	 * no task "runs away forever".
+	/* Account for exec time.
+	 * Since we don't preempt forcefully, nothing else needs to be done.
 	 */
 	if (is_realtime(t))
-		TRACE_CUR("before dec: time_slice == %u\n", t->time_slice);
+		t->rt_param.times.exec_time++;
 
-	if (is_realtime(t) && t->time_slice && !--t->time_slice) {
-		if (!is_np(t)) { /* np tasks will be preempted when they become
-				    preemptable again */
-			want_resched = FORCE_RESCHED;
-			set_will_schedule();
-			TRACE("adaptive_scheduler_tick: "
-			      "%d is preemptable "
-			      " => FORCE_RESCHED\n", t->pid);
-		} else {
-			TRACE("adaptive_scheduler_tick: "
-			      "%d is non-preemptable, "
-			      "preemption delayed.\n", t->pid);
-			request_exit_np(t);
-		}
-	}
 
 	/* only the first CPU needs to release jobs */
 	if (get_rt_mode() == MODE_RT_RUN && smp_processor_id() == 0) {
@@ -380,7 +364,10 @@ static noinline void job_completion(struct task_struct *t)
 
 	sched_trace_job_completion(t);
 
-	TRACE_TASK(t, "job_completion().\n");
+	TRACE_TASK(t, "job %d completes, delta WCET = %d\n",
+		   t->rt_param.times.job_no,
+		   t->rt_param.times.exec_time -
+		   t->rt_param.basic_params.exec_cost);
 
 	/* set flags */
 	set_rt_flags(t, RT_F_SLEEP);
@@ -421,7 +408,7 @@ static int adaptive_schedule(struct task_struct * prev,
 			      runqueue_t * rq)
 {
 	cpu_entry_t* entry = &__get_cpu_var(adaptive_cpu_entries);
-	int out_of_time, sleep, preempt, np, exists,
+	int sleep, preempt, np, exists,
 	    rt, blocks;
 	struct task_struct* linked;
 
@@ -436,7 +423,6 @@ static int adaptive_schedule(struct task_struct * prev,
 	/* (0) Determine state */
 	exists = entry->scheduled != NULL;
 	blocks = exists && !is_running(entry->scheduled);
-	out_of_time = exists && !entry->scheduled->time_slice;
 	np = exists && is_np(entry->scheduled);
 	sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
 	preempt = entry->scheduled != entry->linked;
@@ -452,7 +438,7 @@ static int adaptive_schedule(struct task_struct * prev,
 	 * that we are still linked. Multiple calls to request_exit_np() don't
 	 * hurt.
 	 */
-	if (np && (out_of_time || preempt || sleep)) {
+	if (np && (preempt || sleep)) {
 		unlink(entry->scheduled);
 		request_exit_np(entry->scheduled);
 	}
@@ -461,7 +447,7 @@ static int adaptive_schedule(struct task_struct * prev,
 	 * budget or wants to sleep completes. We may have to reschedule after
 	 * this.
 	 */
-	if (!np && (out_of_time || sleep))
+	if (!np && sleep)
 		job_completion(entry->scheduled);
 
 	/* Stop real-time tasks when we leave real-time mode