author     Jeremy Erickson <jerickso@cs.unc.edu>   2010-11-23 15:17:11 -0500
committer  Jonathan Herman <hermanjl@cs.unc.edu>   2011-08-25 14:41:20 -0400
commit     eeb4b063c6a1b6b8e65abc5e30576df322f6e0dc (patch)
tree       b0194deb1a4a374500cfb000980d015612070334
parent     63d34163b825fab33ff621260f4574d7d50ae288 (diff)
Work towards slack scheduling (compiles, but not yet complete.)
-rw-r--r--  include/litmus/rt_param.h    1
-rw-r--r--  litmus/jobs.c                3
-rw-r--r--  litmus/sched_mc.c          230
3 files changed, 183 insertions, 51 deletions
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index a7ea5e7b775e..3a456e7135d8 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -101,6 +101,7 @@ struct rt_job {
 	unsigned int job_no;
 
 	lt_t ghost_budget;
+	int is_ghost;
 };
 
 struct pfair_param;
diff --git a/litmus/jobs.c b/litmus/jobs.c
index 36e314625d86..99b0bd9858f2 100644
--- a/litmus/jobs.c
+++ b/litmus/jobs.c
@@ -13,6 +13,9 @@ void prepare_for_next_period(struct task_struct *t)
 	t->rt_param.job_params.release = t->rt_param.job_params.deadline;
 	t->rt_param.job_params.deadline += get_rt_period(t);
 	t->rt_param.job_params.exec_time = 0;
+	/* mixed criticality stuff*/
+	t->rt_param.job_params.is_ghost = 0;
+	t->rt_param.job_params.ghost_budget = 0;
 	/* update job sequence number */
 	t->rt_param.job_params.job_no++;
 
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 4db519d86aaf..1fad42aaadf2 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -95,7 +95,7 @@ typedef struct {
 	struct task_struct* scheduled; /* only RT tasks */
 	atomic_t will_schedule; /* prevent unneeded IPIs */
 	struct bheap_node* hn;
-	task_struct* ghost_tasks[CRIT_LEVEL_D+1];
+	struct task_struct* ghost_tasks[CRIT_LEVEL_D+1];
 } cpu_entry_t;
 
 DEFINE_PER_CPU(cpu_entry_t, mc_cpu_entries);
@@ -110,7 +110,7 @@ cpu_entry_t* mc_cpus[NR_CPUS];
 	(atomic_read(&per_cpu(mc_cpu_entries, cpu).will_schedule))
 #define remote_cpu_entry(cpu) (&per_cpu(mc_cpu_entries, cpu))
 
-#define is_ghost(t) (tsk_rt(t)->job_params.ghost_budget > 0)
+#define is_ghost(t) (tsk_rt(t)->job_params.is_ghost)
 
 
 /* the cpus queue themselves according to priority in here */
@@ -257,10 +257,41 @@ static cpu_entry_t* lowest_prio_cpu(void)
 	return hn->value;
 }
 
+/* Forward Declarations*/
+static noinline void unlink(struct task_struct* t);
+static noinline void job_completion(struct task_struct *t, int forced);
+
+/* update_ghost_time - Do time accounting for a ghost job.
+ * Updates ghost budget and handles expired ghost budget.
+ * Called from unlink(), mc_tick().
+ * TODO: LOCK ACCOUNTING SHOULD BE CONSIDERED
+ */
+static void update_ghost_time(struct task_struct *p)
+{
+	u64 delta;
+	u64 clock;
+
+	BUG_ON(!is_ghost(p));
+	clock = cpu_clock(p->rt_param.linked_on);
+	delta = clock - p->se.exec_start;
+	if (unlikely ((s64)delta < 0)) {
+		delta = 0;
+	}
+	if (p->rt_param.job_params.ghost_budget <= delta) {
+		p->rt_param.job_params.ghost_budget = 0;
+		job_completion(p, 0);
+	}
+	else{
+		p->rt_param.job_params.ghost_budget -= delta;
+		p->se.exec_start = clock;
+	}
+}
+
 
 /* link_task_to_cpu - Update the link of a CPU.
  * Handles the case where the to-be-linked task is already
  * scheduled on a different CPU.
+ * Also handles ghost jobs and preemption of ghost jobs.
  * Called from unlink(), prepare_preemption(), and mc_schedule()
  * Callers hold global lock
  */
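
The accounting in update_ghost_time() above boils down to draining the remaining ghost budget by elapsed wall-clock time. The standalone sketch below (userspace C with illustrative names and a stub completion hook, not the LITMUS^RT code itself) models that arithmetic: take the time elapsed since the last checkpoint, clamp a negative delta to zero, and either finish the ghost job or subtract the delta and advance the checkpoint.

    /* Simplified model of the ghost-budget drain; all names are illustrative. */
    #include <stdint.h>
    #include <stdio.h>

    struct ghost_job {
            uint64_t ghost_budget;   /* remaining budget, in ns */
            uint64_t exec_start;     /* last accounting checkpoint, in ns */
            int      is_ghost;
    };

    static void ghost_job_complete(struct ghost_job *g)
    {
            g->is_ghost = 0;         /* stand-in for job_completion() */
    }

    /* Mirrors the structure of update_ghost_time(): drain by elapsed time. */
    static void ghost_account(struct ghost_job *g, uint64_t now)
    {
            uint64_t delta = now - g->exec_start;

            if ((int64_t)delta < 0)          /* clock went backwards: charge nothing */
                    delta = 0;
            if (g->ghost_budget <= delta) {  /* budget exhausted: ghost job is done */
                    g->ghost_budget = 0;
                    ghost_job_complete(g);
            } else {                         /* otherwise subtract and re-checkpoint */
                    g->ghost_budget -= delta;
                    g->exec_start = now;
            }
    }

    int main(void)
    {
            struct ghost_job g = { .ghost_budget = 5000, .exec_start = 1000, .is_ghost = 1 };

            ghost_account(&g, 3000);  /* 2000 ns elapse: budget drops to 3000 */
            ghost_account(&g, 9000);  /* 6000 ns elapse: budget exhausted, job completes */
            printf("budget=%llu is_ghost=%d\n",
                   (unsigned long long)g.ghost_budget, g.is_ghost);
            return 0;
    }

Re-checkpointing exec_start only on the non-completing path matches the hunk above, where a drained budget immediately hands the job to job_completion().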
@@ -270,71 +301,112 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 	cpu_entry_t *sched;
 	struct task_struct* tmp;
 	int on_cpu;
+	int i;
 
 	BUG_ON(linked && !is_realtime(linked));
 	BUG_ON(linked && is_realtime(linked) &&
 	       (linked->rt_param.task_params.crit < CRIT_LEVEL_C) &&
 	       (linked->rt_param.task_params.cpu != entry->cpu));
 
-	/* Currently linked task is set to be unlinked. */
-	if (entry->linked) {
-		entry->linked->rt_param.linked_on = NO_CPU;
-	}
-
-	/* Link new task to CPU. */
-	if (linked) {
-		set_rt_flags(linked, RT_F_RUNNING);
-		/* handle task is already scheduled somewhere! */
-		on_cpu = linked->rt_param.scheduled_on;
-		if (on_cpu != NO_CPU) {
-			sched = &per_cpu(mc_cpu_entries, on_cpu);
-			/* this should only happen if not linked already */
-			BUG_ON(sched->linked == linked);
-
-			/* If we are already scheduled on the CPU to which we
-			 * wanted to link, we don't need to do the swap --
-			 * we just link ourselves to the CPU and depend on
-			 * the caller to get things right.
-			 *
-			 * We can only safely swap if neither task is
-			 * partitioned.
-			 */
-			if (entry != sched &&
-			    linked->rt_param.task_params.crit >
-			    CRIT_LEVEL_B &&
-			    (!sched->linked ||
-			     sched->linked->rt_param.task_params.crit >
-			     CRIT_LEVEL_B)){
-				TRACE_TASK(linked,
-					"already scheduled on %d, updating link.\n",
-					sched->cpu);
+	if (linked && is_ghost(linked)) {
+		BUG_ON(entry->linked &&
+		       entry->linked->rt_param.task_params.crit <
+		       linked->rt_param.task_params.crit);
+		tmp = entry->ghost_tasks[linked->rt_param.task_params.crit];
+		if (tmp) {
+			unlink(tmp);
+		}
+		/* We shouldn't link a ghost job that is already somewhere
+		 * else - the caller is responsible for unlinking first.
+		 */
+		BUG_ON(linked->rt_param.linked_on != NO_CPU);
+		linked->rt_param.linked_on = entry->cpu;
+		linked->se.exec_start = cpu_clock(entry->cpu);
+		entry->ghost_tasks[linked->rt_param.task_params.crit] = linked;
+	}
+	else{
+		/* Currently linked task is set to be unlinked. */
+		if (entry->linked) {
+			entry->linked->rt_param.linked_on = NO_CPU;
+		}
+
+		/* Link new task to CPU. */
+		if (linked) {
+			set_rt_flags(linked, RT_F_RUNNING);
+			/* handle task is already scheduled somewhere! */
+			on_cpu = linked->rt_param.scheduled_on;
+			if (on_cpu != NO_CPU) {
+				sched = &per_cpu(mc_cpu_entries, on_cpu);
+				/* this should only happen if not linked
+				 * already
+				 */
+				BUG_ON(sched->linked == linked);
+
+				/* If we are already scheduled on the CPU to
+				 * which we wanted to link, we don't need to do
+				 * the swap -- we just link ourselves to the
+				 * CPU and depend on the caller to get things
+				 * right.
+				 *
+				 * Also, we can only safely swap if neither
+				 * task is partitioned.
+				 */
 				tmp = sched->linked;
-				linked->rt_param.linked_on = sched->cpu;
-				sched->linked = linked;
-				update_cpu_position(sched);
-				linked = tmp;
+				if (entry != sched &&
+				    linked->rt_param.task_params.crit >
+				    CRIT_LEVEL_B &&
+				    (!tmp || tmp->rt_param.task_params.crit
+				     > CRIT_LEVEL_B)) {
+					TRACE_TASK(linked,
+					"already scheduled on %d, updating link.\n",
+						sched->cpu);
+					linked->rt_param.linked_on = sched->cpu;
+					sched->linked = linked;
+					for (i = linked->
+						rt_param.task_params.crit;
+						i < CRIT_LEVEL_D + 1; i++) {
+						if (sched->ghost_tasks[i]){
+							unlink(sched->
+								ghost_tasks[i]);
+						}
+					}
+					update_cpu_position(sched);
+					linked = tmp;
+				}
+			}
+			if (linked) { /* might be NULL due to swap */
+				linked->rt_param.linked_on = entry->cpu;
+				for (i = linked->rt_param.task_params.crit;
+						i < CRIT_LEVEL_D + 1; i++){
+					if (entry->ghost_tasks[i]){
+						unlink(entry->ghost_tasks[i]);
+						/*TODO: make sure booted tasks
+						 * get rescheduled if needed
+						 */
+					}
+				}
 			}
 		}
-		if (linked) /* might be NULL due to swap */
-			linked->rt_param.linked_on = entry->cpu;
+		entry->linked = linked;
 	}
-	entry->linked = linked;
 #ifdef WANT_ALL_SCHED_EVENTS
 	if (linked)
 		TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
 	else
 		TRACE("NULL linked to %d.\n", entry->cpu);
 #endif
 	update_cpu_position(entry);
 }
 
 /* unlink - Make sure a task is not linked any longer to a cpu entry
  * where it was linked before.
+ * Can handle ghost jobs.
  * Called by schedule, task_block, task_exit, and job_completion
  * Caller assumed to hold global lock
  */
 static noinline void unlink(struct task_struct* t)
 {
+	int cpu;
 	cpu_entry_t *entry;
 
 	if (unlikely(!t)) {
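
The ghost_tasks[] array added to cpu_entry_t gives each CPU at most one ghost job per criticality level. In the hunk above, linking a ghost job fills the slot for its level (unlinking any ghost already there), while linking a regular job walks the slots from its own level through CRIT_LEVEL_D and unlinks those ghosts, since the newly linked job now occupies the time they were reserving. The toy model below (plain C with made-up names, not the scheduler code) sketches that slot bookkeeping.

    /* Toy model of per-CPU ghost slots keyed by criticality level.
     * Names (crit_level_t, cpu_slots, ...) are illustrative only.
     */
    #include <stdio.h>

    typedef enum { CRIT_A, CRIT_B, CRIT_C, CRIT_D, NUM_CRIT } crit_level_t;

    struct job {
            const char  *name;
            crit_level_t crit;
            int          is_ghost;
    };

    struct cpu_slots {
            struct job *linked;           /* the one non-ghost job linked here */
            struct job *ghost[NUM_CRIT];  /* at most one ghost per level */
    };

    static void evict_ghost(struct cpu_slots *cpu, crit_level_t lvl)
    {
            if (cpu->ghost[lvl]) {
                    printf("evicting ghost %s at level %d\n", cpu->ghost[lvl]->name, lvl);
                    cpu->ghost[lvl] = NULL;   /* the real code calls unlink() here */
            }
    }

    static void link_job(struct cpu_slots *cpu, struct job *j)
    {
            if (j->is_ghost) {
                    /* A ghost only displaces a ghost of its own level. */
                    evict_ghost(cpu, j->crit);
                    cpu->ghost[j->crit] = j;
            } else {
                    /* A regular job displaces ghosts at its level and every
                     * less-critical level (higher index).
                     */
                    for (int i = j->crit; i < NUM_CRIT; i++)
                            evict_ghost(cpu, (crit_level_t)i);
                    cpu->linked = j;
            }
    }

    int main(void)
    {
            struct cpu_slots cpu = { 0 };
            struct job g = { "ghostC", CRIT_C, 1 };
            struct job r = { "realB",  CRIT_B, 0 };

            link_job(&cpu, &g);   /* occupies the level-C ghost slot */
            link_job(&cpu, &r);   /* evicts ghostC, since C is below B */
            return 0;
    }

In the real code the eviction calls unlink(), which in turn runs the ghost-budget accounting shown earlier.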
@@ -342,11 +414,41 @@ static noinline void unlink(struct task_struct* t)
 		return;
 	}
 
-	if (t->rt_param.linked_on != NO_CPU) {
+	cpu = t->rt_param.linked_on;
+	if (cpu != NO_CPU) {
 		/* unlink */
-		entry = &per_cpu(mc_cpu_entries, t->rt_param.linked_on);
+		entry = remote_cpu_entry(cpu);
 		t->rt_param.linked_on = NO_CPU;
-		link_task_to_cpu(NULL, entry);
+		if (is_ghost(t)) {
+			if (t->rt_param.job_params.ghost_budget > 0){
+				/* Job isn't finished, so do accounting. */
+				update_ghost_time(t);
+				/* Need to check again since accounting might
+				 * change value.
+				 */
+				if (t->rt_param.job_params.ghost_budget > 0) {
+					/* Still have budget, so just remove
+					 * from CPU
+					 */
+					entry->ghost_tasks[
+						t->rt_param.task_params.crit]
+						= NULL;
+					/*TODO: maybe make more efficient by
+					 * only updating on C/D completion?
+					 */
+					update_cpu_position(entry);
+				}
+			}
+			else{
+				/* Job finished, so just remove */
+				entry->ghost_tasks[
+					t->rt_param.task_params.crit] = NULL;
+				update_cpu_position(entry);
+			}
+		}
+		else {
+			link_task_to_cpu(NULL, entry);
+		}
 	} else if (is_queued(t)) {
 		/* This is an interesting situation: t is scheduled,
 		 * but was just recently unlinked. It cannot be
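
For a ghost job, the new unlink() path above distinguishes three outcomes: the accounting call may exhaust the budget and complete the job on the spot, the job may keep leftover budget and simply vacate its per-level slot, or a budget that was already zero means the job has finished and is removed. A condensed sketch of that decision (illustrative names only, not the kernel code):

    /* Sketch of the unlink() decision for ghost jobs; names are illustrative. */
    #include <stdio.h>
    #include <stdint.h>

    enum ghost_unlink_result {
            GHOST_COMPLETED,     /* accounting drained the budget: completion ran */
            GHOST_STILL_PENDING, /* budget left: just clear the CPU's ghost slot */
            GHOST_FINISHED       /* budget was already zero: remove as finished */
    };

    /* budget_after_accounting stands in for the effect of update_ghost_time(). */
    static enum ghost_unlink_result
    unlink_ghost(uint64_t budget_before, uint64_t budget_after_accounting)
    {
            if (budget_before > 0) {
                    if (budget_after_accounting == 0)
                            return GHOST_COMPLETED;
                    return GHOST_STILL_PENDING;
            }
            return GHOST_FINISHED;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   unlink_ghost(500, 200),  /* keeps budget, vacates slot */
                   unlink_ghost(500, 0),    /* drained during accounting */
                   unlink_ghost(0, 0));     /* already done */
            return 0;
    }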
@@ -473,9 +575,12 @@ static void mc_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 /* caller holds global_lock
  * Called only by mc_schedule() which holds global lock
  * Prepares task for next period, unlinks it, and calls mc_job_arrival
+ * Converts jobs to ghost jobs as necessary, or finishes end of ghost jobs.
  */
 static noinline void job_completion(struct task_struct *t, int forced)
 {
+	cpu_entry_t *cpu;
+	int already_unlinked = 0;
 	BUG_ON(!t);
 
 	sched_trace_task_completion(t, forced);
@@ -484,18 +589,37 @@ static noinline void job_completion(struct task_struct *t, int forced)
 
 	/* set flags */
 	set_rt_flags(t, RT_F_SLEEP);
+	/*If it's not a ghost job, do ghost job conversion and return.*/
+	if (!is_ghost(t)) {
+		cpu = remote_cpu_entry(t->rt_param.scheduled_on);
+		/*Unlink first while it's not a ghost job.*/
+		unlink(t);
+		already_unlinked = 1;
+		t->rt_param.job_params.ghost_budget = budget_remaining(t);
+		t->rt_param.job_params.is_ghost = 1;
+		/* If we did just convert the job to ghost, we can safely
+		 * reschedule it and then let schedule() determine a new
+		 * job to run in the slack.
+		 */
+		if (t->rt_param.job_params.ghost_budget > 0){
+			link_task_to_cpu(t, cpu);
+			preempt(cpu);
+			return;
+		}
+	}
 	/* prepare for next period */
 	if (is_ghost(t)) {
 		t->rt_param.job_params.ghost_budget = 0;
 		prepare_for_next_period(t);
 	}
 	else {
-		t->rt_param.job_params.ghost_budget = budget_remaining(t)
+		t->rt_param.job_params.ghost_budget = budget_remaining(t);
 	}
 	if (is_released(t, litmus_clock()))
 		sched_trace_task_release(t);
-	/* unlink */
-	unlink(t);
+	/* unlink if we didn't above */
+	if (!already_unlinked)
+		unlink(t);
 	/* requeue
 	 * But don't requeue a blocking task. */
 	if (is_running(t))
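
The block added at the top of job_completion() is the core of the slack idea in this patch: a completing non-ghost job is first unlinked, then converted into a ghost that inherits budget_remaining() as its ghost budget, and if that budget is nonzero the ghost is linked back to its CPU and the CPU is preempted so the scheduler can pick other work to run in the freed slack. The self-contained sketch below (hypothetical helpers, not the LITMUS^RT API) models just that conversion decision.

    /* Model of converting a completed job into a ghost job; illustrative only. */
    #include <stdio.h>
    #include <stdint.h>

    struct mc_job {
            uint64_t exec_cost;     /* provisioned worst-case budget */
            uint64_t exec_time;     /* time actually consumed this period */
            uint64_t ghost_budget;
            int      is_ghost;
    };

    static uint64_t budget_remaining(const struct mc_job *j)
    {
            return j->exec_cost > j->exec_time ? j->exec_cost - j->exec_time : 0;
    }

    /* Returns 1 if the job should be re-linked as a ghost, 0 if it is simply done. */
    static int complete_job(struct mc_job *j)
    {
            if (!j->is_ghost) {
                    j->ghost_budget = budget_remaining(j);
                    j->is_ghost = 1;
                    if (j->ghost_budget > 0)
                            return 1;   /* re-link as ghost and preempt the CPU */
            }
            /* Ghost budget is spent (or the job used its full budget):
             * clear the ghost state and set up the next period instead.
             */
            j->ghost_budget = 0;
            j->is_ghost = 0;
            return 0;
    }

    int main(void)
    {
            struct mc_job early = { .exec_cost = 100, .exec_time = 60 };
            struct mc_job full  = { .exec_cost = 100, .exec_time = 100 };
            int relink_early = complete_job(&early);
            int relink_full  = complete_job(&full);

            printf("early job relinked as ghost: %d (budget %llu)\n",
                   relink_early, (unsigned long long)early.ghost_budget);
            printf("full job relinked as ghost:  %d\n", relink_full);
            return 0;
    }

A job that used its full provisioned budget, and a ghost whose budget has drained, both fall through to the normal prepare-for-next-period path instead.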
@@ -513,6 +637,9 @@ static noinline void job_completion(struct task_struct *t, int forced)
  */
 static void mc_tick(struct task_struct* t)
 {
+	if (is_ghost(t)) {
+		update_ghost_time(t);
+	}
 	if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
 		if (!is_np(t)) {
 			/* np tasks will be preempted when they become
@@ -718,6 +845,7 @@ static void mc_task_new(struct task_struct * t, int on_rq, int running)
 	/* setup job params */
 	release_at(t, litmus_clock());
 	t->rt_param.job_params.ghost_budget = 0;
+	t->rt_param.job_params.is_ghost = 0;
 
 	if (running) {
 		entry = &per_cpu(mc_cpu_entries, task_cpu(t));