author     Jonathan Herman <hermanjl@cs.unc.edu>   2011-10-17 18:13:00 -0400
committer  Jonathan Herman <hermanjl@cs.unc.edu>   2011-12-27 22:41:12 -0500
commit     f79d7cdd5ef87f4acb6517e784c6522d7cedd515
tree       6ab1a593e88a59e2b5341534a0837ee6911b62b0
parent     a4c7d351552ddfc82b2e715fb70d541e34dc1765
Added TODOs
-rw-r--r--  litmus/sched_mc.c  |  57
 1 file changed, 50 insertions(+), 7 deletions(-)
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 84fdc304b434..b43064ab63b6 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -4,6 +4,49 @@
  * Implementation of the Mixed Criticality scheduling algorithm.
  *
  * (Per Mollison, Erickson, Anderson, Baruah, Scoredos 2010)
+ *
+ * Absolute first: the relative time spent in the different parts of release
+ * and scheduling overhead needs to be measured and graphed.
+ *
+ * Domain locks should be more fine-grained. There is no reason to hold the
+ * ready-queue lock when adding a task to the release-queue (sketched below).
+ *
+ * The levels should be converted to linked lists so that they are more
+ * adaptable and need not be identical on all processors (sketched below).
+ *
+ * The interaction between remove_from_all and other concurrent operations
+ * should be re-examined. If a job_completion and a preemption happen
+ * simultaneously, a task could be requeued, removed, then requeued again.
+ *
+ * Level-C tasks should be able to swap CPUs a la GSN-EDF. They should also
+ * try to swap with the last CPU they were on. This could be complicated for
+ * ghost tasks.
+ *
+ * Locking for timer-merging could be far more fine-grained. A second hash
+ * could select a lock based on the queue slot (sketched below). This
+ * approach might also help with add_release in rt_domains.
+ *
+ * It should be possible to reserve a CPU for ftdumping.
+ *
+ * The real_deadline business seems sloppy.
+ *
+ * The amount of data in the header file should be cut down. The use of the
+ * header file in general needs to be re-examined.
+ *
+ * The plugin needs to be modified so that it doesn't freeze when it is
+ * deactivated in a VM.
+ *
+ * The locking in check_for_preempt is not fine-grained enough.
+ *
+ * The structures could be smaller. The debugging info might be excessive
+ * as things currently stand.
+ *
+ * The macro can_requeue has been expanded too much. Anything beyond
+ * scheduled_on is a hack!
+ *
+ * Domain names (rt_domain) are still clumsy.
+ *
+ * Should BE be moved into the kernel? This will require benchmarking.
  */

 #include <linux/spinlock.h>
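The fine-grained domain-locking TODO is the most mechanical item on the list. A minimal sketch of the idea, assuming a hypothetical domain layout with separate ready and release locks; the struct and function names here are illustrative, not the plugin's actual ones:

#include <linux/spinlock.h>
#include <linux/sched.h>

/* Hypothetical: one lock per queue instead of one lock per domain */
struct split_domain {
        raw_spinlock_t ready_lock;      /* guards the ready queue only   */
        raw_spinlock_t release_lock;    /* guards the release queue only */
        /* the queues themselves elided */
};

/* Adding to the release queue no longer serializes against schedulers
 * that are scanning the ready queue under ready_lock.
 */
static void split_add_release(struct split_domain *dom, struct task_struct *t)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&dom->release_lock, flags);
        /* enqueue t, merge/arm the release timer */
        raw_spin_unlock_irqrestore(&dom->release_lock, flags);
}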
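The linked-list item could lean on the kernel's own list helpers. A sketch under the assumption that each CPU carries only the criticality levels it actually serves; struct and field names are invented for illustration:

#include <linux/list.h>

struct crit_level {
        struct list_head list;   /* links into cpu_levels.levels */
        int level;               /* e.g. CRIT_LEVEL_A ... CRIT_LEVEL_C */
        /* per-level state (domain pointer, linked task, ...) */
};

struct cpu_levels {
        struct list_head levels; /* ordered, highest criticality first */
};

/* CPUs no longer need identical fixed arrays; just walk what is there */
static struct crit_level *find_level(struct cpu_levels *cpu, int level)
{
        struct crit_level *ce;

        list_for_each_entry(ce, &cpu->levels, list)
                if (ce->level == level)
                        return ce;
        return NULL;
}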
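The timer-merging item describes classic lock striping: hash the queue slot to one lock out of a small array, so merges into different slots never contend. A sketch, where the stripe count and names are assumptions:

#include <linux/spinlock.h>

#define SLOT_LOCK_COUNT 16      /* power of two; the size is a guess */

static raw_spinlock_t slot_locks[SLOT_LOCK_COUNT];

/* Second hash: release-queue slot -> lock stripe. Merging a timer into
 * slot i takes only that stripe's lock; merges into other slots proceed
 * in parallel.
 */
static inline raw_spinlock_t *lock_for_slot(unsigned int slot)
{
        return &slot_locks[slot & (SLOT_LOCK_COUNT - 1)];
}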
@@ -112,7 +155,7 @@ static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b)
112} 155}
113 156
114/* 157/*
115 * Return true if the domain has a higher priority ready task. The curr 158 * Return true if the domain has a higher priority ready task. The @curr
116 * task must belong to the domain. 159 * task must belong to the domain.
117 */ 160 */
118static int mc_preempt_needed(struct domain *dom, struct task_struct* curr) 161static int mc_preempt_needed(struct domain *dom, struct task_struct* curr)
@@ -385,6 +428,7 @@ static void link_task_to_cpu(struct cpu_entry *entry, struct task_struct *task)
385 set_rt_flags(task, RT_F_RUNNING); 428 set_rt_flags(task, RT_F_RUNNING);
386 } 429 }
387 entry->linked = task; 430 entry->linked = task;
431
388 /* Higher criticality crit entries are now usable */ 432 /* Higher criticality crit entries are now usable */
389 for (; i < entry_level(entry) + 1; i++) { 433 for (; i < entry_level(entry) + 1; i++) {
390 ce = &entry->crit_entries[i]; 434 ce = &entry->crit_entries[i];
@@ -423,8 +467,7 @@ static void preempt(struct domain *dom, struct crit_entry *ce)
423 link_task_to_cpu(entry, task); 467 link_task_to_cpu(entry, task);
424 preempt_if_preemptable(entry->scheduled, entry->cpu); 468 preempt_if_preemptable(entry->scheduled, entry->cpu);
425 } else if (old && old == entry->linked) { 469 } else if (old && old == entry->linked) {
426 /* 470 /* Preempted a running task with a ghost job. Null needs to be
427 * Preempted a running task with a ghost job. Null needs to be
428 * running. 471 * running.
429 */ 472 */
430 link_task_to_cpu(entry, NULL); 473 link_task_to_cpu(entry, NULL);
@@ -449,7 +492,7 @@ static void update_crit_levels(struct cpu_entry *entry)
449 ce = &entry->crit_entries[i]; 492 ce = &entry->crit_entries[i];
450 493
451 global_preempted = ce->linked && 494 global_preempted = ce->linked &&
452 /* This task is running */ 495 /* This task is running on a cpu */
453 ce->linked->rt_param.scheduled_on == entry->cpu && 496 ce->linked->rt_param.scheduled_on == entry->cpu &&
454 /* But it was preempted */ 497 /* But it was preempted */
455 ce->linked != entry->linked && 498 ce->linked != entry->linked &&
@@ -1006,13 +1049,13 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
1006 1049
1007 /* Peek at task here to avoid lock use */ 1050 /* Peek at task here to avoid lock use */
1008 while (!cache_next(dom)); 1051 while (!cache_next(dom));
1052
1053 /* Do domain stuff before grabbing CPU locks */
1009 dtask = dom->peek_ready(dom); 1054 dtask = dom->peek_ready(dom);
1055 fix_crit_position(ce);
1010 1056
1011 raw_spin_lock(&entry->lock); 1057 raw_spin_lock(&entry->lock);
1012 1058
1013 /* Now that we hold the domain lock...*/
1014 fix_crit_position(ce);
1015
1016 if (!entry->linked && !ce->linked && dtask && can_use(ce)) { 1059 if (!entry->linked && !ce->linked && dtask && can_use(ce)) {
1017 dom->take_ready(dom); 1060 dom->take_ready(dom);
1018 link_task_to_crit(ce, dtask); 1061 link_task_to_crit(ce, dtask);
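This last hunk is a lock-scope fix: work that only touches the domain (peeking the ready queue, fixing the crit entry's position) now happens before entry->lock is taken, so the per-CPU critical section covers only the state it actually protects. In outline, reconstructed from the hunk; the unlock site falls outside the context shown and is assumed here:

        /* domain-side work, no CPU lock held */
        dtask = dom->peek_ready(dom);   /* peek, don't take: nothing committed yet */
        fix_crit_position(ce);

        /* per-CPU state only inside the lock */
        raw_spin_lock(&entry->lock);
        if (!entry->linked && !ce->linked && dtask && can_use(ce)) {
                dom->take_ready(dom);   /* commit to the task under the lock */
                link_task_to_crit(ce, dtask);
                /* ... */
        }
        raw_spin_unlock(&entry->lock);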