author     Bjoern B. Brandenburg <bbb@cs.unc.edu>   2008-09-10 15:07:50 -0400
committer  Bjoern B. Brandenburg <bbb@cs.unc.edu>   2008-09-11 14:33:54 -0400
commit     aad151f9aba8c351f999002ca27fc0c8fb6bd3a3
tree       d4942943e55ab43c3812f4b466df3e680494a0b3
parent     ad8edd22d871d2368389398bffcc417d569a693b
GSN-EDF: make the CPU queue a proper heap

This should be faster at high CPU counts.

The list-based implementation took O(1) for the removal plus O(N) for
the in-order reinsertion on every position update. The heap-based
implementation performs the same update as an O(log N) deletion
followed by an O(log N) insertion.
 litmus/sched_gsn_edf.c | 62
 1 file changed, 34 insertions(+), 28 deletions(-)
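For intuition, below is a minimal standalone sketch of the
delete-and-reinsert update the patch adopts. This is toy code, not
LITMUS^RT code: every toy_* name is made up for illustration, and the
real patch uses the binomial heap from <litmus/heap.h> rather than the
array-backed binary heap here. The heap is keyed on the deadline of
each CPU's linked job, so a position update costs one O(log N)
deletion plus one O(log N) insertion instead of the old O(N) ordered
list scan.

    #include <stdio.h>

    #define MAX_CPUS 64

    struct toy_entry {
            int cpu;
            long long deadline; /* deadline of the job linked to this CPU */
            int pos;            /* index in the heap array; -1 if not queued */
    };

    static struct toy_entry *heap[MAX_CPUS];
    static int heap_size;

    /* Later deadline == lower priority; the lowest-priority CPU sits on
     * top, mirroring the inverted comparison in cpu_lower_prio(). */
    static int lower_prio(const struct toy_entry *a, const struct toy_entry *b)
    {
            return a->deadline > b->deadline;
    }

    static void put(int i, struct toy_entry *e)
    {
            heap[i] = e;
            e->pos = i;
    }

    static void sift_up(int i)
    {
            while (i > 0 && lower_prio(heap[i], heap[(i - 1) / 2])) {
                    struct toy_entry *tmp = heap[i];
                    put(i, heap[(i - 1) / 2]);
                    put((i - 1) / 2, tmp);
                    i = (i - 1) / 2;
            }
    }

    static void sift_down(int i)
    {
            for (;;) {
                    int best = i, l = 2 * i + 1, r = 2 * i + 2;
                    if (l < heap_size && lower_prio(heap[l], heap[best]))
                            best = l;
                    if (r < heap_size && lower_prio(heap[r], heap[best]))
                            best = r;
                    if (best == i)
                            return;
                    struct toy_entry *tmp = heap[i];
                    put(i, heap[best]);
                    put(best, tmp);
                    i = best;
            }
    }

    /* O(log N) deletion: move the last slot into the hole, restore order */
    static void toy_delete(struct toy_entry *e)
    {
            int i = e->pos;
            struct toy_entry *last = heap[--heap_size];
            e->pos = -1;
            if (last != e) {
                    put(i, last);
                    sift_up(i);
                    sift_down(i);
            }
    }

    /* O(log N) insertion: append, then bubble up */
    static void toy_insert(struct toy_entry *e)
    {
            put(heap_size++, e);
            sift_up(e->pos);
    }

    /* heap analogue of update_cpu_position(): two O(log N) steps replace
     * the old O(1) list_del() + O(N) in-order reinsertion */
    static void toy_update_position(struct toy_entry *e)
    {
            if (e->pos >= 0)
                    toy_delete(e);
            toy_insert(e);
    }

    int main(void)
    {
            struct toy_entry cpus[4] = {
                    {0, 10, -1}, {1, 40, -1}, {2, 20, -1}, {3, 30, -1}
            };
            int i;
            for (i = 0; i < 4; i++)
                    toy_insert(&cpus[i]);
            printf("preemption candidate: CPU %d\n", heap[0]->cpu); /* CPU 1 */

            cpus[2].deadline = 99; /* CPU 2 now holds the latest deadline */
            toy_update_position(&cpus[2]);
            printf("preemption candidate: CPU %d\n", heap[0]->cpu); /* CPU 2 */
            return 0;
    }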
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index caaf58db67..171d66c18f 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -11,7 +11,6 @@
 #include <linux/spinlock.h>
 #include <linux/percpu.h>
 #include <linux/sched.h>
-#include <linux/list.h>
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
@@ -19,6 +18,8 @@
 #include <litmus/edf_common.h>
 #include <litmus/sched_trace.h>
 
+#include <litmus/heap.h>
+
 #include <linux/module.h>
 
 /* Overview of GSN-EDF operations.
@@ -94,8 +95,8 @@ typedef struct {
         int cpu;
         struct task_struct* linked;    /* only RT tasks */
         struct task_struct* scheduled; /* only RT tasks */
-        struct list_head list;
         atomic_t will_schedule;        /* prevent unneeded IPIs */
+        struct heap_node* hn;
 } cpu_entry_t;
 DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries);
 
@@ -112,39 +113,43 @@ cpu_entry_t* gsnedf_cpus[NR_CPUS];
 #define NO_CPU 0xffffffff
 
 /* the cpus queue themselves according to priority in here */
-static LIST_HEAD(gsnedf_cpu_queue);
+static struct heap_node gsnedf_heap_node[NR_CPUS];
+static struct heap      gsnedf_cpu_heap;
 
 static rt_domain_t gsnedf;
 #define gsnedf_lock (gsnedf.ready_lock)
 
+
+static int cpu_lower_prio(struct heap_node *_a, struct heap_node *_b)
+{
+        cpu_entry_t *a, *b;
+        a = _a->value;
+        b = _b->value;
+        /* Note that a and b are inverted: we want the lowest-priority CPU at
+         * the top of the heap.
+         */
+        return edf_higher_prio(b->linked, a->linked);
+}
+
 /* update_cpu_position - Move the cpu entry to the correct place to maintain
  *                       order in the cpu queue. Caller must hold gsnedf lock.
- *
- * This really should be a heap.
  */
 static void update_cpu_position(cpu_entry_t *entry)
 {
-        cpu_entry_t *other;
-        struct list_head *pos;
+        if (likely(heap_node_in_heap(entry->hn)))
+                heap_delete(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn);
+        heap_insert(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn);
+}
 
-        if (likely(in_list(&entry->list)))
-                list_del(&entry->list);
-        /* if we do not execute real-time jobs we just move
-         * to the end of the queue
-         */
-        if (entry->linked) {
-                list_for_each(pos, &gsnedf_cpu_queue) {
-                        other = list_entry(pos, cpu_entry_t, list);
-                        if (edf_higher_prio(entry->linked, other->linked)) {
-                                __list_add(&entry->list, pos->prev, pos);
-                                return;
-                        }
-                }
-        }
-        /* if we get this far we have the lowest priority job */
-        list_add_tail(&entry->list, &gsnedf_cpu_queue);
+/* caller must hold gsnedf lock */
+static cpu_entry_t* lowest_prio_cpu(void)
+{
+        struct heap_node* hn;
+        hn = heap_peek(cpu_lower_prio, &gsnedf_cpu_heap);
+        return hn->value;
 }
 
+
 /* link_task_to_cpu - Update the link of a CPU.
  *                    Handles the case where the to-be-linked task is already
  *                    scheduled on a different CPU.
@@ -292,9 +297,9 @@ static void check_for_preemptions(void)
         struct task_struct *task;
         cpu_entry_t* last;
 
-        for(last = list_entry(gsnedf_cpu_queue.prev, cpu_entry_t, list);
+        for(last = lowest_prio_cpu();
             edf_preemption_needed(&gsnedf, last->linked);
-            last = list_entry(gsnedf_cpu_queue.prev, cpu_entry_t, list)) {
+            last = lowest_prio_cpu()) {
                 /* preemption necessary */
                 task = __take_ready(&gsnedf);
                 TRACE("check_for_preemptions: attempting to link task %d to %d\n",
@@ -309,7 +314,6 @@ static void check_for_preemptions(void)
 /* gsnedf_job_arrival: task is either resumed or released */
 static noinline void gsnedf_job_arrival(struct task_struct* task)
 {
-        BUG_ON(list_empty(&gsnedf_cpu_queue));
         BUG_ON(!task);
 
         requeue(task);
@@ -716,6 +720,7 @@ static int __init init_gsn_edf(void)
         int cpu;
         cpu_entry_t *entry;
 
+        heap_init(&gsnedf_cpu_heap);
         /* initialize CPU state */
         for (cpu = 0; cpu < NR_CPUS; cpu++) {
                 entry = &per_cpu(gsnedf_cpu_entries, cpu);
@@ -724,9 +729,10 @@ static int __init init_gsn_edf(void)
                 entry->linked    = NULL;
                 entry->scheduled = NULL;
                 entry->cpu       = cpu;
-                INIT_LIST_HEAD(&entry->list);
+                entry->hn        = &gsnedf_heap_node[cpu];
+                heap_node_init(&entry->hn, entry);
+                /*heap_insert(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn);*/
         }
-
         edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs);
         return register_sched_plugin(&gsn_edf_plugin);
 }
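
A closing note on cpu_lower_prio(): as the in-patch comment says, the
heap keeps the node its comparator ranks first at the top, so handing
edf_higher_prio() its arguments in swapped order floats the CPU whose
linked task has the lowest priority to the top, which is exactly the
preemption candidate lowest_prio_cpu() must return. A tiny sketch of
the same inversion with plain integer priorities (every name below is
hypothetical, for illustration only):

    #include <stdio.h>

    /* stand-in for edf_higher_prio(): does a beat b? */
    static int higher_prio(int a, int b)
    {
            return a > b;
    }

    /* comparator in the style of cpu_lower_prio(): arguments swapped,
     * so the entry that loses every comparison is ranked first */
    static int lower_prio(int a, int b)
    {
            return higher_prio(b, a);
    }

    int main(void)
    {
            int prio[] = { 7, 3, 9, 1 };
            int worst = prio[0];
            int i;
            /* a linear scan for clarity; the heap performs the same
             * comparisons, just O(log N) of them per update */
            for (i = 1; i < 4; i++)
                    if (lower_prio(prio[i], worst))
                            worst = prio[i];
            printf("lowest priority: %d\n", worst); /* prints 1 */
            return 0;
    }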