author     Glenn Elliott <gelliott@cs.unc.edu>   2012-03-21 16:26:27 -0400
committer  Glenn Elliott <gelliott@cs.unc.edu>   2012-03-21 16:44:08 -0400
commit     bdce67bc2babc2e5b3b2440964e9cf819ac814dc (patch)
tree       47e4f7c90f1310fc398c5cdbf1e48339d3209764
parent     5b73afc4eb1b0303cb92eb29a2ecc59c1db69537 (diff)
GSN-EDF: Use binary heap instead of binomial heap.
Use binary heap to track CPU priorities.
-rw-r--r--  include/litmus/binheap.h |  4
-rw-r--r--  litmus/sched_gsn_edf.c   | 47
2 files changed, 27 insertions(+), 24 deletions(-)
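The substance of the change, visible in the hunks below: the old bheap (binomial heap) API takes the comparator on every operation and reaches the payload through a separately allocated struct bheap_node whose ->value points back at the element, while the new binheap (binary heap) API registers the comparator once in the handle and embeds struct binheap_node inside the element itself, recovering the container with binheap_entry(). A minimal userspace sketch of that container_of()-style recovery, with all names invented for illustration (only offsetof() is standard C):

	#include <stddef.h>
	#include <stdio.h>

	/* Stand-in for struct binheap_node; real heap links omitted. */
	struct node_sketch { struct node_sketch *parent; };

	/* container_of()-style recovery, as binheap_entry() is assumed
	 * to perform on the embedded node. */
	#define entry_sketch(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct cpu_entry_sketch {
		int cpu;
		struct node_sketch hn;	/* node embedded in the element */
	};

	int main(void)
	{
		struct cpu_entry_sketch e = { .cpu = 3 };
		struct node_sketch *n = &e.hn;	/* what the heap hands back */

		/* Map the heap node back to its containing cpu entry. */
		struct cpu_entry_sketch *c =
			entry_sketch(n, struct cpu_entry_sketch, hn);
		printf("cpu = %d\n", c->cpu);	/* prints "cpu = 3" */
		return 0;
	}

Embedding the node also removes the per-CPU gsnedf_heap_node[] side array and its pointer bookkeeping, which is exactly what the sched_gsn_edf.c hunks delete.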
diff --git a/include/litmus/binheap.h b/include/litmus/binheap.h
index b8dd8e03da60..9fe5dc13d032 100644
--- a/include/litmus/binheap.h
+++ b/include/litmus/binheap.h
@@ -36,7 +36,7 @@ struct binheap_node {
  * Signature of compator function.  Assumed 'less-than' (min-heap).
  * Pass in 'greater-than' for max-heap.
  *
- * TODO: Consider macor-based implementation that allows comparator to be
+ * TODO: Consider macro-based implementation that allows comparator to be
  * inlined (similar to Linux red/black tree) for greater efficiency.
  */
 typedef int (*binheap_order_t)(struct binheap_node *a,
@@ -364,7 +364,7 @@ static inline void __binheap_add(struct binheap_node *new_node,
 {
 	/* NULL data pointers are used internally */
 	if(!data) {
-		WARN();
+		WARN_ON(!data);
 		return;
 	}
 
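A side note on this second hunk: WARN_ON(cond) evaluates its condition, prints a stack trace naming that condition when it is nonzero, and returns it, so the report in the kernel log says what actually failed. A rough userspace approximation of the idiom, a sketch only (warn_on_sketch is hypothetical, not the kernel macro; the statement expression is a GCC/Clang extension, as in kernel code):

	#include <stdio.h>

	/* Report with file/line context when cond holds, and hand the
	 * condition back to the caller, loosely mimicking WARN_ON(). */
	#define warn_on_sketch(cond) \
		({ int __c = !!(cond); \
		   if (__c) \
			fprintf(stderr, "WARNING at %s:%d: %s\n", \
				__FILE__, __LINE__, #cond); \
		   __c; })

	static void add_item(void *data)
	{
		if (warn_on_sketch(!data))
			return;	/* refuse NULL data, as __binheap_add() does */
		/* ... insert data ... */
	}

	int main(void)
	{
		add_item(NULL);	/* triggers the warning path */
		return 0;
	}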
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 6ed504f4750e..549a4dad037d 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -23,6 +23,7 @@
 #include <litmus/preempt.h>
 
 #include <litmus/bheap.h>
+#include <litmus/binheap.h>
 
 #ifdef CONFIG_SCHED_CPU_AFFINITY
 #include <litmus/affinity.h>
@@ -103,15 +104,15 @@ typedef struct {
 	int cpu;
 	struct task_struct* linked;    /* only RT tasks */
 	struct task_struct* scheduled; /* only RT tasks */
-	struct bheap_node* hn;
+	int in_heap;                   /* == 0/1: not in / in heap */
+	struct binheap_node hn;
 } cpu_entry_t;
 DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries);
 
 cpu_entry_t* gsnedf_cpus[NR_CPUS];
 
 /* the cpus queue themselves according to priority in here */
-static struct bheap_node gsnedf_heap_node[NR_CPUS];
-static struct bheap gsnedf_cpu_heap;
+static struct binheap_handle gsnedf_cpu_heap;
 
 static rt_domain_t gsnedf;
 #define gsnedf_lock (gsnedf.ready_lock)
@@ -122,33 +123,33 @@ static rt_domain_t gsnedf;
 #define WANT_ALL_SCHED_EVENTS
  */
 
-static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
+static int cpu_lower_prio(struct binheap_node *_a, struct binheap_node *_b)
 {
-	cpu_entry_t *a, *b;
-	a = _a->value;
-	b = _b->value;
+	cpu_entry_t *a = binheap_entry(_a, cpu_entry_t, hn);
+	cpu_entry_t *b = binheap_entry(_b, cpu_entry_t, hn);
+
 	/* Note that a and b are inverted: we want the lowest-priority CPU at
 	 * the top of the heap.
 	 */
 	return edf_higher_prio(b->linked, a->linked);
 }
 
+
 /* update_cpu_position - Move the cpu entry to the correct place to maintain
  *                       order in the cpu queue. Caller must hold gsnedf lock.
  */
 static void update_cpu_position(cpu_entry_t *entry)
 {
-	if (likely(bheap_node_in_heap(entry->hn)))
-		bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn);
-	bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn);
+	if (likely(entry->in_heap))
+		binheap_delete(&entry->hn, &gsnedf_cpu_heap, cpu_entry_t, hn);
+	binheap_add(&entry->hn, &gsnedf_cpu_heap, cpu_entry_t, hn);
+	entry->in_heap = 1;
 }
 
 /* caller must hold gsnedf lock */
 static cpu_entry_t* lowest_prio_cpu(void)
 {
-	struct bheap_node* hn;
-	hn = bheap_peek(cpu_lower_prio, &gsnedf_cpu_heap);
-	return hn->value;
+	return binheap_top_entry(&gsnedf_cpu_heap, cpu_entry_t, hn);
 }
 
 
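The comparator inversion called out in the comment above is the heart of lowest_prio_cpu(): the heap is a min-heap under "less-than", so a test that ranks the *lower-priority* CPU first leaves the best preemption target at the root. A self-contained sketch of the trick in isolation (CPU ids and deadlines are illustrative, not the plugin's types; a linear scan stands in for the heap's root):

	#include <stdio.h>

	struct cpu { int id; long deadline; /* earlier = higher prio */ };

	/* Mirrors cpu_lower_prio(): a orders before b iff a is *lower*
	 * priority, i.e. a's linked job has the later deadline. */
	static int cpu_lower_prio(const struct cpu *a, const struct cpu *b)
	{
		return a->deadline > b->deadline;
	}

	int main(void)
	{
		struct cpu cpus[] = { {0, 10}, {1, 50}, {2, 30} };
		struct cpu *top = &cpus[0];

		/* A min-heap keeps this minimum at its root; the scan
		 * shows the same result: the lowest-priority CPU wins. */
		for (int i = 1; i < 3; i++)
			if (cpu_lower_prio(&cpus[i], top))
				top = &cpus[i];

		printf("preempt CPU %d (deadline %ld)\n",
		       top->id, top->deadline);	/* preempt CPU 1 */
		return 0;
	}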
@@ -665,10 +666,10 @@ static void set_priority_inheritance(struct task_struct* t, struct task_struct*
 			 * We can't use heap_decrease() here since
 			 * the cpu_heap is ordered in reverse direction, so
 			 * it is actually an increase. */
-			bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap,
-				     gsnedf_cpus[linked_on]->hn);
-			bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap,
-				     gsnedf_cpus[linked_on]->hn);
+			binheap_delete(&gsnedf_cpus[linked_on]->hn,
+				&gsnedf_cpu_heap, cpu_entry_t, hn);
+			binheap_add(&gsnedf_cpus[linked_on]->hn,
+				&gsnedf_cpu_heap, cpu_entry_t, hn);
 		} else {
 			/* holder may be queued: first stop queue changes */
 			raw_spin_lock(&gsnedf.release_lock);
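The delete-then-re-add pair here follows from the same inversion: priority inheritance raises the CPU's effective priority, which in this reverse-ordered heap makes its key *larger*, and a decrease-key helper only restores order toward the root. A small sketch of why the unsupported direction needs a full reposition (plain int min-heap, illustrative only):

	#include <stdio.h>

	/* Min-heap as an array; parent of i is (i-1)/2. */
	static void sift_up(int *h, int i)
	{
		for (; i > 0 && h[i] < h[(i - 1) / 2]; i = (i - 1) / 2) {
			int t = h[i]; h[i] = h[(i - 1) / 2]; h[(i - 1) / 2] = t;
		}
	}

	static void sift_down(int *h, int n, int i)
	{
		for (;;) {
			int s = i, l = 2 * i + 1, r = 2 * i + 2;
			if (l < n && h[l] < h[s]) s = l;
			if (r < n && h[r] < h[s]) s = r;
			if (s == i) break;
			int t = h[i]; h[i] = h[s]; h[s] = t;
			i = s;
		}
	}

	int main(void)
	{
		int h[3] = { 1, 5, 3 };	/* valid min-heap */

		/* Raising a key (1 -> 9) pushes the node *away* from the
		 * root: only sift_down() repairs it; a decrease-key helper
		 * that merely sifts up leaves 9 above 3 and 5. */
		h[0] = 9;
		sift_up(h, 0);		/* no-op: heap still invalid */
		printf("after sift_up:   %d %d %d\n", h[0], h[1], h[2]);
		sift_down(h, 3, 0);
		printf("after sift_down: %d %d %d\n", h[0], h[1], h[2]);
		return 0;
	}

Deleting and re-inserting sidesteps the question of direction entirely, at the cost of an extra O(log n) pass.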
@@ -965,14 +966,15 @@ static long gsnedf_activate_plugin(void)
 	int cpu;
 	cpu_entry_t *entry;
 
-	bheap_init(&gsnedf_cpu_heap);
+	INIT_BINHEAP_HANDLE(&gsnedf_cpu_heap, cpu_lower_prio);
 #ifdef CONFIG_RELEASE_MASTER
 	gsnedf.release_master = atomic_read(&release_master_cpu);
 #endif
 
 	for_each_online_cpu(cpu) {
 		entry = &per_cpu(gsnedf_cpu_entries, cpu);
-		bheap_node_init(&entry->hn, entry);
+		INIT_BINHEAP_NODE(&entry->hn);
+		entry->in_heap = 0;
 		entry->linked = NULL;
 		entry->scheduled = NULL;
 #ifdef CONFIG_RELEASE_MASTER
@@ -1013,14 +1015,15 @@ static int __init init_gsn_edf(void)
 	int cpu;
 	cpu_entry_t *entry;
 
-	bheap_init(&gsnedf_cpu_heap);
+	INIT_BINHEAP_HANDLE(&gsnedf_cpu_heap, cpu_lower_prio);
 	/* initialize CPU state */
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		entry = &per_cpu(gsnedf_cpu_entries, cpu);
 		gsnedf_cpus[cpu] = entry;
 		entry->cpu = cpu;
-		entry->hn = &gsnedf_heap_node[cpu];
-		bheap_node_init(&entry->hn, entry);
+
+		INIT_BINHEAP_NODE(&entry->hn);
+		entry->in_heap = 0;
 	}
 	edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs);
 	return register_sched_plugin(&gsn_edf_plugin);
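Finally, the new in_heap flag: both init paths above clear it, because binheap nodes start out detached and update_cpu_position() always ends with an insert, so the flag exists to skip the delete on the very first reposition of each CPU. A stubbed sketch of that idiom (heap_delete()/heap_add() are placeholders standing in for binheap_delete()/binheap_add(), not the real API):

	#include <stdbool.h>
	#include <stdio.h>

	/* Placeholder heap ops; the patch uses the binheap calls here. */
	static void heap_delete(int cpu) { printf("delete cpu %d\n", cpu); }
	static void heap_add(int cpu)    { printf("add    cpu %d\n", cpu); }

	struct entry { int cpu; bool in_heap; };

	/* Reposition after the entry's priority key changed: there is no
	 * in-place reposition, and the first call must not delete a
	 * never-added node, hence the guard, mirroring
	 * update_cpu_position(). */
	static void update_position(struct entry *e)
	{
		if (e->in_heap)
			heap_delete(e->cpu);
		heap_add(e->cpu);
		e->in_heap = true;
	}

	int main(void)
	{
		struct entry e = { .cpu = 0, .in_heap = false };
		update_position(&e);	/* first call: add only */
		update_position(&e);	/* later calls: delete, then re-add */
		return 0;
	}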