author    Bjoern B. Brandenburg <bbb@cs.unc.edu>  2008-05-19 12:06:57 -0400
committer Bjoern B. Brandenburg <bbb@cs.unc.edu>  2008-05-19 12:06:57 -0400
commit    0acc5e8a1d7caa74f644ac92cab2d958cf508d6e (patch)
tree      4c67132c5c0a30f597bce7ed6503a33d1dbe3938
parent    37f3d488cc35844eb6d8c4e94e79f1680fcd3af8 (diff)
Use binomial heaps as priority queues.
The list-based priority queues did not perform well on the Niagara T2000. This heap-based implementation should be much faster when queues are long.
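For illustration only (not part of the commit): a minimal sketch of how the comparator-driven heap API introduced below (heap_init(), heap_node_init(), heap_insert(), heap_take()) is meant to be used. The comparator and function names demo_order and demo_insert_take are invented for this example; in the patch itself, edf_ready_order() together with heap2task() plays the comparator role, and nodes come from heap_node_cache.

#include <linux/slab.h>
#include <litmus/heap.h>

/* hypothetical comparator: nonzero if 'a' has higher priority than 'b' */
static int demo_order(struct heap_node* a, struct heap_node* b)
{
	return (long) a->value < (long) b->value;
}

static void demo_insert_take(void)
{
	struct heap queue;
	struct heap_node *n1, *n2, *hn;

	heap_init(&queue);

	/* nodes would normally come from heap_node_cache (see litmus.c below);
	 * error handling omitted in this sketch */
	n1 = kmalloc(sizeof(*n1), GFP_ATOMIC);
	n2 = kmalloc(sizeof(*n2), GFP_ATOMIC);
	heap_node_init(&n1, (void*) 10L);
	heap_node_init(&n2, (void*) 5L);

	heap_insert(demo_order, &queue, n1);
	heap_insert(demo_order, &queue, n2);

	hn = heap_take(demo_order, &queue);	/* yields the node carrying 5 */
	kfree(hn);
	kfree(heap_take(demo_order, &queue));	/* yields the node carrying 10 */
}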
-rw-r--r--  include/linux/sched.h          5
-rw-r--r--  include/litmus/edf_common.h    2
-rw-r--r--  include/litmus/heap.h        301
-rw-r--r--  include/litmus/litmus.h        2
-rw-r--r--  include/litmus/rt_domain.h    53
-rw-r--r--  include/litmus/rt_param.h     17
-rw-r--r--  litmus/edf_common.c           10
-rw-r--r--  litmus/litmus.c               23
-rw-r--r--  litmus/rt_domain.c            32
-rwxr-xr-x  litmus/sched_cedf.c           12
-rw-r--r--  litmus/sched_gsn_edf.c        14
-rw-r--r--  litmus/sched_psn_edf.c        17
12 files changed, 413 insertions(+), 75 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9541cc8fe8..76e28f19e9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1187,11 +1187,6 @@ struct task_struct {
 	/* litmus parameters and state */
 	struct rt_param rt_param;
 
-	/* allow scheduler plugins to queue in release lists, etc.
-	 * Cleanup: Move this into the rt_param struct.
-	 */
-	struct list_head rt_list;
-
 	/* references to PI semaphores, etc. */
 	struct od_table_entry* od_table;
 };
diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h
index 37630e5c26..4dff77ae48 100644
--- a/include/litmus/edf_common.h
+++ b/include/litmus/edf_common.h
@@ -18,8 +18,6 @@ void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
 int edf_higher_prio(struct task_struct* first,
 		    struct task_struct* second);
 
-int edf_ready_order(struct list_head* a, struct list_head* b);
-
 int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t);
 
 int edf_set_hp_task(struct pi_semaphore *sem);
diff --git a/include/litmus/heap.h b/include/litmus/heap.h
new file mode 100644
index 0000000000..b26804f879
--- /dev/null
+++ b/include/litmus/heap.h
@@ -0,0 +1,301 @@
+/* heaps.h -- Binomial Heaps
+ *
+ * (c) 2008 Bjoern Brandenburg
+ */
+
+#ifndef HEAP_H
+#define HEAP_H
+
+#define NOT_IN_HEAP UINT_MAX
+
+struct heap_node {
+	struct heap_node* parent;
+	struct heap_node* next;
+	struct heap_node* child;
+
+	unsigned int degree;
+	void* value;
+	struct heap_node** ref;
+};
+
+struct heap {
+	struct heap_node* head;
+	/* We cache the minimum of the heap.
+	 * This speeds up repeated peek operations.
+	 */
+	struct heap_node* min;
+};
+
+typedef int (*heap_prio_t)(struct heap_node* a, struct heap_node* b);
+
+static inline void heap_init(struct heap* heap)
+{
+	heap->head = NULL;
+	heap->min = NULL;
+}
+
+static inline void heap_node_init(struct heap_node** _h, void* value)
+{
+	struct heap_node* h = *_h;
+	h->parent = NULL;
+	h->next = NULL;
+	h->child = NULL;
+	h->degree = NOT_IN_HEAP;
+	h->value = value;
+	h->ref = _h;
+}
+
+static inline int heap_node_in_heap(struct heap_node* h)
+{
+	return h->degree != NOT_IN_HEAP;
+}
+
+static inline int heap_empty(struct heap* heap)
+{
+	return heap->head == NULL && heap->min == NULL;
+}
+
+/* make child a subtree of root */
+static inline void __heap_link(struct heap_node* root,
+			       struct heap_node* child)
+{
+	child->parent = root;
+	child->next = root->child;
+	root->child = child;
+	root->degree++;
+}
+
+/* merge root lists */
+static inline struct heap_node* __heap_merge(struct heap_node* a,
+					     struct heap_node* b)
+{
+	struct heap_node* head = NULL;
+	struct heap_node** pos = &head;
+
+	while (a && b) {
+		if (a->degree < b->degree) {
+			*pos = a;
+			a = a->next;
+		} else {
+			*pos = b;
+			b = b->next;
+		}
+		pos = &(*pos)->next;
+	}
+	if (a)
+		*pos = a;
+	else
+		*pos = b;
+	return head;
+}
+
+/* reverse a linked list of nodes. also clears parent pointer */
+static inline struct heap_node* __heap_reverse(struct heap_node* h)
+{
+	struct heap_node* tail = NULL;
+	struct heap_node* next;
+
+	if (!h)
+		return h;
+
+	h->parent = NULL;
+	while (h->next) {
+		next = h->next;
+		h->next = tail;
+		tail = h;
+		h = next;
+		h->parent = NULL;
+	}
+	h->next = tail;
+	return h;
+}
+
+static inline void __heap_min(heap_prio_t higher_prio, struct heap* heap,
+			      struct heap_node** prev, struct heap_node** node)
+{
+	struct heap_node *_prev, *cur;
+	*prev = NULL;
+
+	if (!heap->head) {
+		*node = NULL;
+		return;
+	}
+
+	*node = heap->head;
+	_prev = heap->head;
+	cur = heap->head->next;
+	while (cur) {
+		if (higher_prio(cur, *node)) {
+			*node = cur;
+			*prev = _prev;
+		}
+		_prev = cur;
+		cur = cur->next;
+	}
+}
+
+static inline void __heap_union(heap_prio_t higher_prio, struct heap* heap,
+				struct heap_node* h2)
+{
+	struct heap_node* h1;
+	struct heap_node *prev, *x, *next;
+	if (!h2)
+		return;
+	h1 = heap->head;
+	if (!h1) {
+		heap->head = h2;
+		return;
+	}
+	h1 = __heap_merge(h1, h2);
+	prev = NULL;
+	x = h1;
+	next = x->next;
+	while (next) {
+		if (x->degree != next->degree ||
+		    (next->next && next->next->degree == x->degree)) {
+			/* nothing to do, advance */
+			prev = x;
+			x = next;
+		} else if (higher_prio(x, next)) {
+			/* x becomes the root of next */
+			x->next = next->next;
+			__heap_link(x, next);
+		} else {
+			/* next becomes the root of x */
+			if (prev)
+				prev->next = next;
+			else
+				h1 = next;
+			__heap_link(next, x);
+			x = next;
+		}
+		next = x->next;
+	}
+	heap->head = h1;
+}
+
+static inline struct heap_node* __heap_extract_min(heap_prio_t higher_prio,
+						   struct heap* heap)
+{
+	struct heap_node *prev, *node;
+	__heap_min(higher_prio, heap, &prev, &node);
+	if (!node)
+		return NULL;
+	if (prev)
+		prev->next = node->next;
+	else
+		heap->head = node->next;
+	__heap_union(higher_prio, heap, __heap_reverse(node->child));
+	return node;
+}
+
+/* insert (and reinitialize) a node into the heap */
+static inline void heap_insert(heap_prio_t higher_prio, struct heap* heap,
+			       struct heap_node* node)
+{
+	struct heap_node *min;
+	node->child = NULL;
+	node->parent = NULL;
+	node->next = NULL;
+	node->degree = 0;
+	if (heap->min && higher_prio(node, heap->min)) {
+		/* swap min cache */
+		min = heap->min;
+		min->child = NULL;
+		min->parent = NULL;
+		min->next = NULL;
+		min->degree = 0;
+		__heap_union(higher_prio, heap, min);
+		heap->min = node;
+	} else
+		__heap_union(higher_prio, heap, node);
+}
+
+static inline void __uncache_min(heap_prio_t higher_prio, struct heap* heap)
+{
+	struct heap_node* min;
+	if (heap->min) {
+		min = heap->min;
+		heap->min = NULL;
+		heap_insert(higher_prio, heap, min);
+	}
+}
+
+/* merge addition into target */
+static inline void heap_union(heap_prio_t higher_prio,
+			      struct heap* target, struct heap* addition)
+{
+	/* first insert any cached minima, if necessary */
+	__uncache_min(higher_prio, target);
+	__uncache_min(higher_prio, addition);
+	__heap_union(higher_prio, target, addition->head);
+	/* this is a destructive merge */
+	addition->head = NULL;
+}
+
+static inline struct heap_node* heap_peek(heap_prio_t higher_prio,
+					  struct heap* heap)
+{
+	if (!heap->min)
+		heap->min = __heap_extract_min(higher_prio, heap);
+	return heap->min;
+}
+
+static inline struct heap_node* heap_take(heap_prio_t higher_prio,
+					  struct heap* heap)
+{
+	struct heap_node *node;
+	if (!heap->min)
+		heap->min = __heap_extract_min(higher_prio, heap);
+	node = heap->min;
+	heap->min = NULL;
+	if (node)
+		node->degree = NOT_IN_HEAP;
+	return node;
+}
+
+static inline void heap_delete(heap_prio_t higher_prio, struct heap* heap,
+			       struct heap_node* node)
+{
+	struct heap_node *parent, *prev, *pos;
+	struct heap_node** tmp_ref;
+	void* tmp;
+
+	if (heap->min != node) {
+		/* bubble up */
+		parent = node->parent;
+		while (parent) {
+			/* swap parent and node */
+			tmp = parent->value;
+			parent->value = node->value;
+			node->value = tmp;
+			/* swap references */
+			*(parent->ref) = node;
+			*(node->ref) = parent;
+			tmp_ref = parent->ref;
+			parent->ref = node->ref;
+			node->ref = tmp_ref;
+			/* step up */
+			node = parent;
+			parent = node->parent;
+		}
+		/* now delete:
+		 * first find prev */
+		prev = NULL;
+		pos = heap->head;
+		while (pos != node) {
+			prev = pos;
+			pos = pos->next;
+		}
+		/* we have prev, now remove node */
+		if (prev)
+			prev->next = node->next;
+		else
+			heap->head = node->next;
+		__heap_union(higher_prio, heap, __heap_reverse(node->child));
+	} else
+		heap->min = NULL;
+	node->degree = NOT_IN_HEAP;
+}
+
+#endif
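As an aside (not part of the patch): heap_union() above is destructive, and any cached minimum of either heap is folded back in first via __uncache_min(). A minimal sketch, with demo_merge() invented for illustration:

/* after the call, all nodes live in 'target' and 'addition' is empty */
static void demo_merge(heap_prio_t order, struct heap* target,
		       struct heap* addition)
{
	heap_union(order, target, addition);
	BUG_ON(!heap_empty(addition));
}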
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index bd92f44030..ca9d3c8d88 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -181,4 +181,6 @@ static inline lt_t litmus_clock(void)
 
 void srp_ceiling_block(void);
 
+#define heap2task(hn) ((struct task_struct*) hn->value)
+
 #endif
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
index 6489b0c0fd..47cd123b76 100644
--- a/include/litmus/rt_domain.h
+++ b/include/litmus/rt_domain.h
@@ -6,6 +6,7 @@
 #define __UNC_RT_DOMAIN_H__
 
 #include <litmus/norqlock.h>
+#include <litmus/heap.h>
 
 struct _rt_domain;
 
@@ -17,7 +18,7 @@ typedef struct _rt_domain {
 
 	/* runnable rt tasks are in here */
 	spinlock_t ready_lock;
-	struct list_head ready_queue;
+	struct heap ready_queue;
 
 	/* real-time tasks waiting for release are in here */
 	spinlock_t release_lock;
@@ -30,24 +31,52 @@ typedef struct _rt_domain {
 	release_job_t release_job;
 
 	/* how are tasks ordered in the ready queue? */
-	list_cmp_t order;
+	heap_prio_t order;
 } rt_domain_t;
 
-#define next_ready(rt) \
-	(list_entry((rt)->ready_queue.next, struct task_struct, rt_list))
-
-#define ready_jobs_pending(rt) \
-	(!list_empty(&(rt)->ready_queue))
+static inline struct task_struct* __next_ready(rt_domain_t* rt)
+{
+	struct heap_node *hn = heap_peek(rt->order, &rt->ready_queue);
+	if (hn)
+		return heap2task(hn);
+	else
+		return NULL;
+}
 
-void rt_domain_init(rt_domain_t *rt, list_cmp_t order,
+void rt_domain_init(rt_domain_t *rt, heap_prio_t order,
 		    check_resched_needed_t check,
 		    release_job_t relase);
 
 void __add_ready(rt_domain_t* rt, struct task_struct *new);
 void __add_release(rt_domain_t* rt, struct task_struct *task);
 
-struct task_struct* __take_ready(rt_domain_t* rt);
-struct task_struct* __peek_ready(rt_domain_t* rt);
+static inline struct task_struct* __take_ready(rt_domain_t* rt)
+{
+	struct heap_node* hn = heap_take(rt->order, &rt->ready_queue);
+	if (hn)
+		return heap2task(hn);
+	else
+		return NULL;
+}
+
+static inline struct task_struct* __peek_ready(rt_domain_t* rt)
+{
+	struct heap_node* hn = heap_peek(rt->order, &rt->ready_queue);
+	if (hn)
+		return heap2task(hn);
+	else
+		return NULL;
+}
+
+static inline int is_queued(struct task_struct *t)
+{
+	return heap_node_in_heap(tsk_rt(t)->heap_node);
+}
+
+static inline void remove(rt_domain_t* rt, struct task_struct *t)
+{
+	heap_delete(rt->order, &rt->ready_queue, tsk_rt(t)->heap_node);
+}
 
 static inline void add_ready(rt_domain_t* rt, struct task_struct *new)
 {
@@ -81,7 +110,7 @@ static inline void add_release(rt_domain_t* rt, struct task_struct *task)
 
 static inline int __jobs_pending(rt_domain_t* rt)
 {
-	return !list_empty(&rt->ready_queue);
+	return !heap_empty(&rt->ready_queue);
 }
 
 static inline int jobs_pending(rt_domain_t* rt)
@@ -90,7 +119,7 @@ static inline int jobs_pending(rt_domain_t* rt)
 	int ret;
 	/* first we need the write lock for rt_ready_queue */
 	spin_lock_irqsave(&rt->ready_lock, flags);
-	ret = __jobs_pending(rt);
+	ret = !heap_empty(&rt->ready_queue);
 	spin_unlock_irqrestore(&rt->ready_lock, flags);
 	return ret;
 }
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index a5e939daa5..a7dd67a81a 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -39,6 +39,7 @@ struct rt_task {
 #ifdef __KERNEL__
 
 struct _rt_domain;
+struct heap_node;
 
 struct rt_job {
 	/* Time instant the the job was or will be released. */
@@ -139,6 +140,22 @@ struct rt_param {
 
 	/* ready queue for this task */
 	struct _rt_domain* domain;
+
+	/* heap element for this task
+	 *
+	 * Warning: Don't statically allocate this node. The heap
+	 * implementation swaps these between tasks, thus after
+	 * dequeuing from a heap you may end up with a different node
+	 * than the one you had when enqueuing the task. For the same
+	 * reason, don't obtain and store references to this node
+	 * other than this pointer (which is updated by the heap
+	 * implementation).
+	 */
+	struct heap_node* heap_node;
+
+	/* Used by rt_domain to queue task in release list.
+	 */
+	struct list_head list;
 };
 
 /* Possible RT flags */
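As an aside (not part of the patch), the warning above has a practical consequence: because heap_delete() swaps node contents and updates the back-reference, the only safe handle to a task's node is tsk_rt(t)->heap_node read at the point of use. A minimal sketch; requeue_example() is an invented name that mirrors what remove() plus heap_insert() do in rt_domain.h:

static void requeue_example(rt_domain_t* rt, struct task_struct* t)
{
	/* never cache tsk_rt(t)->heap_node across heap operations */
	if (heap_node_in_heap(tsk_rt(t)->heap_node))
		heap_delete(rt->order, &rt->ready_queue, tsk_rt(t)->heap_node);
	heap_insert(rt->order, &rt->ready_queue, tsk_rt(t)->heap_node);
}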
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index 68c6a401af..d7567ac98e 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -60,11 +60,9 @@ int edf_higher_prio(struct task_struct* first,
 			!second->rt_param.inh_task)));
 }
 
-int edf_ready_order(struct list_head* a, struct list_head* b)
+int edf_ready_order(struct heap_node* a, struct heap_node* b)
 {
-	return edf_higher_prio(
-		list_entry(a, struct task_struct, rt_list),
-		list_entry(b, struct task_struct, rt_list));
+	return edf_higher_prio(heap2task(a), heap2task(b));
 }
 
 void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
@@ -81,7 +79,7 @@ int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t)
 {
 	/* we need the read lock for edf_ready_queue */
 	/* no need to preempt if there is nothing pending */
-	if (!ready_jobs_pending(rt))
+	if (!__jobs_pending(rt))
 		return 0;
 	/* we need to reschedule if t doesn't exist */
 	if (!t)
@@ -92,5 +90,5 @@ int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t)
 	 */
 
 	/* make sure to get non-rt stuff out of the way */
-	return !is_realtime(t) || edf_higher_prio(next_ready(rt), t);
+	return !is_realtime(t) || edf_higher_prio(__next_ready(rt), t);
 }
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 2084bb7de8..0dec4342ba 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -7,12 +7,14 @@
 
 #include <linux/module.h>
 #include <linux/proc_fs.h>
-
+#include <linux/slab.h>
 
 #include <litmus/litmus.h>
 #include <linux/sched.h>
 #include <litmus/sched_plugin.h>
 
+#include <litmus/heap.h>
+
 #include <litmus/trace.h>
 
 /* Number of RT tasks that exist in the system */
@@ -28,6 +30,8 @@ atomic_t __log_seq_no = ATOMIC_INIT(0);
 static LIST_HEAD(sched_sig_list);
 static DEFINE_SPINLOCK(sched_sig_list_lock);
 
+static struct kmem_cache * heap_node_cache;
+
 /*
  * sys_set_task_rt_param
  * @pid: Pid of the task which scheduling parameters must be changed
@@ -503,14 +507,22 @@ long litmus_admit_task(struct task_struct* tsk)
 		return -EINVAL;
 	}
 
-	INIT_LIST_HEAD(&tsk->rt_list);
+	INIT_LIST_HEAD(&tsk_rt(tsk)->list);
 
 	/* avoid scheduler plugin changing underneath us */
 	spin_lock_irqsave(&task_transition_lock, flags);
 	retval = litmus->admit_task(tsk);
 
+	/* allocate heap node for this task */
+	tsk_rt(tsk)->heap_node = kmem_cache_alloc(heap_node_cache, GFP_ATOMIC);
+	if (!tsk_rt(tsk)->heap_node)
+		retval = -ENOMEM;
+	else
+		heap_node_init(&tsk_rt(tsk)->heap_node, tsk);
+
 	if (!retval)
 		atomic_inc(&rt_task_count);
+
 	spin_unlock_irqrestore(&task_transition_lock, flags);
 
 	return retval;
@@ -521,6 +533,8 @@ void litmus_exit_task(struct task_struct* tsk)
 {
 	if (is_realtime(tsk)) {
 		litmus->task_exit(tsk);
+		BUG_ON(heap_node_in_heap(tsk_rt(tsk)->heap_node));
+		kmem_cache_free(heap_node_cache, tsk_rt(tsk)->heap_node);
 		atomic_dec(&rt_task_count);
 		reinit_litmus_state(tsk, 1);
 	}
@@ -771,6 +785,10 @@ static int __init _init_litmus(void)
 
 	register_sched_plugin(&linux_sched_plugin);
 
+	heap_node_cache = KMEM_CACHE(heap_node, 0);
+	if (!heap_node_cache)
+		return -ENOMEM;
+
 #ifdef CONFIG_MAGIC_SYSRQ
 	/* offer some debugging help */
 	if (!register_sysrq_key('q', &sysrq_kill_rt_tasks_op))
@@ -787,6 +805,7 @@ static int __init _init_litmus(void)
 static void _exit_litmus(void)
 {
 	exit_litmus_proc();
+	kmem_cache_destroy(heap_node_cache);
 }
 
 module_init(_init_litmus);
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index 0134947ee6..b85bb9e38d 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -22,7 +22,7 @@ static int dummy_resched(rt_domain_t *rt)
 	return 0;
 }
 
-static int dummy_order(struct list_head* a, struct list_head* b)
+static int dummy_order(struct heap_node* a, struct heap_node* b)
 {
 	return 0;
 }
@@ -75,7 +75,7 @@ static void arm_release_timers(unsigned long _rt)
 	spin_unlock_irqrestore(&rt->release_lock, flags);
 
 	list_for_each_safe(pos, safe, &alt) {
-		t = list_entry(pos, struct task_struct, rt_list);
+		t = list_entry(pos, struct task_struct, rt_param.list);
 		list_del(pos);
 		setup_job_release_timer(t);
 	}
@@ -83,7 +83,7 @@ static void arm_release_timers(unsigned long _rt)
 
 
 void rt_domain_init(rt_domain_t *rt,
-		    list_cmp_t order,
+		    heap_prio_t order,
 		    check_resched_needed_t check,
 		    release_job_t release
 		    )
@@ -95,7 +95,7 @@ void rt_domain_init(rt_domain_t *rt,
 		release = default_release_job;
 	if (!order)
 		order = dummy_order;
-	INIT_LIST_HEAD(&rt->ready_queue);
+	heap_init(&rt->ready_queue);
 	INIT_LIST_HEAD(&rt->release_queue);
 	spin_lock_init(&rt->ready_lock);
 	spin_lock_init(&rt->release_lock);
@@ -114,26 +114,10 @@ void __add_ready(rt_domain_t* rt, struct task_struct *new)
 		new->comm, new->pid, get_exec_cost(new), get_rt_period(new),
 		get_release(new), litmus_clock());
 
-	if (!list_insert(&new->rt_list, &rt->ready_queue, rt->order))
-		rt->check_resched(rt);
-}
-
-struct task_struct* __take_ready(rt_domain_t* rt)
-{
-	struct task_struct *t = __peek_ready(rt);
+	BUG_ON(heap_node_in_heap(tsk_rt(new)->heap_node));
 
-	/* kick it out of the ready list */
-	if (t)
-		list_del(&t->rt_list);
-	return t;
-}
-
-struct task_struct* __peek_ready(rt_domain_t* rt)
-{
-	if (!list_empty(&rt->ready_queue))
-		return next_ready(rt);
-	else
-		return NULL;
+	heap_insert(rt->order, &rt->ready_queue, tsk_rt(new)->heap_node);
+	rt->check_resched(rt);
 }
 
 /* add_release - add a real-time task to the rt release queue.
@@ -142,7 +126,7 @@ struct task_struct* __peek_ready(rt_domain_t* rt)
 void __add_release(rt_domain_t* rt, struct task_struct *task)
 {
 	TRACE_TASK(task, "add_release(), rel=%llu\n", get_release(task));
-	list_add(&task->rt_list, &rt->release_queue);
+	list_add(&tsk_rt(task)->list, &rt->release_queue);
 	task->rt_param.domain = rt;
 	do_without_rqlock(&rt->arm_timers);
 }
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 2cfa0b38ac..bc29ca4ed4 100755
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -43,7 +43,7 @@
  *                will link NULL to that CPU. If it is
  *                currently queued in the cedf queue for
  *                a partition, it will be removed from
- *                the T->rt_list. It is safe to call
+ *                the rt_domain. It is safe to call
  *                unlink(T) if T is not linked. T may not
  *                be NULL.
  *
@@ -250,7 +250,7 @@ static noinline void unlink(struct task_struct* t)
 		entry = &per_cpu(cedf_cpu_entries, t->rt_param.linked_on);
 		t->rt_param.linked_on = NO_CPU;
 		link_task_to_cpu(NULL, entry);
-	} else if (in_list(&t->rt_list)) {
+	} else if (is_queued(t)) {
 		/* This is an interesting situation: t is scheduled,
 		 * but was just recently unlinked. It cannot be
 		 * linked anywhere else (because then it would have
@@ -258,7 +258,7 @@ static noinline void unlink(struct task_struct* t)
 		 * queue. We must remove it from the list in this
 		 * case.
 		 */
-		list_del(&t->rt_list);
+		remove(task_edf(t), t);
 	}
 }
 
@@ -298,7 +298,7 @@ static noinline void requeue(struct task_struct* task)
 
 	BUG_ON(!task);
 	/* sanity check rt_list before insertion */
-	BUG_ON(in_list(&task->rt_list));
+	BUG_ON(is_queued(task));
 
 	/* Get correct real-time domain. */
 	cedf = task_cedf(task);
@@ -625,8 +625,6 @@ static void cedf_task_block(struct task_struct *t)
 	spin_unlock_irqrestore(&task_cedf(t)->slock, flags);
 
 	BUG_ON(!is_realtime(t));
-	BUG_ON(t->rt_list.next != LIST_POISON1);
-	BUG_ON(t->rt_list.prev != LIST_POISON2);
 }
 
 static void cedf_task_exit(struct task_struct * t)
@@ -642,8 +640,6 @@ static void cedf_task_exit(struct task_struct * t)
 
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
-	BUG_ON(t->rt_list.next != LIST_POISON1);
-	BUG_ON(t->rt_list.prev != LIST_POISON2);
 }
 
 static long cedf_admit_task(struct task_struct* tsk)
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 5d61fe053c..fb2200bc93 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -44,7 +44,7 @@
  *                structures. If it is linked to some CPU it
  *                will link NULL to that CPU. If it is
  *                currently queued in the gsnedf queue it will
- *                be removed from the T->rt_list. It is safe to
+ *                be removed from the rt_domain. It is safe to
  *                call unlink(T) if T is not linked. T may not
  *                be NULL.
  *
@@ -217,7 +217,7 @@ static noinline void unlink(struct task_struct* t)
 		entry = &per_cpu(gsnedf_cpu_entries, t->rt_param.linked_on);
 		t->rt_param.linked_on = NO_CPU;
 		link_task_to_cpu(NULL, entry);
-	} else if (in_list(&t->rt_list)) {
+	} else if (is_queued(t)) {
 		/* This is an interesting situation: t is scheduled,
 		 * but was just recently unlinked. It cannot be
 		 * linked anywhere else (because then it would have
@@ -225,7 +225,7 @@ static noinline void unlink(struct task_struct* t)
 		 * queue. We must remove it from the list in this
 		 * case.
 		 */
-		list_del(&t->rt_list);
+		remove(&gsnedf, t);
 	}
 }
 
@@ -261,8 +261,8 @@ static noinline void preempt(cpu_entry_t *entry)
 static noinline void requeue(struct task_struct* task)
 {
 	BUG_ON(!task);
-	/* sanity check rt_list before insertion */
-	BUG_ON(in_list(&task->rt_list));
+	/* sanity check before insertion */
+	BUG_ON(is_queued(task));
 
 	if (get_rt_flags(task) == RT_F_SLEEP) {
 		/* this task has expired
@@ -573,8 +573,6 @@ static void gsnedf_task_block(struct task_struct *t)
 	spin_unlock_irqrestore(&gsnedf_lock, flags);
 
 	BUG_ON(!is_realtime(t));
-	BUG_ON(t->rt_list.next != LIST_POISON1);
-	BUG_ON(t->rt_list.prev != LIST_POISON2);
 }
 
 
@@ -589,8 +587,6 @@ static void gsnedf_task_exit(struct task_struct * t)
 
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
-	BUG_ON(t->rt_list.next != LIST_POISON1);
-	BUG_ON(t->rt_list.prev != LIST_POISON2);
 }
 
 static long gsnedf_pi_block(struct pi_semaphore *sem,
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 290654bb8d..a36a350160 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -249,7 +249,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
 
 	TRACE_TASK(task, "wake up\n");
 	spin_lock_irqsave(&pedf->slock, flags);
-	BUG_ON(in_list(&task->rt_list));
+	BUG_ON(is_queued(task));
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
 	 * a semaphore, it should never be treated as a new job release.
@@ -273,19 +273,21 @@ static void psnedf_task_block(struct task_struct *t)
 	/* only running tasks can block, thus t is in no queue */
 	TRACE_TASK(t, "block, state=%d\n", t->state);
 	BUG_ON(!is_realtime(t));
-	BUG_ON(in_list(&t->rt_list));
+	BUG_ON(is_queued(t));
 }
 
 static void psnedf_task_exit(struct task_struct * t)
 {
 	unsigned long flags;
 	psnedf_domain_t* pedf = task_pedf(t);
+	rt_domain_t* edf;
 
 	spin_lock_irqsave(&pedf->slock, flags);
-
-	if (in_list(&t->rt_list))
+	if (is_queued(t)) {
 		/* dequeue */
-		list_del(&t->rt_list);
+		edf = task_edf(t);
+		remove(edf, t);
+	}
 	preempt(pedf);
 	spin_unlock_irqrestore(&pedf->slock, flags);
 }
@@ -315,10 +317,11 @@ static long psnedf_pi_block(struct pi_semaphore *sem,
 		/* let holder inherit */
 		sem->holder->rt_param.inh_task = new_waiter;
 		t = sem->holder;
-		if (in_list(&t->rt_list)) {
+		if (is_queued(t)) {
 			/* queued in domain*/
-			list_del(&t->rt_list);
+			remove(edf, t);
 			/* readd to make priority change take place */
+			/* FIXME: this looks outdated */
 			if (is_released(t, litmus_clock()))
 				__add_ready(edf, t);
 			else