author    Bjoern B. Brandenburg <bbb@cs.unc.edu>    2008-09-11 14:23:43 -0400
committer Bjoern B. Brandenburg <bbb@cs.unc.edu>    2008-09-11 14:33:10 -0400
commit    e9c5ff00452487d901f38286003a79e1dfab489a (patch)
tree      1964a1611345093f6d2f74fc062d42408d5b68a2
parent    ec1858ca1d34e4213f886b3007f0d294fa01b867 (diff)
rt_domain: make release queue handling more efficient
Instead of having hrtimers for each task, we now have only one hrtimer
per rt_domain. To-be-released tasks are grouped in mergeable heaps and
presented as a batch on each release. This approach should allow us to
reduce the worst-case overhead at hyperperiod boundaries significantly:

1) less hrtimer overhead
2) fewer calls to the active plugin
3) only one CPU handles releases at a time
4) (2) & (3) should bring down lock contention significantly
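To see the intended effect in isolation, consider a minimal user-space C sketch of the batching idea (illustration only, not part of the patch; struct task and release_group are invented names). Tasks that share a release time form one group, and a single timer event releases the whole group, so n simultaneous releases cost one timer expiration instead of n:

/* Minimal user-space sketch of the batching idea (illustration only;
 * struct task and release_group are invented names). */
#include <stdio.h>

struct task { long long release_time; int id; };

struct release_group {
        long long release_time;
        struct task *members[8];
        int count;
};

/* one "timer event" releases the entire group at once */
static void release_group_now(struct release_group *g)
{
        int i;
        for (i = 0; i < g->count; i++)
                printf("t=%lld: releasing task %d\n",
                       g->release_time, g->members[i]->id);
}

int main(void)
{
        struct task a = {1000, 1}, b = {1000, 2}, c = {2000, 3};
        struct release_group g1 = {1000, {&a, &b}, 2};
        struct release_group g2 = {2000, {&c}, 1};

        /* two timer expirations instead of three per-task timers */
        release_group_now(&g1);
        release_group_now(&g2);
        return 0;
}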
-rw-r--r--   include/litmus/edf_common.h    4
-rw-r--r--   include/litmus/heap.h          34
-rw-r--r--   include/litmus/litmus.h        26
-rw-r--r--   include/litmus/rt_domain.h     60
-rw-r--r--   include/litmus/rt_param.h      3
-rw-r--r--   litmus/edf_common.c            2
-rw-r--r--   litmus/rt_domain.c             230
-rwxr-xr-x   litmus/sched_cedf.c            57
-rw-r--r--   litmus/sched_gsn_edf.c         40
-rwxr-xr-x   litmus/sched_pfair.c           24
-rw-r--r--   litmus/sched_psn_edf.c         2
11 files changed, 355 insertions, 127 deletions
diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h
index 4dff77ae48..32dcf9bd6b 100644
--- a/include/litmus/edf_common.h
+++ b/include/litmus/edf_common.h
@@ -12,8 +12,8 @@
 #include <litmus/rt_domain.h>
 
 
 void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
-                     release_job_t release);
+                     release_jobs_t release);
 
 int edf_higher_prio(struct task_struct* first,
                     struct task_struct* second);
diff --git a/include/litmus/heap.h b/include/litmus/heap.h
index 0b0ede4542..508e0b9f16 100644
--- a/include/litmus/heap.h
+++ b/include/litmus/heap.h
@@ -298,4 +298,38 @@ static inline void heap_delete(heap_prio_t higher_prio, struct heap* heap,
         node->degree = NOT_IN_HEAP;
 }
 
+/* un-inline, make it a kmemcache_t */
+static inline struct heap_node* alloc_heap_node(int gfp_flags)
+{
+        return kmalloc(sizeof(struct heap_node), gfp_flags);
+}
+
+static inline void free_heap_node(struct heap_node* hn)
+{
+        kfree(hn);
+}
+
+/* allocate a heap node for value and insert into the heap */
+static inline int heap_add(heap_prio_t higher_prio, struct heap* heap,
+                           void* value, int gfp_flags)
+{
+        struct heap_node* hn = alloc_heap_node(gfp_flags);
+        if (likely(hn)) {
+                heap_node_init(&hn, value);
+                heap_insert(higher_prio, heap, hn);
+        }
+        return hn != NULL;
+}
+
+static inline void* heap_take_del(heap_prio_t higher_prio,
+                                  struct heap* heap)
+{
+        struct heap_node* hn = heap_take(higher_prio, heap);
+        void* ret = NULL;
+        if (hn) {
+                ret = hn->value;
+                free_heap_node(hn);
+        }
+        return ret;
+}
 #endif
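The two helpers above pair allocation with insertion (heap_add) and removal with deallocation (heap_take_del). A hedged usage fragment, assuming only the heap API declared in this header (my_prio and heap_add_example are illustrative names, not part of the patch):

/* Illustrative fragment; assumes the <litmus/heap.h> API above. */
static int my_prio(struct heap_node *a, struct heap_node *b)
{
        /* order by the stored pointer value, purely for illustration */
        return a->value < b->value;
}

static void heap_add_example(void)
{
        struct heap h;
        void *v;

        heap_init(&h);
        /* heap_add() allocates the heap_node internally and
         * returns 0 iff that allocation failed */
        if (!heap_add(my_prio, &h, (void *) 42UL, GFP_ATOMIC))
                return;
        /* heap_take_del() removes the highest-priority element,
         * frees its node, and returns the stored value */
        v = heap_take_del(my_prio, &h); /* v == (void *) 42UL */
        (void) v;
}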
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index de2a3c2547..d3cd6e4437 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -141,10 +141,6 @@ static inline lt_t litmus_clock(void)
 /* A macro to convert from nanoseconds to ktime_t. */
 #define ns_to_ktime(t) ktime_add_ns(ktime_set(0, 0), t)
 
-/* The high-resolution release timer for a task. */
-#define release_timer(t) (tsk_rt(t)->release_timer)
-
-/* The high-resolution release timer for a task. */
 #define get_domain(t) (tsk_rt(t)->domain)
 
 /* Honor the flag in the preempt_count variable that is set
@@ -201,5 +197,27 @@ static inline int is_np(struct task_struct *t)
 
 #endif
 
+/* make the unit explicit */
+typedef unsigned long quanta_t;
+
+enum round {
+        FLOOR,
+        CEIL
+};
+
+
+/* Tick period is used to convert ns-specified execution
+ * costs and periods into tick-based equivalents.
+ */
+extern ktime_t tick_period;
+
+static inline quanta_t time2quanta(lt_t time, enum round round)
+{
+        s64 quantum_length = ktime_to_ns(tick_period);
+
+        if (do_div(time, quantum_length) && round == CEIL)
+                time++;
+        return (quanta_t) time;
+}
 
 #endif
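time2quanta() truncates with FLOOR and rounds up with CEIL whenever the division leaves a remainder. A small user-space model of the same arithmetic (the 1 ms quantum is an assumption for the example; the kernel version divides by tick_period via do_div()):

#include <stdio.h>

typedef unsigned long long lt_t;
typedef unsigned long quanta_t;
enum round { FLOOR, CEIL };

static quanta_t time2quanta_model(lt_t time, enum round round)
{
        const lt_t quantum_length = 1000000ULL; /* assume 1 ms in ns */
        quanta_t q = (quanta_t) (time / quantum_length);
        if (time % quantum_length && round == CEIL)
                q++;
        return q;
}

int main(void)
{
        /* 2.5 ms: 2 quanta with FLOOR, 3 with CEIL */
        printf("%lu %lu\n", time2quanta_model(2500000ULL, FLOOR),
               time2quanta_model(2500000ULL, CEIL));
        return 0;
}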
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
index 47cd123b76..84d71d40d9 100644
--- a/include/litmus/rt_domain.h
+++ b/include/litmus/rt_domain.h
@@ -8,13 +8,36 @@
 #include <litmus/norqlock.h>
 #include <litmus/heap.h>
 
+#define RELEASE_QUEUE_SLOTS 127 /* prime */
+
 struct _rt_domain;
 
 typedef int (*check_resched_needed_t)(struct _rt_domain *rt);
-typedef void (*release_job_t)(struct task_struct *t, struct _rt_domain *rt);
+typedef void (*release_jobs_t)(struct _rt_domain *rt, struct heap* tasks);
+
+int heap_earlier_release(struct heap_node *_a, struct heap_node *_b);
+
+struct release_heap {
+        struct list_head list;
+        lt_t release_time;
+        struct heap heap;
+};
+
+struct release_queue {
+        /* each slot maintains a list of release heaps sorted by release time */
+        struct list_head slot[RELEASE_QUEUE_SLOTS];
+        /* the heap of heaps ordered by release time */
+        struct heap rel_heap;
+        /* the actual timer used to trigger releases */
+        struct hrtimer timer;
+        /* used to determine when to start the timer */
+        int timer_armed;
+        /* when will it go off? */
+        lt_t timer_time;
+};
 
 typedef struct _rt_domain {
-        struct no_rqlock_work arm_timers;
+        struct no_rqlock_work arm_timer;
 
         /* runnable rt tasks are in here */
         spinlock_t ready_lock;
@@ -22,18 +45,32 @@ typedef struct _rt_domain {
 
         /* real-time tasks waiting for release are in here */
         spinlock_t release_lock;
-        struct list_head release_queue;
+        struct release_queue release_queue;
+
+        /* for moving tasks to the release queue */
+        spinlock_t tobe_lock;
+        struct list_head tobe_released;
 
         /* how do we check if we need to kick another CPU? */
         check_resched_needed_t check_resched;
 
         /* how do we release a job? */
-        release_job_t release_job;
+        release_jobs_t release_jobs;
 
         /* how are tasks ordered in the ready queue? */
         heap_prio_t order;
 } rt_domain_t;
 
+/* caller must hold release_lock */
+static inline int next_release(rt_domain_t *rt, lt_t *time)
+{
+        struct heap_node* top = heap_peek(heap_earlier_release,
+                                          &rt->release_queue.rel_heap);
+        if (top)
+                *time = ((struct release_heap*) top->value)->release_time;
+        return top != NULL;
+}
+
 static inline struct task_struct* __next_ready(rt_domain_t* rt)
 {
         struct heap_node *hn = heap_peek(rt->order, &rt->ready_queue);
@@ -45,9 +82,10 @@ static inline struct task_struct* __next_ready(rt_domain_t* rt)
 
 void rt_domain_init(rt_domain_t *rt, heap_prio_t order,
                     check_resched_needed_t check,
-                    release_job_t release);
+                    release_jobs_t release);
 
 void __add_ready(rt_domain_t* rt, struct task_struct *new);
+void __merge_ready(rt_domain_t* rt, struct heap *tasks);
 void __add_release(rt_domain_t* rt, struct task_struct *task);
 
 static inline struct task_struct* __take_ready(rt_domain_t* rt)
@@ -87,6 +125,14 @@ static inline void add_ready(rt_domain_t* rt, struct task_struct *new)
         spin_unlock_irqrestore(&rt->ready_lock, flags);
 }
 
+static inline void merge_ready(rt_domain_t* rt, struct heap* tasks)
+{
+        unsigned long flags;
+        spin_lock_irqsave(&rt->ready_lock, flags);
+        __merge_ready(rt, tasks);
+        spin_unlock_irqrestore(&rt->ready_lock, flags);
+}
+
 static inline struct task_struct* take_ready(rt_domain_t* rt)
 {
         unsigned long flags;
@@ -103,9 +149,9 @@ static inline void add_release(rt_domain_t* rt, struct task_struct *task)
 {
         unsigned long flags;
         /* first we need the write lock for rt_ready_queue */
-        spin_lock_irqsave(&rt->release_lock, flags);
+        spin_lock_irqsave(&rt->tobe_lock, flags);
         __add_release(rt, task);
-        spin_unlock_irqrestore(&rt->release_lock, flags);
+        spin_unlock_irqrestore(&rt->tobe_lock, flags);
 }
 
 static inline int __jobs_pending(rt_domain_t* rt)
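The release queue is effectively a hash table: each release time is quantized and hashed into one of the RELEASE_QUEUE_SLOTS buckets, and 127 is prime so that strictly periodic release patterns do not all collide in a handful of slots. A user-space model of the slot computation (the 1 ms quantum is an assumption; the kernel version is time2slot() in litmus/rt_domain.c):

#include <stdio.h>

#define RELEASE_QUEUE_SLOTS 127 /* prime */

static unsigned int time2slot_model(unsigned long long time_ns)
{
        unsigned long long quanta = time_ns / 1000000ULL; /* FLOOR */
        return (unsigned int) (quanta % RELEASE_QUEUE_SLOTS);
}

int main(void)
{
        /* releases within the same quantum share a slot (and, on an
         * exact release-time match, the same release_heap) */
        printf("%u %u\n", time2slot_model(5000000ULL),
               time2slot_model(5000500ULL));
        /* consecutive quanta spread across neighboring slots */
        printf("%u %u\n", time2slot_model(5000000ULL),
               time2slot_model(6000000ULL));
        return 0;
}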
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 7bb568432e..403ebc8cc9 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -137,9 +137,6 @@ struct rt_param {
         int old_policy;
         int old_prio;
 
-        /* The high-resolution timer used to control its release. */
-        struct hrtimer release_timer;
-
         /* ready queue for this task */
         struct _rt_domain* domain;
 
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index d7567ac98e..253641de4f 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -66,7 +66,7 @@ int edf_ready_order(struct heap_node* a, struct heap_node* b)
 }
 
 void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
-                     release_job_t release)
+                     release_jobs_t release)
 {
         rt_domain_init(rt, edf_ready_order, resched, release);
 }
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index 28803087b3..27bc9143e7 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -18,6 +18,8 @@
 
 #include <litmus/trace.h>
 
+#include <litmus/heap.h>
+
 static int dummy_resched(rt_domain_t *rt)
 {
         return 0;
@@ -29,89 +31,228 @@ static int dummy_order(struct heap_node* a, struct heap_node* b)
 }
 
 /* default implementation: use default lock */
-static void default_release_job(struct task_struct* t, rt_domain_t* rt)
+static void default_release_jobs(rt_domain_t* rt, struct heap* tasks)
+{
+        merge_ready(rt, tasks);
+}
+
+static unsigned int time2slot(lt_t time)
+{
+        return (unsigned int) time2quanta(time, FLOOR) % RELEASE_QUEUE_SLOTS;
+}
+
+int heap_earlier_release(struct heap_node *_a, struct heap_node *_b)
 {
-        add_ready(rt, t);
+        struct release_heap *a = _a->value;
+        struct release_heap *b = _b->value;
+        return lt_before(a->release_time, b->release_time);
 }
 
-static enum hrtimer_restart release_job_timer(struct hrtimer *timer)
+/* Caller must hold the release lock.
+ * Will return the heap for the given time. If no such heap exists prior to
+ * the invocation it will be created.
+ */
+static struct release_heap* get_release_heap(rt_domain_t *rt, lt_t release_time)
 {
-        struct task_struct *t;
+        struct list_head* pos;
+        struct release_heap* heap = NULL;
+        struct release_heap* rh;
+        unsigned int slot = time2slot(release_time);
+        int inserted;
+
+        /* initialize pos for the case that the list is empty */
+        pos = rt->release_queue.slot[slot].next;
+        list_for_each(pos, &rt->release_queue.slot[slot]) {
+                rh = list_entry(pos, struct release_heap, list);
+                if (release_time == rh->release_time) {
+                        /* perfect match -- this happens on hyperperiod
+                         * boundaries
+                         */
+                        heap = rh;
+                        break;
+                } else if (lt_before(release_time, rh->release_time)) {
+                        /* we need to insert a new node since rh is
+                         * already in the future
+                         */
+                        break;
+                }
+        }
+        if (!heap) {
+                /* must create new node */
+                /* FIXME: use a kmemcache_t */
+                rh = kmalloc(sizeof(struct release_heap), GFP_ATOMIC);
+                if (unlikely(!rh))
+                        /* Should be handled somehow.
+                         * For now, let's just hope there is
+                         * sufficient memory.
+                         */
+                        panic("rt_domain: no more memory?");
+                rh->release_time = release_time;
+                heap_init(&rh->heap);
+                list_add(&rh->list, pos->prev);
+                inserted = heap_add(heap_earlier_release,
+                                    &rt->release_queue.rel_heap, rh,
+                                    GFP_ATOMIC);
+                if (unlikely(!inserted))
+                        panic("rt_domain: no more heap memory?");
+                heap = rh;
+        }
+        return heap;
+}
+
+static enum hrtimer_restart on_release_timer(struct hrtimer *timer)
+{
+        long flags;
+        rt_domain_t *rt;
+        struct release_heap* rh;
+        struct heap tasks;
+        struct list_head list, *pos, *safe;
+        lt_t release = 0;
+        int pending;
+        int repeat;
+        enum hrtimer_restart ret = HRTIMER_NORESTART;
 
         TS_RELEASE_START;
 
-        t = container_of(timer, struct task_struct,
-                         rt_param.release_timer);
+        INIT_LIST_HEAD(&list);
+        heap_init(&tasks);
 
-        get_domain(t)->release_job(t, get_domain(t));
+        rt = container_of(timer, rt_domain_t,
+                          release_queue.timer);
 
-        TS_RELEASE_END;
+        do {
+                list_for_each_safe(pos, safe, &list) {
+                        rh = list_entry(pos, struct release_heap, list);
+                        heap_union(rt->order, &tasks, &rh->heap);
+                        list_del(pos);
+                        kfree(rh);
+                }
 
-        return HRTIMER_NORESTART;
-}
+                /* call release callback */
+                rt->release_jobs(rt, &tasks);
 
-static void setup_job_release_timer(struct task_struct *task)
-{
-        hrtimer_init(&release_timer(task), CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-        release_timer(task).function = release_job_timer;
-#ifdef CONFIG_HIGH_RES_TIMERS
-        release_timer(task).cb_mode = HRTIMER_CB_IRQSAFE_NO_RESTART;
-#endif
-        /* Expiration time of timer is release time of task. */
-        release_timer(task).expires = ns_to_ktime(get_release(task));
 
-        TRACE_TASK(task, "arming release timer rel=%llu at %llu\n",
-                   get_release(task), litmus_clock());
+                spin_lock_irqsave(&rt->release_lock, flags);
+                while ((pending = next_release(rt, &release))) {
+                        if (lt_before(release, litmus_clock())) {
+                                /* pick for release */
+                                rh = heap_take_del(heap_earlier_release,
+                                                   &rt->release_queue.rel_heap);
+                                list_move(&rh->list, &list);
+                        } else
+                                break;
+                }
+                repeat = !list_empty(&list);
+                if (!repeat) {
+                        /* last iteration, setup timers, etc. */
+                        if (!pending) {
+                                rt->release_queue.timer_armed = 0;
+                                ret = HRTIMER_NORESTART;
+                        } else {
+                                rt->release_queue.timer_time = release;
+                                timer->expires = ns_to_ktime(release);
+                                ret = HRTIMER_RESTART;
+                        }
+                }
+                spin_unlock_irqrestore(&rt->release_lock, flags);
+        } while (repeat);
 
-        hrtimer_start(&release_timer(task), release_timer(task).expires,
-                      HRTIMER_MODE_ABS);
+        TS_RELEASE_END;
+
+        return ret;
 }
 
-static void arm_release_timers(unsigned long _rt)
+static void arm_release_timer(unsigned long _rt)
 {
         rt_domain_t *rt = (rt_domain_t*) _rt;
         unsigned long flags;
-        struct list_head alt;
+        struct list_head list;
         struct list_head *pos, *safe;
         struct task_struct* t;
+        struct release_heap* rh;
+        int earlier, armed;
+        lt_t release = 0;
+
+        local_irq_save(flags);
+        spin_lock(&rt->tobe_lock);
+        list_replace_init(&rt->tobe_released, &list);
+        spin_unlock(&rt->tobe_lock);
 
-        spin_lock_irqsave(&rt->release_lock, flags);
-        list_replace_init(&rt->release_queue, &alt);
-        spin_unlock_irqrestore(&rt->release_lock, flags);
+        /* We only have to defend against the ISR since norq callbacks
+         * are serialized.
+         */
+        spin_lock(&rt->release_lock);
 
-        list_for_each_safe(pos, safe, &alt) {
+        list_for_each_safe(pos, safe, &list) {
                 t = list_entry(pos, struct task_struct, rt_param.list);
                 list_del(pos);
-                setup_job_release_timer(t);
+                rh = get_release_heap(rt, get_release(t));
+                heap_add(rt->order, &rh->heap, t, GFP_ATOMIC);
         }
-}
 
+        next_release(rt, &release);
+        armed = rt->release_queue.timer_armed;
+        earlier = lt_before(release, rt->release_queue.timer_time);
+        /* We'll do the actual arming in a sec. The ISR doesn't care what these
+         * flags say, and it'll be true before another instance of this
+         * function can observe the flag due to the sequential nature of norq
+         * work.
+         */
+        rt->release_queue.timer_armed = 1;
+        rt->release_queue.timer_time = release;
+        spin_unlock(&rt->release_lock);
+        if (!armed || earlier) {
+                if (armed) {
+                        /* need to cancel first */
+                        hrtimer_cancel(&rt->release_queue.timer);
+                }
+                hrtimer_start(&rt->release_queue.timer,
+                              ns_to_ktime(release),
+                              HRTIMER_MODE_ABS);
+        }
+        local_irq_restore(flags);
+}
 
 void rt_domain_init(rt_domain_t *rt,
                     heap_prio_t order,
                     check_resched_needed_t check,
-                    release_job_t release
+                    release_jobs_t release
                    )
 {
+        int i;
+
         BUG_ON(!rt);
         if (!check)
                 check = dummy_resched;
         if (!release)
-                release = default_release_job;
+                release = default_release_jobs;
         if (!order)
                 order = dummy_order;
+
         heap_init(&rt->ready_queue);
-        INIT_LIST_HEAD(&rt->release_queue);
+        INIT_LIST_HEAD(&rt->tobe_released);
+        rt->release_queue.timer_armed = 0;
+        for (i = 0; i < RELEASE_QUEUE_SLOTS; i++)
+                INIT_LIST_HEAD(&rt->release_queue.slot[i]);
+
+        hrtimer_init(&rt->release_queue.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+        rt->release_queue.timer.function = on_release_timer;
+#ifdef CONFIG_HIGH_RES_TIMERS
+        rt->release_queue.timer.cb_mode = HRTIMER_CB_IRQSAFE;
+#endif
+
         spin_lock_init(&rt->ready_lock);
         spin_lock_init(&rt->release_lock);
+        spin_lock_init(&rt->tobe_lock);
+
         rt->check_resched = check;
-        rt->release_job   = release;
+        rt->release_jobs  = release;
         rt->order         = order;
-        init_no_rqlock_work(&rt->arm_timers, arm_release_timers, (unsigned long) rt);
+        init_no_rqlock_work(&rt->arm_timer, arm_release_timer, (unsigned long) rt);
 }
 
 /* add_ready - add a real-time task to the rt ready queue. It must be runnable.
  * @new:       the newly released task
  */
 void __add_ready(rt_domain_t* rt, struct task_struct *new)
 {
@@ -125,14 +266,23 @@ void __add_ready(rt_domain_t* rt, struct task_struct *new)
         rt->check_resched(rt);
 }
 
+/* merge_ready - Add a sorted set of tasks to the rt ready queue. They must be runnable.
+ * @tasks      - the newly released tasks
+ */
+void __merge_ready(rt_domain_t* rt, struct heap* tasks)
+{
+        heap_union(rt->order, &rt->ready_queue, tasks);
+        rt->check_resched(rt);
+}
+
 /* add_release - add a real-time task to the rt release queue.
  * @task:        the sleeping task
  */
 void __add_release(rt_domain_t* rt, struct task_struct *task)
 {
         TRACE_TASK(task, "add_release(), rel=%llu\n", get_release(task));
-        list_add(&tsk_rt(task)->list, &rt->release_queue);
+        list_add(&tsk_rt(task)->list, &rt->tobe_released);
         task->rt_param.domain = rt;
-        do_without_rqlock(&rt->arm_timers);
+        do_without_rqlock(&rt->arm_timer);
 }
 
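The arming path in arm_release_timer() reduces to one rule: reprogram the hrtimer only when it is idle or when the earliest queued release has moved up; otherwise the already-armed timer is left alone. A user-space model of just that rule (timer_state and maybe_arm() are invented names):

#include <stdbool.h>
#include <stdio.h>

struct timer_state {
        bool armed;
        unsigned long long expires;
};

static void maybe_arm(struct timer_state *t, unsigned long long release)
{
        if (!t->armed || release < t->expires) {
                t->armed = true;
                t->expires = release;
                printf("timer (re)armed for %llu\n", release);
        }
}

int main(void)
{
        struct timer_state t = { false, 0 };
        maybe_arm(&t, 2000); /* arms: the timer was idle */
        maybe_arm(&t, 1000); /* re-arms: an earlier release showed up */
        maybe_arm(&t, 3000); /* no-op: later than the programmed time */
        return 0;
}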
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 2ac14cded9..bebb2c7ba5 100755
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -325,56 +325,57 @@ static noinline void requeue(struct task_struct* task)
         __add_ready(edf, task);
 }
 
+static void check_for_preemptions(cedf_domain_t* cedf)
+{
+        cpu_entry_t *last;
+        struct task_struct *task;
+        struct list_head *cedf_cpu_queue;
+        cedf_cpu_queue = &cedf->cedf_cpu_queue;
+
+        for (last = list_entry(cedf_cpu_queue->prev, cpu_entry_t, list);
+             edf_preemption_needed(&cedf->domain, last->linked);
+             last = list_entry(cedf_cpu_queue->prev, cpu_entry_t, list)) {
+                /* preemption necessary */
+                task = __take_ready(&cedf->domain);
+                TRACE("check_for_preemptions: task %d linked to %d, state:%d\n",
+                      task->pid, last->cpu, task->state);
+                if (last->linked)
+                        requeue(last->linked);
+                link_task_to_cpu(task, last);
+                preempt(last);
+        }
+
+}
+
 /* cedf_job_arrival: task is either resumed or released */
 static noinline void cedf_job_arrival(struct task_struct* task)
 {
-        cpu_entry_t* last;
         cedf_domain_t* cedf;
         rt_domain_t* edf;
-        struct list_head *cedf_cpu_queue;
 
         BUG_ON(!task);
 
         /* Get correct real-time domain. */
         cedf = task_cedf(task);
         edf = &cedf->domain;
-        cedf_cpu_queue = &cedf->cedf_cpu_queue;
-
-        BUG_ON(!cedf);
-        BUG_ON(!edf);
-        BUG_ON(!cedf_cpu_queue);
-        BUG_ON(list_empty(cedf_cpu_queue));
 
         /* first queue arriving job */
         requeue(task);
 
         /* then check for any necessary preemptions */
-        last = list_entry(cedf_cpu_queue->prev, cpu_entry_t, list);
-        if (edf_preemption_needed(edf, last->linked)) {
-                /* preemption necessary */
-                task = __take_ready(edf);
-                TRACE("job_arrival: task %d linked to %d, state:%d\n",
-                      task->pid, last->cpu, task->state);
-                if (last->linked)
-                        requeue(last->linked);
-
-                link_task_to_cpu(task, last);
-                preempt(last);
-        }
+        check_for_preemptions(cedf);
 }
 
 /* check for current job releases */
-static void cedf_job_release(struct task_struct* t, rt_domain_t* _)
+static void cedf_release_jobs(rt_domain_t* rt, struct heap* tasks)
 {
-        cedf_domain_t* cedf = task_cedf(t);
+        cedf_domain_t* cedf = container_of(rt, cedf_domain_t, domain);
         unsigned long flags;
 
-        BUG_ON(!t);
-        BUG_ON(!cedf);
-
         spin_lock_irqsave(&cedf->slock, flags);
-        sched_trace_job_release(queued);
-        cedf_job_arrival(t);
+
+        __merge_ready(&cedf->domain, tasks);
+        check_for_preemptions(cedf);
         spin_unlock_irqrestore(&cedf->slock, flags);
 }
 
@@ -678,7 +679,7 @@ static void cedf_domain_init(int first_cpu, int last_cpu)
 
         /* Initialize cluster domain. */
         edf_domain_init(&new_cedf_domain->domain, NULL,
-                        cedf_job_release);
+                        cedf_release_jobs);
         new_cedf_domain->first_cpu = first_cpu;
         new_cedf_domain->last_cpu = last_cpu;
         INIT_LIST_HEAD(&new_cedf_domain->cedf_cpu_queue);
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index e32a4e8458..caaf58db67 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -285,41 +285,45 @@ static noinline void requeue(struct task_struct* task)
         __add_ready(&gsnedf, task);
 }
 
-/* gsnedf_job_arrival: task is either resumed or released */
-static noinline void gsnedf_job_arrival(struct task_struct* task)
+
+/* check for any necessary preemptions */
+static void check_for_preemptions(void)
 {
+        struct task_struct *task;
         cpu_entry_t* last;
 
-        BUG_ON(list_empty(&gsnedf_cpu_queue));
-        BUG_ON(!task);
-
-        /* first queue arriving job */
-        requeue(task);
-
-        /* then check for any necessary preemptions */
-        last = list_entry(gsnedf_cpu_queue.prev, cpu_entry_t, list);
-        if (edf_preemption_needed(&gsnedf, last->linked)) {
+        for (last = list_entry(gsnedf_cpu_queue.prev, cpu_entry_t, list);
+             edf_preemption_needed(&gsnedf, last->linked);
+             last = list_entry(gsnedf_cpu_queue.prev, cpu_entry_t, list)) {
                 /* preemption necessary */
                 task = __take_ready(&gsnedf);
-                TRACE("job_arrival: attempting to link task %d to %d\n",
+                TRACE("check_for_preemptions: attempting to link task %d to %d\n",
                       task->pid, last->cpu);
                 if (last->linked)
                         requeue(last->linked);
-
                 link_task_to_cpu(task, last);
                 preempt(last);
         }
 }
 
-/* check for current job releases */
-static void gsnedf_job_release(struct task_struct* t, rt_domain_t* _)
+/* gsnedf_job_arrival: task is either resumed or released */
+static noinline void gsnedf_job_arrival(struct task_struct* task)
+{
+        BUG_ON(list_empty(&gsnedf_cpu_queue));
+        BUG_ON(!task);
+
+        requeue(task);
+        check_for_preemptions();
+}
+
+static void gsnedf_release_jobs(rt_domain_t* rt, struct heap* tasks)
 {
         unsigned long flags;
 
         spin_lock_irqsave(&gsnedf_lock, flags);
 
-        sched_trace_job_release(queued);
-        gsnedf_job_arrival(t);
+        __merge_ready(rt, tasks);
+        check_for_preemptions();
 
         spin_unlock_irqrestore(&gsnedf_lock, flags);
 }
@@ -723,7 +727,7 @@ static int __init init_gsn_edf(void)
                 INIT_LIST_HEAD(&entry->list);
         }
 
-        edf_domain_init(&gsnedf, NULL, gsnedf_job_release);
+        edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs);
         return register_sched_plugin(&gsn_edf_plugin);
 }
 
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c
index beb303c352..9adf9c835c 100755
--- a/litmus/sched_pfair.c
+++ b/litmus/sched_pfair.c
@@ -21,14 +21,6 @@
 
 #include <litmus/heap.h>
 
-/* Tick period is used to convert ns-specified execution
- * costs and periods into tick-based equivalents.
- */
-extern ktime_t tick_period;
-
-/* make the unit explicit */
-typedef unsigned long quanta_t;
-
 struct subtask {
         /* measured in quanta relative to job release */
         quanta_t release;
@@ -160,20 +152,6 @@ static quanta_t cur_group_deadline(struct task_struct* t)
         return gdl;
 }
 
-enum round {
-        FLOOR,
-        CEIL
-};
-
-static quanta_t time2quanta(lt_t time, enum round round)
-{
-        s64 quantum_length = ktime_to_ns(tick_period);
-
-        if (do_div(time, quantum_length) && round == CEIL)
-                time++;
-        return (quanta_t) time;
-}
-
 static int pfair_higher_prio(struct task_struct* first,
                              struct task_struct* second)
 {
@@ -244,7 +222,7 @@ static void pfair_add_release(struct task_struct* t)
 /* pull released tasks from the release queue */
 static void poll_releases(quanta_t time)
 {
-        heap_union(pfair_ready_order, &pfair.ready_queue, relq(time));
+        __merge_ready(&pfair, relq(time));
         merge_time = time;
 }
 
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 0e9c9dd80a..71dcfa1fcd 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -47,7 +47,7 @@ DEFINE_PER_CPU(psnedf_domain_t, psnedf_domains);
 
 static void psnedf_domain_init(psnedf_domain_t* pedf,
                                check_resched_needed_t check,
-                               release_job_t release,
+                               release_jobs_t release,
                                int cpu)
 {
         edf_domain_init(&pedf->domain, check, release);