author    Bjoern B. Brandenburg <bbb@cs.unc.edu>  2009-04-19 19:25:15 -0400
committer Bjoern B. Brandenburg <bbb@cs.unc.edu>  2009-04-19 19:25:15 -0400
commit    fc757f4223edfab215d6e49cee479a09ecc17962 (patch)
tree      7a82aaf3acb8b72e29449ed7ad663e7670c1eac1
parent    9abdf662179d297bc60786883aefdb06a5ee2c95 (diff)
G-EDF: compiles & runs in QEMU
-rw-r--r--	litmus/sched_gedf.c	373
1 file changed, 232 insertions, 141 deletions
diff --git a/litmus/sched_gedf.c b/litmus/sched_gedf.c
index e22e847c5f..f469265362 100644
--- a/litmus/sched_gedf.c
+++ b/litmus/sched_gedf.c
@@ -9,39 +9,37 @@
 #include <litmus/sched_trace.h>
 
 #include <litmus/heap.h>
+#include <litmus/cheap.h>
 
 #include <linux/module.h>
 
+#define GEDF_MAX_TASKS 1000
+
 /* cpu_entry_t - maintain the linked and scheduled state
  */
 typedef struct {
 	int cpu;
 	struct task_struct* linked;    /* only RT tasks */
+	int picked;                    /* linked was seen */
 	struct task_struct* scheduled; /* only RT tasks */
-	atomic_t will_schedule;        /* prevent unneeded IPIs */
 	struct heap_node* hn;
 } cpu_entry_t;
 DEFINE_PER_CPU(cpu_entry_t, gedf_cpu_entries);
 
 cpu_entry_t* gedf_cpus[NR_CPUS];
 
-#define set_will_schedule() \
-	(atomic_set(&__get_cpu_var(gedf_cpu_entries).will_schedule, 1))
-#define clear_will_schedule() \
-	(atomic_set(&__get_cpu_var(gedf_cpu_entries).will_schedule, 0))
-#define test_will_schedule(cpu) \
-	(atomic_read(&per_cpu(gedf_cpu_entries, cpu).will_schedule))
-
-
 #define NO_CPU 0xffffffff
 
 /* the cpus queue themselves according to priority in here */
 static struct heap_node gedf_heap_node[NR_CPUS];
 static struct heap gedf_cpu_heap;
 
-static rt_domain_t gedf;
-#define gedf_lock (gedf.ready_lock)
+DEFINE_SPINLOCK(gedf_cpu_lock); /* synchronize access to cpu heap */
 
+static struct cheap_node gedf_cheap_nodes[GEDF_MAX_TASKS];
+static struct cheap gedf_ready_queue;
+
+static rt_domain_t gedf; /* used only for the release queue */
 
 static int cpu_lower_prio(struct heap_node *_a, struct heap_node *_b)
 {
@@ -54,22 +52,30 @@ static int cpu_lower_prio(struct heap_node *_a, struct heap_node *_b)
 	return edf_higher_prio(b->linked, a->linked);
 }
 
+static void remove_from_cpu_heap(cpu_entry_t* entry)
+{
+	if (likely(heap_node_in_heap(entry->hn)))
+		heap_delete(cpu_lower_prio, &gedf_cpu_heap, entry->hn);
+}
+
 /* update_cpu_position - Move the cpu entry to the correct place to maintain
  * order in the cpu queue. Caller must hold gedf lock.
  */
 static void update_cpu_position(cpu_entry_t *entry)
 {
-	if (likely(heap_node_in_heap(entry->hn)))
-		heap_delete(cpu_lower_prio, &gedf_cpu_heap, entry->hn);
+	remove_from_cpu_heap(entry);
 	heap_insert(cpu_lower_prio, &gedf_cpu_heap, entry->hn);
 }
 
 /* caller must hold gedf lock */
-static cpu_entry_t* lowest_prio_cpu(void)
+static cpu_entry_t* lowest_prio_cpu(int take)
 {
 	struct heap_node* hn;
-	hn = heap_peek(cpu_lower_prio, &gedf_cpu_heap);
-	return hn->value;
+	if (take)
+		hn = heap_take(cpu_lower_prio, &gedf_cpu_heap);
+	else
+		hn = heap_peek(cpu_lower_prio, &gedf_cpu_heap);
+	return hn ? hn->value : NULL;
 }
 
 
@@ -80,7 +86,7 @@ static cpu_entry_t* lowest_prio_cpu(void)
 static noinline void link_task_to_cpu(struct task_struct* linked,
 				      cpu_entry_t *entry)
 {
-	cpu_entry_t *sched;
+	cpu_entry_t *sched = NULL;
 	struct task_struct* tmp;
 	int on_cpu;
 
@@ -105,14 +111,21 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 		 * wanted to link, we don't need to do the swap --
 		 * we just link ourselves to the CPU and depend on
 		 * the caller to get things right.
+		 *
+		 * But only swap if the other node is in the queue.
+		 * If it is not, then it is being updated
+		 * concurrently and some other task was already
+		 * picked for it.
 		 */
-		if (entry != sched) {
+		if (entry != sched && heap_node_in_heap(sched->hn)) {
 			TRACE_TASK(linked,
-				   "already scheduled on %d, updating link.\n",
+				   "already scheduled on %d, "
+				   "updating link.\n",
 				   sched->cpu);
 			tmp = sched->linked;
 			linked->rt_param.linked_on = sched->cpu;
 			sched->linked = linked;
+			sched->picked = 1;
 			update_cpu_position(sched);
 			linked = tmp;
 		}
@@ -121,6 +134,10 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 		linked->rt_param.linked_on = entry->cpu;
 	}
 	entry->linked = linked;
+	entry->picked = entry == sched; /* set to one if we linked to the
+					 * CPU that the task is
+					 * executing on
+					 */
 	if (linked)
 		TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
 	else
@@ -135,25 +152,11 @@ static noinline void unlink(struct task_struct* t)
 {
 	cpu_entry_t *entry;
 
-	if (unlikely(!t)) {
-		TRACE_BUG_ON(!t);
-		return;
-	}
-
 	if (t->rt_param.linked_on != NO_CPU) {
 		/* unlink */
 		entry = &per_cpu(gedf_cpu_entries, t->rt_param.linked_on);
 		t->rt_param.linked_on = NO_CPU;
 		link_task_to_cpu(NULL, entry);
-	} else if (is_queued(t)) {
-		/* This is an interesting situation: t is scheduled,
-		 * but was just recently unlinked. It cannot be
-		 * linked anywhere else (because then it would have
-		 * been relinked to this CPU), thus it must be in some
-		 * queue. We must remove it from the list in this
-		 * case.
-		 */
-		remove(&gedf, t);
 	}
 }
 
@@ -162,78 +165,137 @@ static noinline void unlink(struct task_struct* t)
  */
 static noinline void preempt(cpu_entry_t *entry)
 {
-	if (smp_processor_id() == entry->cpu) {
+	if (smp_processor_id() == entry->cpu)
 		set_tsk_need_resched(current);
-	} else
-		/* in case that it is a remote CPU we have to defer the
-		 * the decision to the remote CPU
-		 */
-		if (!test_will_schedule(entry->cpu))
-			smp_send_reschedule(entry->cpu);
+	else
+		smp_send_reschedule(entry->cpu);
+}
+
+
+static void add_to_ready_queue(struct task_struct* task)
+{
+	TRACE_TASK(task, "adding to ready queue\n");
+	cheap_insert((cheap_prio_t) edf_higher_prio,
+		     &gedf_ready_queue,
+		     task,
+		     smp_processor_id());
 }
 
 /* requeue - Put an unlinked task into gsn-edf domain.
  * Caller must hold gedf_lock.
+ *
+ * call unlocked, but with preemptions disabled!
  */
 static noinline void requeue(struct task_struct* task)
 {
-	BUG_ON(!task);
-	/* sanity check before insertion */
-	BUG_ON(is_queued(task));
-
 	if (is_released(task, litmus_clock()))
-		__add_ready(&gedf, task);
-	else {
+		add_to_ready_queue(task);
+	else
 		/* it has got to wait */
 		add_release(&gedf, task);
-	}
+}
+
+static int preemption_required(cpu_entry_t* last,
+			       struct task_struct* task)
+{
+	if (edf_higher_prio(task, last->linked)) {
+		/* yes, drop lock before dequeuing task
+		 * and dequeue cpu state
+		 */
+		last = lowest_prio_cpu(1);
+		lockdep_on(); /* let lockdep see we actually released it */
+		spin_unlock(&gedf_cpu_lock);
+		lockdep_off();
+		return 1;
+	} else
+		return 0;
 }
 
 /* check for any necessary preemptions */
 static void check_for_preemptions(void)
 {
-	struct task_struct *task;
+	int done = 0;
+	unsigned long flags;
+	struct task_struct *task, *unlinked;
 	cpu_entry_t* last;
 
-	for(last = lowest_prio_cpu();
-	    edf_preemption_needed(&gedf, last->linked);
-	    last = lowest_prio_cpu()) {
-		/* preemption necessary */
-		task = __take_ready(&gedf);
-		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
-		      task->pid, last->cpu);
-		if (last->linked)
-			requeue(last->linked);
-		link_task_to_cpu(task, last);
-		preempt(last);
+
+	local_irq_save(flags);
+	while (!done) {
+		unlinked = NULL;
+		spin_lock(&gedf_cpu_lock);
+		last = lowest_prio_cpu(0);
+		if (likely(last)) {
+			task = cheap_take_if(
+				(cheap_take_predicate_t) preemption_required,
+				last,
+				(cheap_prio_t) edf_higher_prio,
+				&gedf_ready_queue);
+			if (task) {
+				TRACE_TASK(task, "removed from ready Q\n");
+				/* cpu lock was dropped, reacquire */
+				spin_lock(&gedf_cpu_lock);
+				if (last->linked && !last->picked)
+					/* can be requeued by us */
+					unlinked = last->linked;
+				TRACE("check_for_preemptions: "
+				      "attempting to link task %d to %d\n",
+				      task->pid, last->cpu);
+				link_task_to_cpu(task, last);
+				update_cpu_position(last);
+			} else
+				/* no preemption required */
+				done = 1;
+		} else
+			/* all gone, being checked elsewhere? */
+			done = 1;
+		spin_unlock(&gedf_cpu_lock);
+		if (unlinked)
+			/* stick it back into the queue */
+			requeue(unlinked);
+		if (last && !done)
+			/* we have a preemption, send IPI */
+			preempt(last);
 	}
+	local_irq_restore(flags);
 }
 
-/* gedf_job_arrival: task is either resumed or released */
+/* gedf_job_arrival: task is either resumed or released
+ * call only unlocked!
+ */
 static noinline void gedf_job_arrival(struct task_struct* task)
 {
-	BUG_ON(!task);
-
 	requeue(task);
 	check_for_preemptions();
 }
 
 static void gedf_release_jobs(rt_domain_t* rt, struct heap* tasks)
 {
+	struct heap_node* hn;
+	struct task_struct* t;
 	unsigned long flags;
 
-	spin_lock_irqsave(&gedf_lock, flags);
 
-	__merge_ready(rt, tasks);
-	check_for_preemptions();
+	local_irq_save(flags);
+	/* insert unlocked */
+	while ((hn = heap_take(edf_ready_order, tasks))) {
+		t = (struct task_struct*) hn->value;
+		TRACE_TASK(t, "to be merged into ready queue "
+			   "(is_released:%d, is_running:%d)\n",
+			   is_released(t, litmus_clock()),
+			   is_running(t));
+		add_to_ready_queue(t);
+	}
 
-	spin_unlock_irqrestore(&gedf_lock, flags);
+	local_irq_restore(flags);
+	check_for_preemptions();
 }
 
 /* caller holds gedf_lock */
-static noinline void job_completion(struct task_struct *t, int forced)
+static noinline int job_completion(cpu_entry_t* entry, int forced)
 {
-	BUG_ON(!t);
+
+	struct task_struct *t = entry->scheduled;
 
 	sched_trace_task_completion(t, forced);
 
@@ -245,12 +307,20 @@ static noinline void job_completion(struct task_struct *t, int forced)
 	prepare_for_next_period(t);
 	if (is_released(t, litmus_clock()))
 		sched_trace_task_release(t);
-	/* unlink */
-	unlink(t);
-	/* requeue
-	 * But don't requeue a blocking task. */
-	if (is_running(t))
-		gedf_job_arrival(t);
+
+
+	if (is_released(t, litmus_clock())) {
+		/* we changed the priority, see if we need to preempt */
+		set_rt_flags(t, RT_F_RUNNING);
+		update_cpu_position(entry);
+		return 1;
+	}
+	else {
+		/* it has got to wait */
+		unlink(t);
+		add_release(&gedf, t);
+		return 0;
+	}
 }
 
 /* gedf_tick - this function is called for every local timer
@@ -261,21 +331,17 @@ static noinline void job_completion(struct task_struct *t, int forced)
  */
 static void gedf_tick(struct task_struct* t)
 {
-	if (is_realtime(t) && budget_exhausted(t)) {
+	if (is_realtime(t) && budget_exhausted(t))
 		set_tsk_need_resched(t);
-		set_will_schedule();
-	}
 }
 
 static struct task_struct* gedf_schedule(struct task_struct * prev)
 {
 	cpu_entry_t* entry = &__get_cpu_var(gedf_cpu_entries);
-	int out_of_time, sleep, preempt, np, exists, blocks;
+	int out_of_time, sleep, preempt, exists, blocks;
 	struct task_struct* next = NULL;
 
-	/* Will be released in finish_switch. */
-	spin_lock(&gedf_lock);
-	clear_will_schedule();
+	TRACE_TASK(prev, "invoked gedf_schedule.\n");
 
 	/* sanity checking */
 	BUG_ON(entry->scheduled && entry->scheduled != prev);
@@ -287,9 +353,10 @@ static struct task_struct* gedf_schedule(struct task_struct * prev)
 	blocks = exists && !is_running(entry->scheduled);
 	out_of_time = exists && budget_exhausted(entry->scheduled);
 	sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
-	preempt = entry->scheduled != entry->linked;
 
-	TRACE_TASK(prev, "invoked gedf_schedule.\n");
+	spin_lock(&gedf_cpu_lock);
+
+	preempt = entry->scheduled != entry->linked;
 
 	if (exists)
 		TRACE_TASK(prev,
@@ -297,11 +364,10 @@ static struct task_struct* gedf_schedule(struct task_struct * prev)
297 "state:%d sig:%d\n", 364 "state:%d sig:%d\n",
298 blocks, out_of_time, sleep, preempt, 365 blocks, out_of_time, sleep, preempt,
299 prev->state, signal_pending(prev)); 366 prev->state, signal_pending(prev));
300 if (entry->linked && preempt) 367 if (preempt && entry->linked)
301 TRACE_TASK(prev, "will be preempted by %s/%d\n", 368 TRACE_TASK(prev, "will be preempted by %s/%d\n",
302 entry->linked->comm, entry->linked->pid); 369 entry->linked->comm, entry->linked->pid);
303 370
304
305 /* If a task blocks we have no choice but to reschedule. 371 /* If a task blocks we have no choice but to reschedule.
306 */ 372 */
307 if (blocks) 373 if (blocks)
@@ -310,16 +376,42 @@ static struct task_struct* gedf_schedule(struct task_struct * prev)
 
 	/* Any task that is preemptable and either exhausts its execution
 	 * budget or wants to sleep completes. We may have to reschedule after
-	 * this. Don't do a job completion if we block (can't have timers running
-	 * for blocked jobs). Preemption go first for the same reason.
+	 * this. Don't do a job completion if we block (can't have timers
+	 * running for blocked jobs). Preemptions go first for the same reason.
 	 */
-	if ((out_of_time || sleep) && !blocks && !preempt)
-		job_completion(entry->scheduled, !sleep);
+	if ((out_of_time || sleep) && !blocks && !preempt) {
+		if (job_completion(entry, !sleep)) {
+			/* Task might stay with us.
+			 * Drop locks and check for preemptions.
+			 */
+			spin_unlock(&gedf_cpu_lock);
+			/* anything to update ? */
+			check_for_preemptions();
+			spin_lock(&gedf_cpu_lock);
+			/* if something higher priority got linked,
+			 * then we need to add the task into the
+			 * ready queue (since it wasn't added by
+			 * check_for_preemptions b/c picked==1).
+			 */
+			if (entry->linked != prev)
+				add_to_ready_queue(prev);
+		}
+	}
 
 	/* Link pending task if we became unlinked.
+	 * NOTE: Do not hold locks while performing ready queue updates
+	 * since we want concurrent access to the queue.
 	 */
-	if (!entry->linked)
-		link_task_to_cpu(__take_ready(&gedf), entry);
+	if (!entry->linked) {
+		if (exists)
+			/* We are committed to descheduling; erase marker
+			 * before we drop the lock.
+			 */
+			tsk_rt(prev)->scheduled_on = NO_CPU;
+		spin_unlock(&gedf_cpu_lock);
+		check_for_preemptions(); /* update links */
+		spin_lock(&gedf_cpu_lock);
+	}
 
 	/* The final scheduling decision. Do we need to switch for some reason?
 	 * If linked is different from scheduled, then select linked as next.
@@ -328,13 +420,11 @@ static struct task_struct* gedf_schedule(struct task_struct * prev)
 	/* Schedule a linked job? */
 	if (entry->linked) {
 		entry->linked->rt_param.scheduled_on = entry->cpu;
+		entry->picked = 1;
 		next = entry->linked;
 	}
-	if (entry->scheduled) {
-		/* not gonna be scheduled soon */
+	if (entry->scheduled)
 		entry->scheduled->rt_param.scheduled_on = NO_CPU;
-		TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
-	}
 	} else
 		/* Only override Linux scheduler if we have a real-time task
 		 * scheduled that needs to continue.
@@ -342,17 +432,16 @@ static struct task_struct* gedf_schedule(struct task_struct * prev)
 		if (exists)
 			next = prev;
 
-	spin_unlock(&gedf_lock);
-
-	TRACE("gedf_lock released, next=0x%p\n", next);
-
+	spin_unlock(&gedf_cpu_lock);
+	if (exists && preempt && !blocks)
+		/* stick preempted task back into the ready queue */
+		gedf_job_arrival(prev);
 
 	if (next)
 		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
 	else if (exists && !next)
 		TRACE("becomes idle at %llu.\n", litmus_clock());
 
-
 	return next;
 }
 
@@ -375,9 +464,9 @@ static void gedf_task_new(struct task_struct * t, int on_rq, int running)
 	unsigned long flags;
 	cpu_entry_t* entry;
 
-	TRACE("gsn edf: task new %d\n", t->pid);
+	TRACE("gedf: task new %d\n", t->pid);
 
-	spin_lock_irqsave(&gedf_lock, flags);
+	spin_lock_irqsave(&gedf_cpu_lock, flags);
 	if (running) {
 		entry = &per_cpu(gedf_cpu_entries, task_cpu(t));
 		BUG_ON(entry->scheduled);
@@ -389,9 +478,9 @@ static void gedf_task_new(struct task_struct * t, int on_rq, int running)
 
 	/* setup job params */
 	release_at(t, litmus_clock());
+	spin_unlock_irqrestore(&gedf_cpu_lock, flags);
 
 	gedf_job_arrival(t);
-	spin_unlock_irqrestore(&gedf_lock, flags);
 }
 
 static void gedf_task_wake_up(struct task_struct *task)
@@ -401,58 +490,43 @@ static void gedf_task_wake_up(struct task_struct *task)
 
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
 
-	spin_lock_irqsave(&gedf_lock, flags);
-	/* We need to take suspensions because of semaphores into
-	 * account! If a job resumes after being suspended due to acquiring
-	 * a semaphore, it should never be treated as a new job release.
-	 */
-	if (get_rt_flags(task) == RT_F_EXIT_SEM) {
-		set_rt_flags(task, RT_F_RUNNING);
-	} else {
-		now = litmus_clock();
-		if (is_tardy(task, now)) {
-			/* new sporadic release */
-			release_at(task, now);
-			sched_trace_task_release(task);
-		}
-		else if (task->time_slice)
-			/* came back in time before deadline
-			 */
-			set_rt_flags(task, RT_F_RUNNING);
+	spin_lock_irqsave(&gedf_cpu_lock, flags);
+	now = litmus_clock();
+	if (is_tardy(task, now)) {
+		/* new sporadic release */
+		release_at(task, now);
+		sched_trace_task_release(task);
 	}
+	spin_unlock_irqrestore(&gedf_cpu_lock, flags);
 	gedf_job_arrival(task);
-	spin_unlock_irqrestore(&gedf_lock, flags);
 }
 
 static void gedf_task_block(struct task_struct *t)
 {
-	unsigned long flags;
-
 	TRACE_TASK(t, "block at %llu\n", litmus_clock());
-
-	/* unlink if necessary */
-	spin_lock_irqsave(&gedf_lock, flags);
-	unlink(t);
-	spin_unlock_irqrestore(&gedf_lock, flags);
-
-	BUG_ON(!is_realtime(t));
 }
 
-
 static void gedf_task_exit(struct task_struct * t)
 {
 	unsigned long flags;
 
 	/* unlink if necessary */
-	spin_lock_irqsave(&gedf_lock, flags);
+	spin_lock_irqsave(&gedf_cpu_lock, flags);
+	/* remove from CPU state, if necessary */
 	unlink(t);
 	if (tsk_rt(t)->scheduled_on != NO_CPU) {
 		gedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
 		tsk_rt(t)->scheduled_on = NO_CPU;
+	} else {
+		/* FIXME: If t is currently queued, then we need to
+		 * dequeue it now; otherwise it will probably
+		 * cause a crash once it is dequeued.
+		 */
+		TRACE_TASK(t, "called gedf_task_exit(), "
+			   "but is not scheduled!\n");
 	}
-	spin_unlock_irqrestore(&gedf_lock, flags);
+	spin_unlock_irqrestore(&gedf_cpu_lock, flags);
 
-	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
 }
 
@@ -462,6 +536,25 @@ static long gedf_admit_task(struct task_struct* tsk)
 }
 
 
+static long gedf_activate_plugin(void)
+{
+	int cpu;
+	cpu_entry_t *entry;
+
+	heap_init(&gedf_cpu_heap);
+	for_each_online_cpu(cpu) {
+		TRACE("G-EDF: Initializing CPU #%d.\n", cpu);
+		entry = &per_cpu(gedf_cpu_entries, cpu);
+		heap_node_init(&entry->hn, entry);
+		entry->linked = NULL;
+		entry->scheduled = NULL;
+		entry->picked = 0;
+		update_cpu_position(entry);
+	}
+	return 0;
+}
+
+
 /* Plugin object */
 static struct sched_plugin gedf_plugin __cacheline_aligned_in_smp = {
 	.plugin_name = "G-EDF",
@@ -473,7 +566,8 @@ static struct sched_plugin gedf_plugin __cacheline_aligned_in_smp = {
 	.schedule = gedf_schedule,
 	.task_wake_up = gedf_task_wake_up,
 	.task_block = gedf_task_block,
-	.admit_task = gedf_admit_task
+	.admit_task = gedf_admit_task,
+	.activate_plugin = gedf_activate_plugin,
 };
 
 
@@ -482,14 +576,11 @@ static int __init init_gedf(void)
 	int cpu;
 	cpu_entry_t *entry;
 
-	heap_init(&gedf_cpu_heap);
+	cheap_init(&gedf_ready_queue, GEDF_MAX_TASKS, gedf_cheap_nodes);
 	/* initialize CPU state */
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		entry = &per_cpu(gedf_cpu_entries, cpu);
 		gedf_cpus[cpu] = entry;
-		atomic_set(&entry->will_schedule, 0);
-		entry->linked = NULL;
-		entry->scheduled = NULL;
 		entry->cpu = cpu;
 		entry->hn = &gedf_heap_node[cpu];
 		heap_node_init(&entry->hn, entry);