author		Jonathan Herman <hermanjl@cs.unc.edu>	2012-05-14 20:07:03 -0400
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2012-05-14 20:07:03 -0400
commit		4bb1dab9fe43ad707f8c1b28f3e8bd5d47f09994
tree		022b41cca8f3e967276ee558b644c6f6ea7313fb
parent		c7a09a0c9c97bb12bec367b1c6180f870ee32de9
Per job blocking
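
Replace the per-server requeue/take callbacks with a single job_arrival()
path. On each arrival, if the task's control page flags colors_updated, the
job's DGL (dynamic group lock) request is rebuilt from its color control
page, so every job can block on an up-to-date set of colored pages. The
FIFO-lock special cases collapse into lock_if()/unlock_if() helpers, and
per-CPU state is consistently protected by entry_lock() with interrupts
disabled.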
-rw-r--r--	include/litmus/rt_param.h	|   2
-rw-r--r--	include/litmus/rt_server.h	|  10
-rw-r--r--	litmus/color.c			|   2
-rw-r--r--	litmus/rt_server.c		|  15
-rw-r--r--	litmus/sched_color.c		| 215
5 files changed, 99 insertions(+), 145 deletions(-)
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 209a89d7b459..91fa107cafb5 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -71,6 +71,8 @@ struct control_page {
 
 	/* locking overhead tracing: time stamp prior to system call */
 	uint64_t ts_syscall_start;	/* Feather-Trace cycles */
+
+	int colors_updated:8;
 };
 
 #ifndef __KERNEL__
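
Note: colors_updated lives in struct control_page, which LITMUS^RT maps into
the task's address space, so userspace can flag a color change that the
kernel consumes (and clears) in job_arrival() below. A minimal sketch of the
intended handshake -- the get_ctrl_page() accessor is hypothetical, not part
of this patch:

	/* Userspace: after rewriting colors[]/pages[] in the color ctrl page */
	struct control_page *cp = get_ctrl_page();	/* hypothetical accessor */
	cp->colors_updated = 1;	/* next job arrival rebuilds the DGL request */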
diff --git a/include/litmus/rt_server.h b/include/litmus/rt_server.h
index 0f3147707a3b..0e2feb6c6b0e 100644
--- a/include/litmus/rt_server.h
+++ b/include/litmus/rt_server.h
@@ -9,8 +9,6 @@ struct rt_server;
 
 typedef int (*need_preempt_t)(rt_domain_t *rt, struct task_struct *t);
 typedef void (*server_update_t)(struct rt_server *srv);
-typedef void (*server_requeue_t)(struct rt_server *srv, struct task_struct *t);
-typedef struct task_struct* (*server_take_t)(struct rt_server *srv);
 
 struct rt_server {
 	int sid;
@@ -23,17 +21,11 @@ struct rt_server {
 	need_preempt_t need_preempt;
 	/* System state has changed, so should server */
 	server_update_t update;
-	/* Requeue task in domain */
-	server_requeue_t requeue;
-	/* Take next task from domain */
-	server_take_t take;
 };
 
 void init_rt_server(struct rt_server *server,
 		    int sid, int cpu, rt_domain_t *domain,
 		    need_preempt_t need_preempt,
-		    server_requeue_t requeue,
-		    server_update_t update,
-		    server_take_t take);
+		    server_update_t update);
 
 #endif
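
With requeue and take gone, a server is a domain plus two hooks. For
reference, a sketch of the struct after this patch: the fields between sid
and need_preempt are not shown in these hunks and are inferred from their
uses elsewhere in the patch, so treat them as assumptions:

	struct rt_server {
		int sid;
		int cpu;			/* inferred: init_rt_server(..., cpu, ...) */
		rt_domain_t *domain;		/* inferred: server->domain */
		struct task_struct *linked;	/* inferred: server->linked */
		int running;			/* inferred: cpu_server->running = 1 */

		need_preempt_t need_preempt;
		/* System state has changed, so should server */
		server_update_t update;
	};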
diff --git a/litmus/color.c b/litmus/color.c
index 8a9f68544e8f..ecc191137137 100644
--- a/litmus/color.c
+++ b/litmus/color.c
@@ -15,7 +15,7 @@
 #include <litmus/color.h>
 #include <litmus/litmus.h> /* for in_list(...) */
 
-#define PAGES_PER_COLOR 300
+#define PAGES_PER_COLOR 3072
 
 /*
  * This is used only to "trick" lockdep into permitting dynamically allocated
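
Note: PAGES_PER_COLOR grows from 300 to 3072. Assuming 4 KiB pages, that is
a jump from roughly 1.2 MiB to exactly 12 MiB of allocatable pages per color.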
diff --git a/litmus/rt_server.c b/litmus/rt_server.c
index 818588a3d317..74d7c7b0f81a 100644
--- a/litmus/rt_server.c
+++ b/litmus/rt_server.c
@@ -1,11 +1,5 @@
 #include <litmus/rt_server.h>
 
-
-static struct task_struct* default_server_take(struct rt_server *srv)
-{
-	return __take_ready(srv->domain);
-}
-
 static void default_server_update(struct rt_server *srv)
 {
 }
@@ -13,18 +7,13 @@ static void default_server_update(struct rt_server *srv)
 void init_rt_server(struct rt_server *server,
 		    int sid, int cpu, rt_domain_t *domain,
 		    need_preempt_t need_preempt,
-		    server_requeue_t requeue,
-		    server_update_t update,
-		    server_take_t take)
+		    server_update_t update)
 {
-	if (!need_preempt || !requeue)
+	if (!need_preempt)
 		BUG_ON(1);
 
 	server->need_preempt = need_preempt;
-	server->requeue = requeue;
-
 	server->update = (update) ? update : default_server_update;
-	server->take = (take) ? take : default_server_take;
 
 	server->sid = sid;
 	server->cpu = cpu;
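
Note: init_rt_server() now takes just the two surviving hooks, and a NULL
update still falls back to the no-op default_server_update(). (The
"if (!need_preempt) BUG_ON(1);" idiom could simply be BUG_ON(!need_preempt),
but this patch leaves it alone.) Call sites shrink accordingly, as in
init_color() below:

	init_rt_server(cpu_server, cpu + 1, cpu,
		       &entry->rm_domain, rm_preemption_needed, NULL);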
diff --git a/litmus/sched_color.c b/litmus/sched_color.c
index 8554fde49c0b..f095b302ddd6 100644
--- a/litmus/sched_color.c
+++ b/litmus/sched_color.c
@@ -55,15 +55,17 @@ static raw_spinlock_t dgl_lock;
 #define remote_entry(cpu) (&per_cpu(color_cpus, cpu))
 #define task_entry(task) remote_entry(get_partition(task))
 #define task_fserver(task) (&task_entry(task)->fifo_server.server)
-#define entry_lock(entry) (&entry->rm_domain.ready_lock)
+#define entry_lock(entry) (&(entry)->rm_domain.ready_lock)
 
 
 #define task_dom(entry, task) (is_be(task) ? &fifo_domain : &entry->rm_domain)
 #define task_lock(entry, task) (is_be(task) ? &fifo_lock : entry_lock(entry))
-#define is_fifo_server(s) (s->sid > num_online_cpus())
+#define is_fifo_server(s) ((s)->sid > num_online_cpus())
+#define lock_if(lock, cond) do { if (cond) raw_spin_lock(lock);} while(0)
+#define unlock_if(lock, cond) do { if (cond) raw_spin_unlock(lock);} while(0)
 
 #ifdef CONFIG_NP_SECTION
 #define has_resources(t, c) (tsk_rt(t)->req == group_lock.acquired[c])
 #else
 #define has_resources(t, c) (1)
 #endif
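
Note: entry_lock() and is_fifo_server() now parenthesize their macro argument
(standard macro hygiene), and the new lock_if()/unlock_if() helpers capture
the recurring "take fifo_lock only for FIFO work" pattern, e.g.
lock_if(&fifo_lock, is_be(t)) in job_arrival() below.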
@@ -212,6 +214,65 @@ static void link(struct rt_server *server, struct task_struct *task)
 }
 
 /*
+ * Triggers preemption on first FIFO server which is running NULL.
+ */
+static void check_for_fifo_preempt(void)
+{
+	int ret = 0, cpu;
+	struct cpu_entry *entry;
+	struct rt_server *cpu_server, *fifo_server;
+
+	TRACE("Checking for FIFO preempt\n");
+
+	for_each_online_cpu(cpu) {
+		entry = remote_entry(cpu);
+		cpu_server = &entry->server;
+		fifo_server = &entry->fifo_server.server;
+
+		raw_spin_lock(entry_lock(entry));
+		raw_spin_lock(&fifo_lock);
+
+		if (cpu_server->linked && is_server(cpu_server->linked) &&
+		    !fifo_server->linked) {
+			litmus_reschedule(cpu);
+			ret = 1;
+		}
+
+		raw_spin_unlock(&fifo_lock);
+		raw_spin_unlock(entry_lock(entry));
+
+		if (ret)
+			break;
+	}
+}
+
+/*
+ * Rejoin a task into the system.
+ */
+static void job_arrival(struct task_struct *t)
+{
+	int i;
+	rt_domain_t *dom = task_dom(task_entry(t), t);
+	struct dgl_group_req *gr = tsk_rt(t)->req;
+	struct control_page *cp = tsk_rt(t)->ctrl_page;
+	struct color_ctrl_page *ccp = tsk_rt(t)->color_ctrl_page;
+
+	/* Fill request */
+	if (cp && ccp && cp->colors_updated) {
+		cp->colors_updated = 0;
+		dgl_group_req_init(&group_lock, gr);
+		for (i = 0; ccp->pages[i]; ++i)
+			set_req(&group_lock, gr, ccp->colors[i], ccp->pages[i]);
+	} else {
+		TRACE("Oh noz: %p %p %d\n", cp, ccp, ((cp) ? cp->colors_updated : -1));
+	}
+
+	lock_if(&fifo_lock, is_be(t));
+	requeue(dom, t);
+	unlock_if(&fifo_lock, is_be(t));
+}
+
+/*
  * Complete job for task linked to @server.
  */
 static void job_completion(struct rt_server *server)
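
Note: job_arrival() is the core of the change: each time a task rejoins a
ready queue, its DGL request is rebuilt from the color control page if (and
only if) userspace set colors_updated; the "Oh noz" trace marks tasks
arriving without usable control pages. The nesting order established by
check_for_fifo_preempt() -- entry_lock() before fifo_lock -- appears
consistent with job_arrival()'s callers below, which already hold the entry
lock when it takes fifo_lock for best-effort tasks.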
@@ -243,7 +304,7 @@ static void job_completion(struct rt_server *server)
 	sched_trace_task_release(t);
 
 	if (is_running(t))
-		server->requeue(server, t);
+		job_arrival(t);
 }
 
 /*
@@ -275,7 +336,6 @@ static struct task_struct* schedule_server(struct rt_server *server)
 {
 	struct task_struct *next;
 	struct rt_server *lserver;
-	int is_fifo = is_fifo_server(server);
 
 	TRACE("Scheduling server %d\n", server->sid);
 
@@ -288,8 +348,7 @@ static struct task_struct* schedule_server(struct rt_server *server)
 	}
 
 	next = server->linked;
-	if (is_fifo)
-		raw_spin_lock(&fifo_lock);
+	lock_if(&fifo_lock, is_fifo_server(server));
 	if ((!next || !is_np(next)) &&
 	    server->need_preempt(server->domain, next)) {
 		if (next) {
@@ -300,49 +359,12 @@ static struct task_struct* schedule_server(struct rt_server *server)
 		next = __take_ready(server->domain);
 		link(server, next);
 	}
-	if (is_fifo)
-		raw_spin_unlock(&fifo_lock);
+	unlock_if(&fifo_lock, is_fifo_server(server));
 
 	return next;
 }
 
 /*
- * Dumb requeue for PRM (CPU) servers.
- */
-static void rm_requeue(struct rt_server *server, struct task_struct *t)
-{
-	BUG_ON(is_be(t));
-	requeue(server->domain, t);
-}
-
-/*
- * Locking requeue for FIFO servers.
- */
-static void fifo_requeue(struct rt_server *server, struct task_struct *t)
-{
-	BUG_ON(!is_be(t));
-	raw_spin_lock(&fifo_lock);
-	requeue(server->domain, t);
-	raw_spin_unlock(&fifo_lock);
-}
-
-
-/*
- * Locking take for FIFO servers.
- * TODO: no longer necessary.
- */
-static struct task_struct* fifo_take(struct rt_server *server)
-{
-	struct task_struct *ret;
-
-	raw_spin_lock(&fifo_lock);
-	ret = __take_ready(server->domain);
-	raw_spin_unlock(&fifo_lock);
-
-	return ret;
-}
-
-/*
  * Update server state, including picking next running task and incrementing
  * server execution time.
  */
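
Note: with lock_if() handling the conditional locking in schedule_server()
and job_arrival() handling requeueing, the three per-server helpers
(rm_requeue, fifo_requeue, and fifo_take, already marked "TODO: no longer
necessary") can be deleted outright.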
@@ -403,39 +425,6 @@ static void color_rm_release(rt_domain_t *rm, struct bheap *tasks)
 	raw_spin_unlock_irqrestore(entry_lock(entry), flags);
 }
 
-/*
- * Triggers preemption on first FIFO server which is running NULL.
- */
-static void check_for_fifo_preempt(void)
-{
-	int ret = 0, cpu;
-	struct cpu_entry *entry;
-	struct rt_server *cpu_server, *fifo_server;
-
-	TRACE("Checking for FIFO preempt\n");
-
-	for_each_online_cpu(cpu) {
-		entry = remote_entry(cpu);
-		cpu_server = &entry->server;
-		fifo_server = &entry->fifo_server.server;
-
-		raw_spin_lock(entry_lock(entry));
-		raw_spin_lock(&fifo_lock);
-
-		if (cpu_server->linked && is_server(cpu_server->linked) &&
-		    !fifo_server->linked) {
-			litmus_reschedule(cpu);
-			ret = 1;
-		}
-
-		raw_spin_unlock(&fifo_lock);
-		raw_spin_unlock(entry_lock(entry));
-
-		if (ret)
-			break;
-	}
-}
-
 static void color_fifo_release(rt_domain_t *dom, struct bheap *tasks)
 {
 	unsigned long flags;
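
Note: this hunk is pure code motion; check_for_fifo_preempt() is removed here
verbatim, having been re-added earlier in the file next to job_arrival().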
@@ -471,9 +460,7 @@ static struct task_struct* color_schedule(struct task_struct *prev)
 
 	if (entry->scheduled && cpu_empty(entry, plink) && is_running(prev)) {
 		TRACE_TASK(prev, "Snuck in on new!\n");
-		raw_spin_lock(&fifo_lock);
-		requeue(task_dom(entry, prev), prev);
-		raw_spin_unlock(&fifo_lock);
+		job_arrival(entry->scheduled);
 	}
 
 	/* Pick next top-level task */
@@ -527,51 +514,38 @@ static struct task_struct* color_schedule(struct task_struct *prev)
 static void color_task_new(struct task_struct *t, int on_rq, int running)
 {
 	unsigned long flags;
-	int i;
-	raw_spinlock_t *lock;
 	struct cpu_entry *entry;
 	struct dgl_group_req *req;
-	color_t *colors, *pages;
 
 	TRACE_TASK(t, "New colored task\n");
-	local_irq_save(flags);
-
 	entry = (is_be(t)) ? local_entry : task_entry(t);
-	lock = task_lock(entry, t);
 
-	release_at(t, litmus_clock());
+	raw_spin_lock_irqsave(entry_lock(entry), flags);
 
 	req = kmalloc(sizeof(*req), GFP_ATOMIC);
-	dgl_group_req_init(&group_lock, req);
 	tsk_rt(t)->req = req;
-
-	tsk_rt(t)->max_exec_time = 0;
 	tsk_rt(t)->tot_exec_time = 0;
+	tsk_rt(t)->max_exec_time = 0;
+	tsk_rt(t)->ctrl_page->colors_updated = 1;
 
-	/* Fill request */
-	if (tsk_rt(t)->color_ctrl_page) {
-		colors = tsk_rt(t)->color_ctrl_page->colors;
-		pages = tsk_rt(t)->color_ctrl_page->pages;
-		for (i = 0; pages[i]; i++)
-			set_req(&group_lock, req, colors[i], pages[i]);
-	}
+	release_at(t, litmus_clock());
 
-	/* Join system */
-	raw_spin_lock(lock);
 	if (running) {
+		/* No need to lock with irqs disabled */
 		TRACE_TASK(t, "Already scheduled on %d\n", entry->server.cpu);
 		BUG_ON(entry->scheduled);
 		entry->scheduled = t;
 		tsk_rt(t)->scheduled_on = entry->server.cpu;
-	} else
-		requeue(task_dom(entry, t), t);
-	raw_spin_unlock(lock);
+	} else {
+		job_arrival(t);
+	}
+
+	raw_spin_unlock(entry_lock(entry));
 
-	/* Trigger necessary preemptions */
 	if (is_be(t))
 		check_for_fifo_preempt();
 	else
-		litmus_reschedule(entry->server.cpu);
+		litmus_reschedule_local();
 
 	local_irq_restore(flags);
 }
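
Note: color_task_new() no longer builds a DGL request itself; it only
allocates one and sets colors_updated, so the first job_arrival() fills it
in. The ad-hoc local_irq_save() plus task_lock() pairing is replaced by a
single raw_spin_lock_irqsave(entry_lock(entry), ...); the unlock is
deliberately split (raw_spin_unlock() now, local_irq_restore() at the end),
apparently so the preemption triggers still run with interrupts disabled.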
@@ -579,13 +553,13 @@ static void color_task_new(struct task_struct *t, int on_rq, int running)
 static void color_task_wake_up(struct task_struct *task)
 {
 	unsigned long flags;
-	struct cpu_entry* entry = task_entry(task);
-	raw_spinlock_t *lock = task_lock(entry, task);
+	struct cpu_entry* entry = local_entry;
+	int sched;
 	lt_t now = litmus_clock();
 
 	TRACE_TASK(task, "Wake up at %llu\n", now);
 
-	local_irq_save(flags);
+	raw_spin_lock_irqsave(entry_lock(entry), flags);
 
 	/* Abuse sporadic model */
 	if (is_tardy(task, now)) {
@@ -593,21 +567,20 @@ static void color_task_wake_up(struct task_struct *task)
 		sched_trace_task_release(task);
 	}
 
-	/* Re-enter system */
-	if (entry->scheduled != task) {
-		raw_spin_lock(lock);
-		requeue(task_dom(entry, task), task);
-		raw_spin_unlock(lock);
-	} else {
+	sched = (entry->scheduled == task);
+
+	if (!sched)
+		job_arrival(task);
+	else
 		TRACE_TASK(task, "Is already scheduled on %d!\n",
 			   entry->scheduled);
-	}
 
-	/* Trigger preemptions */
+	raw_spin_unlock(entry_lock(entry));
 	if (is_be(task))
 		check_for_fifo_preempt();
 	else
-		litmus_reschedule(entry->server.cpu);
+		litmus_reschedule_local();
+
 
 	local_irq_restore(flags);
 }
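
Note: wake-ups are now handled strictly on the local CPU: local_entry
replaces task_entry(task), the entry lock replaces the task-dependent lock,
and litmus_reschedule_local() replaces the cross-CPU litmus_reschedule() --
presumably relying on tasks waking on their own partition.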
@@ -619,7 +592,7 @@ static void color_task_block(struct task_struct *t)
 	BUG_ON(is_queued(t));
 }
 
-static void color_task_exit(struct task_struct * t)
+static void color_task_exit(struct task_struct *t)
 {
 	unsigned long flags;
 	struct cpu_entry *entry = task_entry(t);
@@ -812,8 +785,7 @@ static int __init init_color(void)
 			       cpu + num_online_cpus() + 1,
 			       cpu,
 			       &fifo_domain,
-			       fifo_preemption_needed,
-			       fifo_requeue, fifo_update, fifo_take);
+			       fifo_preemption_needed, fifo_update);
 
 
 	/* Create task struct for FIFO server */
@@ -833,8 +805,7 @@ static int __init init_color(void)
 		/* Create CPU server */
 		cpu_server = &entry->server;
 		init_rt_server(cpu_server, cpu + 1, cpu,
-			       &entry->rm_domain, rm_preemption_needed,
-			       rm_requeue, NULL, NULL);
+			       &entry->rm_domain, rm_preemption_needed, NULL);
 		cpu_server->running = 1;
 
 		init_enforcement_timer(&fifo_server->timer);