author	Christopher Kenna <cjk@cs.unc.edu>	2011-09-26 14:14:41 -0400
committer	Christopher Kenna <cjk@cs.unc.edu>	2011-09-26 14:14:41 -0400
commit	d9e964752ce668f8a9540e9511e0cb73cd21d480 (patch)
tree	4bfb910dee3d8b91db0d75f5ebec39212144ff3a /litmus
parent	d990b018b1eb12eb52713d553e5d61a3cdd1249b (diff)
checkpoint before refactor
Diffstat (limited to 'litmus')
-rw-r--r--	litmus/ce_domain.c	42
-rw-r--r--	litmus/event_group.c	2
-rw-r--r--	litmus/sched_mc.c	72
-rw-r--r--	litmus/sched_mc_ce.c	49
4 files changed, 101 insertions, 64 deletions
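
Taken together, the checkpoint wires the level-A cyclic-executive (CE) domain into the mixed-criticality plugin's preemption logic: the static check_for_preempt() is exported as mc_check_for_preempt(), the CE timer callback now runs under the domain lock and triggers that check, the per-domain locks become per-CPU raw spinlocks initialized in init_sched_mc_ce(), and the tick-based mc_ce_tick() path is dropped. A condensed sketch of the resulting timer path, assembled from the hunks below (every name comes from the patch itself; prototypes abbreviated, not a drop-in source file):

	/* On each CE timer expiry: recover the per-CPU domain from the
	 * embedded timer, advance the cyclic schedule under the domain
	 * lock with interrupts off, then let the MC plugin preempt. */
	static enum hrtimer_restart ce_timer_function(struct hrtimer *timer)
	{
		struct ce_dom_data *ce_data =
			container_of(timer, struct ce_dom_data, timer);
		domain_data_t *dom_data = ce_domain_for(ce_data->cpu);
		unsigned long flags;

		raw_spin_lock_irqsave(dom_data->domain.lock, flags);
		__mc_ce_timer_callback(timer);
		mc_check_for_preempt(&dom_data->domain);
		raw_spin_unlock_irqrestore(dom_data->domain.lock, flags);
		return HRTIMER_RESTART;
	}
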
diff --git a/litmus/ce_domain.c b/litmus/ce_domain.c
index d6f713fbf789..8797c05d9392 100644
--- a/litmus/ce_domain.c
+++ b/litmus/ce_domain.c
@@ -31,14 +31,11 @@ void ce_requeue(domain_t *dom, struct task_struct *ts)
 
 	/* When coming from job completion, the task will be asleep. */
 	if (asleep && just_finished < expected_job) {
-		/* this job is running behind, so don't put it to sleep */
-		set_rt_flags(ts, RT_F_RUNNING);
-		TRACE_TASK(ts, "appears behind, setting it to running again\n");
+		TRACE_TASK(ts, "appears behind\n");
 	} else if (asleep && expected_job < just_finished) {
-		printk(KERN_CRIT "job %d completed in expected job %d which "
+		TRACE_TASK(ts, "job %d completed in expected job %d which "
 				"seems too early\n", just_finished,
 				expected_job);
-		BUG();
 	}
 }
 
@@ -60,30 +57,53 @@ struct task_struct* ce_peek_and_take_ready(domain_t *dom)
 	const struct ce_dom_data *ce_data = dom->data;
 	const int exists = NULL != ce_data->should_schedule;
 	const int blocked = exists && !is_running(ce_data->should_schedule);
+	const int sleeping = exists && RT_F_SLEEP ==
+		get_rt_flags(ce_data->should_schedule);
 
-	/* Return the task we should schedule if it is not blocked. If it is
-	 * asleep, return it anyway, because the MC-scheduler might ask about
-	 * ghost jobs.
-	 */
-	if (exists && !blocked)
+	TRACE("exists: %d blocked: %d sleeping: %d\n", exists, blocked,
+			sleeping);
+
+	/* Return the task we should schedule if it is not blocked or sleeping. */
+	if (exists && !blocked && !sleeping)
 		ret = ce_data->should_schedule;
 	return ret;
 }
 
+#define TS "(%s/%d:%d:%s)"
+#define TA(t) (t) ? (is_ghost(t)) ? "ghost" : t->comm : "NULL", (t) ? t->pid : 1, \
+	(t) ? t->rt_param.job_params.job_no : 1, \
+	(t && get_task_domain(t)) ? get_task_domain(t)->name : ""
+
 int ce_higher_prio(struct task_struct *_a,
 		struct task_struct *_b)
 {
 	const struct task_struct *a = _a;
 	const domain_t *dom = get_task_domain(a);
 	const struct ce_dom_data *ce_data = dom->data;
+	TRACE("a: 0x%p b: 0x%p should_schedule: 0x%p\n", a, _b,
+			ce_data->should_schedule);
+	TRACE("a: " TS "\n", TA(a));
+	TRACE("b: " TS "\n", TA(_b));
 	return (a == ce_data->should_schedule);
 }
 
 void __mc_ce_timer_callback(struct hrtimer *timer);
+domain_data_t *ce_domain_for(int);
+void mc_check_for_preempt(domain_t*);
 static enum hrtimer_restart ce_timer_function(struct hrtimer *timer)
 {
-	/* need to lock? */
+	struct ce_dom_data *ce_data;
+	domain_data_t *dom_data;
+	unsigned long flags;
+
+	TRACE("timer callback\n");
+
+	ce_data = container_of(timer, struct ce_dom_data, timer);
+	dom_data = ce_domain_for(ce_data->cpu);
+	raw_spin_lock_irqsave(dom_data->domain.lock, flags);
 	__mc_ce_timer_callback(timer);
+	mc_check_for_preempt(&dom_data->domain);
+	raw_spin_unlock_irqrestore(dom_data->domain.lock, flags);
 	return HRTIMER_RESTART;
 }
 
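
A note on the new trace helpers: TS supplies a format fragment and TA(t) expands to the matching argument list, as in the TRACE("a: " TS "\n", TA(a)) calls above. Because TA(t) evaluates its argument several times, it should only be handed side-effect-free expressions.
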
diff --git a/litmus/event_group.c b/litmus/event_group.c
index 22c74a19d1d6..e285b5465761 100644
--- a/litmus/event_group.c
+++ b/litmus/event_group.c
@@ -122,7 +122,7 @@ void add_event(struct event_group *group, struct rt_event *e, lt_t fire)
 {
 	struct event_list *el;
 
-	TRACE("Adding event with prio %d @ %llu\n", event->prio, fire);
+	TRACE("Adding event with prio %d @ %llu\n", e->prio, fire);
 
 	raw_spin_lock(&group->queue_lock);
 	el = get_event_list(group, e, fire, 0);
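
This hunk is a one-identifier build fix: add_event()'s parameter is e, so the removed line's event presumably resolved to nothing in scope and the TRACE() call could not have compiled as written.
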
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index bce25bc8822e..3b98a93511ab 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -104,7 +104,7 @@ static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b)
  * Return true if the domain has a higher priority ready task. The curr
  * task must belong to the domain.
  */
-static noinline int mc_preempt_needed(domain_t *dom, struct task_struct* curr)
+noinline int mc_preempt_needed(domain_t *dom, struct task_struct* curr)
 {
 	struct task_struct *next = dom->peek_ready(dom);
 	if (!next || !curr) {
@@ -208,7 +208,7 @@ static void link_task_to_crit(crit_entry_t *ce,
 	}
 }
 
-static void check_for_preempt(domain_t*);
+void mc_check_for_preempt(domain_t*);
 /**
  * job_arrival() - Called when a task re-enters the system.
  * Caller must hold no locks.
@@ -222,7 +222,7 @@ static void job_arrival(struct task_struct *task)
 	if (can_requeue(task)) {
 		raw_spin_lock(dom->lock);
 		dom->requeue(dom, task);
-		check_for_preempt(dom);
+		mc_check_for_preempt(dom);
 		raw_spin_unlock(dom->lock);
 	} else {
 		/* If a global task is scheduled on one cpu, it CANNOT
@@ -374,18 +374,19 @@ static void update_crit_levels(cpu_entry_t *entry)
 }
 
 /**
- * check_for_preempt() - Causes a preemption if higher-priority tasks are ready.
+ * mc_check_for_preempt() - Causes a preemption if higher-priority tasks are ready.
  * Caller must hold domain lock.
  * Makes gigantic nasty assumption that there is 1 global criticality level,
  * and it is the last one in each list, so it doesn't call update_crit..
  */
-static void check_for_preempt(domain_t *dom)
+void mc_check_for_preempt(domain_t *dom)
 {
 	int preempted = 1;
 	cpu_entry_t *entry;
 	crit_entry_t *ce;
 
 	if (is_global(dom)) {
+		TRACE("domain: %s is global\n", dom->name);
 		/* Loop until we find a non-preemptable CPU */
 		while ((ce = lowest_prio_cpu(dom)) && preempted) {
 			entry = crit_cpu(ce);
@@ -398,6 +399,7 @@ static void check_for_preempt(domain_t *dom)
 			raw_spin_unlock(&entry->lock);
 		}
 	} else /* Partitioned */ {
+		TRACE("domain: %s is partitioned\n", dom->name);
 		ce = domain_data(dom)->crit_entry;
 		entry = crit_cpu(ce);
 		raw_spin_lock(&entry->lock);
@@ -476,6 +478,7 @@ static void job_completion(struct task_struct *task, int forced)
 
 	/* If it's not a ghost job, do ghost job conversion */
 	if (!is_ghost(task)) {
+		TRACE_TASK(task, "is not a ghost task\n");
 		tsk_mc_data(task)->mc_job.ghost_budget = budget_remaining(task);
 		tsk_mc_data(task)->mc_job.is_ghost = 1;
 	}
@@ -485,6 +488,7 @@ static void job_completion(struct task_struct *task, int forced)
 	 * conversion. Revert back to a normal task and complete the period.
 	 */
 	if (tsk_mc_data(task)->mc_job.ghost_budget == 0) {
+		TRACE_TASK(task, "has zero ghost budget\n");
 		tsk_mc_data(task)->mc_job.is_ghost = 0;
 		prepare_for_next_period(task);
 		if (is_released(task, litmus_clock()))
@@ -530,6 +534,26 @@ static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
+void __mc_ce_timer_callback(struct hrtimer *timer);
+domain_data_t *ce_domain_for(int);
+static enum hrtimer_restart ce_timer_function(struct hrtimer *timer)
+{
+	struct ce_dom_data *ce_data;
+	domain_data_t *dom_data;
+	unsigned long flags;
+
+	TRACE("timer callback\n");
+
+	ce_data = container_of(timer, struct ce_dom_data, timer);
+	dom_data = ce_domain_for(ce_data->cpu);
+	raw_spin_lock_irqsave(dom_data->domain.lock, flags);
+	__mc_ce_timer_callback(timer);
+	mc_check_for_preempt(&dom_data->domain);
+	raw_spin_unlock_irqrestore(dom_data->domain.lock, flags);
+	return HRTIMER_RESTART;
+}
+
+
 /**
  * mc_release_jobs() - Add heap of tasks to the system, check for preemptions.
  */
@@ -542,7 +566,7 @@ static void mc_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 	raw_spin_lock_irqsave(dom->lock, flags);
 	TRACE_TASK(first, "Jobs released");
 	__merge_ready(rt, tasks);
-	check_for_preempt(dom);
+	mc_check_for_preempt(dom);
 	raw_spin_unlock_irqrestore(dom->lock, flags);
 }
 
@@ -639,25 +663,37 @@ static void mc_task_exit(struct task_struct *task)
 	local_irq_restore(flags);
 }
 
+long __mc_ce_admit_task(struct task_struct*);
 /**
  * mc_admit_task() - Return true if the task is valid.
  * Assumes there are no partitioned levels after level B.
  */
 static long mc_admit_task(struct task_struct* task)
 {
+	const enum crit_level crit = tsk_mc_crit(task);
+	long ret;
 	if (!tsk_mc_data(task)) {
 		printk(KERN_WARNING "Tried to admit task with no criticality "
 				"level\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
-	if (tsk_mc_crit(task) < CRIT_LEVEL_C && get_partition(task) == NO_CPU) {
+	if (crit < CRIT_LEVEL_C && get_partition(task) == NO_CPU) {
 		printk(KERN_WARNING "Tried to admit partitioned task with no "
 				"partition\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
+	}
+	if (crit == CRIT_LEVEL_A) {
+		ret = __mc_ce_admit_task(task);
+		if (ret)
+			goto out;
 	}
 	printk(KERN_INFO "Admitted task with criticality level %d\n",
 			tsk_mc_crit(task));
-	return 0;
+	ret = 0;
+out:
+	return ret;
 }
 
 /**
@@ -746,6 +782,7 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 			raw_spin_unlock(dom->lock);
 			update_crit_levels(entry);
 			raw_spin_lock(&entry->lock);
+			continue;
 		}
 	}
 	raw_spin_unlock(dom->lock);
@@ -784,7 +821,11 @@ static long mc_activate_plugin(void)
  */
 void mc_release_at(struct task_struct *ts, lt_t start)
 {
-	ce_start(ts, start);
+	/* hack so that we can have CE timers start at the right time */
+	if (CRIT_LEVEL_A == tsk_mc_crit(ts))
+		ce_start(ts, start);
+	else
+		release_at(ts, start);
 }
 
 long mc_deactivate_plugin(void)
@@ -884,7 +925,6 @@ static int __init init_mc(void)
 	cpu_entry_t *entry;
 	rt_domain_t *rt;
 	domain_data_t *dom_data;
-	domain_t *dom;
 	raw_spinlock_t *a_dom, *b_dom, *c_dom; /* For lock debugger */
 
 	for_each_online_cpu(cpu) {
@@ -903,16 +943,10 @@ static int __init init_mc(void)
 
 		/* CRIT_LEVEL_A */
 		dom_data = ce_domain_for(cpu);
+		ce_domain_init(/* TODO */);
 		init_local_domain(entry, dom_data, CRIT_LEVEL_A);
 		a_dom = dom_data->domain.lock;
-		raw_spin_lock_init(a_dom);
 		dom_data->domain.name = "LVL-A";
-		/* Hook up the level A functions */
-		dom = &dom_data->domain;
-		dom->requeue = ce_requeue;
-		dom->peek_ready = dom->take_ready = ce_peek_and_take_ready;
-		dom->higher_prio = ce_higher_prio;
-		dom->preempt_needed = mc_preempt_needed;
 
 		/* CRIT_LEVEL_B */
 		dom_data = &per_cpu(_mc_crit_b, cpu);
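
Two loose ends for the follow-up refactor, both consistent with the "checkpoint before refactor" subject: ce_timer_function() now appears verbatim in both ce_domain.c and sched_mc.c, so one copy presumably goes away, and the new ce_domain_init(/* TODO */) call in init_mc() still has its argument list unwritten.
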
diff --git a/litmus/sched_mc_ce.c b/litmus/sched_mc_ce.c
index 8a0f9556640c..77acc67d05bd 100644
--- a/litmus/sched_mc_ce.c
+++ b/litmus/sched_mc_ce.c
@@ -23,6 +23,7 @@
 #include <litmus/sched_trace.h>
 #include <litmus/jobs.h>
 #include <litmus/sched_mc.h>
+#include <litmus/ce_domain.h>
 
 static struct sched_plugin mc_ce_plugin __cacheline_aligned_in_smp;
 
@@ -35,8 +36,8 @@ static struct proc_dir_entry *mc_ce_dir = NULL, *ce_file = NULL;
 
 
 DEFINE_PER_CPU(domain_data_t, mc_ce_doms);
-DEFINE_PER_CPU(rt_domain_t, mc_ce_rts);
 DEFINE_PER_CPU(struct ce_dom_data, _mc_ce_dom_data);
+DEFINE_PER_CPU(raw_spinlock_t, _dom_locks);
 
 /* Return the address of the domain_t for this CPU, used by the
  * mixed-criticality plugin. */
@@ -192,35 +193,11 @@ static void mc_ce_finish_switch(struct task_struct *prev)
 }
 
 /*
- * Called for every local timer interrupt.
- * Linux calls this with interrupts disabled, AFAIK.
- */
-static void mc_ce_tick(struct task_struct *ts)
-{
-	domain_data_t *dom_data = &per_cpu(mc_ce_doms, smp_processor_id());
-	domain_t *dom = &dom_data->domain;
-	struct ce_dom_data *ce_data = get_ce_data(dom_data);
-	struct task_struct *should_schedule;
-
-	if (is_realtime(ts) && CRIT_LEVEL_A == tsk_mc_crit(ts)) {
-		raw_spin_lock(dom->lock);
-		should_schedule = ce_data->should_schedule;
-		raw_spin_unlock(dom->lock);
-
-		if (!is_np(ts) && ts != should_schedule) {
-			litmus_reschedule_local();
-		} else if (is_user_np(ts)) {
-			request_exit_np(ts);
-		}
-	}
-}
-
-/*
  * Admit task called to see if this task is permitted to enter the system.
  * Here we look up the task's PID structure and save it in the proper slot on
  * the CPU this task will run on.
  */
-static long __mc_ce_admit_task(struct task_struct *ts)
+long __mc_ce_admit_task(struct task_struct *ts)
 {
 	domain_data_t *dom_data = &per_cpu(mc_ce_doms, get_partition(ts));
 	struct ce_dom_data *ce_data = get_ce_data(dom_data);
@@ -315,7 +292,7 @@ static void mc_ce_task_new(struct task_struct *ts, int on_rq, int running)
 	pid_entry = &ce_data->pid_entries[tsk_mc_data(ts)->mc_task.lvl_a_id];
 	/* initialize some task state */
 	set_rt_flags(ts, RT_F_RUNNING);
-	tsk_rt(ts)->job_params.job_no = 0;
+	tsk_rt(ts)->job_params.job_no = 1;
 
 	offset = get_cycle_offset(litmus_clock(), ce_data->cycle_time);
 	idx = mc_ce_schedule_at(dom, offset);
@@ -538,7 +515,7 @@ static void arm_all_timers(void)
 		if (0 == ce_data->num_pid_entries)
 			continue;
 		for (idx = 0; idx < ce_data->num_pid_entries; idx++) {
-			ce_data->pid_entries[idx].expected_job = -1;
+			ce_data->pid_entries[idx].expected_job = 0;
 		}
 		TRACE("arming timer for CPU %d\n", cpu);
 		hrtimer_start_on(cpu, &ce_data->timer_info, &ce_data->timer,
@@ -602,7 +579,7 @@ static void clear_pid_entries(void)
 			}
 			ce_data->pid_entries[entry].budget = 0;
 			ce_data->pid_entries[entry].acc_time = 0;
-			ce_data->pid_entries[entry].expected_job = -1;
+			ce_data->pid_entries[entry].expected_job = 0;
 		}
 	}
 }
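
The expected_job resets (-1 to 0) track the change in mc_ce_task_new() above, where job_no now starts at 1 instead of 0: with first jobs numbered from 1, 0 can serve as the "no job completed yet" value that -1 used to provide.
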
@@ -623,27 +600,33 @@ static struct sched_plugin mc_ce_plugin __cacheline_aligned_in_smp = {
 	.task_exit		= mc_ce_task_exit,
 	.schedule		= mc_ce_schedule,
 	.finish_switch		= mc_ce_finish_switch,
-	.tick			= mc_ce_tick,
 	.task_wake_up		= mc_ce_task_wake_up,
 	.task_block		= mc_ce_task_block,
 	.activate_plugin	= mc_ce_activate_plugin,
 	.deactivate_plugin	= mc_ce_deactivate_plugin,
 };
 
+int mc_preempt_needed(domain_t*, struct task_struct*);
 static int setup_proc(void);
 static int __init init_sched_mc_ce(void)
 {
 	struct ce_dom_data *ce_data;
+	raw_spinlock_t *ce_lock;
 	domain_data_t *dom_data;
 	domain_t *dom;
-	rt_domain_t *rt;
 	int cpu, err;
 
 	for_each_online_cpu(cpu) {
+		ce_lock = &per_cpu(_dom_locks, cpu);
+		raw_spin_lock_init(ce_lock);
 		dom_data = &per_cpu(mc_ce_doms, cpu);
 		dom = &dom_data->domain;
-		rt = &per_cpu(mc_ce_rts, cpu);
-		pd_domain_init(dom, rt, NULL, NULL, NULL, NULL, NULL);
+		/* initialize the domain. the ce_ functions are for the MC
+		 * plugin */
+		/* move into ce_domain_init */
+		domain_init(dom, ce_lock, ce_requeue, ce_peek_and_take_ready,
+				ce_peek_and_take_ready, mc_preempt_needed,
+				ce_higher_prio);
 		dom->data = &per_cpu(_mc_ce_dom_data, cpu);
 		ce_data = get_ce_data(dom_data);
 		hrtimer_init(&ce_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);