author    Christopher Kenna <cjk@cs.unc.edu>  2011-10-14 03:11:19 -0400
committer Christopher Kenna <cjk@cs.unc.edu>  2011-10-14 03:11:19 -0400
commit    42ba03ccc49110d65a44da49a19354ae1a1351ac (patch)
tree      e5c73797197ecd232c047220fa1a3a718f19d42b /litmus/sched_mc.c
parent    1729a3cbaf22538d3a514dea33e4fee1b411e49d (diff)
revert the cpu mask changes
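
This backs out the CPU-mask machinery that reserved CONFIG_FTRACE_CPU for ftrace: the global `rt_mask` and its setup in `init_mc()` go away, the `for_each_rt_cpu()` iterators fall back to `for_each_online_cpu()`, and the special cases that kept real-time work off the traced CPU are removed from `link_task_to_cpu()`, `mc_task_new()`, and `mc_schedule()`. The definition of `for_each_rt_cpu()` itself is not part of this diff; judging from the removed `cpumask_andnot()` setup, it presumably expanded to a walk over `rt_mask`, roughly:

```c
/*
 * Hypothetical reconstruction of the reverted iterator; the actual
 * definition lives outside this diff.  init_mc() filled rt_mask with
 * "all online CPUs except CONFIG_FTRACE_CPU" via cpumask_andnot(),
 * so the iterator would reduce to iterating over that mask.
 */
struct cpumask rt_mask;

#if CONFIG_FTRACE_CPU != NO_CPU
#define for_each_rt_cpu(cpu) for_each_cpu((cpu), &rt_mask)
#else
#define for_each_rt_cpu(cpu) for_each_online_cpu(cpu)
#endif
```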
Diffstat (limited to 'litmus/sched_mc.c')
-rw-r--r--	litmus/sched_mc.c	45
1 files changed, 8 insertions, 37 deletions
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index f4037bb1dfe2..53c24ac2172c 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -60,8 +60,6 @@ DEFINE_PER_CPU(struct cpu_entry, cpus);
 static int interrupt_cpu;
 #endif
 
-struct cpumask rt_mask;
-
 #define domain_data(dom) (container_of(dom, struct domain_data, domain))
 #define is_global(dom) (domain_data(dom)->heap)
 #define is_global_task(t) (is_global(get_task_domain(t)))
@@ -352,7 +350,7 @@ static void fix_global_levels(void)
 	struct task_struct *t;
 
 	STRACE("Fixing global levels\n");
-	for_each_rt_cpu(c) {
+	for_each_online_cpu(c) {
 		e = &per_cpu(cpus, c);
 		raw_spin_lock(&e->redir_lock);
 		list_for_each_safe(pos, safe, &e->redir) {
@@ -379,7 +377,6 @@ static void link_task_to_cpu(struct cpu_entry *entry, struct task_struct *task)
 	TRACE_MC_TASK(task, "Linking to P%d\n", entry->cpu);
 	BUG_ON(task && tsk_rt(task)->linked_on != entry->cpu);
 	BUG_ON(task && is_ghost(task));
-	BUG_ON(CONFIG_FTRACE_CPU == entry->cpu);
 
 	if (task){
 		set_rt_flags(task, RT_F_RUNNING);
@@ -772,15 +769,11 @@ static void mc_task_new(struct task_struct *t, int on_rq, int running)
 	TRACE("New mixed criticality task %d\n", t->pid);
 
 	/* Assign domain */
-	if (level < CRIT_LEVEL_C) {
+	if (level < CRIT_LEVEL_C)
 		entry = &per_cpu(cpus, get_partition(t));
-		t->rt_param._domain = entry->crit_entries[level].domain;
-	} else {
+	else
 		entry = &per_cpu(cpus, task_cpu(t));
-		t->rt_param._domain =
-			per_cpu(cpus, 0).crit_entries[CRIT_LEVEL_C].domain;
-	}
-
+	t->rt_param._domain = entry->crit_entries[level].domain;
 
 	/* Setup job params */
 	release_at(t, litmus_clock());
@@ -788,13 +781,8 @@ static void mc_task_new(struct task_struct *t, int on_rq, int running)
 	tsk_mc_data(t)->mc_job.is_ghost = 0;
 	if (running) {
 		BUG_ON(entry->scheduled);
-		if (entry->cpu != CONFIG_FTRACE_CPU) {
-			entry->scheduled = t;
-			tsk_rt(t)->scheduled_on = entry->cpu;
-		} else {
-			tsk_rt(t)->scheduled_on = NO_CPU;
-			preempt_if_preemptable(NULL, entry->cpu);
-		}
+		entry->scheduled = t;
+		tsk_rt(t)->scheduled_on = entry->cpu;
 	} else {
 		t->rt_param.scheduled_on = NO_CPU;
 	}
@@ -904,10 +892,6 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 	int i, out_of_time, sleep, preempt, exists, blocks, global, lower;
 	struct task_struct *dtask = NULL, *ready_task = NULL, *next = NULL;
 
-	/* Give FTRACE a CPU to run on */
-	if (CONFIG_FTRACE_CPU == entry->cpu)
-		return NULL;
-
 	local_irq_save(flags);
 
 	/* Litmus gave up because it couldn't access the stack of the CPU
@@ -1091,7 +1075,7 @@ static long mc_activate_plugin(void)
 #endif
 #endif
 
-	for_each_rt_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		BUG_ON(NR_CPUS <= n);
 		dom = per_cpu(cpus, cpu).crit_entries[CRIT_LEVEL_A].domain;
 		dom_data = domain_data(dom);
@@ -1166,7 +1150,7 @@ static void init_global_domain(struct domain_data *dom_data, enum crit_level lev
 	dom_data->heap = heap;
 	bheap_init(heap);
 
-	for_each_rt_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		entry = &per_cpu(cpus, cpu);
 		node = &nodes[cpu];
 		ce = &entry->crit_entries[level];
@@ -1215,16 +1199,6 @@ static int __init init_mc(void)
 	raw_spinlock_t *a_dom_lock, *b_dom_lock, *c_dom_lock; /* For lock debugger */
 	struct ce_dom_data *ce_data;
 
-#if CONFIG_FTRACE_CPU != NO_CPU
-	cpumask_andnot(&rt_mask, cpu_online_mask, cpumask_of(CONFIG_FTRACE_CPU));
-	printk(KERN_INFO "LITMUS-MC: %lu %lu %lu\n", *cpumask_bits(&rt_mask),
-	       *cpumask_bits(cpu_online_mask),
-	       *cpumask_bits(cpumask_of(CONFIG_FTRACE_CPU)));
-	for_each_rt_cpu(cpu) {
-		printk(KERN_INFO "LITMUS-MC: Cpu: %d\n", cpu);
-	}
-#endif
-
 	for_each_online_cpu(cpu) {
 		entry = &per_cpu(cpus, cpu);
 
@@ -1232,9 +1206,6 @@ static int __init init_mc(void)
 		entry->cpu = cpu;
 		entry->scheduled = NULL;
 		entry->linked = NULL;
-
-		if (cpu == CONFIG_FTRACE_CPU)
-			continue;
 		raw_spin_lock_init(&entry->lock);
 
 #ifdef CONFIG_PLUGIN_MC_REDIRECT
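
With the revert applied, every online CPU again participates in mixed-criticality scheduling: `mc_schedule()` no longer returns NULL early on the ftrace CPU, `init_mc()` initializes per-CPU state (locks, domains) unconditionally, and a newly admitted running task is always recorded in `entry->scheduled` rather than being bounced off the reserved CPU via `preempt_if_preemptable()`.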