author    Jonathan Herman <hermanjl@cs.unc.edu>    2011-10-14 14:55:09 -0400
committer Jonathan Herman <hermanjl@cs.unc.edu>    2011-10-14 14:55:09 -0400
commit    9f7381b674d6e20fefde16633864d9e0aa44fa80 (patch)
tree      6220bd79f9a4d40f08c18a186b04dfbfdd32fe92
parent    cfb2b53f2d1c16d8d5f7197af2b034f6bc2d2fd4 (diff)
parent    42ba03ccc49110d65a44da49a19354ae1a1351ac (diff)
Merge branch 'wip-mc' of ssh://cvs.cs.unc.edu/cvs/proj/litmus/repo/litmus2010 into wip-mc
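
This merge drops the CONFIG_FTRACE_CPU Kconfig option and the rt_mask /
for_each_rt_cpu() machinery that reserved one CPU for tracing; all per-CPU
iteration in the MC plugins reverts to plain for_each_online_cpu(). Short
reference notes on the removed pieces follow the relevant hunks below.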
-rw-r--r--    include/litmus/sched_mc.h     8
-rw-r--r--    litmus/Kconfig                9
-rw-r--r--    litmus/sched_mc.c            19
-rw-r--r--    litmus/sched_mc_ce.c         16
4 files changed, 13 insertions(+), 39 deletions(-)
diff --git a/include/litmus/sched_mc.h b/include/litmus/sched_mc.h
index 8cdec04c64df..740cc11be5d7 100644
--- a/include/litmus/sched_mc.h
+++ b/include/litmus/sched_mc.h
@@ -31,14 +31,6 @@ struct mc_data {
 	struct mc_job mc_job;
 };
 
-/* Leave a CPU free for tracing stuff */
-#if CONFIG_FTRACE_CPU != NO_CPU
-extern struct cpumask rt_mask;
-#define for_each_rt_cpu(cpu) for_each_cpu((cpu), &rt_mask)
-#else
-#define for_each_rt_cpu(cpu) for_each_online_cpu(cpu)
-#endif
-
 #define tsk_mc_data(t) (tsk_rt(t)->mc_data)
 #define tsk_mc_crit(t) (tsk_mc_data(t)->mc_task.crit)
 #define is_ghost(t) (tsk_mc_data(t)->mc_job.is_ghost)
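
For reference, a minimal sketch of the deleted iteration helper, reconstructed
from the removed lines above (not part of the patch). When CONFIG_FTRACE_CPU
was left at NO_CPU, the macro was already an alias for for_each_online_cpu(),
which is why the blanket substitution in the hunks below preserves behavior on
configurations with no reserved tracing CPU:

    /* Sketch of the removed helper: iterate real-time CPUs, optionally
     * skipping the one CPU reserved for ftrace work.
     */
    #if CONFIG_FTRACE_CPU != NO_CPU
    extern struct cpumask rt_mask;  /* online CPUs minus the tracing CPU */
    #define for_each_rt_cpu(cpu) for_each_cpu((cpu), &rt_mask)
    #else
    #define for_each_rt_cpu(cpu) for_each_online_cpu(cpu)
    #endif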
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 20610fdf9a54..d629a2843584 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -83,15 +83,8 @@ config PLUGIN_MC_REDIRECT
 	  Allow processors to send work involving global state to the
 	  release-master cpu in order to avoid excess overheads during
 	  partitioned decisions.
-
-config FTRACE_CPU
-	int "CPU for Tracing"
-	depends on PLUGIN_MC
-	default -1
-	help
-	  Keep one CPU free for the tasks which trace and flush
-	  scheduling and overhead data.
 endmenu
+
 endmenu
 
 menu "Real-Time Synchronization"
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index ddd5933da4f8..baded0d58cb9 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -61,8 +61,6 @@ DEFINE_PER_CPU(struct cpu_entry, cpus);
 static int interrupt_cpu;
 #endif
 
-struct cpumask rt_mask;
-
 #define domain_data(dom) (container_of(dom, struct domain_data, domain))
 #define is_global(dom) (domain_data(dom)->heap)
 #define is_global_task(t) (is_global(get_task_domain(t)))
@@ -353,7 +351,7 @@ static void fix_global_levels(void)
 	struct task_struct *t;
 
 	STRACE("Fixing global levels\n");
-	for_each_rt_cpu(c) {
+	for_each_online_cpu(c) {
 		e = &per_cpu(cpus, c);
 		raw_spin_lock(&e->redir_lock);
 		list_for_each_safe(pos, safe, &e->redir) {
@@ -380,7 +378,6 @@ static void link_task_to_cpu(struct cpu_entry *entry, struct task_struct *task)
 	TRACE_MC_TASK(task, "Linking to P%d\n", entry->cpu);
 	BUG_ON(task && tsk_rt(task)->linked_on != entry->cpu);
 	BUG_ON(task && is_ghost(task));
-	BUG_ON(CONFIG_FTRACE_CPU == entry->cpu);
 
 	if (task){
 		set_rt_flags(task, RT_F_RUNNING);
@@ -896,10 +893,6 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 	int i, out_of_time, sleep, preempt, exists, blocks, global, lower;
 	struct task_struct *dtask = NULL, *ready_task = NULL, *next = NULL;
 
-	/* Give FTRACE a CPU to run on */
-	if (CONFIG_FTRACE_CPU == entry->cpu)
-		return NULL;
-
 	local_irq_save(flags);
 
 	/* Litmus gave up because it couldn't access the stack of the CPU
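
On the guard removed above: in LITMUS^RT, a plugin schedule callback that
returns NULL schedules no real-time task, leaving the CPU to background Linux
work such as the trace-flushing tasks. A sketch of the deleted logic, under
that reading of the callback contract:

    /* Deleted guard in mc_schedule(): always report "no RT task" on
     * the CPU reserved for tracing, keeping it free of RT work.
     */
    if (CONFIG_FTRACE_CPU == entry->cpu)
    	return NULL;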
@@ -1083,7 +1076,7 @@ static long mc_activate_plugin(void)
 #endif
 #endif
 
-	for_each_rt_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		BUG_ON(NR_CPUS <= n);
 		dom = per_cpu(cpus, cpu).crit_entries[CRIT_LEVEL_A].domain;
 		dom_data = domain_data(dom);
@@ -1158,7 +1151,7 @@ static void init_global_domain(struct domain_data *dom_data, enum crit_level lev
 	dom_data->heap = heap;
 	bheap_init(heap);
 
-	for_each_rt_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		entry = &per_cpu(cpus, cpu);
 		node = &nodes[cpu];
 		ce = &entry->crit_entries[level];
@@ -1207,11 +1200,7 @@ static int __init init_mc(void)
 	raw_spinlock_t *a_dom_lock, *b_dom_lock, *c_dom_lock; /* For lock debugger */
 	struct ce_dom_data *ce_data;
 
-#if CONFIG_FTRACE_CPU != NO_CPU
-	cpumask_xor(&rt_mask, cpu_online_mask, cpumask_of(CONFIG_FTRACE_CPU));
-#endif
-
-	for_each_rt_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		entry = &per_cpu(cpus, cpu);
 
 		/* CPU */
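
A note on the mask setup deleted above: XOR with a singleton mask clears the
tracing CPU's bit only while that CPU is online; were it offline, XOR would
set the bit instead. A sketch of the removed line, with cpumask_andnot() named
as an illustrative stricter alternative that is not in this tree:

    /* Removed setup: rt_mask = online CPUs with the tracing CPU's bit
     * flipped off. Correct only while CONFIG_FTRACE_CPU is online.
     */
    cpumask_xor(&rt_mask, cpu_online_mask, cpumask_of(CONFIG_FTRACE_CPU));

    /* Hypothetical alternative: remove the CPU unconditionally rather
     * than toggling its bit.
     */
    cpumask_andnot(&rt_mask, cpu_online_mask, cpumask_of(CONFIG_FTRACE_CPU));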
diff --git a/litmus/sched_mc_ce.c b/litmus/sched_mc_ce.c
index d9be4d14e76c..af02dfdbb523 100644
--- a/litmus/sched_mc_ce.c
+++ b/litmus/sched_mc_ce.c
@@ -565,7 +565,7 @@ static int cancel_all_timers(void)
 
 	TRACE("cancel all timers\n");
 
-	for_each_rt_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		dom = get_domain_for(cpu);
 		ce_data = dom->data;
 		ce_data->should_schedule = NULL;
@@ -596,7 +596,7 @@ static void arm_all_timers(void)
 
 	TRACE("arm all timers\n");
 
-	for_each_rt_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		dom = get_domain_for(cpu);
 		ce_data = dom->data;
 		pid_table = get_pid_table(cpu);
@@ -655,7 +655,7 @@ long mc_ce_activate_plugin_common(void)
 	}
 #endif
 
-	for_each_rt_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		dom = get_domain_for(cpu);
 		ce_data = dom->data;
 		ce_data->scheduled = NULL;
@@ -677,7 +677,7 @@ static long mc_ce_activate_plugin(void)
 	int cpu, n = 0;
 	long ret;
 
-	for_each_rt_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		BUG_ON(NR_CPUS <= n);
 		our_domains[cpu] = &per_cpu(_mc_ce_doms, cpu);
 		n++;
@@ -695,7 +695,7 @@ static void clear_pid_entries(void)
 	struct ce_pid_table *pid_table = NULL;
 	int cpu, entry;
 
-	for_each_rt_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		pid_table = get_pid_table(cpu);
 		pid_table->num_pid_entries = 0;
 		pid_table->cycle_time = 0;
@@ -716,7 +716,7 @@ long mc_ce_deactivate_plugin_common(void)
 {
 	int cpu;
 	cancel_all_timers();
-	for_each_rt_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		per_cpu(domains, cpu) = NULL;
 	}
 	return 0;
@@ -746,7 +746,7 @@ static int __init init_sched_mc_ce(void)
 	struct domain *dom;
 	int cpu, err;
 
-	for_each_rt_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		per_cpu(domains, cpu) = NULL;
 		ce_lock = &per_cpu(_mc_ce_dom_locks, cpu);
 		raw_spin_lock_init(ce_lock);
@@ -860,7 +860,7 @@ static int proc_read_ce_file(char *page, char **start, off_t off, int count,
 		goto out;
 	}
 
-	for_each_rt_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		pid_table = get_pid_table(cpu);
 		for (t = 0; t < pid_table->num_pid_entries; ++t) {
 			err = write_pid_entry(page + n, count - n,