 include/litmus/sched_mc.h |  8
 litmus/Kconfig            |  9
 litmus/sched_mc.c         | 19
 litmus/sched_mc_ce.c      | 16
 4 files changed, 39 insertions(+), 13 deletions(-)
diff --git a/include/litmus/sched_mc.h b/include/litmus/sched_mc.h
index 740cc11be5d7..8cdec04c64df 100644
--- a/include/litmus/sched_mc.h
+++ b/include/litmus/sched_mc.h
@@ -31,6 +31,14 @@ struct mc_data {
 	struct mc_job mc_job;
 };
 
+/* Leave a CPU free for tracing stuff */
+#if CONFIG_FTRACE_CPU != NO_CPU
+extern struct cpumask rt_mask;
+#define for_each_rt_cpu(cpu) for_each_cpu((cpu), &rt_mask)
+#else
+#define for_each_rt_cpu(cpu) for_each_online_cpu(cpu)
+#endif
+
 #define tsk_mc_data(t) (tsk_rt(t)->mc_data)
 #define tsk_mc_crit(t) (tsk_mc_data(t)->mc_task.crit)
 #define is_ghost(t) (tsk_mc_data(t)->mc_job.is_ghost)
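
With a tracing CPU configured, for_each_rt_cpu() walks rt_mask instead of the online mask; otherwise it degenerates to for_each_online_cpu(). A minimal userspace sketch of the intended iteration (illustrative only: NR_CPUS, FTRACE_CPU, and plain bitmasks stand in for the kernel's cpumask API):

#include <stdio.h>

#define NR_CPUS    4
#define FTRACE_CPU 3   /* assumed CONFIG_FTRACE_CPU value */

int main(void)
{
	unsigned online  = 0xF;                          /* CPUs 0-3 online */
	unsigned rt_mask = online ^ (1u << FTRACE_CPU);  /* drop CPU 3 */
	int cpu;

	/* analogue of for_each_rt_cpu(cpu) */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (rt_mask & (1u << cpu))
			printf("real-time scheduling on CPU %d\n", cpu);
	return 0;                                        /* prints CPUs 0, 1, 2 */
}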
diff --git a/litmus/Kconfig b/litmus/Kconfig
index d629a2843584..20610fdf9a54 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -83,8 +83,15 @@ config PLUGIN_MC_REDIRECT
 	  Allow processors to send work involving global state to the
 	  release-master cpu in order to avoid excess overheads during
 	  partitioned decisions.
-endmenu
 
+config FTRACE_CPU
+	int "CPU for Tracing"
+	depends on PLUGIN_MC
+	default -1
+	help
+	  Keep one CPU free for the tasks which trace and flush
+	  scheduling and overhead data.
+endmenu
 endmenu
 
 menu "Real-Time Synchronization"
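
For reference, a hypothetical .config fragment using the new option; the default of -1 is intended to denote NO_CPU and leave every online CPU available to the plugin:

# Reserve CPU 3 for trace collection and flushing (illustrative value).
CONFIG_FTRACE_CPU=3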
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 53c24ac2172c..8828919dfcf2 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -60,6 +60,8 @@ DEFINE_PER_CPU(struct cpu_entry, cpus);
 static int interrupt_cpu;
 #endif
 
+struct cpumask rt_mask;
+
 #define domain_data(dom) (container_of(dom, struct domain_data, domain))
 #define is_global(dom) (domain_data(dom)->heap)
 #define is_global_task(t) (is_global(get_task_domain(t)))
@@ -350,7 +352,7 @@ static void fix_global_levels(void)
 	struct task_struct *t;
 
 	STRACE("Fixing global levels\n");
-	for_each_online_cpu(c) {
+	for_each_rt_cpu(c) {
 		e = &per_cpu(cpus, c);
 		raw_spin_lock(&e->redir_lock);
 		list_for_each_safe(pos, safe, &e->redir) {
@@ -377,6 +379,7 @@ static void link_task_to_cpu(struct cpu_entry *entry, struct task_struct *task)
 	TRACE_MC_TASK(task, "Linking to P%d\n", entry->cpu);
 	BUG_ON(task && tsk_rt(task)->linked_on != entry->cpu);
 	BUG_ON(task && is_ghost(task));
+	BUG_ON(CONFIG_FTRACE_CPU == entry->cpu);
 
 	if (task){
 		set_rt_flags(task, RT_F_RUNNING);
@@ -892,6 +895,10 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 	int i, out_of_time, sleep, preempt, exists, blocks, global, lower;
 	struct task_struct *dtask = NULL, *ready_task = NULL, *next = NULL;
 
+	/* Give FTRACE a CPU to run on */
+	if (CONFIG_FTRACE_CPU == entry->cpu)
+		return NULL;
+
 	local_irq_save(flags);
 
 	/* Litmus gave up because it couldn't access the stack of the CPU
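
This early return is what actually frees the reserved CPU: it runs before local_irq_save() and before any domain locks are taken, so mc_schedule() on that CPU immediately hands control back to Linux, which can run the trace and flush tasks there. A tiny sketch of the guard pattern (hypothetical callback, not the real plugin interface):

#include <stdio.h>

#define FTRACE_CPU 3   /* assumed CONFIG_FTRACE_CPU value */

/* Hypothetical stand-in for a per-CPU schedule callback: the
 * reserved CPU opts out before doing any real-time work. */
static const char *schedule_on(int cpu)
{
	if (cpu == FTRACE_CPU)
		return NULL;   /* no RT task picked; CPU left to Linux */
	return "rt-task";
}

int main(void)
{
	int cpu;
	for (cpu = 0; cpu < 4; cpu++) {
		const char *next = schedule_on(cpu);
		printf("CPU %d runs %s\n", cpu, next ? next : "(linux tasks)");
	}
	return 0;
}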
@@ -1075,7 +1082,7 @@ static long mc_activate_plugin(void)
 #endif
 #endif
 
-	for_each_online_cpu(cpu) {
+	for_each_rt_cpu(cpu) {
 		BUG_ON(NR_CPUS <= n);
 		dom = per_cpu(cpus, cpu).crit_entries[CRIT_LEVEL_A].domain;
 		dom_data = domain_data(dom);
@@ -1150,7 +1157,7 @@ static void init_global_domain(struct domain_data *dom_data, enum crit_level lev
 	dom_data->heap = heap;
 	bheap_init(heap);
 
-	for_each_online_cpu(cpu) {
+	for_each_rt_cpu(cpu) {
 		entry = &per_cpu(cpus, cpu);
 		node = &nodes[cpu];
 		ce = &entry->crit_entries[level];
@@ -1199,7 +1206,11 @@ static int __init init_mc(void)
 	raw_spinlock_t *a_dom_lock, *b_dom_lock, *c_dom_lock; /* For lock debugger */
 	struct ce_dom_data *ce_data;
 
-	for_each_online_cpu(cpu) {
+#if CONFIG_FTRACE_CPU != NO_CPU
+	cpumask_xor(&rt_mask, cpu_online_mask, cpumask_of(CONFIG_FTRACE_CPU));
+#endif
+
+	for_each_rt_cpu(cpu) {
 		entry = &per_cpu(cpus, cpu);
 
 		/* CPU */
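
One note on the rt_mask initialization above, with a userspace sketch of the bit operations (plain unsigned masks stand in for struct cpumask): cpumask_xor() clears the tracing CPU's bit only because that CPU is expected to be online at init time; if it were offline, XOR would set the bit and hand the reserved CPU back to the plugin. cpumask_andnot() clears the bit unconditionally and would arguably be the more defensive choice:

#include <stdio.h>

#define FTRACE_CPU 3   /* assumed CONFIG_FTRACE_CPU value */

int main(void)
{
	unsigned online = 0xF;               /* cpu_online_mask, CPUs 0-3 */
	unsigned trace  = 1u << FTRACE_CPU;  /* cpumask_of(FTRACE_CPU) */

	unsigned by_xor    = online ^ trace;   /* as in the patch */
	unsigned by_andnot = online & ~trace;  /* defensive alternative */

	/* both print 0x7 while CPU 3 is online */
	printf("xor: %#x  andnot: %#x\n", by_xor, by_andnot);
	return 0;
}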
diff --git a/litmus/sched_mc_ce.c b/litmus/sched_mc_ce.c
index af02dfdbb523..d9be4d14e76c 100644
--- a/litmus/sched_mc_ce.c
+++ b/litmus/sched_mc_ce.c
@@ -565,7 +565,7 @@ static int cancel_all_timers(void)
 
 	TRACE("cancel all timers\n");
 
-	for_each_online_cpu(cpu) {
+	for_each_rt_cpu(cpu) {
 		dom = get_domain_for(cpu);
 		ce_data = dom->data;
 		ce_data->should_schedule = NULL;
@@ -596,7 +596,7 @@ static void arm_all_timers(void)
 
 	TRACE("arm all timers\n");
 
-	for_each_online_cpu(cpu) {
+	for_each_rt_cpu(cpu) {
 		dom = get_domain_for(cpu);
 		ce_data = dom->data;
 		pid_table = get_pid_table(cpu);
@@ -655,7 +655,7 @@ long mc_ce_activate_plugin_common(void)
 	}
 #endif
 
-	for_each_online_cpu(cpu) {
+	for_each_rt_cpu(cpu) {
 		dom = get_domain_for(cpu);
 		ce_data = dom->data;
 		ce_data->scheduled = NULL;
@@ -677,7 +677,7 @@ static long mc_ce_activate_plugin(void)
 	int cpu, n = 0;
 	long ret;
 
-	for_each_online_cpu(cpu) {
+	for_each_rt_cpu(cpu) {
 		BUG_ON(NR_CPUS <= n);
 		our_domains[cpu] = &per_cpu(_mc_ce_doms, cpu);
 		n++;
@@ -695,7 +695,7 @@ static void clear_pid_entries(void)
 	struct ce_pid_table *pid_table = NULL;
 	int cpu, entry;
 
-	for_each_online_cpu(cpu) {
+	for_each_rt_cpu(cpu) {
 		pid_table = get_pid_table(cpu);
 		pid_table->num_pid_entries = 0;
 		pid_table->cycle_time = 0;
@@ -716,7 +716,7 @@ long mc_ce_deactivate_plugin_common(void)
 {
 	int cpu;
 	cancel_all_timers();
-	for_each_online_cpu(cpu) {
+	for_each_rt_cpu(cpu) {
 		per_cpu(domains, cpu) = NULL;
 	}
 	return 0;
@@ -746,7 +746,7 @@ static int __init init_sched_mc_ce(void)
 	struct domain *dom;
 	int cpu, err;
 
-	for_each_online_cpu(cpu) {
+	for_each_rt_cpu(cpu) {
 		per_cpu(domains, cpu) = NULL;
 		ce_lock = &per_cpu(_mc_ce_dom_locks, cpu);
 		raw_spin_lock_init(ce_lock);
@@ -860,7 +860,7 @@ static int proc_read_ce_file(char *page, char **start, off_t off, int count,
 		goto out;
 	}
 
-	for_each_online_cpu(cpu) {
+	for_each_rt_cpu(cpu) {
 		pid_table = get_pid_table(cpu);
 		for (t = 0; t < pid_table->num_pid_entries; ++t) {
 			err = write_pid_entry(page + n, count - n,