author    Jonathan Herman <hermanjl@cs.unc.edu>    2011-09-22 19:28:49 -0400
committer Jonathan Herman <hermanjl@cs.unc.edu>    2011-09-22 19:28:49 -0400
commit    cacc44020cbd35ff421b62f047608ebbf09fa88d (patch)
tree      90cd7c77131198e36b26c8bf859a336571114c09
parent    06051baff7db5e0c1b80d7b2a873b022191cdcec (diff)
Checkpoint commit for work redistribution
-rw-r--r--   litmus/Kconfig    | 60
-rw-r--r--   litmus/sched_mc.c | 86
2 files changed, 121 insertions(+), 25 deletions(-)
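
This checkpoint introduces the redirection machinery: each cpu_entry_t gains a redir list and redir_lock, update_crit_levels() pushes work that touches global state onto those lists, and the designated interrupt CPU drains them in fix_global_levels() from its scheduling path. As a rough orientation before the diff, here is a minimal user-space sketch of that producer/consumer pattern, assuming nothing beyond POSIX threads; POSIX mutexes stand in for raw_spinlock_t, a hand-rolled singly-linked list for struct list_head, and every name is illustrative rather than LITMUS^RT API.

/*
 * Minimal sketch of the work-redistribution pattern added by this
 * patch: producers enqueue deferred "job arrivals" on per-CPU redirect
 * lists, and a single designated interrupt CPU later drains every
 * list.  The driver is single-threaded for brevity.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NCPUS 4

struct work_item {
	int task_id;
	struct work_item *next;
};

static struct cpu_entry {
	pthread_mutex_t redir_lock;	/* protects the redirect list */
	struct work_item *redir;	/* deferred job arrivals */
} cpus[NCPUS];

/* Producer side: defer a global-state operation instead of running it. */
static void redirect_work(int cpu, int task_id)
{
	struct work_item *w = malloc(sizeof(*w));

	w->task_id = task_id;
	pthread_mutex_lock(&cpus[cpu].redir_lock);
	w->next = cpus[cpu].redir;
	cpus[cpu].redir = w;
	pthread_mutex_unlock(&cpus[cpu].redir_lock);
}

/* Consumer side, run only on the interrupt CPU: drain every list,
 * mirroring the shape of fix_global_levels() below. */
static void drain_all_cpus(void)
{
	int c;

	for (c = 0; c < NCPUS; c++) {
		pthread_mutex_lock(&cpus[c].redir_lock);
		while (cpus[c].redir) {
			struct work_item *w = cpus[c].redir;

			cpus[c].redir = w->next;
			printf("job_arrival(task %d) redirected from cpu %d\n",
			       w->task_id, c);
			free(w);
		}
		pthread_mutex_unlock(&cpus[c].redir_lock);
	}
}

int main(void)
{
	int c;

	for (c = 0; c < NCPUS; c++)
		pthread_mutex_init(&cpus[c].redir_lock, NULL);
	redirect_work(1, 42);
	redirect_work(3, 7);
	drain_all_cpus();	/* would run on the interrupt CPU */
	return 0;
}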
diff --git a/litmus/Kconfig b/litmus/Kconfig
index b8f6a9159eb2..d629a2843584 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -23,14 +23,42 @@ config PLUGIN_PFAIR
 
 	  If unsure, say Yes.
 
+config MERGE_TIMERS
+	bool "Timer-merging Support"
+	depends on HIGH_RES_TIMERS
+	default y
+	help
+	  Include support for merging timers.
+
+config MERGE_TIMERS_WINDOW
+	int "Timer-merging Window (in nanoseconds)"
+	depends on MERGE_TIMERS
+	default 1000
+	help
+	  Window within which separate timers may be merged.
+
+config RELEASE_MASTER
+	bool "Release-master Support"
+	depends on ARCH_HAS_SEND_PULL_TIMERS
+	default n
+	help
+	  In GSN-EDF, allow one processor to act as a dedicated interrupt
+	  processor that services all timer interrupts, but that does not schedule
+	  real-time tasks. See RTSS'09 paper for details
+	  (http://www.cs.unc.edu/~anderson/papers.html).
+
+menu "Mixed Criticality"
+
 config PLUGIN_MC
 	bool "Mixed Criticality Scheduler"
 	depends on X86 && SYSFS
 	default y
 	help
-	  Included the mixed criticality scheduler.
+	  Include the mixed criticality scheduler. This plugin depends
+	  on the global release-master processor for its _REDIRECT and
+	  _RELEASE_MASTER options.
 
 	  If unsure, say Yes.
 
 config PLUGIN_MC_LEVEL_A_MAX_TASKS
 	int "Maximum level A tasks"
@@ -38,18 +66,24 @@ config PLUGIN_MC_LEVEL_A_MAX_TASKS
 	range 1 128
 	default 32
 	help
 	  The maximum number of level A tasks allowed (per-cpu) in level A.
 
-config RELEASE_MASTER
-	bool "Release-master Support"
-	depends on ARCH_HAS_SEND_PULL_TIMERS
-	default n
+config PLUGIN_MC_RELEASE_MASTER
+	bool "Release-master support for MC"
+	depends on PLUGIN_MC && RELEASE_MASTER
+	default y
 	help
-	  Allow one processor to act as a dedicated interrupt processor
-	  that services all timer interrupts, but that does not schedule
-	  real-time tasks. See RTSS'09 paper for details
-	  (http://www.cs.unc.edu/~anderson/papers.html).
-	  Currently only supported by GSN-EDF.
+	  Send all timer interrupts to the system-wide release-master CPU.
+
+config PLUGIN_MC_REDIRECT
+	bool "Redirect Work to Release-master"
+	depends on PLUGIN_MC && RELEASE_MASTER
+	default y
+	help
+	  Allow processors to send work involving global state to the
+	  release-master cpu in order to avoid excess overheads during
+	  partitioned decisions.
+endmenu
 
 endmenu
 
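
The new options form a small dependency chain: PLUGIN_MC_RELEASE_MASTER and PLUGIN_MC_REDIRECT both require PLUGIN_MC plus the promoted top-level RELEASE_MASTER, which itself needs ARCH_HAS_SEND_PULL_TIMERS, while MERGE_TIMERS needs HIGH_RES_TIMERS. With the full redirection path enabled, the resulting .config fragment would look roughly as follows (illustrative; note that sched_mc.c below also tests CONFIG_PLUGIN_MC_REDIRECT_ALL, which no Kconfig entry in this patch declares):

CONFIG_HIGH_RES_TIMERS=y
CONFIG_MERGE_TIMERS=y
CONFIG_MERGE_TIMERS_WINDOW=1000
CONFIG_RELEASE_MASTER=y
CONFIG_PLUGIN_MC=y
CONFIG_PLUGIN_MC_LEVEL_A_MAX_TASKS=32
CONFIG_PLUGIN_MC_RELEASE_MASTER=y
CONFIG_PLUGIN_MC_REDIRECT=y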
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 0ec03b3949d2..fa2cd7ee606b 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -40,6 +40,7 @@ typedef struct {
 	int usable;
 	struct hrtimer timer;
 	struct bheap_node* node;
+	atomic_t dirty;
 } crit_entry_t;
 
 /**
@@ -56,6 +57,10 @@ typedef struct {
 	struct task_struct* linked;
 	raw_spinlock_t lock;
 	crit_entry_t crit_entries[NUM_CRIT_LEVELS];
+#ifdef CONFIG_PLUGIN_MC_REDIRECT
+	struct list_head redir;
+	raw_spinlock_t redir_lock;
+#endif
 } cpu_entry_t;
 
 /**
@@ -71,6 +76,9 @@ typedef struct {
 } domain_data_t;
 
 static cpu_entry_t* cpus[NR_CPUS];
+#ifdef CONFIG_RELEASE_MASTER
+static int interrupt_cpu;
+#endif
 
 #define domain_data(dom) (container_of(dom, domain_data_t, domain))
 #define is_global(dom) (domain_data(dom)->heap)
@@ -279,8 +287,7 @@ static void link_task_to_cpu(cpu_entry_t *entry, struct task_struct *task)
  * @dom Domain from which to draw higher priority task
  * @ce CPU criticality level to preempt
  *
- * Caller must hold the lock for @dom and @ce's CPU lock. Returns 1 if
- * a physically preemption occurred.
+ * Caller must hold the lock for @dom and @ce's CPU lock.
  */
 static void preempt(domain_t *dom, crit_entry_t *ce)
 {
@@ -302,10 +309,34 @@ static void preempt(domain_t *dom, crit_entry_t *ce)
 	}
 }
 
+#ifdef CONFIG_PLUGIN_MC_REDIRECT
+/**
+ * fix_global_levels() - Execute redirected job arrivals on this cpu.
+ */
+static void fix_global_levels(void)
+{
+	int c;
+	cpu_entry_t *e;
+	struct list_head *pos, *safe;
+	struct task_struct *t;
+
+	for_each_online_cpu(c) {
+		e = cpus[c];
+		raw_spin_lock(&e->redir_lock);
+		list_for_each_safe(pos, safe, &e->redir) {
+			t = list_entry(pos, struct task_struct, rt_param.list);
+			list_del(pos);
+			job_arrival(t);
+		}
+		raw_spin_unlock(&e->redir_lock);
+	}
+}
+#endif
+
 /**
  * update_crit_levels() - Update criticality entries for the new cpu state.
  * This should be called after a new task has been linked to @entry.
- * Assumes the caller holds @entry->lock, but this method will release it.
+ * The caller must hold the @entry->lock, but this method will release it.
  */
 static void update_crit_levels(cpu_entry_t *entry)
 {
@@ -314,24 +345,36 @@ static void update_crit_levels(cpu_entry_t *entry)
314 struct task_struct *tasks[NUM_CRIT_LEVELS]; 345 struct task_struct *tasks[NUM_CRIT_LEVELS];
315 enum crit_level level = entry_level(entry); 346 enum crit_level level = entry_level(entry);
316 347
317 /* Remove tasks from entries */ 348 /* Remove lower priority tasks from the entry */
318 for (i = level + 1; i < NUM_CRIT_LEVELS; i++) { 349 for (i = level + 1; i < NUM_CRIT_LEVELS; i++) {
319 ce = &entry->crit_entries[i]; 350 ce = &entry->crit_entries[i];
320 tasks[i] = ce->linked; 351 tasks[i] = ce->linked;
321 ce->usable = 0; 352 ce->usable = 0;
322 if (ce->linked) { 353 if (ce->linked)
323 link_task_to_crit(ce, NULL); 354 link_task_to_crit(ce, NULL);
324 }
325 } 355 }
326 356 /* Need to unlock so we can access domains */
327 raw_spin_unlock(&entry->lock); 357 raw_spin_unlock(&entry->lock);
328 358
329 /* Put tasks back into system */ 359 /* Re-admit tasks to the system */
330 for (i = level + 1; i < NUM_CRIT_LEVELS; i++) { 360 for (i = level + 1; i < NUM_CRIT_LEVELS; i++) {
331 ce = &entry->crit_entries[i]; 361 ce = &entry->crit_entries[i];
332 if (tasks[i]) { 362 TRACE("Checking %s\n", ce->domain->name);
363 if (!tasks[i]) continue;
364#ifdef CONFIG_PLUGIN_MC_REDIRECT
365#ifndef CONFIG_PLUGIN_MC_REDIRECT_ALL
366 if (!is_global_task(tasks[i]))
333 job_arrival(tasks[i]); 367 job_arrival(tasks[i]);
368 else
369#endif
370 {
371 raw_spin_lock(&entry->redir_lock);
372 list_add(&tsk_rt(tasks[i])->list, &entry->redir);
373 raw_spin_unlock(&entry->redir_lock);
334 } 374 }
375#else
376 job_arrival(tasks[i]);
377#endif
335 } 378 }
336} 379}
337 380
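The nested preprocessor conditionals in the re-admission loop above rely on the brace block doubling as the else body when only CONFIG_PLUGIN_MC_REDIRECT is set, and as a bare statement block when CONFIG_PLUGIN_MC_REDIRECT_ALL sends every task to the redirect list. The stand-alone sketch below reproduces just that structure with stub functions (is_global_task, job_arrival, and redirect here are illustrative stubs, not the kernel code); compile with -DCONFIG_PLUGIN_MC_REDIRECT and/or -DCONFIG_PLUGIN_MC_REDIRECT_ALL to see each variant.

#include <stdio.h>

/* Stubs standing in for the scheduler's helpers. */
static int is_global_task(int t) { return t % 2 == 0; }
static void job_arrival(int t)	 { printf("local arrival: task %d\n", t); }
static void redirect(int t)	 { printf("redirected:    task %d\n", t); }

/* Same #ifdef structure as the re-admission loop in update_crit_levels(). */
static void readmit(int task)
{
#ifdef CONFIG_PLUGIN_MC_REDIRECT
#ifndef CONFIG_PLUGIN_MC_REDIRECT_ALL
	if (!is_global_task(task))
		job_arrival(task);
	else
#endif
	{
		redirect(task);
	}
#else
	job_arrival(task);
#endif
}

int main(void)
{
	int t;

	for (t = 1; t <= 4; t++)
		readmit(t);
	return 0;
}
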
@@ -464,8 +507,8 @@ static void job_completion(struct task_struct *task, int forced)
 static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer)
 {
 	unsigned long flags;
-	crit_entry_t *ce = container_of(timer, crit_entry_t, timer);;
 	struct task_struct *tmp = NULL;
+	crit_entry_t *ce = container_of(timer, crit_entry_t, timer);;
 
 	local_irq_save(flags);
 	TRACE_CRIT_ENTRY(ce, "Ghost exhausted firing");
@@ -656,6 +699,10 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 			prev->state, signal_pending(prev), global);
 	}
 
+#ifdef CONFIG_PLUGIN_MC_REDIRECT
+	if (smp_processor_id() == interrupt_cpu)
+		fix_global_levels();
+#endif
 	/* If a task blocks we have no choice but to reschedule */
 	if (blocks)
 		remove_from_all(entry->scheduled);
@@ -695,13 +742,12 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 				raw_spin_unlock(dom->lock);
 				update_crit_levels(entry);
 				raw_spin_lock(&entry->lock);
-				goto picked;
+				continue;
 			}
 		}
 		raw_spin_unlock(dom->lock);
 	}
 
- picked:
 	/* Schedule next task */
 	next = entry->linked;
 	entry->scheduled = next;
@@ -718,6 +764,16 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 	return next;
 }
 
+static long mc_activate_plugin(void)
+{
+#ifdef CONFIG_RELEASE_MASTER
+	interrupt_cpu = atomic_read(&release_master_cpu);
+	if (interrupt_cpu == NO_CPU)
+		interrupt_cpu = 0;
+#endif
+	return 0;
+}
+
 /* **************************************************************************
  * Initialization
  * ************************************************************************** */
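mc_activate_plugin() resolves the interrupt CPU once, at plugin activation: it reads the system-wide release_master_cpu and falls back to CPU 0 when no release master was configured (NO_CPU). A user-space sketch of just that selection logic, modeling release_master_cpu as a C11 atomic_int and NO_CPU as -1 (both stand-ins, not the kernel definitions):

#include <stdatomic.h>
#include <stdio.h>

#define NO_CPU (-1)

/* Configured at run time in LITMUS^RT; modeled as a plain atomic here. */
static atomic_int release_master_cpu = NO_CPU;

static int pick_interrupt_cpu(void)
{
	int cpu = atomic_load(&release_master_cpu);

	/* No release master configured: default to CPU 0. */
	if (cpu == NO_CPU)
		cpu = 0;
	return cpu;
}

int main(void)
{
	printf("interrupt_cpu = %d\n", pick_interrupt_cpu());	/* 0 */
	atomic_store(&release_master_cpu, 2);
	printf("interrupt_cpu = %d\n", pick_interrupt_cpu());	/* 2 */
	return 0;
}
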
@@ -731,6 +787,7 @@ static struct sched_plugin mc_plugin __cacheline_aligned_in_smp = {
 	.task_wake_up = mc_task_wake_up,
 	.task_block = mc_task_block,
 	.admit_task = mc_admit_task,
+	.activate_plugin = mc_activate_plugin,
 };
 
 /* Initialize values here so that they are allocated with the module
@@ -758,6 +815,7 @@ static void init_crit_entry(crit_entry_t *ce, enum crit_level level,
 	ce->node = node;
 	ce->domain = &dom_data->domain;
 	ce->usable = 1;
+	atomic_set(&ce->dirty, 1);
 	hrtimer_init(&ce->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	ce->timer.function = mc_ghost_exhausted;
 }
@@ -816,6 +874,10 @@ static int __init init_mc(void)
 		entry->scheduled = NULL;
 		entry->linked = NULL;
 		raw_spin_lock_init(&entry->lock);
+#ifdef CONFIG_PLUGIN_MC_REDIRECT
+		raw_spin_lock_init(&entry->redir_lock);
+		INIT_LIST_HEAD(&entry->redir);
+#endif
 
 		/* CRIT_LEVEL_A */
 		dom_data = &per_cpu(_mc_crit_a, cpu);