author     Bjoern B. Brandenburg <bbb@cs.unc.edu>    2010-11-10 12:10:49 -0500
committer  Bjoern B. Brandenburg <bbb@cs.unc.edu>    2010-11-11 17:57:44 -0500
commit     fb3df2ec261d8cd6bcb8206d9d985355214d7767 (patch)
tree       c41f8818ad4f1b699afbe292d131c1073b4d7c6e /litmus/sched_gsn_edf.c
parent     516b6601bb5f71035e8859735a25dea0da4a0211 (diff)
Implement proper remote preemption support
To date, Litmus has just hooked into the smp_send_reschedule() IPI
handler and marked tasks as having to reschedule to implement remote
preemptions. This was never particularly clean, but so far we got away
with it. However, changes in the underlying Linux, and peculiarities
of the ARM code (interrupts enabled before context switch) break this
naive approach. This patch introduces new state-machine based remote
preemption support. By examining the local state before calling
set_tsk_need_resched(), we avoid confusing the underlying Linux
scheduler. Further, this patch avoids sending unnecessary IPIs.
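For readers unfamiliar with the approach, the sketch below (plain C11, userspace) illustrates the general idea behind such a preemption state machine: each CPU's scheduling state is examined and advanced atomically, so a resched IPI goes out only on the first preemption request and redundant requests become no-ops. The state names, the cpu_state[] array, and the send_resched_ipi(), litmus_reschedule_sketch(), and task_picked_sketch() helpers are invented for illustration; they are not the actual definitions in litmus/preempt.h.

/* Hedged sketch of a per-CPU scheduler-state machine; not the real
 * LITMUS^RT implementation. Shows why examining the state before
 * forcing a reschedule avoids unnecessary IPIs. */
#include <stdatomic.h>
#include <stdio.h>

enum sched_state {
	TASK_SCHEDULED,   /* CPU is running its current task              */
	SHOULD_SCHEDULE,  /* a preemption was requested; resched pending  */
};

#define NR_CPUS 4
static _Atomic enum sched_state cpu_state[NR_CPUS];

/* Stand-in for smp_send_reschedule(). */
static void send_resched_ipi(int cpu)
{
	printf("IPI -> cpu %d\n", cpu);
}

/* Request a preemption on 'cpu'. Only the transition out of
 * TASK_SCHEDULED needs an IPI; if a reschedule is already pending,
 * the request is a no-op. */
static void litmus_reschedule_sketch(int cpu)
{
	enum sched_state expected = TASK_SCHEDULED;

	if (atomic_compare_exchange_strong(&cpu_state[cpu], &expected,
					   SHOULD_SCHEDULE))
		send_resched_ipi(cpu);
	/* else: a reschedule is already in flight; no second IPI */
}

/* Called once the scheduler has picked the next task
 * (cf. sched_state_task_picked() in the patch below). */
static void task_picked_sketch(int cpu)
{
	atomic_store(&cpu_state[cpu], TASK_SCHEDULED);
}

int main(void)
{
	litmus_reschedule_sketch(1);  /* first request: IPI is sent        */
	litmus_reschedule_sketch(1);  /* duplicate request: no second IPI  */
	task_picked_sketch(1);        /* schedule() completed on cpu 1     */
	litmus_reschedule_sketch(1);  /* new request: IPI again            */
	return 0;
}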
Diffstat (limited to 'litmus/sched_gsn_edf.c')
-rw-r--r--  litmus/sched_gsn_edf.c  |  19
1 file changed, 5 insertions, 14 deletions
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index e101768740ad..f0337cfd8631 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -18,6 +18,8 @@
 #include <litmus/edf_common.h>
 #include <litmus/sched_trace.h>
 
+#include <litmus/preempt.h>
+
 #include <litmus/bheap.h>
 
 #include <linux/module.h>
@@ -95,21 +97,12 @@ typedef struct {
 	int			cpu;
 	struct task_struct*	linked;		/* only RT tasks */
 	struct task_struct*	scheduled;	/* only RT tasks */
-	atomic_t		will_schedule;	/* prevent unneeded IPIs */
 	struct bheap_node*	hn;
 } cpu_entry_t;
 DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries);
 
 cpu_entry_t* gsnedf_cpus[NR_CPUS];
 
-#define set_will_schedule() \
-	(atomic_set(&__get_cpu_var(gsnedf_cpu_entries).will_schedule, 1))
-#define clear_will_schedule() \
-	(atomic_set(&__get_cpu_var(gsnedf_cpu_entries).will_schedule, 0))
-#define test_will_schedule(cpu) \
-	(atomic_read(&per_cpu(gsnedf_cpu_entries, cpu).will_schedule))
-
-
 /* the cpus queue themselves according to priority in here */
 static struct bheap_node gsnedf_heap_node[NR_CPUS];
 static struct bheap gsnedf_cpu_heap;
@@ -341,8 +334,7 @@ static void gsnedf_tick(struct task_struct* t)
 			/* np tasks will be preempted when they become
 			 * preemptable again
 			 */
-			set_tsk_need_resched(t);
-			set_will_schedule();
+			litmus_reschedule_local();
 			TRACE("gsnedf_scheduler_tick: "
 			      "%d is preemptable "
 			      " => FORCE_RESCHED\n", t->pid);
@@ -391,7 +383,6 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 #endif
 
 	raw_spin_lock(&gsnedf_lock);
-	clear_will_schedule();
 
 	/* sanity checking */
 	BUG_ON(entry->scheduled && entry->scheduled != prev);
@@ -473,6 +464,8 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 	if (exists)
 		next = prev;
 
+	sched_state_task_picked();
+
 	raw_spin_unlock(&gsnedf_lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
@@ -780,7 +773,6 @@ static long gsnedf_activate_plugin(void)
 	for_each_online_cpu(cpu) {
 		entry = &per_cpu(gsnedf_cpu_entries, cpu);
 		bheap_node_init(&entry->hn, entry);
-		atomic_set(&entry->will_schedule, 0);
 		entry->linked = NULL;
 		entry->scheduled = NULL;
 #ifdef CONFIG_RELEASE_MASTER
@@ -829,7 +821,6 @@ static int __init init_gsn_edf(void)
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		entry = &per_cpu(gsnedf_cpu_entries, cpu);
 		gsnedf_cpus[cpu] = entry;
-		atomic_set(&entry->will_schedule, 0);
 		entry->cpu = cpu;
 		entry->hn = &gsnedf_heap_node[cpu];
 		bheap_node_init(&entry->hn, entry);