author:    Bjoern B. Brandenburg <bbb@cs.unc.edu>  2010-11-10 12:10:49 -0500
committer: Bjoern B. Brandenburg <bbb@cs.unc.edu>  2010-11-11 17:57:44 -0500
commit:    fb3df2ec261d8cd6bcb8206d9d985355214d7767 (patch)
tree:      c41f8818ad4f1b699afbe292d131c1073b4d7c6e /litmus/sched_plugin.c
parent:    516b6601bb5f71035e8859735a25dea0da4a0211 (diff)
Implement proper remote preemption support
To date, Litmus has just hooked into the smp_send_reschedule() IPI
handler and marked tasks as having to reschedule to implement remote
preemptions. This was never particularly clean, but so far we got away
with it. However, changes in the underlying Linux, and peculiarities
of the ARM code (interrupts enabled before context switch), break this
naive approach. This patch introduces new state-machine-based remote
preemption support. By examining the local state before calling
set_tsk_need_resched(), we avoid confusing the underlying Linux
scheduler. Further, this patch avoids sending unnecessary IPIs.
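
The state machine itself lives in the new litmus/preempt.h, which is not part
of this section (the diff below is limited to litmus/sched_plugin.c). The
following is a minimal sketch of the idea only, with hypothetical state names,
per-CPU variable, and helper name rather than the actual litmus/preempt.h
interface: a per-CPU scheduler state is advanced with an atomic
compare-and-swap, and an IPI is sent only when the target CPU has not already
observed a pending preemption request.

/* Illustrative sketch only: the state names, the per-CPU variable, and
 * sketch_litmus_reschedule() are hypothetical, not the LITMUS^RT API. */
#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>

enum sketch_sched_state {
	TASK_SCHEDULED,  /* no preemption pending on this CPU */
	SHOULD_SCHEDULE, /* a preemption request has been posted */
};

static DEFINE_PER_CPU(atomic_t, sketch_resched_state);

static void sketch_litmus_reschedule(int cpu)
{
	atomic_t *s = &per_cpu(sketch_resched_state, cpu);

	/* Post the request only if none is pending; the winner of this
	 * transition is responsible for notifying the target CPU. */
	if (atomic_cmpxchg(s, TASK_SCHEDULED, SHOULD_SCHEDULE)
	    == TASK_SCHEDULED) {
		if (cpu == smp_processor_id())
			/* local case: setting the flag suffices */
			set_tsk_need_resched(current);
		else
			/* remote case: an IPI is actually needed */
			smp_send_reschedule(cpu);
	}
	/* else: a reschedule is already in flight; no duplicate IPI */
}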
Diffstat (limited to 'litmus/sched_plugin.c')
-rw-r--r--  litmus/sched_plugin.c  45
1 file changed, 20 insertions(+), 25 deletions(-)
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
index ec04454a0cf9..d912a6494d20 100644
--- a/litmus/sched_plugin.c
+++ b/litmus/sched_plugin.c
@@ -10,7 +10,7 @@
 
 #include <litmus/litmus.h>
 #include <litmus/sched_plugin.h>
-
+#include <litmus/preempt.h>
 #include <litmus/jobs.h>
 
 /*
@@ -19,36 +19,30 @@
  * non-preemptive section aware and does not invoke the scheduler / send
  * IPIs if the to-be-preempted task is actually non-preemptive.
  */
-void preempt_if_preemptable(struct task_struct* t, int on_cpu)
+void preempt_if_preemptable(struct task_struct* t, int cpu)
 {
 	/* t is the real-time task executing on CPU on_cpu If t is NULL, then
 	 * on_cpu is currently scheduling background work.
 	 */
 
-	int send_ipi;
+	int reschedule = 0;
 
-	if (smp_processor_id() == on_cpu) {
-		/* local CPU case */
-		if (t) {
+	if (!t)
+		/* move non-real-time task out of the way */
+		reschedule = 1;
+	else {
+		if (smp_processor_id() == cpu) {
+			/* local CPU case */
 			/* check if we need to poke userspace */
 			if (is_user_np(t))
 				/* yes, poke it */
 				request_exit_np(t);
-			else
-				/* no, see if we are allowed to preempt the
+			else if (!is_kernel_np(t))
+				/* only if we are allowed to preempt the
 				 * currently-executing task */
-				if (!is_kernel_np(t))
-					set_tsk_need_resched(t);
-		} else
-			/* move non-real-time task out of the way */
-			set_tsk_need_resched(current);
-	} else {
-		/* remote CPU case */
-		if (!t)
-			/* currently schedules non-real-time work */
-			send_ipi = 1;
-		else {
-			/* currently schedules real-time work */
+			reschedule = 1;
+		} else {
+			/* remote CPU case */
 			if (is_user_np(t)) {
 				/* need to notify user space of delayed
 				 * preemption */
@@ -60,14 +54,14 @@ void preempt_if_preemptable(struct task_struct* t, int on_cpu)
 				mb();
 			}
 			/* Only send an ipi if remote task might have raced our
-			 * request, i.e., send an IPI to make sure if it exited
-			 * its critical section.
+			 * request, i.e., send an IPI to make sure in case it
+			 * exited its critical section.
 			 */
-			send_ipi = !is_np(t) && !is_kernel_np(t);
+			reschedule = !is_np(t) && !is_kernel_np(t);
 		}
-		if (likely(send_ipi))
-			smp_send_reschedule(on_cpu);
 	}
+	if (likely(reschedule))
+		litmus_reschedule(cpu);
 }
 
 
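Pieced together from the two hunks above, the post-patch
preempt_if_preemptable() reads as follows. This is a reconstruction for
readability, not part of the commit; the unchanged lines that fall between
the hunks are not shown in this diff and are marked as elided.

void preempt_if_preemptable(struct task_struct* t, int cpu)
{
	/* t is the real-time task executing on CPU on_cpu If t is NULL, then
	 * on_cpu is currently scheduling background work.
	 */

	int reschedule = 0;

	if (!t)
		/* move non-real-time task out of the way */
		reschedule = 1;
	else {
		if (smp_processor_id() == cpu) {
			/* local CPU case */
			/* check if we need to poke userspace */
			if (is_user_np(t))
				/* yes, poke it */
				request_exit_np(t);
			else if (!is_kernel_np(t))
				/* only if we are allowed to preempt the
				 * currently-executing task */
				reschedule = 1;
		} else {
			/* remote CPU case */
			if (is_user_np(t)) {
				/* need to notify user space of delayed
				 * preemption */
				/* ... unchanged lines elided from this diff ... */
				mb();
			}
			/* Only send an ipi if remote task might have raced our
			 * request, i.e., send an IPI to make sure in case it
			 * exited its critical section.
			 */
			reschedule = !is_np(t) && !is_kernel_np(t);
		}
	}
	if (likely(reschedule))
		litmus_reschedule(cpu);
}

The key structural change: both the local and the remote case now funnel into
a single reschedule flag, and the actual notification (resched flag vs. IPI)
is delegated to litmus_reschedule(), which consults the preemption state
machine instead of calling smp_send_reschedule() directly.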
@@ -81,6 +75,7 @@ static void litmus_dummy_finish_switch(struct task_struct * prev)
 
 static struct task_struct* litmus_dummy_schedule(struct task_struct * prev)
 {
+	sched_state_task_picked();
 	return NULL;
 }
 
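The one-line addition to litmus_dummy_schedule() hints at the new contract:
every plugin's schedule() callback must now report that it has picked a task,
so the state machine can retire the pending preemption request. Continuing
the illustrative (non-authoritative) sketch from above, such an acknowledgment
could look like this; the real sched_state_task_picked() in litmus/preempt.h
may differ.

/* Illustrative counterpart to the sketch above, called by every
 * plugin's schedule() once it has committed to a next task. */
static void sketch_sched_state_task_picked(void)
{
	atomic_t *s = this_cpu_ptr(&sketch_resched_state);

	/* Retire the request handled by this scheduling pass; a request
	 * posted after this point is observed as a fresh transition. */
	atomic_set(s, TASK_SCHEDULED);
}

The intent, per the commit message, is that a preemption request racing with
the scheduler is either observed by the current scheduling pass or forces
another one, rather than being silently lost.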