author | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2011-07-28 01:15:58 -0400 |
---|---|---|
committer | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2011-11-24 15:21:08 -0500 |
commit | 5bd89a34d89f252619d83fef3c9325e24311389e (patch) | |
tree | 0860be1b38ce94b09f8715e47a130f4da1827408 | |
parent | 81b8eb2ae452c241df9b3a1fb2116fa4d5adcb75 (diff) |
Litmus core: simplify np-section protocol (wip-2011.2-bbb)
Use a 32-bit word for all non-preemptive section flags.
Set the "please yield soon" flag atomically when
accessing it on remotely-scheduled tasks.
-rw-r--r-- | include/litmus/litmus.h | 47
-rw-r--r-- | include/litmus/rt_param.h | 16
-rw-r--r-- | litmus/litmus.c | 2
-rw-r--r-- | litmus/sched_plugin.c | 23 |
4 files changed, 57 insertions, 31 deletions
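
For context, a minimal userspace sketch of the protocol this commit simplifies: the task marks non-preemptive sections in the shared control page, and the kernel, instead of preempting immediately, sets a "please yield soon" bit that the task honors when it leaves the section. The `union np_flag` and `struct control_page` below mirror the definitions added by this patch; the `ctrl` pointer, the enter/exit helpers, and the use of `sched_yield()` are hypothetical stand-ins for whatever liblitmus actually provides.

```c
#include <stdint.h>
#include <sched.h>

union np_flag {
	uint32_t raw;
	struct {
		uint32_t flag:31;   /* task is inside a non-preemptive section */
		uint32_t preempt:1; /* kernel asks the task to yield soon */
	} np;
};

struct control_page {
	volatile union np_flag sched;
	/* to be extended */
};

/* Hypothetical: in a real task this would be the shared page mapped in by
 * litmus/ctrldev.c; a static dummy keeps the sketch self-contained. */
static struct control_page dummy_page;
static struct control_page *ctrl = &dummy_page;

static inline void np_enter(void)
{
	ctrl->sched.np.flag = 1;
	__sync_synchronize(); /* publish the flag before the critical section */
}

static inline void np_exit(void)
{
	__sync_synchronize();
	ctrl->sched.np.flag = 0;
	/* Honor a delayed preemption requested while we were non-preemptive.
	 * sched_yield() stands in for the real call into the scheduler. */
	if (ctrl->sched.np.preempt)
		sched_yield();
}

int main(void)
{
	np_enter();
	/* ... short critical section ... */
	np_exit();
	return 0;
}
```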
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index e7769ca36ec0..12af22266331 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -137,7 +137,7 @@ static inline int is_kernel_np(struct task_struct *t)
137 | 137 | ||
138 | static inline int is_user_np(struct task_struct *t) | 138 | static inline int is_user_np(struct task_struct *t) |
139 | { | 139 | { |
140 | return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->np_flag : 0; | 140 | return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->sched.np.flag : 0; |
141 | } | 141 | } |
142 | 142 | ||
143 | static inline void request_exit_np(struct task_struct *t) | 143 | static inline void request_exit_np(struct task_struct *t) |
@@ -147,17 +147,11 @@ static inline void request_exit_np(struct task_struct *t)
147 | * into the kernel at the end of a critical section. */ | 147 | * into the kernel at the end of a critical section. */ |
148 | if (likely(tsk_rt(t)->ctrl_page)) { | 148 | if (likely(tsk_rt(t)->ctrl_page)) { |
149 | TRACE_TASK(t, "setting delayed_preemption flag\n"); | 149 | TRACE_TASK(t, "setting delayed_preemption flag\n"); |
150 | tsk_rt(t)->ctrl_page->delayed_preemption = 1; | 150 | tsk_rt(t)->ctrl_page->sched.np.preempt = 1; |
151 | } | 151 | } |
152 | } | 152 | } |
153 | } | 153 | } |
154 | 154 | ||
155 | static inline void clear_exit_np(struct task_struct *t) | ||
156 | { | ||
157 | if (likely(tsk_rt(t)->ctrl_page)) | ||
158 | tsk_rt(t)->ctrl_page->delayed_preemption = 0; | ||
159 | } | ||
160 | |||
161 | static inline void make_np(struct task_struct *t) | 155 | static inline void make_np(struct task_struct *t) |
162 | { | 156 | { |
163 | tsk_rt(t)->kernel_np++; | 157 | tsk_rt(t)->kernel_np++; |
@@ -171,6 +165,34 @@ static inline int take_np(struct task_struct *t)
171 | return --tsk_rt(t)->kernel_np; | 165 | return --tsk_rt(t)->kernel_np; |
172 | } | 166 | } |
173 | 167 | ||
168 | /* returns 0 if remote CPU needs an IPI to preempt, 1 if no IPI is required */ | ||
169 | static inline int request_exit_np_atomic(struct task_struct *t) | ||
170 | { | ||
171 | union np_flag old, new; | ||
172 | |||
173 | if (tsk_rt(t)->ctrl_page) { | ||
174 | old.raw = tsk_rt(t)->ctrl_page->sched.raw; | ||
175 | if (old.np.flag == 0) { | ||
176 | /* no longer non-preemptive */ | ||
177 | return 0; | ||
178 | } else if (old.np.preempt) { | ||
179 | /* already set, nothing for us to do */ | ||
180 | return 1; | ||
181 | } else { | ||
182 | /* non preemptive and flag not set */ | ||
183 | new.raw = old.raw; | ||
184 | new.np.preempt = 1; | ||
185 | /* if we get old back, then we atomically set the flag */ | ||
186 | return cmpxchg(&tsk_rt(t)->ctrl_page->sched.raw, old.raw, new.raw) == old.raw; | ||
187 | /* If we raced with a concurrent change, then so be | ||
188 | * it. Deliver it by IPI. We don't want an unbounded | ||
189 | * retry loop here since tasks might exploit that to | ||
190 | * keep the kernel busy indefinitely. */ | ||
191 | } | ||
192 | } else | ||
193 | return 0; | ||
194 | } | ||
195 | |||
174 | #else | 196 | #else |
175 | 197 | ||
176 | static inline int is_kernel_np(struct task_struct* t) | 198 | static inline int is_kernel_np(struct task_struct* t) |
@@ -189,12 +211,19 @@ static inline void request_exit_np(struct task_struct *t)
189 | BUG(); | 211 | BUG(); |
190 | } | 212 | } |
191 | 213 | ||
192 | static inline void clear_exit_np(struct task_struct* t) | 214 | static inline int request_exit_np_atomic(struct task_struct *t) |
193 | { | 215 | { |
216 | return 0; | ||
194 | } | 217 | } |
195 | 218 | ||
196 | #endif | 219 | #endif |
197 | 220 | ||
221 | static inline void clear_exit_np(struct task_struct *t) | ||
222 | { | ||
223 | if (likely(tsk_rt(t)->ctrl_page)) | ||
224 | tsk_rt(t)->ctrl_page->sched.np.preempt = 0; | ||
225 | } | ||
226 | |||
198 | static inline int is_np(struct task_struct *t) | 227 | static inline int is_np(struct task_struct *t) |
199 | { | 228 | { |
200 | #ifdef CONFIG_SCHED_DEBUG_TRACE | 229 | #ifdef CONFIG_SCHED_DEBUG_TRACE |
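
The new `request_exit_np_atomic()` relies on a single compare-and-swap over the whole 32-bit np word, deliberately without a retry loop: if the word changes concurrently, the caller simply falls back to an IPI rather than letting userspace keep the kernel spinning. A userspace analogue of that pattern, using GCC's `__sync_val_compare_and_swap()` in place of the kernel's `cmpxchg()`, might look like this (the `union np_flag` is copied from the patch; everything else is illustrative):

```c
#include <stdint.h>

union np_flag {
	uint32_t raw;
	struct {
		uint32_t flag:31;
		uint32_t preempt:1;
	} np;
};

/* returns 0 if the caller still needs to send an IPI,
 * 1 if the "please yield soon" bit is (now) set */
int request_exit_np_atomic_sketch(volatile uint32_t *sched_raw)
{
	union np_flag old, new;

	old.raw = *sched_raw;
	if (old.np.flag == 0)
		return 0;	/* no longer non-preemptive: preempt via IPI */
	if (old.np.preempt)
		return 1;	/* already asked to yield: nothing to do */

	new.raw = old.raw;
	new.np.preempt = 1;
	/* One attempt only: if another update races in, the CAS fails and
	 * the preemption is delivered by IPI instead of retrying here. */
	return __sync_val_compare_and_swap(sched_raw, old.raw, new.raw) == old.raw;
}

int main(void)
{
	union np_flag word = { .raw = 0 };

	word.np.flag = 1; /* pretend the task entered an np-section */
	/* expected to set the preempt bit and return 1 */
	return request_exit_np_atomic_sketch(&word.raw) ? 0 : 1;
}
```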
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 389be0775869..d6d799174160 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -42,6 +42,16 @@ struct rt_task {
42 | budget_policy_t budget_policy; /* ignored by pfair */ | 42 | budget_policy_t budget_policy; /* ignored by pfair */ |
43 | }; | 43 | }; |
44 | 44 | ||
45 | union np_flag { | ||
46 | uint32_t raw; | ||
47 | struct { | ||
48 | /* Is the task currently in a non-preemptive section? */ | ||
49 | uint32_t flag:31; | ||
50 | /* Should the task call into the scheduler? */ | ||
51 | uint32_t preempt:1; | ||
52 | } np; | ||
53 | }; | ||
54 | |||
45 | /* The definition of the data that is shared between the kernel and real-time | 55 | /* The definition of the data that is shared between the kernel and real-time |
46 | * tasks via a shared page (see litmus/ctrldev.c). | 56 | * tasks via a shared page (see litmus/ctrldev.c). |
47 | * | 57 | * |
@@ -57,11 +67,7 @@ struct rt_task {
57 | * determining preemption/migration overheads). | 67 | * determining preemption/migration overheads). |
58 | */ | 68 | */ |
59 | struct control_page { | 69 | struct control_page { |
60 | /* Is the task currently in a non-preemptive section? */ | 70 | volatile union np_flag sched; |
61 | int np_flag; | ||
62 | /* Should the task call into the kernel when it leaves | ||
63 | * its non-preemptive section? */ | ||
64 | int delayed_preemption; | ||
65 | 71 | ||
66 | /* to be extended */ | 72 | /* to be extended */ |
67 | }; | 73 | }; |
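
The point of packing both fields into one `union np_flag` is that the scheduler can read and update `flag` and `preempt` together through a single 32-bit access, which is what makes the cmpxchg in `request_exit_np_atomic()` possible. A quick userspace sketch of that aliasing follows (same union as in the patch; the exact bit placement inside the word is compiler/ABI-defined, which is why all accesses go through the union rather than through hand-rolled masks):

```c
#include <stdint.h>
#include <stdio.h>

union np_flag {
	uint32_t raw;
	struct {
		uint32_t flag:31;   /* non-preemptive section marker */
		uint32_t preempt:1; /* delayed preemption requested */
	} np;
};

int main(void)
{
	union np_flag f = { .raw = 0 };

	f.np.flag = 1;    /* task enters a non-preemptive section */
	f.np.preempt = 1; /* kernel asks for a delayed preemption */

	/* both bits live in the one word that the cmpxchg operates on */
	printf("raw np word: 0x%08x\n", f.raw);
	return 0;
}
```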
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 73af6c3010d6..301390148d02 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -529,6 +529,8 @@ static int __init _init_litmus(void)
529 | */ | 529 | */ |
530 | printk("Starting LITMUS^RT kernel\n"); | 530 | printk("Starting LITMUS^RT kernel\n"); |
531 | 531 | ||
532 | BUILD_BUG_ON(sizeof(union np_flag) != sizeof(uint32_t)); | ||
533 | |||
532 | register_sched_plugin(&linux_sched_plugin); | 534 | register_sched_plugin(&linux_sched_plugin); |
533 | 535 | ||
534 | bheap_node_cache = KMEM_CACHE(bheap_node, SLAB_PANIC); | 536 | bheap_node_cache = KMEM_CACHE(bheap_node, SLAB_PANIC); |
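
The `BUILD_BUG_ON()` added above is a kernel-only construct; in a userspace sketch the equivalent compile-time guarantee, that the np word never outgrows the 32 bits the cmpxchg protocol relies on, can be expressed with C11's `_Static_assert`:

```c
#include <stdint.h>

union np_flag {
	uint32_t raw;
	struct {
		uint32_t flag:31;
		uint32_t preempt:1;
	} np;
};

/* fails the build, rather than at runtime, if the union ever grows */
_Static_assert(sizeof(union np_flag) == sizeof(uint32_t),
	       "np_flag must fit in a single 32-bit word");
```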
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
index d54886df1f57..00a1900d6457 100644
--- a/litmus/sched_plugin.c
+++ b/litmus/sched_plugin.c
@@ -35,29 +35,18 @@ void preempt_if_preemptable(struct task_struct* t, int cpu)
35 | /* local CPU case */ | 35 | /* local CPU case */ |
36 | /* check if we need to poke userspace */ | 36 | /* check if we need to poke userspace */ |
37 | if (is_user_np(t)) | 37 | if (is_user_np(t)) |
38 | /* yes, poke it */ | 38 | /* Yes, poke it. This doesn't have to be atomic since |
39 | * the task is definitely not executing. */ | ||
39 | request_exit_np(t); | 40 | request_exit_np(t); |
40 | else if (!is_kernel_np(t)) | 41 | else if (!is_kernel_np(t)) |
41 | /* only if we are allowed to preempt the | 42 | /* only if we are allowed to preempt the |
42 | * currently-executing task */ | 43 | * currently-executing task */ |
43 | reschedule = 1; | 44 | reschedule = 1; |
44 | } else { | 45 | } else { |
45 | /* remote CPU case */ | 46 | /* Remote CPU case. Only notify if it's not a kernel |
46 | if (is_user_np(t)) { | 47 | * NP section and if we didn't set the userspace |
47 | /* need to notify user space of delayed | 48 | * flag. */ |
48 | * preemption */ | 49 | reschedule = !(is_kernel_np(t) || request_exit_np_atomic(t)); |
49 | |||
50 | /* to avoid a race, set the flag, then test | ||
51 | * again */ | ||
52 | request_exit_np(t); | ||
53 | /* make sure it got written */ | ||
54 | mb(); | ||
55 | } | ||
56 | /* Only send an ipi if remote task might have raced our | ||
57 | * request, i.e., send an IPI to make sure in case it | ||
58 | * exited its critical section. | ||
59 | */ | ||
60 | reschedule = !is_np(t) && !is_kernel_np(t); | ||
61 | } | 50 | } |
62 | } | 51 | } |
63 | if (likely(reschedule)) | 52 | if (likely(reschedule)) |
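
Condensing the rewritten `preempt_if_preemptable()` decision, with the LITMUS^RT helpers stubbed out: on the local CPU the task cannot be executing concurrently, so the plain `request_exit_np()` store is enough; on a remote CPU the atomic variant is used, and an IPI is sent only when neither a kernel np-section nor a successfully set userspace preempt bit already covers the preemption. The sketch below is illustrative, not the actual function:

```c
#include <stdbool.h>

struct task_struct; /* opaque here */

/* stand-ins for the LITMUS^RT helpers shown in the diff above */
extern int  is_user_np(struct task_struct *t);
extern int  is_kernel_np(struct task_struct *t);
extern void request_exit_np(struct task_struct *t);        /* plain store */
extern int  request_exit_np_atomic(struct task_struct *t); /* 1 = bit set */

/* returns true if the caller still has to reschedule / send an IPI */
bool needs_reschedule(struct task_struct *t, bool on_local_cpu)
{
	if (on_local_cpu) {
		if (is_user_np(t)) {
			/* task is not executing right now, so a non-atomic
			 * store of the preempt bit is safe */
			request_exit_np(t);
			return false;
		}
		/* preempt only if no kernel np-section is in progress */
		return !is_kernel_np(t);
	}

	/* remote CPU: IPI only if there is no kernel np-section and the
	 * atomic attempt to set the userspace preempt bit did not stick */
	return !(is_kernel_np(t) || request_exit_np_atomic(t));
}
```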