-rw-r--r--  include/litmus/litmus.h    | 40
-rw-r--r--  include/litmus/rt_param.h  | 16
-rw-r--r--  litmus/litmus.c            |  2
-rw-r--r--  litmus/sched_plugin.c      | 23
4 files changed, 56 insertions(+), 25 deletions(-)
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 5883a67c5750..727357973328 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -146,7 +146,7 @@ static inline int is_kernel_np(struct task_struct *t)
 
 static inline int is_user_np(struct task_struct *t)
 {
-	return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->np_flag : 0;
+	return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->sched.np.flag : 0;
 }
 
 static inline void request_exit_np(struct task_struct *t)
@@ -156,7 +156,7 @@ static inline void request_exit_np(struct task_struct *t)
 		 * into the kernel at the end of a critical section. */
 		if (likely(tsk_rt(t)->ctrl_page)) {
 			TRACE_TASK(t, "setting delayed_preemption flag\n");
-			tsk_rt(t)->ctrl_page->delayed_preemption = 1;
+			tsk_rt(t)->ctrl_page->sched.np.preempt = 1;
 		}
 	}
 }
@@ -164,7 +164,7 @@ static inline void request_exit_np(struct task_struct *t)
 static inline void clear_exit_np(struct task_struct *t)
 {
 	if (likely(tsk_rt(t)->ctrl_page))
-		tsk_rt(t)->ctrl_page->delayed_preemption = 0;
+		tsk_rt(t)->ctrl_page->sched.np.preempt = 0;
 }
 
 static inline void make_np(struct task_struct *t)
@@ -180,6 +180,34 @@ static inline int take_np(struct task_struct *t)
 	return --tsk_rt(t)->kernel_np;
 }
 
+/* returns 0 if remote CPU needs an IPI to preempt, 1 if no IPI is required */
+static inline int request_exit_np_atomic(struct task_struct *t)
+{
+	union np_flag old, new;
+
+	if (tsk_rt(t)->ctrl_page) {
+		old.raw = tsk_rt(t)->ctrl_page->sched.raw;
+		if (old.np.flag == 0) {
+			/* no longer non-preemptive */
+			return 0;
+		} else if (old.np.preempt) {
+			/* already set, nothing for us to do */
+			return 1;
+		} else {
+			/* non preemptive and flag not set */
+			new.raw = old.raw;
+			new.np.preempt = 1;
+			/* if we get old back, then we atomically set the flag */
+			return cmpxchg(&tsk_rt(t)->ctrl_page->sched.raw, old.raw, new.raw) == old.raw;
+			/* If we raced with a concurrent change, then so be
+			 * it. Deliver it by IPI. We don't want an unbounded
+			 * retry loop here since tasks might exploit that to
+			 * keep the kernel busy indefinitely. */
+		}
+	} else
+		return 0;
+}
+
 #else
 
 static inline int is_kernel_np(struct task_struct* t)
@@ -202,8 +230,14 @@ static inline void clear_exit_np(struct task_struct* t)
 {
 }
 
+static inline int request_exit_np_atomic(struct task_struct *t)
+{
+	return 0;
+}
+
 #endif
 
+
 static inline int is_np(struct task_struct *t)
 {
 #ifdef CONFIG_SCHED_DEBUG_TRACE
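
For orientation, the helpers above assume a matching user-space protocol: a task raises sched.np.flag for the duration of its critical section and, when it lowers it again, checks sched.np.preempt to see whether the kernel deferred a preemption that the task must now trigger itself. The sketch below is illustrative only; np_enter(), np_exit(), the explicit control_page pointer, and the use of sched_yield() as the call into the kernel are assumptions made for the example, not the actual liblitmus interface.

/* Hypothetical user-space sketch of the np_flag protocol (not the
 * liblitmus API); the kernel-side counterpart is request_exit_np()
 * and request_exit_np_atomic() above. */
#include <stdint.h>
#include <sched.h>

union np_flag {
	uint32_t raw;
	struct {
		uint32_t flag:31;	/* inside a non-preemptive section?    */
		uint32_t preempt:1;	/* kernel asks for a scheduler call    */
	} np;
};

struct control_page {
	volatile union np_flag sched;
	/* ... rest of the shared page omitted ... */
};

static inline void np_enter(struct control_page *cp)
{
	cp->sched.np.flag = 1;		/* announce the critical section       */
	__sync_synchronize();		/* make it visible before relying on it */
}

static inline void np_exit(struct control_page *cp)
{
	cp->sched.np.flag = 0;		/* leave the critical section          */
	__sync_synchronize();
	/* If the kernel set the preempt bit while we were non-preemptive,
	 * we owe it a trip into the scheduler now. */
	if (cp->sched.np.preempt)
		sched_yield();		/* stand-in for the real syscall       */
}
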
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 57496662afde..57a9cefcaf7e 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -45,6 +45,16 @@ struct rt_task {
 	budget_policy_t budget_policy; /* ignored by pfair */
 };
 
+union np_flag {
+	uint32_t raw;
+	struct {
+		/* Is the task currently in a non-preemptive section? */
+		uint32_t flag:31;
+		/* Should the task call into the scheduler? */
+		uint32_t preempt:1;
+	} np;
+};
+
 /* The definition of the data that is shared between the kernel and real-time
  * tasks via a shared page (see litmus/ctrldev.c).
  *
@@ -60,11 +70,7 @@ struct rt_task {
  * determining preemption/migration overheads).
  */
 struct control_page {
-	/* Is the task currently in a non-preemptive section? */
-	int np_flag;
-	/* Should the task call into the kernel when it leaves
-	 * its non-preemptive section? */
-	int delayed_preemption;
+	volatile union np_flag sched;
 
 	/* locking overhead tracing: time stamp prior to system call */
 	uint64_t ts_syscall_start;	/* Feather-Trace cycles */
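
The point of folding both fields into one 32-bit union is that the kernel can observe and update them as a single word, which is what makes the cmpxchg() in request_exit_np_atomic() possible; the BUILD_BUG_ON() added in litmus.c below pins down that size assumption. Here is a small stand-alone illustration using GCC's __sync builtin in user space in place of the kernel's cmpxchg(); it demonstrates the layout only and is not kernel code.

/* Stand-alone demonstration of the np_flag layout: one compare-and-swap
 * on .raw sets np.preempt only if np.flag still has the observed value.
 * A GCC __sync builtin serves as a user-space stand-in for cmpxchg(). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

union np_flag {
	uint32_t raw;
	struct {
		uint32_t flag:31;
		uint32_t preempt:1;
	} np;
};

int main(void)
{
	union np_flag sched = { .raw = 0 };
	union np_flag old, new;

	/* mirrors the BUILD_BUG_ON() added in litmus.c */
	assert(sizeof(union np_flag) == sizeof(uint32_t));

	sched.np.flag = 1;	/* task is inside a non-preemptive section */

	old.raw = sched.raw;
	new.raw = old.raw;
	new.np.preempt = 1;
	if (__sync_val_compare_and_swap(&sched.raw, old.raw, new.raw) == old.raw)
		printf("preempt bit set atomically: flag=%u preempt=%u\n",
		       (unsigned)sched.np.flag, (unsigned)sched.np.preempt);
	return 0;
}
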
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 3424ab3e62b9..b22f84a02010 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -524,6 +524,8 @@ static int __init _init_litmus(void)
 	 */
 	printk("Starting LITMUS^RT kernel\n");
 
+	BUILD_BUG_ON(sizeof(union np_flag) != sizeof(uint32_t));
+
 	register_sched_plugin(&linux_sched_plugin);
 
 	bheap_node_cache = KMEM_CACHE(bheap_node, SLAB_PANIC);
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
index d54886df1f57..00a1900d6457 100644
--- a/litmus/sched_plugin.c
+++ b/litmus/sched_plugin.c
@@ -35,29 +35,18 @@ void preempt_if_preemptable(struct task_struct* t, int cpu)
 			/* local CPU case */
 			/* check if we need to poke userspace */
 			if (is_user_np(t))
-				/* yes, poke it */
+				/* Yes, poke it. This doesn't have to be atomic since
+				 * the task is definitely not executing. */
 				request_exit_np(t);
 			else if (!is_kernel_np(t))
 				/* only if we are allowed to preempt the
 				 * currently-executing task */
 				reschedule = 1;
 		} else {
-			/* remote CPU case */
-			if (is_user_np(t)) {
-				/* need to notify user space of delayed
-				 * preemption */
-
-				/* to avoid a race, set the flag, then test
-				 * again */
-				request_exit_np(t);
-				/* make sure it got written */
-				mb();
-			}
-			/* Only send an ipi if remote task might have raced our
-			 * request, i.e., send an IPI to make sure in case it
-			 * exited its critical section.
-			 */
-			reschedule = !is_np(t) && !is_kernel_np(t);
+			/* Remote CPU case. Only notify if it's not a kernel
+			 * NP section and if we didn't set the userspace
+			 * flag. */
+			reschedule = !(is_kernel_np(t) || request_exit_np_atomic(t));
 		}
 	}
 	if (likely(reschedule))
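
The net effect of the sched_plugin.c change is that the remote-CPU path now makes one atomic attempt and sends an IPI only when that attempt cannot help. The following is a small stand-alone model of that decision written as a reading aid; needs_ipi() and the remote_state struct are invented names for the example, and the kernel code above remains the authoritative logic.

/* Toy model of the remote-CPU decision in preempt_if_preemptable():
 * reschedule = !(is_kernel_np(t) || request_exit_np_atomic(t)).
 * The names here are invented for illustration. */
#include <stdbool.h>
#include <stdio.h>

struct remote_state {
	bool kernel_np;		/* is_kernel_np(t)                          */
	bool user_np;		/* ctrl_page->sched.np.flag observed as set */
	bool preempt_set;	/* preempt bit already set, or cmpxchg won  */
};

static bool needs_ipi(struct remote_state s)
{
	if (s.kernel_np)
		return false;	/* kernel NP section: preemption deferred   */
	if (s.user_np && s.preempt_set)
		return false;	/* userspace will call in when it exits     */
	return true;		/* preemptible, or raced with the np exit   */
}

int main(void)
{
	const struct remote_state cases[] = {
		{ true,  false, false },	/* kernel NP          -> no IPI */
		{ false, true,  true  },	/* flag + preempt set -> no IPI */
		{ false, true,  false },	/* raced with np exit -> IPI    */
		{ false, false, false },	/* fully preemptible  -> IPI    */
	};
	for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("case %u: IPI %s\n", i, needs_ipi(cases[i]) ? "yes" : "no");
	return 0;
}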