Diffstat (limited to 'include/litmus')
 include/litmus/litmus.h   | 47 ++++++++++++++++++++++++++++++++++++++---------
 include/litmus/rt_param.h | 16 +++++++++-----
 2 files changed, 49 insertions(+), 14 deletions(-)
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index e7769ca36ec0..12af22266331 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -137,7 +137,7 @@ static inline int is_kernel_np(struct task_struct *t)
 
 static inline int is_user_np(struct task_struct *t)
 {
-        return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->np_flag : 0;
+        return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->sched.np.flag : 0;
 }
 
 static inline void request_exit_np(struct task_struct *t)
@@ -147,17 +147,11 @@ static inline void request_exit_np(struct task_struct *t)
                  * into the kernel at the end of a critical section. */
                 if (likely(tsk_rt(t)->ctrl_page)) {
                         TRACE_TASK(t, "setting delayed_preemption flag\n");
-                        tsk_rt(t)->ctrl_page->delayed_preemption = 1;
+                        tsk_rt(t)->ctrl_page->sched.np.preempt = 1;
                 }
         }
 }
 
-static inline void clear_exit_np(struct task_struct *t)
-{
-        if (likely(tsk_rt(t)->ctrl_page))
-                tsk_rt(t)->ctrl_page->delayed_preemption = 0;
-}
-
 static inline void make_np(struct task_struct *t)
 {
         tsk_rt(t)->kernel_np++;
@@ -171,6 +165,34 @@ static inline int take_np(struct task_struct *t)
         return --tsk_rt(t)->kernel_np;
 }
 
+/* returns 0 if remote CPU needs an IPI to preempt, 1 if no IPI is required */
+static inline int request_exit_np_atomic(struct task_struct *t)
+{
+        union np_flag old, new;
+
+        if (tsk_rt(t)->ctrl_page) {
+                old.raw = tsk_rt(t)->ctrl_page->sched.raw;
+                if (old.np.flag == 0) {
+                        /* no longer non-preemptive */
+                        return 0;
+                } else if (old.np.preempt) {
+                        /* already set, nothing for us to do */
+                        return 1;
+                } else {
+                        /* non-preemptive and flag not set */
+                        new.raw = old.raw;
+                        new.np.preempt = 1;
+                        /* if we get old back, then we atomically set the flag */
+                        return cmpxchg(&tsk_rt(t)->ctrl_page->sched.raw, old.raw, new.raw) == old.raw;
+                        /* If we raced with a concurrent change, then so be
+                         * it. Deliver it by IPI. We don't want an unbounded
+                         * retry loop here since tasks might exploit that to
+                         * keep the kernel busy indefinitely. */
+                }
+        } else
+                return 0;
+}
+
 #else
 
 static inline int is_kernel_np(struct task_struct* t)
@@ -189,12 +211,19 @@ static inline void request_exit_np(struct task_struct *t)
         BUG();
 }
 
-static inline void clear_exit_np(struct task_struct* t)
+static inline int request_exit_np_atomic(struct task_struct *t)
 {
+        return 0;
 }
 
 #endif
 
+static inline void clear_exit_np(struct task_struct *t)
+{
+        if (likely(tsk_rt(t)->ctrl_page))
+                tsk_rt(t)->ctrl_page->sched.np.preempt = 0;
+}
+
 static inline int is_np(struct task_struct *t)
 {
 #ifdef CONFIG_SCHED_DEBUG_TRACE
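
The core of the litmus.h side is the single-shot cmpxchg() in request_exit_np_atomic(): the kernel tries exactly once to set np.preempt and, if it loses a race, falls back to an IPI instead of retrying. Below is a minimal userspace model of that logic, assuming C11 atomics (stdatomic.h) in place of the kernel's cmpxchg(); request_exit_np_atomic_model(), sched_raw, and the main() driver are illustrative names, not part of this patch.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* mirrors the union added to rt_param.h below */
    union np_flag {
            uint32_t raw;
            struct {
                    uint32_t flag:31;   /* inside a non-preemptive section? */
                    uint32_t preempt:1; /* must call the scheduler on exit? */
            } np;
    };

    static _Atomic uint32_t sched_raw;  /* stands in for ctrl_page->sched.raw */

    /* returns 0 if an IPI would be needed, 1 if np.preempt is (now) set */
    static int request_exit_np_atomic_model(void)
    {
            union np_flag old, new;

            old.raw = atomic_load(&sched_raw);
            if (old.np.flag == 0)
                    return 0;           /* no longer non-preemptive: IPI path */
            if (old.np.preempt)
                    return 1;           /* already requested: nothing to do */
            new.raw = old.raw;
            new.np.preempt = 1;
            /* one attempt only: losing the race returns 0 (false), which
             * keeps this bounded; there is no retry loop to exploit */
            return atomic_compare_exchange_strong(&sched_raw, &old.raw, new.raw);
    }

    int main(void)
    {
            union np_flag s = { .raw = 0 };

            s.np.flag = 1;                   /* task enters an NP section */
            atomic_store(&sched_raw, s.raw);
            printf("%d\n", request_exit_np_atomic_model()); /* 1: flag set */
            printf("%d\n", request_exit_np_atomic_model()); /* 1: already set */
            atomic_store(&sched_raw, 0);     /* task left the NP section */
            printf("%d\n", request_exit_np_atomic_model()); /* 0: IPI needed */
            return 0;
    }

A caller that gets 0 back delivers the preemption by IPI (e.g. smp_send_reschedule()); a return of 1 means the task is obliged to call into the scheduler itself when it leaves its non-preemptive section, at which point clear_exit_np() resets np.preempt.
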
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 389be0775869..d6d799174160 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -42,6 +42,16 @@ struct rt_task {
         budget_policy_t budget_policy; /* ignored by pfair */
 };
 
+union np_flag {
+        uint32_t raw;
+        struct {
+                /* Is the task currently in a non-preemptive section? */
+                uint32_t flag:31;
+                /* Should the task call into the scheduler? */
+                uint32_t preempt:1;
+        } np;
+};
+
 /* The definition of the data that is shared between the kernel and real-time
  * tasks via a shared page (see litmus/ctrldev.c).
  *
@@ -57,11 +67,7 @@ struct rt_task {
  * determining preemption/migration overheads).
  */
 struct control_page {
-        /* Is the task currently in a non-preemptive section? */
-        int np_flag;
-        /* Should the task call into the kernel when it leaves
-         * its non-preemptive section? */
-        int delayed_preemption;
+        volatile union np_flag sched;
 
         /* to be extended */
 };
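
Packing both fields into a single 32-bit union is what makes the one-shot cmpxchg() above possible: flag and preempt are read and updated together through raw, so the kernel can never observe one without the other. For context, here is a hedged sketch of the userspace half of the protocol, assuming a library along the lines of liblitmus; enter_np(), exit_np(), the statically allocated page, and the barrier placement are all illustrative (a real task would map the page from the kernel, see litmus/ctrldev.c).

    #include <sched.h>
    #include <stdint.h>

    union np_flag {                 /* same layout as in rt_param.h */
            uint32_t raw;
            struct {
                    uint32_t flag:31;
                    uint32_t preempt:1;
            } np;
    };

    struct control_page {
            volatile union np_flag sched;
            /* to be extended */
    };

    static struct control_page page;        /* stand-in for the mapped page */
    static struct control_page *ctrl = &page;

    static void enter_np(void)
    {
            ctrl->sched.np.flag++;  /* 31 bits: treat as a nesting counter */
            __sync_synchronize();   /* flag must be visible before the CS */
    }

    static void exit_np(void)
    {
            __sync_synchronize();
            if (--ctrl->sched.np.flag == 0 && ctrl->sched.np.preempt) {
                    /* the kernel set np.preempt while we were non-preemptive;
                     * call in now so it can act (and clear_exit_np() can
                     * reset the bit on its side) */
                    sched_yield();
            }
    }

    int main(void)
    {
            enter_np();
            /* ... critical section: the kernel defers preemptions and only
             * marks np.preempt via request_exit_np{,_atomic}() ... */
            exit_np();
            return 0;
    }

Treating the 31-bit flag as a depth counter (an assumption of this sketch) lets non-preemptive sections nest: the task only becomes preemptable again, and only checks np.preempt, once the outermost section ends.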