Diffstat (limited to 'include/litmus/litmus.h')
-rw-r--r--  include/litmus/litmus.h | 47
1 file changed, 38 insertions, 9 deletions
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index e7769ca36ec0..12af22266331 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -137,7 +137,7 @@ static inline int is_kernel_np(struct task_struct *t)
 
 static inline int is_user_np(struct task_struct *t)
 {
-	return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->np_flag : 0;
+	return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->sched.np.flag : 0;
 }
 
 static inline void request_exit_np(struct task_struct *t)
@@ -147,17 +147,11 @@ static inline void request_exit_np(struct task_struct *t)
 	 * into the kernel at the end of a critical section. */
 	if (likely(tsk_rt(t)->ctrl_page)) {
 		TRACE_TASK(t, "setting delayed_preemption flag\n");
-		tsk_rt(t)->ctrl_page->delayed_preemption = 1;
+		tsk_rt(t)->ctrl_page->sched.np.preempt = 1;
 	}
 }
 
-static inline void clear_exit_np(struct task_struct *t)
-{
-	if (likely(tsk_rt(t)->ctrl_page))
-		tsk_rt(t)->ctrl_page->delayed_preemption = 0;
-}
-
 static inline void make_np(struct task_struct *t)
 {
 	tsk_rt(t)->kernel_np++;
@@ -171,6 +165,34 @@ static inline int take_np(struct task_struct *t)
 	return --tsk_rt(t)->kernel_np;
 }
 
+/* returns 0 if remote CPU needs an IPI to preempt, 1 if no IPI is required */
+static inline int request_exit_np_atomic(struct task_struct *t)
+{
+	union np_flag old, new;
+
+	if (tsk_rt(t)->ctrl_page) {
+		old.raw = tsk_rt(t)->ctrl_page->sched.raw;
+		if (old.np.flag == 0) {
+			/* no longer non-preemptive */
+			return 0;
+		} else if (old.np.preempt) {
+			/* already set, nothing for us to do */
+			return 1;
+		} else {
+			/* non preemptive and flag not set */
+			new.raw = old.raw;
+			new.np.preempt = 1;
+			/* if we get old back, then we atomically set the flag */
+			return cmpxchg(&tsk_rt(t)->ctrl_page->sched.raw, old.raw, new.raw) == old.raw;
+			/* If we raced with a concurrent change, then so be
+			 * it. Deliver it by IPI. We don't want an unbounded
+			 * retry loop here since tasks might exploit that to
+			 * keep the kernel busy indefinitely. */
+		}
+	} else
+		return 0;
+}
+
 #else
 
 static inline int is_kernel_np(struct task_struct* t)
@@ -189,12 +211,19 @@ static inline void request_exit_np(struct task_struct *t)
 	BUG();
 }
 
-static inline void clear_exit_np(struct task_struct* t)
+static inline int request_exist_np_atomic(struct task_struct *t)
 {
+	return 0;
 }
 
 #endif
 
+static inline void clear_exit_np(struct task_struct *t)
+{
+	if (likely(tsk_rt(t)->ctrl_page))
+		tsk_rt(t)->ctrl_page->sched.np.preempt = 0;
+}
+
 static inline int is_np(struct task_struct *t)
 {
 #ifdef CONFIG_SCHED_DEBUG_TRACE
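
The accesses above assume a union np_flag on the control page that overlays the np.flag and np.preempt bits with a single raw word, so that request_exit_np_atomic() can update both with one cmpxchg(). The definition is not part of this diff; a minimal sketch of the assumed layout (the exact header, likely include/litmus/rt_param.h, and the field widths are assumptions):

union np_flag {
	uint32_t raw;
	struct {
		/* non-zero while the task is inside a non-preemptive section */
		uint32_t flag:31;
		/* set by the kernel to ask the task to call into the
		 * scheduler when the non-preemptive section ends */
		uint32_t preempt:1;
	} np;
};

Because raw aliases both bitfields, cmpxchg(&ctrl_page->sched.raw, old.raw, new.raw) either installs the preempt bit against an unchanged flag or fails, which is the race-free update the new helper relies on.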
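
The comment on request_exit_np_atomic() spells out the contract for callers: 1 means the preemption request is already in place and no IPI is needed, 0 means the task is no longer non-preemptive or the update raced, so the remote CPU still has to be interrupted. A hypothetical caller sketch (not part of this diff; preempt_remote_task is an illustrative name):

/* Hypothetical: ask a task running non-preemptively on a remote CPU to
 * yield, falling back to an IPI when the flag cannot be set atomically. */
static void preempt_remote_task(struct task_struct *t, int cpu)
{
	if (!request_exit_np_atomic(t))
		/* preemptable again, or we lost the cmpxchg race:
		 * deliver the preemption via IPI */
		smp_send_reschedule(cpu);
}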