diff options
Diffstat (limited to 'include/litmus')
-rw-r--r-- | include/litmus/litmus.h | 6 |
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 28fb7b331cf3..31ac72eddef7 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -178,6 +178,7 @@ static inline int take_np(struct task_struct *t)
178 | static inline int request_exit_np_atomic(struct task_struct *t) | 178 | static inline int request_exit_np_atomic(struct task_struct *t) |
179 | { | 179 | { |
180 | union np_flag old, new; | 180 | union np_flag old, new; |
181 | int ok; | ||
181 | 182 | ||
182 | if (tsk_rt(t)->ctrl_page) { | 183 | if (tsk_rt(t)->ctrl_page) { |
183 | old.raw = tsk_rt(t)->ctrl_page->sched.raw; | 184 | old.raw = tsk_rt(t)->ctrl_page->sched.raw; |
@@ -186,17 +187,20 @@ static inline int request_exit_np_atomic(struct task_struct *t)
186 | return 0; | 187 | return 0; |
187 | } else if (old.np.preempt) { | 188 | } else if (old.np.preempt) { |
188 | /* already set, nothing for us to do */ | 189 | /* already set, nothing for us to do */ |
190 | TRACE_TASK(t, "not setting np.preempt flag again\n"); | ||
189 | return 1; | 191 | return 1; |
190 | } else { | 192 | } else { |
191 | /* non preemptive and flag not set */ | 193 | /* non preemptive and flag not set */ |
192 | new.raw = old.raw; | 194 | new.raw = old.raw; |
193 | new.np.preempt = 1; | 195 | new.np.preempt = 1; |
194 | /* if we get old back, then we atomically set the flag */ | 196 | /* if we get old back, then we atomically set the flag */ |
195 | return cmpxchg(&tsk_rt(t)->ctrl_page->sched.raw, old.raw, new.raw) == old.raw; | 197 | ok = cmpxchg(&tsk_rt(t)->ctrl_page->sched.raw, old.raw, new.raw) == old.raw; |
196 | /* If we raced with a concurrent change, then so be | 198 | /* If we raced with a concurrent change, then so be |
197 | * it. Deliver it by IPI. We don't want an unbounded | 199 | * it. Deliver it by IPI. We don't want an unbounded |
198 | * retry loop here since tasks might exploit that to | 200 | * retry loop here since tasks might exploit that to |
199 | * keep the kernel busy indefinitely. */ | 201 | * keep the kernel busy indefinitely. */ |
202 | TRACE_TASK(t, "request_exit_np => %d\n", ok); | ||
203 | return ok; | ||
200 | } | 204 | } |
201 | } else | 205 | } else |
202 | return 0; | 206 | return 0; |