path: root/include/litmus/litmus.h
author     Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /include/litmus/litmus.h
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'include/litmus/litmus.h')
 include/litmus/litmus.h | 47 ++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 38 insertions(+), 9 deletions(-)
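
Note: the sched.np.flag and sched.np.preempt accesses introduced below assume
that the control page's scheduler word is a 32-bit union, so that the
non-preemptive flag and the delayed-preemption bit can be read and updated
together with a single cmpxchg() on ->sched.raw. The sketch below is inferred
from how the fields are used in this patch; the authoritative definition
lives in include/litmus/rt_param.h and is not part of this diff.

	/* Assumed layout (sketch, not part of this diff). */
	union np_flag {
		uint32_t raw;
		struct {
			/* is the task inside a non-preemptive section? */
			uint32_t flag:31;
			/* should the task yield when the section ends? */
			uint32_t preempt:1;
		} np;
	};

Packing both bit-fields into one word is what makes request_exit_np_atomic()
below race-free: one compare-and-swap observes the flag and sets the preempt
bit in the same atomic step.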
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 3df242bf272f..95d0805519de 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -138,7 +138,7 @@ static inline int is_kernel_np(struct task_struct *t)
 
 static inline int is_user_np(struct task_struct *t)
 {
-	return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->np_flag : 0;
+	return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->sched.np.flag : 0;
 }
 
 static inline void request_exit_np(struct task_struct *t)
@@ -148,17 +148,11 @@ static inline void request_exit_np(struct task_struct *t)
 	 * into the kernel at the end of a critical section. */
 	if (likely(tsk_rt(t)->ctrl_page)) {
 		TRACE_TASK(t, "setting delayed_preemption flag\n");
-		tsk_rt(t)->ctrl_page->delayed_preemption = 1;
+		tsk_rt(t)->ctrl_page->sched.np.preempt = 1;
 	}
 }
 
-static inline void clear_exit_np(struct task_struct *t)
-{
-	if (likely(tsk_rt(t)->ctrl_page))
-		tsk_rt(t)->ctrl_page->delayed_preemption = 0;
-}
-
 static inline void make_np(struct task_struct *t)
 {
 	tsk_rt(t)->kernel_np++;
@@ -172,6 +166,34 @@ static inline int take_np(struct task_struct *t)
 	return --tsk_rt(t)->kernel_np;
 }
 
+/* returns 0 if remote CPU needs an IPI to preempt, 1 if no IPI is required */
+static inline int request_exit_np_atomic(struct task_struct *t)
+{
+	union np_flag old, new;
+
+	if (tsk_rt(t)->ctrl_page) {
+		old.raw = tsk_rt(t)->ctrl_page->sched.raw;
+		if (old.np.flag == 0) {
+			/* no longer non-preemptive */
+			return 0;
+		} else if (old.np.preempt) {
+			/* already set, nothing for us to do */
+			return 1;
+		} else {
+			/* non-preemptive and flag not set */
+			new.raw = old.raw;
+			new.np.preempt = 1;
+			/* if we get old back, then we atomically set the flag */
+			return cmpxchg(&tsk_rt(t)->ctrl_page->sched.raw, old.raw, new.raw) == old.raw;
+			/* If we raced with a concurrent change, then so be
+			 * it. Deliver it by IPI. We don't want an unbounded
+			 * retry loop here since tasks might exploit that to
+			 * keep the kernel busy indefinitely. */
+		}
+	} else
+		return 0;
+}
+
 #else
 
 static inline int is_kernel_np(struct task_struct* t)
@@ -190,12 +212,19 @@ static inline void request_exit_np(struct task_struct *t)
 	BUG();
 }
 
-static inline void clear_exit_np(struct task_struct* t)
+static inline int request_exit_np_atomic(struct task_struct *t)
 {
+	return 0;
 }
 
 #endif
 
+static inline void clear_exit_np(struct task_struct *t)
+{
+	if (likely(tsk_rt(t)->ctrl_page))
+		tsk_rt(t)->ctrl_page->sched.np.preempt = 0;
+}
+
 static inline int is_np(struct task_struct *t)
 {
 #ifdef CONFIG_SCHED_DEBUG_TRACE
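
The return-value contract of request_exit_np_atomic() (0: the remote CPU
still needs an IPI, 1: the preempt bit is set and the task will yield on its
own) suggests a caller shaped roughly as follows. This is an illustrative
sketch only; preempt_if_needed() and preempt_on_cpu() are hypothetical
stand-ins for a scheduler plugin's actual preemption path, not code from
this patch.

	/* Hypothetical caller: try the cooperative path first, fall
	 * back to an IPI if the task is preemptable or the cmpxchg
	 * raced with a concurrent flag change. */
	static void preempt_if_needed(struct task_struct *t, int cpu)
	{
		if (is_np(t) && request_exit_np_atomic(t))
			/* t will re-enter the kernel when it leaves its
			 * non-preemptive section; no IPI required. */
			return;
		preempt_on_cpu(cpu);	/* stand-in for the IPI path */
	}

Note the deliberate absence of a retry loop around the cmpxchg: as the
comment in the patch says, a lost race simply falls back to an IPI, so user
tasks cannot keep the kernel spinning indefinitely.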