author    | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2010-02-03 19:40:01 -0500
committer | Andrea Bastoni <bastoni@cs.unc.edu>    | 2010-05-29 17:26:09 -0400
commit    | fb95c290fe461de794c984bc4130741f04f9142d (patch)
tree      | e548d3eca0e4148ee764343db6a1d9e5f9e98d05 /include/litmus/litmus.h
parent    | b973c95c86e6710c913c01a67013605f68a3c2c3 (diff)
Re-implement non-preemptive section support.
Re-introduce NP sections in the configuration and in litmus.h. Remove the old
np_flag from rt_param.
If CONFIG_NP_SECTION is disabled, then all non-preemptive section checks are
constant expressions, which should be removed by dead code elimination during
optimization.
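For illustration only (not part of the original message), a minimal sketch of
what that means at a call site; maybe_request_exit() is a made-up helper, and
the stubs it relies on are the !CONFIG_NP_SECTION versions added below:

	/* With CONFIG_NP_SECTION disabled, is_np() is a constant 0, so the
	 * compiler can discard this branch (and the BUG() stub inside
	 * request_exit_np()) entirely. */
	static inline void maybe_request_exit(struct task_struct *t)
	{
		if (is_np(t))		/* constant 0 when !CONFIG_NP_SECTION */
			request_exit_np(t);
	}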
Instead of re-implementing sys_exit_np(), we simply repurpose sched_yield() to
call into the scheduler and trigger delayed preemptions.
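As a user-space sketch of the resulting protocol (illustration only, not part
of this commit): np_flag and delayed_preemption are the control page fields
referenced by this patch, while the way the control page is mapped
(get_ctrl_page() below) is an assumption made for the example.

	#include <sched.h>

	/* Assumed layout; only the two fields used by this patch are shown. */
	struct control_page {
		unsigned int np_flag;            /* task is inside a non-preemptive section */
		unsigned int delayed_preemption; /* set by the kernel in request_exit_np() */
	};

	extern struct control_page *get_ctrl_page(void); /* hypothetical mapping helper */

	static void run_np_section(void (*critical)(void))
	{
		struct control_page *cp = get_ctrl_page();

		cp->np_flag = 1;	/* enter the non-preemptive section */
		critical();
		cp->np_flag = 0;	/* leave the non-preemptive section */

		/* If the kernel flagged a delayed preemption while we were
		 * non-preemptive, sched_yield() now calls into the scheduler
		 * so that the preemption can finally take place. */
		if (cp->delayed_preemption)
			sched_yield();
	}

Compiler/memory-ordering barriers around the flag updates are omitted from the
sketch for brevity.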
Diffstat (limited to 'include/litmus/litmus.h')
-rw-r--r-- | include/litmus/litmus.h | 86
1 file changed, 80 insertions, 6 deletions
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index a03580bc707c..faaf83961dfa 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -27,11 +27,11 @@ extern atomic_t __log_seq_no;
 	do { if (cond) TRACE("BUG_ON(%s) at %s:%d " \
 		"called from %p current=%s/%d state=%d " \
 		"flags=%x partition=%d cpu=%d rtflags=%d"\
-		" job=%u knp=%d timeslice=%u\n", \
+		" job=%u timeslice=%u\n", \
 	#cond, __FILE__, __LINE__, __builtin_return_address(0), current->comm, \
 	current->pid, current->state, current->flags, \
 	get_partition(current), smp_processor_id(), get_rt_flags(current), \
-	current->rt_param.job_params.job_no, current->rt_param.kernel_np, \
+	current->rt_param.job_params.job_no, \
 	current->rt.time_slice\
 	); } while(0);
 
@@ -124,8 +124,6 @@ static inline lt_t litmus_clock(void)
 	(a)->rt_param.job_params.release,\
 	(b)->rt_param.job_params.release))
 
-#define make_np(t) do {t->rt_param.kernel_np++;} while(0);
-#define take_np(t) do {t->rt_param.kernel_np--;} while(0);
 
 #ifdef CONFIG_SRP
 void srp_ceiling_block(void);
@@ -135,12 +133,88 @@ void srp_ceiling_block(void);
 
 #define bheap2task(hn) ((struct task_struct*) hn->value)
 
-static inline int is_np(struct task_struct *t)
+#ifdef CONFIG_NP_SECTION
+
+static inline int is_kernel_np(struct task_struct *t)
 {
 	return tsk_rt(t)->kernel_np;
 }
 
-#define request_exit_np(t)
+static inline int is_user_np(struct task_struct *t)
+{
+	return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->np_flag : 0;
+}
+
+static inline void request_exit_np(struct task_struct *t)
+{
+	if (is_user_np(t)) {
+		/* Set the flag that tells user space to call
+		 * into the kernel at the end of a critical section. */
+		if (likely(tsk_rt(t)->ctrl_page)) {
+			TRACE_TASK(t, "setting delayed_preemption flag\n");
+			tsk_rt(t)->ctrl_page->delayed_preemption = 1;
+		}
+	}
+}
+
+static inline void clear_exit_np(struct task_struct *t)
+{
+	if (likely(tsk_rt(t)->ctrl_page))
+		tsk_rt(t)->ctrl_page->delayed_preemption = 0;
+}
+
+static inline void make_np(struct task_struct *t)
+{
+	tsk_rt(t)->kernel_np++;
+}
+
+/* Caller should check if preemption is necessary when
+ * the function return 0.
+ */
+static inline int take_np(struct task_struct *t)
+{
+	return --tsk_rt(t)->kernel_np;
+}
+
+#else
+
+static inline int is_kernel_np(struct task_struct* t)
+{
+	return 0;
+}
+
+static inline int is_user_np(struct task_struct* t)
+{
+	return 0;
+}
+
+static inline void request_exit_np(struct task_struct *t)
+{
+	/* request_exit_np() shouldn't be called if !CONFIG_NP_SECTION */
+	BUG();
+}
+
+static inline void clear_exit_np(struct task_struct* t)
+{
+}
+
+#endif
+
+static inline int is_np(struct task_struct *t)
+{
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	int kernel, user;
+	kernel = is_kernel_np(t);
+	user   = is_user_np(t);
+	if (kernel || user)
+		TRACE_TASK(t, " is non-preemptive: kernel=%d user=%d\n",
+
+			   kernel, user);
+	return kernel || user;
+#else
+	return unlikely(is_kernel_np(t) || is_user_np(t));
+#endif
+}
 
 static inline int is_present(struct task_struct* t)
 {
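
To show how the new helpers are meant to fit together on the kernel side, here
is a sketch (not code from this patch) of a plugin-style preemption check;
trigger_preemption() stands in for whatever reschedule mechanism a plugin
would actually use:

	static void check_for_preemption(struct task_struct *scheduled)
	{
		if (is_np(scheduled)) {
			/* The task is non-preemptive: do not preempt now, but
			 * set delayed_preemption so that user space calls
			 * sched_yield() when it leaves its critical section. */
			request_exit_np(scheduled);
		} else {
			trigger_preemption();	/* hypothetical reschedule call */
		}
	}

Presumably clear_exit_np() is then invoked once the task yields and the
delayed preemption has been carried out, so that stale flags do not linger.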