author	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2010-02-03 19:40:01 -0500
committer	Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-29 17:26:09 -0400
commit	fb95c290fe461de794c984bc4130741f04f9142d (patch)
tree	e548d3eca0e4148ee764343db6a1d9e5f9e98d05 /include/litmus
parent	b973c95c86e6710c913c01a67013605f68a3c2c3 (diff)
Re-implement non-preemptive section support.
Re-introduce NP sections in the configuration and in litmus.h. Remove the old np_flag from rt_param. If CONFIG_NP_SECTION is disabled, then all non-preemptive section checks become constant expressions, which should be removed by dead-code elimination during optimization. Instead of re-implementing sys_exit_np(), we simply repurpose sched_yield() to call into the scheduler and trigger delayed preemptions.
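For orientation, the user-space half of this protocol works roughly as sketched below. This is not code from this commit: the np_flag and delayed_preemption fields appear in the litmus.h diff, but the control-page layout, the mapping of ctrl_page, and the enter_np()/exit_np() wrapper names are illustrative assumptions.

/* Hedged sketch of the user-space NP-section protocol. */
#include <sched.h>

struct control_page {
	short np_flag;            /* nonzero: task is in an NP section */
	short delayed_preemption; /* set by the kernel: yield on exit */
};

/* Assumed to point at the task's shared control page (e.g., as
 * mapped by a liblitmus-style library); hypothetical here. */
static struct control_page *ctrl_page;

static inline void enter_np(void)
{
	ctrl_page->np_flag = 1;
}

static inline void exit_np(void)
{
	ctrl_page->np_flag = 0;
	/* If the kernel deferred a preemption while we were
	 * non-preemptive, sched_yield() is repurposed to call into
	 * the scheduler and let the preemption happen now. */
	if (ctrl_page->delayed_preemption)
		sched_yield();
}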
Diffstat (limited to 'include/litmus')
-rw-r--r--	include/litmus/litmus.h	86
-rw-r--r--	include/litmus/rt_param.h	27
2 files changed, 87 insertions(+), 26 deletions(-)
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index a03580bc707c..faaf83961dfa 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -27,11 +27,11 @@ extern atomic_t __log_seq_no;
 	do { if (cond) TRACE("BUG_ON(%s) at %s:%d "	\
 		"called from %p current=%s/%d state=%d "	\
 		"flags=%x partition=%d cpu=%d rtflags=%d"	\
-		" job=%u knp=%d timeslice=%u\n",	\
+		" job=%u timeslice=%u\n",	\
 	#cond, __FILE__, __LINE__, __builtin_return_address(0), current->comm, \
 	current->pid, current->state, current->flags,	\
 	get_partition(current), smp_processor_id(), get_rt_flags(current), \
-	current->rt_param.job_params.job_no, current->rt_param.kernel_np, \
+	current->rt_param.job_params.job_no,	\
 	current->rt.time_slice\
 	); } while(0);
 
@@ -124,8 +124,6 @@ static inline lt_t litmus_clock(void)
 	(a)->rt_param.job_params.release,\
 	(b)->rt_param.job_params.release))
 
-#define make_np(t) do {t->rt_param.kernel_np++;} while(0);
-#define take_np(t) do {t->rt_param.kernel_np--;} while(0);
 
 #ifdef CONFIG_SRP
 void srp_ceiling_block(void);
@@ -135,12 +133,88 @@ void srp_ceiling_block(void);
 
 #define bheap2task(hn) ((struct task_struct*) hn->value)
 
-static inline int is_np(struct task_struct *t)
+#ifdef CONFIG_NP_SECTION
+
+static inline int is_kernel_np(struct task_struct *t)
 {
 	return tsk_rt(t)->kernel_np;
 }
 
-#define request_exit_np(t)
+static inline int is_user_np(struct task_struct *t)
+{
+	return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->np_flag : 0;
+}
+
+static inline void request_exit_np(struct task_struct *t)
+{
+	if (is_user_np(t)) {
+		/* Set the flag that tells user space to call
+		 * into the kernel at the end of a critical section. */
+		if (likely(tsk_rt(t)->ctrl_page)) {
+			TRACE_TASK(t, "setting delayed_preemption flag\n");
+			tsk_rt(t)->ctrl_page->delayed_preemption = 1;
+		}
+	}
+}
+
+static inline void clear_exit_np(struct task_struct *t)
+{
+	if (likely(tsk_rt(t)->ctrl_page))
+		tsk_rt(t)->ctrl_page->delayed_preemption = 0;
+}
+
+static inline void make_np(struct task_struct *t)
+{
+	tsk_rt(t)->kernel_np++;
+}
+
+/* Caller should check whether a preemption is necessary when
+ * this function returns 0.
+ */
+static inline int take_np(struct task_struct *t)
+{
+	return --tsk_rt(t)->kernel_np;
+}
+
+#else
+
+static inline int is_kernel_np(struct task_struct* t)
+{
+	return 0;
+}
+
+static inline int is_user_np(struct task_struct* t)
+{
+	return 0;
+}
+
+static inline void request_exit_np(struct task_struct *t)
+{
+	/* request_exit_np() shouldn't be called if !CONFIG_NP_SECTION */
+	BUG();
+}
+
+static inline void clear_exit_np(struct task_struct* t)
+{
+}
+
+#endif
+
+static inline int is_np(struct task_struct *t)
+{
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	int kernel, user;
+	kernel = is_kernel_np(t);
+	user = is_user_np(t);
+	if (kernel || user)
+		TRACE_TASK(t, " is non-preemptive: kernel=%d user=%d\n",
+			   kernel, user);
+	return kernel || user;
+#else
+	return unlikely(is_kernel_np(t) || is_user_np(t));
+#endif
+}
 
 static inline int is_present(struct task_struct* t)
 {
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 9353251fb30e..5b94d1a8eea7 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -85,7 +85,6 @@ struct rt_job {
 	unsigned int job_no;
 };
 
-
 struct pfair_param;
 
 /* RT task parameters for scheduling extensions
@@ -116,26 +115,14 @@ struct rt_param {
 	 */
 	struct task_struct* inh_task;
 
-	/* Don't just dereference this pointer in kernel space!
-	 * It might very well point to junk or nothing at all.
-	 * NULL indicates that the task has not requested any non-preemptable
-	 * section support.
-	 * Not inherited upon fork.
-	 */
-	short* np_flag;
-
-	/* re-use unused counter in plugins that don't need it */
-	union {
-		/* For the FMLP under PSN-EDF, it is required to make the task
-		 * non-preemptive from kernel space. In order not to interfere with
-		 * user space, this counter indicates the kernel space np setting.
-		 * kernel_np > 0 => task is non-preemptive
-		 */
-		unsigned int kernel_np;
-
-		/* Used by GQ-EDF */
-		unsigned int last_cpu;
-	};
+#ifdef CONFIG_NP_SECTION
+	/* For the FMLP under PSN-EDF, it is required to make the task
+	 * non-preemptive from kernel space. In order not to interfere with
+	 * user space, this counter indicates the kernel-space np setting.
+	 * kernel_np > 0 => task is non-preemptive
+	 */
+	unsigned int kernel_np;
+#endif
 
 	/* This field can be used by plugins to store where the task
 	 * is currently scheduled. It is the responsibility of the
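Taken together, the helpers give scheduler plugins a uniform preemption-point pattern. The sketch below is illustrative only: litmus_reschedule() stands in for whatever mechanism a plugin actually uses to force a reschedule, and calling clear_exit_np() on the non-NP path is a plausible but assumed usage, not something this patch prescribes.

/* Hedged sketch: how a plugin would defer preemption of an NP task. */
static void litmus_reschedule(void);  /* hypothetical preemption path */

static void maybe_preempt(struct task_struct *t)
{
	if (is_np(t)) {
		/* t is non-preemptive (kernel counter or user-space
		 * np_flag): don't preempt now; ask it to call back in
		 * (via the repurposed sched_yield()) when it exits
		 * its NP section. */
		request_exit_np(t);
	} else {
		clear_exit_np(t);	/* reset a stale flag, if any */
		litmus_reschedule();
	}
}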