author    Bjoern B. Brandenburg <bbb@cs.unc.edu>    2010-02-03 19:40:01 -0500
committer Andrea Bastoni <bastoni@cs.unc.edu>       2010-05-29 17:26:09 -0400
commit    fb95c290fe461de794c984bc4130741f04f9142d (patch)
tree      e548d3eca0e4148ee764343db6a1d9e5f9e98d05
parent    b973c95c86e6710c913c01a67013605f68a3c2c3 (diff)
Re-implement non-preemptive section support.
Re-introduce NP sections in the configuration and in litmus.h. Remove the old np_flag from rt_param.

If CONFIG_NP_SECTION is disabled, then all non-preemptive section checks are constant expressions, which should be removed by dead code elimination during optimization.

Instead of re-implementing sys_exit_np(), we simply repurpose sched_yield() for calling into the scheduler to trigger delayed preemptions.
-rw-r--r--  include/litmus/litmus.h    86
-rw-r--r--  include/litmus/rt_param.h  27
-rw-r--r--  litmus/Kconfig             13
-rw-r--r--  litmus/sched_litmus.c       8
4 files changed, 106 insertions, 28 deletions
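The commit message implies a simple user-space protocol built on the shared control page: set np_flag around the critical section, then call sched_yield() on exit if the kernel flagged a delayed preemption. The sketch below illustrates this under stated assumptions; only the np_flag and delayed_preemption fields appear in this patch, while the struct layout, the get_ctrl_page() mapping helper, and the barriers are hypothetical.

/* Hypothetical user-space sketch of a non-preemptive section.
 * Assumptions: the kernel has mapped a control page into the task's
 * address space and get_ctrl_page() returns it; only the np_flag and
 * delayed_preemption fields are taken from this patch.
 */
#include <sched.h>

struct control_page {
	volatile int np_flag;             /* task is inside an NP section */
	volatile int delayed_preemption;  /* kernel deferred a preemption */
};

extern struct control_page* get_ctrl_page(void);  /* assumed helper */

static void enter_np(void)
{
	get_ctrl_page()->np_flag = 1;
	__sync_synchronize();  /* order the store before the critical section */
}

static void exit_np(void)
{
	struct control_page* cp = get_ctrl_page();

	cp->np_flag = 0;
	__sync_synchronize();
	/* If the scheduler deferred a preemption while we were
	 * non-preemptive, call into it now; this is why sched_yield()
	 * is repurposed instead of adding a new sys_exit_np().
	 */
	if (cp->delayed_preemption)
		sched_yield();
}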
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index a03580bc707c..faaf83961dfa 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -27,11 +27,11 @@ extern atomic_t __log_seq_no;
 	do { if (cond) TRACE("BUG_ON(%s) at %s:%d "	\
 		"called from %p current=%s/%d state=%d "	\
 		"flags=%x partition=%d cpu=%d rtflags=%d"\
-		" job=%u knp=%d timeslice=%u\n",	\
+		" job=%u timeslice=%u\n",	\
 		#cond, __FILE__, __LINE__, __builtin_return_address(0), current->comm, \
 		current->pid, current->state, current->flags,	\
 		get_partition(current), smp_processor_id(), get_rt_flags(current), \
-		current->rt_param.job_params.job_no, current->rt_param.kernel_np, \
+		current->rt_param.job_params.job_no,	\
 		current->rt.time_slice\
 	); } while(0);
 
@@ -124,8 +124,6 @@ static inline lt_t litmus_clock(void)
 	(a)->rt_param.job_params.release,\
 	(b)->rt_param.job_params.release))
 
-#define make_np(t) do {t->rt_param.kernel_np++;} while(0);
-#define take_np(t) do {t->rt_param.kernel_np--;} while(0);
 
 #ifdef CONFIG_SRP
 void srp_ceiling_block(void);
@@ -135,12 +133,88 @@ void srp_ceiling_block(void);
 
 #define bheap2task(hn) ((struct task_struct*) hn->value)
 
-static inline int is_np(struct task_struct *t)
+#ifdef CONFIG_NP_SECTION
+
+static inline int is_kernel_np(struct task_struct *t)
 {
 	return tsk_rt(t)->kernel_np;
 }
 
-#define request_exit_np(t)
+static inline int is_user_np(struct task_struct *t)
+{
+	return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->np_flag : 0;
+}
+
+static inline void request_exit_np(struct task_struct *t)
+{
+	if (is_user_np(t)) {
+		/* Set the flag that tells user space to call
+		 * into the kernel at the end of a critical section. */
+		if (likely(tsk_rt(t)->ctrl_page)) {
+			TRACE_TASK(t, "setting delayed_preemption flag\n");
+			tsk_rt(t)->ctrl_page->delayed_preemption = 1;
+		}
+	}
+}
+
+static inline void clear_exit_np(struct task_struct *t)
+{
+	if (likely(tsk_rt(t)->ctrl_page))
+		tsk_rt(t)->ctrl_page->delayed_preemption = 0;
+}
+
+static inline void make_np(struct task_struct *t)
+{
+	tsk_rt(t)->kernel_np++;
+}
+
+/* Caller should check if preemption is necessary when
+ * the function returns 0.
+ */
+static inline int take_np(struct task_struct *t)
+{
+	return --tsk_rt(t)->kernel_np;
+}
+
+#else
+
+static inline int is_kernel_np(struct task_struct* t)
+{
+	return 0;
+}
+
+static inline int is_user_np(struct task_struct* t)
+{
+	return 0;
+}
+
+static inline void request_exit_np(struct task_struct *t)
+{
+	/* request_exit_np() shouldn't be called if !CONFIG_NP_SECTION */
+	BUG();
+}
+
+static inline void clear_exit_np(struct task_struct* t)
+{
+}
+
+#endif
+
+static inline int is_np(struct task_struct *t)
+{
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	int kernel, user;
+	kernel = is_kernel_np(t);
+	user   = is_user_np(t);
+	if (kernel || user)
+		TRACE_TASK(t, " is non-preemptive: kernel=%d user=%d\n",
+
+			   kernel, user);
+	return kernel || user;
+#else
+	return unlikely(is_kernel_np(t) || is_user_np(t));
+#endif
+}
 
 static inline int is_present(struct task_struct* t)
 {
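How a plugin consumes these helpers is not shown in this patch. As a rough sketch (the check_for_preemption(), higher_prio(), and force_reschedule() names are invented for illustration; only is_np() and request_exit_np() come from the diff above), a plugin that finds a higher-priority task waiting would defer the preemption while the scheduled task is non-preemptive:

/* Hypothetical plugin-side sketch; only is_np() and request_exit_np()
 * are from this patch, the other names are assumed.
 */
static void check_for_preemption(struct task_struct* scheduled,
				 struct task_struct* waiting)
{
	if (!higher_prio(waiting, scheduled))
		return;  /* nothing to do */

	if (is_np(scheduled))
		/* Cannot preempt right now: set the flag so that user
		 * space calls sched_yield() when it leaves its NP section. */
		request_exit_np(scheduled);
	else
		force_reschedule();  /* assumed: invoke the plugin's schedule() */
}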
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 9353251fb30e..5b94d1a8eea7 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -85,7 +85,6 @@ struct rt_job {
 	unsigned int    job_no;
 };
 
-
 struct pfair_param;
 
 /* RT task parameters for scheduling extensions
@@ -116,26 +115,14 @@ struct rt_param {
 	 */
 	struct task_struct*	inh_task;
 
-	/* Don't just dereference this pointer in kernel space!
-	 * It might very well point to junk or nothing at all.
-	 * NULL indicates that the task has not requested any non-preemptable
-	 * section support.
-	 * Not inherited upon fork.
-	 */
-	short* np_flag;
-
-	/* re-use unused counter in plugins that don't need it */
-	union {
-		/* For the FMLP under PSN-EDF, it is required to make the task
-		 * non-preemptive from kernel space. In order not to interfere with
-		 * user space, this counter indicates the kernel space np setting.
-		 * kernel_np > 0 => task is non-preemptive
-		 */
-		unsigned int kernel_np;
-
-		/* Used by GQ-EDF */
-		unsigned int last_cpu;
-	};
+#ifdef CONFIG_NP_SECTION
+	/* For the FMLP under PSN-EDF, it is required to make the task
+	 * non-preemptive from kernel space. In order not to interfere with
+	 * user space, this counter indicates the kernel space np setting.
+	 * kernel_np > 0 => task is non-preemptive
+	 */
+	unsigned int	kernel_np;
+#endif
 
 	/* This field can be used by plugins to store where the task
 	 * is currently scheduled. It is the responsibility of the
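This #ifdef is the other half of the commit message's dead-code claim: with CONFIG_NP_SECTION disabled, rt_param carries no NP state at all, and the stub helpers in litmus.h turn every check into a constant expression. For instance, a guard like the following (illustrative snippet, not from the patch) inlines to if (0) and is removed entirely by the optimizer:

/* With CONFIG_NP_SECTION disabled, is_np(t) inlines to
 * unlikely(0 || 0), so this whole branch is dead code and the
 * BUG() in the request_exit_np() stub can never be reached.
 */
if (is_np(t))
	request_exit_np(t);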
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 5556ae5cba45..874794f64af1 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -2,6 +2,17 @@ menu "LITMUS^RT"
 
 menu "Real-Time Synchronization"
 
+config NP_SECTION
+	bool "Non-preemptive section support"
+	default n
+	help
+	  Allow tasks to become non-preemptable.
+	  Note that plugins still need to explicitly support non-preemptivity.
+	  Currently, only GSN-EDF and PSN-EDF have such support.
+
+	  This is required to support the FMLP.
+	  If disabled, all tasks will be considered preemptable at all times.
+
 config SRP
 	bool "Stack Resource Policy (SRP)"
 	default n
@@ -13,7 +24,7 @@ config SRP
 
 config FMLP
 	bool "FMLP support"
-#	depends on NP_SECTION
+	depends on NP_SECTION
 	default n
 	help
 	  Include support for deterministic multiprocessor real-time
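Because the FMLP's dependency on NP_SECTION is no longer commented out, both symbols must now be enabled together; a .config that wants the FMLP would contain, for example:

CONFIG_NP_SECTION=y
CONFIG_FMLP=y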
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c
index 64ad5db07795..c1fc7748e590 100644
--- a/litmus/sched_litmus.c
+++ b/litmus/sched_litmus.c
@@ -177,7 +177,13 @@ static void dequeue_task_litmus(struct rq *rq, struct task_struct *p, int sleep)
 static void yield_task_litmus(struct rq *rq)
 {
 	BUG_ON(rq->curr != current);
-	litmus->complete_job();
+	/* sched_yield() is called to trigger delayed preemptions.
+	 * Thus, mark the current task as needing to be rescheduled.
+	 * This will cause the scheduler plugin to be invoked, which can
+	 * then determine if a preemption is still required.
+	 */
+	clear_exit_np(current);
+	set_tsk_need_resched(current);
 }
 
 /* Plugins are responsible for this.
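For completeness, the kernel-side counter is intended to be used in matched make_np()/take_np() pairs, e.g. by the FMLP under PSN-EDF while a task holds a resource. A hedged sketch follows; the function names and preempt_if_needed() are invented, only make_np()/take_np() and their semantics come from this patch:

/* Hypothetical sketch of paired kernel-side NP-section usage. */
static void resource_acquired(struct task_struct* t)
{
	make_np(t);  /* kernel_np++: t may not be preempted from here on */
}

static void resource_released(struct task_struct* t)
{
	/* take_np() returns the decremented counter; per the comment in
	 * litmus.h, a return value of 0 obliges the caller to check
	 * whether a preemption is now required.
	 */
	if (take_np(t) == 0)
		preempt_if_needed(t);  /* assumed helper */
}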