aboutsummaryrefslogtreecommitdiffstats
path: root/litmus/rt_domain.c
diff options
context:
space:
mode:
authorAndrea Bastoni <bastoni@cs.unc.edu>2009-12-17 21:39:14 -0500
committerAndrea Bastoni <bastoni@cs.unc.edu>2010-05-29 17:17:12 -0400
commit50ca05ff9cc85176c3ee18bf1363d3d7c34aa355 (patch)
tree59d0edd28e9e47b9cb48e6cc90d5f6488494795d /litmus/rt_domain.c
parent2a94c7bf9869a13e32de7a1fe94596de7b4789a8 (diff)
[ported from 2008.3] Add GSN-EDF plugin
- insert arm_release_timer() in add_release() path - arm_release_timer() uses __hrtimer_start_range_ns() instead of hrtimer_start() to avoid deadlock on rq->lock.
Diffstat (limited to 'litmus/rt_domain.c')
-rw-r--r--litmus/rt_domain.c46
1 file changed, 29 insertions, 17 deletions
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index 4fa834018efa..78e76421aeba 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -159,23 +159,23 @@ static void reinit_release_heap(struct task_struct* t)
159 /* initialize */ 159 /* initialize */
160 heap_init(&rh->heap); 160 heap_init(&rh->heap);
161} 161}
162 162/* arm_release_timer() - start local release timer or trigger
163static void arm_release_timer(unsigned long _rt) 163 * remote timer (pull timer)
164 *
165 * Called by add_release() with:
166 * - tobe_lock taken
167 * - IRQ disabled
168 */
169static void arm_release_timer(rt_domain_t *_rt)
164{ 170{
165 rt_domain_t *rt = (rt_domain_t*) _rt; 171 rt_domain_t *rt = _rt;
166 unsigned long flags;
167 struct list_head list; 172 struct list_head list;
168 struct list_head *pos, *safe; 173 struct list_head *pos, *safe;
169 struct task_struct* t; 174 struct task_struct* t;
170 struct release_heap* rh; 175 struct release_heap* rh;
171 176
172 /* We only have to defend against the ISR since norq callbacks
173 * are serialized.
174 */
175 TRACE("arm_release_timer() at %llu\n", litmus_clock()); 177 TRACE("arm_release_timer() at %llu\n", litmus_clock());
176 spin_lock_irqsave(&rt->tobe_lock, flags);
177 list_replace_init(&rt->tobe_released, &list); 178 list_replace_init(&rt->tobe_released, &list);
178 spin_unlock_irqrestore(&rt->tobe_lock, flags);
179 179
180 list_for_each_safe(pos, safe, &list) { 180 list_for_each_safe(pos, safe, &list) {
181 /* pick task of work list */ 181 /* pick task of work list */
@@ -184,24 +184,29 @@ static void arm_release_timer(unsigned long _rt)
184 list_del(pos); 184 list_del(pos);
185 185
186 /* put into release heap while holding release_lock */ 186 /* put into release heap while holding release_lock */
187 spin_lock_irqsave(&rt->release_lock, flags); 187 spin_lock(&rt->release_lock);
188 TRACE_TASK(t, "I have the release_lock 0x%p\n", &rt->release_lock); 188 TRACE_TASK(t, "I have the release_lock 0x%p\n", &rt->release_lock);
189
189 rh = get_release_heap(rt, t, 0); 190 rh = get_release_heap(rt, t, 0);
190 if (!rh) { 191 if (!rh) {
191 /* need to use our own, but drop lock first */ 192 /* need to use our own, but drop lock first */
192 spin_unlock(&rt->release_lock); 193 spin_unlock(&rt->release_lock);
193 TRACE_TASK(t, "Dropped release_lock 0x%p\n", 194 TRACE_TASK(t, "Dropped release_lock 0x%p\n",
194 &rt->release_lock); 195 &rt->release_lock);
196
195 reinit_release_heap(t); 197 reinit_release_heap(t);
196 TRACE_TASK(t, "release_heap ready\n"); 198 TRACE_TASK(t, "release_heap ready\n");
199
197 spin_lock(&rt->release_lock); 200 spin_lock(&rt->release_lock);
198 TRACE_TASK(t, "Re-acquired release_lock 0x%p\n", 201 TRACE_TASK(t, "Re-acquired release_lock 0x%p\n",
199 &rt->release_lock); 202 &rt->release_lock);
203
200 rh = get_release_heap(rt, t, 1); 204 rh = get_release_heap(rt, t, 1);
201 } 205 }
202 heap_insert(rt->order, &rh->heap, tsk_rt(t)->heap_node); 206 heap_insert(rt->order, &rh->heap, tsk_rt(t)->heap_node);
203 TRACE_TASK(t, "arm_release_timer(): added to release heap\n"); 207 TRACE_TASK(t, "arm_release_timer(): added to release heap\n");
204 spin_unlock_irqrestore(&rt->release_lock, flags); 208
209 spin_unlock(&rt->release_lock);
205 TRACE_TASK(t, "Returned the release_lock 0x%p\n", &rt->release_lock); 210 TRACE_TASK(t, "Returned the release_lock 0x%p\n", &rt->release_lock);
206 211
207 /* To avoid arming the timer multiple times, we only let the 212 /* To avoid arming the timer multiple times, we only let the
@@ -210,9 +215,16 @@ static void arm_release_timer(unsigned long _rt)
210 */ 215 */
211 if (rh == tsk_rt(t)->rel_heap) { 216 if (rh == tsk_rt(t)->rel_heap) {
212 TRACE_TASK(t, "arming timer 0x%p\n", &rh->timer); 217 TRACE_TASK(t, "arming timer 0x%p\n", &rh->timer);
213 hrtimer_start(&rh->timer, 218 /* we cannot arm the timer using hrtimer_start()
214 ns_to_ktime(rh->release_time), 219 * as it may deadlock on rq->lock
215 HRTIMER_MODE_ABS); 220 */
221 /* FIXME now only one cpu without pulling
222 * later more cpus; hrtimer_pull should call
223 * __hrtimer_start... always with PINNED mode
224 */
225 __hrtimer_start_range_ns(&rh->timer,
226 ns_to_ktime(rh->release_time),
227 0, HRTIMER_MODE_ABS_PINNED, 0);
216 } else 228 } else
217 TRACE_TASK(t, "0x%p is not my timer\n", &rh->timer); 229 TRACE_TASK(t, "0x%p is not my timer\n", &rh->timer);
218 } 230 }
@@ -280,8 +292,8 @@ void __add_release(rt_domain_t* rt, struct task_struct *task)
280 TRACE_TASK(task, "add_release(), rel=%llu\n", get_release(task)); 292 TRACE_TASK(task, "add_release(), rel=%llu\n", get_release(task));
281 list_add(&tsk_rt(task)->list, &rt->tobe_released); 293 list_add(&tsk_rt(task)->list, &rt->tobe_released);
282 task->rt_param.domain = rt; 294 task->rt_param.domain = rt;
283 /* XXX arm_release_timer() used to be activated here 295
284 * such that it would be called with the runqueue lock dropped. 296 /* start release timer */
285 */ 297 arm_release_timer(rt);
286} 298}
287 299