aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorBjoern B. Brandenburg <bbb@cs.unc.edu>2007-05-10 19:05:19 -0400
committerBjoern B. Brandenburg <bbb@cs.unc.edu>2007-05-10 19:05:19 -0400
commita866494041074a7f53b425f7221a76778e277858 (patch)
treeaad45f3396395b3e72c021a58e8ed0b82b4c9531 /kernel
parent47f0c01efae04bdcc370ec2ba6f3d7607c8cbcaa (diff)
Break out __release_pending() from try_release_pending()
Diffstat (limited to 'kernel')
-rw-r--r-- kernel/edf_common.c | 43
1 file changed, 24 insertions(+), 19 deletions(-)
diff --git a/kernel/edf_common.c b/kernel/edf_common.c
index a6c96e0586..00e964925c 100644
--- a/kernel/edf_common.c
+++ b/kernel/edf_common.c
@@ -157,30 +157,35 @@ void __add_release(edf_domain_t* edf, struct task_struct *task)
157 list_add(&task->rt_list, &edf->release_queue); 157 list_add(&task->rt_list, &edf->release_queue);
158} 158}
159 159
160void try_release_pending(edf_domain_t* edf) 160void __release_pending(edf_domain_t* edf)
161{ 161{
162 unsigned long flags;
163 struct list_head *pos, *save; 162 struct list_head *pos, *save;
164 struct task_struct *queued; 163 struct task_struct *queued;
164 list_for_each_safe(pos, save, &edf->release_queue) {
165 queued = list_entry(pos, struct task_struct, rt_list);
166 if (likely(is_released(queued))) {
167 /* this one is ready to go*/
168 list_del(pos);
169 set_rt_flags(queued, RT_F_RUNNING);
170
171 sched_trace_job_release(queued);
172
173 /* now it can be picked up */
174 barrier();
175 add_ready(edf, queued);
176 }
177 else
178 /* the release queue is ordered */
179 break;
180 }
181}
182
183void try_release_pending(edf_domain_t* edf)
184{
185 unsigned long flags;
165 186
166 if (spin_trylock_irqsave(&edf->release_lock, flags)) { 187 if (spin_trylock_irqsave(&edf->release_lock, flags)) {
167 list_for_each_safe(pos, save, &edf->release_queue) { 188 __release_pending(edf);
168 queued = list_entry(pos, struct task_struct, rt_list);
169 if (likely(is_released(queued))) {
170 /* this one is ready to go*/
171 list_del(pos);
172 set_rt_flags(queued, RT_F_RUNNING);
173
174 sched_trace_job_release(queued);
175
176 /* now it can be picked up */
177 barrier();
178 add_ready(edf, queued);
179 }
180 else
181 /* the release queue is ordered */
182 break;
183 }
184 spin_unlock_irqrestore(&edf->release_lock, flags); 189 spin_unlock_irqrestore(&edf->release_lock, flags);
185 } 190 }
186} 191}