author		Glenn Elliott <gelliott@cs.unc.edu>	2011-01-28 17:29:03 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2011-01-28 19:18:53 -0500
commit		1a6154cb07727ae9716de118da15dbdb399983b9 (patch)
tree		73b222136d53fff9564306b6a64204bba6203618 /litmus/sched_global_plugin.c
parent		b8be8fb192541fad88983ef6f9270cec1b51b59a (diff)
Implementation of the EDZL scheduler. (wip-edzl-final)

Implementation of the EDZL scheduler. Zero-laxity points are
tracked by timers while jobs are in the pending state. Locking
primitives are not supported.
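For background, EDZL (Earliest Deadline until Zero Laxity) schedules jobs by EDF priority until a job's laxity, i.e. its deadline minus the current time minus its remaining execution, reaches zero, at which point the job is promoted to the highest priority. The sketch below only illustrates the zero-laxity rule the message describes; its names (struct job_sketch, zl_point(), zl_timer_fired()) are hypothetical and are not identifiers introduced by this patch.

/* Illustrative sketch of the zero-laxity rule; not the patch's code. */
#include <stdint.h>

typedef uint64_t lt_t;			/* LITMUS^RT time values: nanoseconds */

struct job_sketch {
	lt_t deadline;			/* absolute deadline */
	lt_t exec_remaining;		/* remaining worst-case execution time */
	int  zero_laxity;		/* set once the laxity timer fires */
};

/* Laxity at time t is deadline - t - exec_remaining, so it reaches
 * zero at deadline - exec_remaining. While a job is pending, a timer
 * is armed for this instant; until it fires, EDZL behaves like EDF. */
static lt_t zl_point(const struct job_sketch *j)
{
	return j->deadline - j->exec_remaining;
}

/* Timer callback: the job is promoted to the highest priority, which
 * may in turn require a preemption check. */
static void zl_timer_fired(struct job_sketch *j)
{
	j->zero_laxity = 1;
}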
Diffstat (limited to 'litmus/sched_global_plugin.c')
-rw-r--r--	litmus/sched_global_plugin.c	106
1 file changed, 84 insertions, 22 deletions
diff --git a/litmus/sched_global_plugin.c b/litmus/sched_global_plugin.c
index 22dffa7d62fc..e94247b66b59 100644
--- a/litmus/sched_global_plugin.c
+++ b/litmus/sched_global_plugin.c
@@ -105,31 +105,11 @@ struct sched_global_plugin* active_gbl_plugin;
 /*********************************************************************/
 
 /* Priority-related functions */
-int gbl_preemption_needed(struct task_struct *t)
-{
-	/* we need the read lock for active_gbl_domain's ready_queue */
-	/* no need to preempt if there is nothing pending */
-	if (!__jobs_pending(&active_gbl_domain))
-		return 0;
-	/* we need to reschedule if t doesn't exist */
-	if (!t)
-		return 1;
-
-	/* NOTE: We cannot check for non-preemptibility since we
-	 * don't know what address space we're currently in.
-	 */
-
-	/* make sure to get non-rt stuff out of the way */
-	return !is_realtime(t) || active_gbl_plugin->prio_order(__next_ready(&active_gbl_domain), t);
-}
-
 int gbl_ready_order(struct bheap_node* a, struct bheap_node* b)
 {
 	return active_gbl_plugin->prio_order(bheap2task(a), bheap2task(b));
 }
 
-
-
 int gbl_cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
 {
 	cpu_entry_t *a, *b;
@@ -243,7 +223,6 @@ void gbl_unlink(struct task_struct* t)
 	}
 }
 
-
 /* preempt - force a CPU to reschedule
  */
 void gbl_preempt(cpu_entry_t *entry)
@@ -268,6 +247,71 @@ void gbl_requeue(struct task_struct* task)
 	}
 }
 
+/*
+ * update_queue_position - call after changing the priority of 'task'.
+ */
+void gbl_update_queue_position(struct task_struct *task)
+{
+	/* We don't know whether task is in the ready queue. It should be, but
+	 * on a budget overrun it may already be in a release queue. Hence,
+	 * calling unlink() is not possible since it assumes that the task is
+	 * not in a release queue.
+	 */
+
+	/* Assumption: caller holds active_gbl_domain_lock */
+
+	int check_preempt = 0;
+
+	if (tsk_rt(task)->linked_on != NO_CPU) {
+		TRACE_TASK(task, "%s: linked on %d\n",
+			   __FUNCTION__, tsk_rt(task)->linked_on);
+		/* Task is scheduled; need to re-order CPUs.
+		 * We can't use heap_decrease() here since
+		 * the cpu_heap is ordered in reverse direction, so
+		 * it is actually an increase. */
+		bheap_delete(gbl_cpu_lower_prio, &active_gbl_plugin->cpu_heap,
+			     active_gbl_plugin->cpus[tsk_rt(task)->linked_on]->hn);
+		bheap_insert(gbl_cpu_lower_prio, &active_gbl_plugin->cpu_heap,
+			     active_gbl_plugin->cpus[tsk_rt(task)->linked_on]->hn);
+	} else {
+		/* task may be queued: first stop queue changes */
+		raw_spin_lock(&active_gbl_domain.release_lock);
+		if (is_queued(task)) {
+			TRACE_TASK(task, "%s: is queued\n",
+				   __FUNCTION__);
+			/* We need to update the position
+			 * of task in some heap. Note that this
+			 * may be a release heap. */
+			check_preempt =
+				!bheap_decrease(gbl_ready_order,
+						tsk_rt(task)->heap_node);
+		} else {
+			/* Nothing to do: if it is not queued and not linked
+			 * then it is currently being moved by other code
+			 * (e.g., a timer interrupt handler) that will use the
+			 * correct priority when enqueuing the task. */
+			TRACE_TASK(task, "%s: is NOT queued => Done.\n",
+				   __FUNCTION__);
+		}
+		raw_spin_unlock(&active_gbl_domain.release_lock);
+
+		/* If task was enqueued in a release heap, then the following
+		 * preemption check is pointless, but we can't easily detect
+		 * that case. If you want to fix this, then consider that
+		 * simply adding a state flag requires O(n) time to update when
+		 * releasing n tasks, which conflicts with the goal to have
+		 * O(log n) merges. */
+		if (check_preempt) {
+			/* heap_decrease() hit the top level of the heap: make
+			 * sure preemption checks get the right task, not the
+			 * potentially stale cache. */
+			bheap_uncache_min(gbl_ready_order,
+					  &active_gbl_domain.ready_queue);
+			gbl_check_for_preemptions();
+		}
+	}
+}
+
 
 /* check for any necessary preemptions */
 void gbl_check_for_preemptions(void)
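A note on the bheap_delete()/bheap_insert() pair in the hunk above: cpu_heap keeps the lowest-priority CPU at its root so that lowest_prio_cpu() can find a preemption victim cheaply, which means raising the priority of a CPU's linked task moves that CPU away from the root, the one direction a decrease-key primitive cannot move a node. The toy program below, with made-up priorities, only illustrates that invariant; it is not code from this patch.

/* Toy illustration of the reverse-ordered cpu_heap; values made up. */
#include <stdio.h>

int main(void)
{
	/* priorities of the tasks linked to CPUs 0..2 (higher = better);
	 * the heap root holds the LOWEST of these, here CPU 1 */
	int linked_prio[3] = { 7, 2, 9 };

	/* the task on CPU 1 gains priority (say it reaches zero laxity);
	 * CPU 1 must now sink away from the root, so the node is removed
	 * and reinserted rather than decrease-keyed */
	linked_prio[1] = 11;

	printf("new root (preemption victim): CPU %d\n",
	       linked_prio[0] < linked_prio[2] ? 0 : 2);
	return 0;
}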
@@ -276,7 +320,7 @@ void gbl_check_for_preemptions(void)
 	cpu_entry_t* last;
 
 	for(last = lowest_prio_cpu();
-	    gbl_preemption_needed(last->linked);
+	    active_gbl_plugin->preemption_needed(last->linked);
 	    last = lowest_prio_cpu())
 	{
 		/* preemption necessary */
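The one-line change above captures the refactoring this patch relies on: the preemption test is no longer a fixed function but a hook on the active plugin, so EDZL can install a test that also honors zero-laxity promotions, while gblv_preemption_needed() (moved in the next hunk) appears to provide the generic default. The sketch below shows only the dispatch pattern; its struct is hypothetical and abbreviates the real struct sched_global_plugin, which has more members.

/* Simplified sketch of per-plugin dispatch; not the real struct. */
struct task_struct;

struct global_plugin_sketch {
	/* nonzero if the currently linked task should be preempted */
	int (*preemption_needed)(struct task_struct *t);
	/* nonzero if 'a' has higher priority than 'b' */
	int (*prio_order)(struct task_struct *a, struct task_struct *b);
};

/* the selected policy installs itself here; generic code only ever
 * calls through the pointer, never a policy-specific function */
static struct global_plugin_sketch *active_plugin_sketch;

static int needs_resched(struct task_struct *linked)
{
	return active_plugin_sketch->preemption_needed(linked);
}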
@@ -392,6 +436,24 @@ void gblv_job_arrival(struct task_struct* task)
 	gbl_check_for_preemptions();
 }
 
+int gblv_preemption_needed(struct task_struct *t)
+{
+	/* we need the read lock for active_gbl_domain's ready_queue */
+	/* no need to preempt if there is nothing pending */
+	if (!__jobs_pending(&active_gbl_domain))
+		return 0;
+	/* we need to reschedule if t doesn't exist */
+	if (!t)
+		return 1;
+
+	/* NOTE: We cannot check for non-preemptibility since we
+	 * don't know what address space we're currently in.
+	 */
+
+	/* make sure to get non-rt stuff out of the way */
+	return !is_realtime(t) || active_gbl_plugin->prio_order(__next_ready(&active_gbl_domain), t);
+}
+
 /* gbl_tick - this function is called for every local timer interrupt.
  *
  * checks whether the current task has expired and checks