author     Glenn Elliott <gelliott@cs.unc.edu>	2011-01-28 17:29:03 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>	2011-01-28 19:18:53 -0500
commit     1a6154cb07727ae9716de118da15dbdb399983b9 (patch)
tree       73b222136d53fff9564306b6a64204bba6203618 /litmus/sched_gsn_edf.c
parent     b8be8fb192541fad88983ef6f9270cec1b51b59a (diff)
Implementation of the EDZL scheduler. (wip-edzl-final)
Implementation of the EDZL (Earliest Deadline until Zero Laxity)
scheduler. Zero-laxity points are tracked by timers while jobs are
in the pending state. Locking primitives are not supported.
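For readers unfamiliar with the mechanism, the sketch below illustrates what
"tracked by timers" can look like. It is a hypothetical illustration, not code
from this patch: it assumes the standard LITMUS^RT helpers (lt_t,
get_deadline(), budget_remaining(), litmus_clock(), lt_after_eq()) and a
made-up hrtimer callback edzl_zl_fired() that would promote the job and
re-run the preemption check.

/* Hypothetical sketch: arm a timer at a job's zero-laxity point,
 * t_zl = deadline - remaining execution budget, i.e., the latest
 * instant at which the job must be scheduled to still meet its
 * deadline. */
static void arm_zl_timer(struct task_struct *t, struct hrtimer *timer)
{
	lt_t zl_point = get_deadline(t) - budget_remaining(t);

	if (lt_after_eq(litmus_clock(), zl_point))
		/* already at (or past) zero laxity: boost priority now */
		edzl_zl_fired(timer);
	else
		hrtimer_start(timer, ns_to_ktime(zl_point),
			      HRTIMER_MODE_ABS);
}

When such a timer fires, an EDZL plugin would raise the job's priority above
all non-zero-laxity jobs; this priority promotion is what distinguishes EDZL
from plain global EDF.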
Diffstat (limited to 'litmus/sched_gsn_edf.c')
-rw-r--r--	litmus/sched_gsn_edf.c	70
1 file changed, 3 insertions(+), 67 deletions(-)
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 7876d707d939..e4d17da0160a 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -63,76 +63,12 @@ static struct sched_global_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
 	.take_ready = __take_ready,
 	.add_ready = __add_ready,
 	.job_arrival = gblv_job_arrival,
-	.job_completion = gbl_job_completion
+	.job_completion = gbl_job_completion,
+	.preemption_needed = gblv_preemption_needed
 };
 
 
 #ifdef CONFIG_FMLP
-/* Update the queue position of a task that got it's priority boosted via
- * priority inheritance. */
-static void update_queue_position(struct task_struct *holder)
-{
-	/* We don't know whether holder is in the ready queue. It should, but
-	 * on a budget overrun it may already be in a release queue. Hence,
-	 * calling unlink() is not possible since it assumes that the task is
-	 * not in a release queue. However, we can safely check whether
-	 * sem->holder is currently in a queue or scheduled after locking both
-	 * the release and the ready queue lock. */
-
-	/* Assumption: caller holds gsnedf_lock */
-
-	int check_preempt = 0;
-
-	if (tsk_rt(holder)->linked_on != NO_CPU) {
-		TRACE_TASK(holder, "%s: linked on %d\n",
-			   __FUNCTION__, tsk_rt(holder)->linked_on);
-		/* Holder is scheduled; need to re-order CPUs.
-		 * We can't use heap_decrease() here since
-		 * the cpu_heap is ordered in reverse direction, so
-		 * it is actually an increase. */
-		bheap_delete(gbl_cpu_lower_prio, &gsn_edf_plugin.cpu_heap,
-			     gsn_edf_plugin.cpus[tsk_rt(holder)->linked_on]->hn);
-		bheap_insert(gbl_cpu_lower_prio, &gsn_edf_plugin.cpu_heap,
-			     gsn_edf_plugin.cpus[tsk_rt(holder)->linked_on]->hn);
-	} else {
-		/* holder may be queued: first stop queue changes */
-		raw_spin_lock(&gsn_edf_plugin.domain.release_lock);
-		if (is_queued(holder)) {
-			TRACE_TASK(holder, "%s: is queued\n",
-				   __FUNCTION__);
-			/* We need to update the position
-			 * of holder in some heap. Note that this
-			 * may be a release heap. */
-			check_preempt =
-				!bheap_decrease(edf_ready_order,
-						tsk_rt(holder)->heap_node);
-		} else {
-			/* Nothing to do: if it is not queued and not linked
-			 * then it is currently being moved by other code
-			 * (e.g., a timer interrupt handler) that will use the
-			 * correct priority when enqueuing the task. */
-			TRACE_TASK(holder, "%s: is NOT queued => Done.\n",
-				   __FUNCTION__);
-		}
-		raw_spin_unlock(&gsn_edf_plugin.domain.release_lock);
-
-		/* If holder was enqueued in a release heap, then the following
-		 * preemption check is pointless, but we can't easily detect
-		 * that case. If you want to fix this, then consider that
-		 * simply adding a state flag requires O(n) time to update when
-		 * releasing n tasks, which conflicts with the goal to have
-		 * O(log n) merges. */
-		if (check_preempt) {
-			/* heap_decrease() hit the top level of the heap: make
-			 * sure preemption checks get the right task, not the
-			 * potentially stale cache. */
-			bheap_uncache_min(gbl_ready_order,
-					  &gsn_edf_plugin.domain.ready_queue);
-			gbl_check_for_preemptions();
-		}
-	}
-}
-
 static long gsnedf_pi_block(struct pi_semaphore *sem,
 			    struct task_struct *new_waiter)
 {
@@ -158,7 +94,7 @@ static long gsnedf_pi_block(struct pi_semaphore *sem,
 			   new_waiter->comm, new_waiter->pid);
 		/* let holder inherit */
 		sem->holder->rt_param.inh_task = new_waiter;
-		update_queue_position(sem->holder);
+		gbl_update_queue_position(sem->holder);
 	}
 	raw_spin_unlock(&gsnedf_lock);
 }
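A note on the deleted helper: the changed call site in gsnedf_pi_block() now
calls gbl_update_queue_position(), so the function body has presumably been
generalized and moved into the shared global-plugin code (the gbl_/gblv_
prefixes match the other shared callbacks in the plugin table above). Its
subtlest step is re-ordering a CPU node in cpu_heap: that heap is ordered in
the reverse direction (lowest-priority CPU at the root), so a priority
increase moves the node *away* from the root and bheap_decrease() cannot be
used. The general pattern, sketched below against the LITMUS^RT bheap API
already used in the removed lines, is delete-and-reinsert:

/* Sketch (not from this patch): re-position a node whose key may have
 * moved in either direction with respect to the heap order.
 * bheap_decrease() only handles moves toward the root; the general
 * case is a delete followed by a re-insert, still O(log n). */
static void bheap_reposition(bheap_prio_t higher_prio, struct bheap *heap,
			     struct bheap_node *node)
{
	bheap_delete(higher_prio, heap, node);
	bheap_insert(higher_prio, heap, node);
}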