author    Jeremy Erickson <jerickso@cs.unc.edu>  2011-01-20 12:07:34 -0500
committer Jeremy Erickson <jerickso@cs.unc.edu>  2011-01-20 12:07:34 -0500
commit    e256bcd44225d9b9e84ad4bebfe5f49cd6ef9e10 (patch)
tree      bffd4e109db5135d628bf07f42f6b2d05ba75461 /litmus
parent    f7a3cc685f9ce885b56059ea103c39bc7abd68b3 (diff)
Update CPU queue management functions to account for multiple criticalities
Diffstat (limited to 'litmus')
-rw-r--r--  litmus/sched_mc.c  80
1 file changed, 68 insertions(+), 12 deletions(-)
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 1fad42aaadf2..59d7b6f49ce5 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -94,7 +94,8 @@ typedef struct {
 	struct task_struct*	linked;		/* only RT tasks */
 	struct task_struct*	scheduled;	/* only RT tasks */
 	atomic_t		will_schedule;	/* prevent unneeded IPIs */
-	struct bheap_node*	hn;
+	struct bheap_node*	hn_c;
+	struct bheap_node*	hn_d;
 	struct task_struct*	ghost_tasks[CRIT_LEVEL_D+1];
 } cpu_entry_t;
 
@@ -115,7 +116,7 @@ cpu_entry_t* mc_cpus[NR_CPUS];
 
 /* the cpus queue themselves according to priority in here */
 static struct bheap_node mc_heap_node[NR_CPUS];
-static struct bheap mc_cpu_heap;
+static struct bheap mc_cpu_heap_c, mc_cpu_heap_d;
 
 /* Create per-CPU domains for criticality A */
 DEFINE_PER_CPU(rt_domain_t, crit_a);
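This hunk replaces the single CPU heap with one heap per globally-scheduled criticality level (C and D); together with the new hn_c/hn_d fields above, each CPU now occupies a position in both heaps. The matching per-level node storage and setup are not visible in these hunks. A minimal sketch of what that initialization could look like, using LITMUS-RT's bheap helpers; the array names mc_heap_node_c/mc_heap_node_d and the function mc_init_cpu_heaps are assumptions, not part of the patch:

	/* Assumed per-level node storage; only mc_heap_node[] appears above. */
	static struct bheap_node mc_heap_node_c[NR_CPUS], mc_heap_node_d[NR_CPUS];

	static void mc_init_cpu_heaps(void)
	{
		int cpu;
		cpu_entry_t* entry;

		bheap_init(&mc_cpu_heap_c);
		bheap_init(&mc_cpu_heap_d);
		for_each_online_cpu(cpu) {
			entry = mc_cpus[cpu];
			/* one node per heap, each pointing back at this CPU's entry */
			entry->hn_c = &mc_heap_node_c[cpu];
			entry->hn_d = &mc_heap_node_d[cpu];
			bheap_node_init(&entry->hn_c, entry);
			bheap_node_init(&entry->hn_d, entry);
		}
	}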
@@ -155,18 +156,44 @@ static int mc_edf_higher_prio(struct task_struct* first, struct task_struct*
 	return edf_higher_prio(first, second);
 }
 
+static int mc_edf_entry_higher_prio(cpu_entry_t* first, cpu_entry_t* second,
+				    int crit)
+{
+	struct task_struct *first_active, *second_active;
+	first_active = first->linked;
+	second_active = second->linked;
+	if (first->ghost_tasks[crit]) {
+		first_active = first->ghost_tasks[crit];
+	}
+	if (second->ghost_tasks[crit]) {
+		second_active = second->ghost_tasks[crit];
+	}
+	return mc_edf_higher_prio(first_active, second_active);
+}
+
 /* need_to_preempt - check whether the task t needs to be preempted
  * call only with irqs disabled and with ready_lock acquired
  * THIS DOES NOT TAKE NON-PREEMPTIVE SECTIONS INTO ACCOUNT!
  */
-static int mc_edf_preemption_needed(rt_domain_t* rt, struct task_struct *t)
+static int mc_edf_preemption_needed(rt_domain_t* rt, int crit,
+				    cpu_entry_t* entry)
 {
+	struct task_struct *active_task;
+
 	/* we need the read lock for edf_ready_queue */
 	/* no need to preempt if there is nothing pending */
 	if (!__jobs_pending(rt))
 		return 0;
+
+	active_task = entry->linked;
+	/* A ghost task can only exist if we haven't scheduled something above
+	 * its level
+	 */
+	if (entry->ghost_tasks[crit]) {
+		active_task = entry->ghost_tasks[crit];
+	}
 	/* we need to reschedule if t doesn't exist */
-	if (!t)
+	if (!active_task)
 		return 1;
 
 	/* NOTE: We cannot check for non-preemptibility since we
@@ -174,7 +201,8 @@ static int mc_edf_preemption_needed(rt_domain_t* rt, struct task_struct *t)
 	 */
 
 	/* make sure to get non-rt stuff out of the way */
-	return !is_realtime(t) || mc_edf_higher_prio(__next_ready(rt), t);
+	return !is_realtime(active_task) ||
+	       mc_edf_higher_prio(__next_ready(rt), active_task);
 }
 
 static int mc_edf_ready_order(struct bheap_node* a, struct bheap_node* b)
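Both new functions apply the same rule for choosing the task that is "active" on a CPU at a given criticality level: the ghost task at that level, if one exists, takes precedence over the linked task, because a ghost task can only exist while nothing of higher criticality is scheduled there. A small sketch of a helper that captures that shared step; the name active_at_crit is hypothetical and not part of the patch:

	/* Hypothetical helper: the task occupying this CPU at criticality
	 * level crit -- the ghost task at that level if present, otherwise
	 * whatever task is currently linked to the CPU.
	 */
	static struct task_struct* active_at_crit(cpu_entry_t* entry, int crit)
	{
		if (entry->ghost_tasks[crit])
			return entry->ghost_tasks[crit];
		return entry->linked;
	}

With such a helper, mc_edf_entry_higher_prio reduces to mc_edf_higher_prio(active_at_crit(first, crit), active_at_crit(second, crit)), and mc_edf_preemption_needed compares __next_ready(rt) against active_at_crit(entry, crit).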
@@ -222,7 +250,21 @@ static rt_domain_t* domain_of(struct task_struct* task)
 /* Called by update_cpu_position and lowest_prio_cpu in bheap operations
  * Callers always have global lock
  */
-static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
+static int cpu_lower_prio_c(struct bheap_node *_a, struct bheap_node *_b)
+{
+	cpu_entry_t *a, *b;
+	a = _a->value;
+	b = _b->value;
+	/* Note that a and b are inverted: we want the lowest-priority CPU at
+	 * the top of the heap.
+	 */
+	return mc_edf_entry_higher_prio(b, a, CRIT_LEVEL_C);
+}
+
+/* Called by update_cpu_position and lowest_prio_cpu in bheap operations
+ * Callers always have global lock
+ */
+static int cpu_lower_prio_d(struct bheap_node *_a, struct bheap_node *_b)
 {
 	cpu_entry_t *a, *b;
 	a = _a->value;
@@ -230,7 +272,7 @@ static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
 	/* Note that a and b are inverted: we want the lowest-priority CPU at
 	 * the top of the heap.
 	 */
-	return mc_edf_higher_prio(b->linked, a->linked);
+	return mc_edf_entry_higher_prio(b, a, CRIT_LEVEL_D);
 }
 
 /* update_cpu_position - Move the cpu entry to the correct place to maintain
@@ -241,19 +283,33 @@ static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
  */
 static void update_cpu_position(cpu_entry_t *entry)
 {
-	if (likely(bheap_node_in_heap(entry->hn)))
-		bheap_delete(cpu_lower_prio, &mc_cpu_heap, entry->hn);
-	bheap_insert(cpu_lower_prio, &mc_cpu_heap, entry->hn);
+	if (likely(bheap_node_in_heap(entry->hn_c)))
+		bheap_delete(cpu_lower_prio_c, &mc_cpu_heap_c, entry->hn_c);
+	if (likely(bheap_node_in_heap(entry->hn_d)))
+		bheap_delete(cpu_lower_prio_d, &mc_cpu_heap_d, entry->hn_d);
+	bheap_insert(cpu_lower_prio_c, &mc_cpu_heap_c, entry->hn_c);
+	bheap_insert(cpu_lower_prio_d, &mc_cpu_heap_d, entry->hn_d);
+}
+
+/* caller must hold global lock
+ * Only called when checking for gedf preemptions by check_for_gedf_preemptions,
+ * which always has global lock
+ */
+static cpu_entry_t* lowest_prio_cpu_c(void)
+{
+	struct bheap_node* hn;
+	hn = bheap_peek(cpu_lower_prio_c, &mc_cpu_heap_c);
+	return hn->value;
 }
 
 /* caller must hold global lock
  * Only called when checking for gedf preemptions by check_for_gedf_preemptions,
  * which always has global lock
  */
-static cpu_entry_t* lowest_prio_cpu(void)
+static cpu_entry_t* lowest_prio_cpu_d(void)
 {
 	struct bheap_node* hn;
-	hn = bheap_peek(cpu_lower_prio, &mc_cpu_heap);
+	hn = bheap_peek(cpu_lower_prio_d, &mc_cpu_heap_d);
 	return hn->value;
 }
 
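With one heap per level, each criticality level can locate its preemption target independently. The comments above refer to check_for_gedf_preemptions; below is a hedged sketch of how a level-C version of that check might use the new lookup, patterned on the analogous GSN-EDF code. It assumes the file keeps GSN-EDF-style requeue(), link_task_to_cpu(), and preempt() helpers, and the function name check_for_c_preemptions is illustrative, not taken from the patch:

	static void check_for_c_preemptions(rt_domain_t* rt)
	{
		struct task_struct* task;
		cpu_entry_t* last;

		/* Keep preempting the lowest-priority CPU at level C while the
		 * level-C ready queue holds a job that beats that CPU's active
		 * task at level C.
		 */
		for (last = lowest_prio_cpu_c();
		     mc_edf_preemption_needed(rt, CRIT_LEVEL_C, last);
		     last = lowest_prio_cpu_c()) {
			task = __take_ready(rt);
			if (last->linked)
				requeue(last->linked);
			link_task_to_cpu(task, last);
			preempt(last);
		}
	}

A level-D variant would use lowest_prio_cpu_d and CRIT_LEVEL_D in the same way.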