author    Mac Mollison <mollison@cs.unc.edu>    2010-10-05 15:47:29 -0400
committer Mac Mollison <mollison@cs.unc.edu>    2010-10-05 15:47:29 -0400
commit    3d2ef3e4f50843d1b48e3e59d3f33f4f1fa4e5f8 (patch)
tree      6c37713d790fe33ae3ba6de9bfc33beb65dac099
parent    58ea2a8251ef43a8475ecf0601d97fc5644648f7 (diff)
Update for per-cpu domains
-rw-r--r--   litmus/sched_mcrit.c   83
1 file changed, 63 insertions(+), 20 deletions(-)
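This patch splits queuing between the global rt domain (&mcrit) and a new per-CPU domain embedded in each cpu_entry_t, selected by the task's criticality level. The same selection logic appears twice below (in requeue() and in mcrit_task_new()); one possible factoring, shown only as a sketch (the helper name domain_of() is hypothetical, not part of the patch), would be:

/* Hypothetical helper, assuming the mcrit_cpus[] pointer array and the
 * rt_param layout used in this file: tasks above criticality level C are
 * partitioned onto their assigned CPU's domain, everything else goes to
 * the global mcrit domain.
 */
static rt_domain_t* domain_of(struct task_struct* t)
{
	if (t->rt_param.task_params.crit > CRIT_LEVEL_C)
		return &mcrit_cpus[t->rt_param.task_params.cpu]->rt_domain;
	return &mcrit;
}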
diff --git a/litmus/sched_mcrit.c b/litmus/sched_mcrit.c
index 0d26b3b158da..5d57a0e43412 100644
--- a/litmus/sched_mcrit.c
+++ b/litmus/sched_mcrit.c
@@ -99,6 +99,7 @@ typedef struct {
 	struct task_struct*	scheduled;	/* only RT tasks */
 	atomic_t		will_schedule;	/* prevent unneeded IPIs */
 	struct bheap_node*	hn;
+	rt_domain_t		rt_domain;	/* rt domain for tasks assigned to this CPU */
 } cpu_entry_t;
 DEFINE_PER_CPU(cpu_entry_t, mcrit_cpu_entries);
 
@@ -291,36 +292,63 @@ static void preempt(cpu_entry_t *entry)
  */
 static noinline void requeue(struct task_struct* task)
 {
+	rt_domain_t* domain;	/* either a per-CPU domain or the mcrit global domain */
+	cpu_entry_t* entry;	/* if task is partitioned, the cpu_entry for its CPU */
+
 	BUG_ON(!task);
 	/* sanity check before insertion */
 	BUG_ON(is_queued(task));
 
+	if (task->rt_param.task_params.crit > CRIT_LEVEL_C) {
+		entry = mcrit_cpus[task->rt_param.task_params.cpu];
+		domain = &entry->rt_domain;
+	} else {
+		domain = &mcrit;
+	}
+
 	if (is_released(task, litmus_clock()))
-		__add_ready(&mcrit, task);
+		__add_ready(domain, task);
 	else {
 		/* it has got to wait */
-		add_release(&mcrit, task);
+		add_release(domain, task);
 	}
 }
 
 /* check for any necessary preemptions */
 static void check_for_preemptions(void)
 {
+	int cpu;
 	struct task_struct *task;
 	cpu_entry_t* last;
+	rt_domain_t* domain;
+
+	/* first, check each CPU's partitioned domain */
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		last = mcrit_cpus[cpu];
+		domain = &last->rt_domain;
+		if (mcrit_edf_preemption_needed(domain, last->linked)) {
+			task = __take_ready(domain);
+			TRACE("check_for_preemptions: attempting to link task %d to %d\n",
+			      task->pid, last->cpu);
+			if (last->linked)
+				requeue(last->linked);
+			link_task_to_cpu(task, last);
+			preempt(last);
+		}
+	}
 
+	/* then check the global domain */
 	for (last = lowest_prio_cpu();
 	     mcrit_edf_preemption_needed(&mcrit, last->linked);
 	     last = lowest_prio_cpu()) {
 		/* preemption necessary */
 		task = __take_ready(&mcrit);
 		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
 		      task->pid, last->cpu);
 		if (last->linked)
 			requeue(last->linked);
 		link_task_to_cpu(task, last);
 		preempt(last);
 	}
 }
 
 /* mcrit_job_arrival: task is either resumed or released */
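An observation on the per-CPU scan above (not part of the patch): the loop visits all NR_CPUS slots, which can exceed the number of CPUs actually brought up, so entries for absent CPUs may be uninitialized. A defensive variant, sketched here under the assumption that mcrit_cpus[] is indexed by CPU id, would use the kernel's for_each_online_cpu() iterator:

/* Sketch only: restrict the preemption scan to online CPUs. */
int cpu;
for_each_online_cpu(cpu) {
	cpu_entry_t* last = mcrit_cpus[cpu];
	if (mcrit_edf_preemption_needed(&last->rt_domain, last->linked)) {
		/* same take_ready/requeue/link/preempt sequence as above */
	}
}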
@@ -336,15 +364,15 @@ static void mcrit_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 {
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&mcrit_lock, flags);
+	raw_spin_lock_irqsave(&rt->ready_lock, flags);
 
 	__merge_ready(rt, tasks);
 	check_for_preemptions();
 
-	raw_spin_unlock_irqrestore(&mcrit_lock, flags);
+	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
 }
 
-/* caller holds mcrit_lock */
+/* caller holds mcrit_lock and/or cpu domain lock */
 static noinline void job_completion(struct task_struct *t, int forced)
 {
 	BUG_ON(!t);
@@ -418,6 +446,7 @@ static void mcrit_tick(struct task_struct* t)
 static struct task_struct* mcrit_schedule(struct task_struct * prev)
 {
 	cpu_entry_t* entry = &__get_cpu_var(mcrit_cpu_entries);
+	rt_domain_t* cpu_domain = &entry->rt_domain;
 	int out_of_time, sleep, preempt, np, exists, blocks;
 	struct task_struct* next = NULL;
 
@@ -430,6 +459,7 @@ static struct task_struct* mcrit_schedule(struct task_struct * prev)
 #endif
 
 	raw_spin_lock(&mcrit_lock);
+	raw_spin_lock(&cpu_domain->ready_lock);	/* FIXME: is taking the per-CPU lock here necessary? */
 	clear_will_schedule();
 
 	/* sanity checking */
@@ -487,7 +517,9 @@ static struct task_struct* mcrit_schedule(struct task_struct * prev)
 
 	/* Link pending task if we became unlinked.
 	 */
-	if (!entry->linked)
+	if (!entry->linked)	/* first, try from this cpu's tasks */
+		link_task_to_cpu(__take_ready(cpu_domain), entry);
+	if (!entry->linked)	/* still nothing? get a global task */
 		link_task_to_cpu(__take_ready(&mcrit), entry);
 
 	/* The final scheduling decision. Do we need to switch for some reason?
@@ -512,6 +544,7 @@ static struct task_struct* mcrit_schedule(struct task_struct * prev)
 	if (exists)
 		next = prev;
 
+	raw_spin_unlock(&cpu_domain->ready_lock);	/* FIXME: is taking the per-CPU lock here necessary? */
 	raw_spin_unlock(&mcrit_lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
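The two FIXME lines above nest the per-CPU ready_lock inside mcrit_lock; whether the inner lock is required is left open by this patch. If it stays, the safe pattern is a single fixed acquisition order (global lock before any per-CPU lock), since mcrit_release_jobs() now takes only its own domain's ready_lock. A minimal sketch of that discipline (helper names are hypothetical, not part of the patch):

/* Hypothetical helpers illustrating a fixed lock order: mcrit_lock is
 * always acquired before a per-CPU ready_lock and released after it,
 * so the two lock classes cannot deadlock against each other.
 */
static void mcrit_lock_cpu(cpu_entry_t* entry)
{
	raw_spin_lock(&mcrit_lock);			/* global lock first */
	raw_spin_lock(&entry->rt_domain.ready_lock);	/* then the per-CPU lock */
}

static void mcrit_unlock_cpu(cpu_entry_t* entry)
{
	raw_spin_unlock(&entry->rt_domain.ready_lock);	/* release in reverse order */
	raw_spin_unlock(&mcrit_lock);
}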
@@ -547,11 +580,20 @@ static void mcrit_task_new(struct task_struct * t, int on_rq, int running)
 {
 	unsigned long flags;
 	cpu_entry_t* entry;
+	raw_spinlock_t* lock;	/* lock depends on rt domain */
 
 	TRACE("mcrit: task new %d\n", t->pid);
 	TRACE("mcrit: task criticality %d\n", t->rt_param.task_params.crit);
 
-	raw_spin_lock_irqsave(&mcrit_lock, flags);
+	if (t->rt_param.task_params.crit > CRIT_LEVEL_C) {
+		entry = mcrit_cpus[t->rt_param.task_params.cpu];
+		lock = &entry->rt_domain.ready_lock;
+	} else {
+		lock = &mcrit_lock;
+	}
+
+	raw_spin_lock_irqsave(lock, flags);
 
 	/* setup job params */
 	release_at(t, litmus_clock());
@@ -578,7 +620,7 @@ static void mcrit_task_new(struct task_struct * t, int on_rq, int running)
 	t->rt_param.linked_on = NO_CPU;
 
 	mcrit_job_arrival(t);
-	raw_spin_unlock_irqrestore(&mcrit_lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 
 static void mcrit_task_wake_up(struct task_struct *task)
@@ -711,6 +753,7 @@ static int __init init_m_crit(void)
 		entry->cpu = cpu;
 		entry->hn = &mcrit_heap_node[cpu];
 		bheap_node_init(&entry->hn, entry);
+		edf_domain_init(&entry->rt_domain, NULL, mcrit_release_jobs);
 	}
 	mcrit_edf_domain_init(&mcrit, NULL, mcrit_release_jobs);
 	return register_sched_plugin(&m_crit_plugin);
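A note on the initialization above: each per-CPU domain is registered with the same mcrit_release_jobs callback as the global domain. That works because the callback now locks rt->ready_lock, i.e. the lock of whichever domain fired the release, instead of mcrit_lock unconditionally:

/* Effective release path for a job partitioned onto CPU 2 (a sketch,
 * assuming the rt_domain release machinery invokes the registered
 * callback with the owning domain):
 *
 *   rt = &mcrit_cpus[2]->rt_domain;
 *   mcrit_release_jobs(rt, tasks);   // serializes on rt->ready_lock only
 */

One caveat worth flagging: check_for_preemptions() is called from that callback and inspects every CPU's domain plus the global one while only the firing domain's ready_lock is held, so the locking question raised in the FIXMEs above applies here as well.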