-rw-r--r--	litmus/sched_pfair.c	71
1 file changed, 52 insertions(+), 19 deletions(-)
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c
index 055ac623edb4..e3db82a2bdf8 100644
--- a/litmus/sched_pfair.c
+++ b/litmus/sched_pfair.c
@@ -77,8 +77,9 @@ struct pfair_state {
 	struct task_struct* local;     /* the local copy of linked */
 	struct task_struct* scheduled; /* what is actually scheduled */
 
-	unsigned long missed_quanta;
 	lt_t offset;			/* stagger offset */
+	unsigned int missed_updates;
+	unsigned int missed_quanta;
 };
 
 struct pfair_cluster {
@@ -289,6 +290,15 @@ static void drop_all_references(struct task_struct *t)
 	}
 }
 
+static void pfair_prepare_next_period(struct task_struct* t)
+{
+	struct pfair_param* p = tsk_pfair(t);
+
+	prepare_for_next_period(t);
+	get_rt_flags(t) = RT_F_RUNNING;
+	p->release += p->period;
+}
+
 /* returns 1 if the task needs to go the release queue */
 static int advance_subtask(quanta_t time, struct task_struct* t, int cpu)
 {
@@ -297,10 +307,8 @@ static int advance_subtask(quanta_t time, struct task_struct* t, int cpu)
 	p->cur = (p->cur + 1) % p->quanta;
 	if (!p->cur) {
 		if (tsk_rt(t)->present) {
-			/* we start a new job */
-			prepare_for_next_period(t);
-			get_rt_flags(t) = RT_F_RUNNING;
-			p->release += p->period;
+			/* The job overran; we start a new budget allocation. */
+			pfair_prepare_next_period(t);
 		} else {
 			/* remove task from system until it wakes */
 			drop_all_references(t);
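
Note: p->cur indexes the task's subtasks within one period; when it wraps to zero the whole per-period budget has been consumed and the next job begins. As background (Pfair theory, not code from this patch), each subtask of a task with weight e/p occupies a fixed window:

/* Background sketch: subtask j (0-indexed) of a task that needs e
 * quanta every p quanta is pseudo-released at floor(j*p/e) and has
 * its pseudo-deadline at ceil((j+1)*p/e), in quanta relative to the
 * job release. Helper names are hypothetical. */
static inline quanta_t subtask_release(unsigned int j, unsigned int e,
				       unsigned int p)
{
	return (j * p) / e;			/* floor */
}

static inline quanta_t subtask_deadline(unsigned int j, unsigned int e,
					unsigned int p)
{
	return ((j + 1) * p + e - 1) / e;	/* ceil */
}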
@@ -310,14 +318,13 @@ static int advance_subtask(quanta_t time, struct task_struct* t, int cpu)
 		}
 	}
 	to_relq = time_after(cur_release(t), time);
-	TRACE_TASK(t, "on %d advanced to subtask %lu -> to_relq=%d\n",
-		   cpu, p->cur, to_relq);
+	TRACE_TASK(t, "on %d advanced to subtask %lu -> to_relq=%d (cur_release:%lu time:%lu)\n",
+		   cpu, p->cur, to_relq, cur_release(t), time);
 	return to_relq;
 }
 
 static void advance_subtasks(struct pfair_cluster *cluster, quanta_t time)
 {
-	int missed;
 	struct task_struct* l;
 	struct pfair_param* p;
 	struct list_head* pos;
@@ -326,15 +333,17 @@ static void advance_subtasks(struct pfair_cluster *cluster, quanta_t time)
 	list_for_each(pos, &cluster->topology.cpus) {
 		cpu = from_cluster_list(pos);
 		l = cpu->linked;
-		missed = cpu->linked != cpu->local;
+		cpu->missed_updates += cpu->linked != cpu->local;
 		if (l) {
 			p = tsk_pfair(l);
 			p->last_quantum = time;
 			p->last_cpu = cpu_id(cpu);
 			if (advance_subtask(time, l, cpu_id(cpu))) {
-				cpu->linked = NULL;
-				sched_trace_task_release(l);
-				add_release(&cluster->pfair, l);
+				//cpu->linked = NULL;
+				PTRACE_TASK(l, "should go to release queue. "
					    "scheduled_on=%d present=%d\n",
					    tsk_rt(l)->scheduled_on,
					    tsk_rt(l)->present);
 			}
 		}
 	}
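
Note: with this hunk the per-quantum tick no longer unlinks an expired task or queues it for release itself; it only traces the event and counts stale links in cpu->missed_updates (a relational expression in C evaluates to 0 or 1, so the counter advances by at most one per quantum). The actual requeue moves into pfair_schedule() below. The to_relq decision rests on time_after(), which is wrap-safe for unsigned counters; roughly:

/* Rough shape of time_after(a, b) from include/linux/jiffies.h:
 * true iff a is strictly later than b, correct across wraparound
 * of the unsigned quanta counter. */
#define time_after_sketch(a, b)	((long)((b) - (a)) < 0)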
@@ -455,7 +464,7 @@ static void schedule_next_quantum(struct pfair_cluster *cluster, quanta_t time)
 	list_for_each(pos, &cluster->topology.cpus) {
 		cpu = from_cluster_list(pos);
 		if (cpu->linked)
-			PTRACE_TASK(pstate[cpu]->linked,
+			PTRACE_TASK(cpu->linked,
 				    " linked on %d.\n", cpu_id(cpu));
 		else
 			PTRACE("(null) linked on %d.\n", cpu_id(cpu));
@@ -590,23 +599,40 @@ static int safe_to_schedule(struct task_struct* t, int cpu)
 static struct task_struct* pfair_schedule(struct task_struct * prev)
 {
 	struct pfair_state* state = &__get_cpu_var(pfair_state);
-	int blocks;
+	struct pfair_cluster* cluster = cpu_cluster(state);
+	int blocks, completion, out_of_time;
 	struct task_struct* next = NULL;
 
 #ifdef CONFIG_RELEASE_MASTER
 	/* Bail out early if we are the release master.
 	 * The release master never schedules any real-time tasks.
 	 */
-	if (cpu_cluster(state)->pfair.release_master == cpu_id(state))
+	if (unlikely(cluster->pfair.release_master == cpu_id(state)))
 		return NULL;
 #endif
 
 	raw_spin_lock(cpu_lock(state));
 
-	if (is_realtime(prev) && get_rt_flags(prev) == RT_F_SLEEP)
+	blocks = is_realtime(prev) && !is_running(prev);
+	completion = is_realtime(prev) && get_rt_flags(prev) == RT_F_SLEEP;
+	out_of_time = is_realtime(prev) && time_after(cur_release(prev),
+						      state->local_tick);
+
+	if (is_realtime(prev))
+		PTRACE_TASK(prev, "blocks:%d completion:%d out_of_time:%d\n",
+			    blocks, completion, out_of_time);
+
+	if (completion) {
 		sched_trace_task_completion(prev, 0);
+		pfair_prepare_next_period(prev);
+		prepare_release(prev, cur_release(prev));
+	}
 
-	blocks = is_realtime(prev) && !is_running(prev);
+	if (!blocks && (completion || out_of_time)) {
+		drop_all_references(prev);
+		sched_trace_task_release(prev);
+		add_release(&cluster->pfair, prev);
+	}
 
 	if (state->local && safe_to_schedule(state->local, cpu_id(state)))
 		next = state->local;
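
Note: the rewritten pfair_schedule() classifies prev with three flags and performs the requeue that the tick handler gave up above. A condensed view of the rule (sketch; the helper name is hypothetical):

/* Requeue rule for prev:
 *
 *  blocks completion out_of_time | action
 *  ------ ---------- ----------- | ----------------------------------
 *    1       any        any      | leave prev alone; the wake-up
 *                                | path requeues it later
 *    0        1         any      | job complete: advance the period,
 *                                | then move to the release queue
 *    0        0          1       | subtask budget exhausted: move to
 *                                | the release queue
 *    0        0          0       | prev remains eligible
 */
static inline int needs_requeue(int blocks, int completion, int out_of_time)
{
	return !blocks && (completion || out_of_time);
}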
@@ -679,9 +705,12 @@ static void pfair_task_wake_up(struct task_struct *t)
 		release_at(t, now);
 		prepare_release(t, time2quanta(now, CEIL));
 		sched_trace_task_release(t);
-		__add_ready(&cluster->pfair, t);
 	}
 
+	/* only add to ready queue if the task isn't still linked somewhere */
+	if (tsk_rt(t)->linked_on == NO_CPU)
+		__add_ready(&cluster->pfair, t);
+
 	check_preempt(t);
 
 	raw_spin_unlock_irqrestore(cluster_lock(cluster), flags);
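
Note: the linked_on == NO_CPU guard prevents double-queueing. Because the tick path no longer unlinks expired tasks, a waking task may still be linked on some CPU; adding it to the ready heap while linked would create two live references to the same task. Sketch of the invariant (helper name hypothetical):

/* Invariant: a task is either linked to exactly one CPU or sits in
 * a queue, never both. */
static inline void requeue_if_unlinked(struct pfair_cluster *cluster,
				       struct task_struct *t)
{
	if (tsk_rt(t)->linked_on == NO_CPU)
		__add_ready(&cluster->pfair, t);
	/* else: the CPU holding the link stays responsible for t */
}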
@@ -879,8 +908,11 @@ static void cleanup_clusters(void)
 	num_pfair_clusters = 0;
 
 	/* avoid stale pointers */
-	for (i = 0; i < num_online_cpus(); i++)
+	for (i = 0; i < num_online_cpus(); i++) {
 		pstate[i]->topology.cluster = NULL;
+		printk("P%d missed %u updates and %u quanta.\n", cpu_id(pstate[i]),
+		       pstate[i]->missed_updates, pstate[i]->missed_quanta);
+	}
 }
 
 static long pfair_activate_plugin(void)
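
Note: the new statistics dump calls printk() without an explicit log level, so it is emitted at the kernel's default level. If the counters should reliably reach the log, an explicit level states the intent, e.g.:

printk(KERN_INFO "P%d missed %u updates and %u quanta.\n",
       cpu_id(pstate[i]), pstate[i]->missed_updates,
       pstate[i]->missed_quanta);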
@@ -926,6 +958,7 @@ static long pfair_activate_plugin(void)
 		state->cur_tick = now;
 		state->local_tick = now;
 		state->missed_quanta = 0;
+		state->missed_updates = 0;
 		state->offset = cpu_stagger_offset(i);
 		printk(KERN_ERR "cpus[%d] set; %d\n", i, num_online_cpus());
 		cpus[i] = &state->topology;