Diffstat (limited to 'litmus/sched_pfair.c')
 litmus/sched_pfair.c | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)
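Context for the change below: the commit converts the global pfair_lock from the spinlock_t API to the raw_spinlock_t API. Under PREEMPT_RT, spinlock_t is turned into a sleeping lock, while raw_spinlock_t remains a genuine busy-waiting lock; scheduler paths like these run with interrupts disabled and must not sleep, so they need the raw variant. A minimal sketch of the resulting pattern (the lock name and critical section are illustrative, not taken from this commit):

#include <linux/spinlock.h>

/* Illustrative lock mirroring pfair_lock's conversion: a raw
 * spinlock keeps busy-waiting semantics even under PREEMPT_RT. */
static DEFINE_RAW_SPINLOCK(demo_lock);

static void demo_critical_section(void)
{
	unsigned long flags;

	/* Disable local interrupts and spin until acquired; never sleeps. */
	raw_spin_lock_irqsave(&demo_lock, flags);
	/* ... update shared scheduler state here ... */
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}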
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c
index 2ea39223e7f0..ea77d3295290 100644
--- a/litmus/sched_pfair.c
+++ b/litmus/sched_pfair.c
@@ -12,6 +12,7 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/list.h>
+#include <linux/slab.h>
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
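The new <linux/slab.h> include makes the file's use of the slab allocator explicit: kmalloc() and kfree() are declared there, and this file frees per-task pfair state via kfree(t->rt_param.pfair) in pfair_task_exit() further down. Relying on another header to pull in slab.h transitively is fragile. A hedged sketch of the alloc/free pairing (the helper names are invented, and struct pfair_param is assumed to be the type rt_param.pfair points to):

#include <linux/slab.h>

/* Hypothetical helpers: slab.h declares kmalloc()/kfree(), which
 * back the per-task pfair state freed in pfair_task_exit(). */
static struct pfair_param *alloc_pfair_state(void)
{
	return kmalloc(sizeof(struct pfair_param), GFP_KERNEL);
}

static void free_pfair_state(struct pfair_param *p)
{
	kfree(p);	/* kfree(NULL) is a safe no-op */
}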
@@ -415,7 +416,7 @@ static void schedule_next_quantum(quanta_t time)
 	/* called with interrupts disabled */
 	PTRACE("--- Q %lu at %llu PRE-SPIN\n",
 	       time, litmus_clock());
-	spin_lock(&pfair_lock);
+	raw_spin_lock(&pfair_lock);
 	PTRACE("<<< Q %lu at %llu\n",
 	       time, litmus_clock());
 
@@ -448,7 +449,7 @@ static void schedule_next_quantum(quanta_t time)
 	}
 	PTRACE(">>> Q %lu at %llu\n",
 	       time, litmus_clock());
-	spin_unlock(&pfair_lock);
+	raw_spin_unlock(&pfair_lock);
 }
 
 static noinline void wait_for_quantum(quanta_t q, struct pfair_state* state)
@@ -564,7 +565,7 @@ static struct task_struct* pfair_schedule(struct task_struct * prev)
 	int blocks;
 	struct task_struct* next = NULL;
 
-	spin_lock(&pfair_lock);
+	raw_spin_lock(&pfair_lock);
 
 	blocks = is_realtime(prev) && !is_running(prev);
 
@@ -577,7 +578,7 @@ static struct task_struct* pfair_schedule(struct task_struct * prev)
 		tsk_rt(next)->scheduled_on = state->cpu;
 	}
 
-	spin_unlock(&pfair_lock);
+	raw_spin_unlock(&pfair_lock);
 
 	if (next)
 		TRACE_TASK(next, "scheduled rel=%lu at %lu (%llu)\n",
@@ -594,7 +595,7 @@ static void pfair_task_new(struct task_struct * t, int on_rq, int running)
 
 	TRACE("pfair: task new %d state:%d\n", t->pid, t->state);
 
-	spin_lock_irqsave(&pfair_lock, flags);
+	raw_spin_lock_irqsave(&pfair_lock, flags);
 	if (running)
 		t->rt_param.scheduled_on = task_cpu(t);
 	else
@@ -605,7 +606,7 @@ static void pfair_task_new(struct task_struct * t, int on_rq, int running)
 	pfair_add_release(t);
 	check_preempt(t);
 
-	spin_unlock_irqrestore(&pfair_lock, flags);
+	raw_spin_unlock_irqrestore(&pfair_lock, flags);
 }
 
 static void pfair_task_wake_up(struct task_struct *t)
@@ -616,7 +617,7 @@ static void pfair_task_wake_up(struct task_struct *t)
 	TRACE_TASK(t, "wakes at %llu, release=%lu, pfair_time:%lu\n",
 		   litmus_clock(), cur_release(t), pfair_time);
 
-	spin_lock_irqsave(&pfair_lock, flags);
+	raw_spin_lock_irqsave(&pfair_lock, flags);
 
 	/* It is a little unclear how to deal with Pfair
 	 * tasks that block for a while and then wake. For now,
@@ -637,7 +638,7 @@ static void pfair_task_wake_up(struct task_struct *t)
 
 	check_preempt(t);
 
-	spin_unlock_irqrestore(&pfair_lock, flags);
+	raw_spin_unlock_irqrestore(&pfair_lock, flags);
 	TRACE_TASK(t, "wake up done at %llu\n", litmus_clock());
 }
 
@@ -661,12 +662,12 @@ static void pfair_task_exit(struct task_struct * t)
 	 * might not be the same as the CPU that the PFAIR scheduler
 	 * has chosen for it.
 	 */
-	spin_lock_irqsave(&pfair_lock, flags);
+	raw_spin_lock_irqsave(&pfair_lock, flags);
 
 	TRACE_TASK(t, "RIP, state:%d\n", t->state);
 	drop_all_references(t);
 
-	spin_unlock_irqrestore(&pfair_lock, flags);
+	raw_spin_unlock_irqrestore(&pfair_lock, flags);
 
 	kfree(t->rt_param.pfair);
 	t->rt_param.pfair = NULL;
@@ -680,7 +681,7 @@ static void pfair_release_at(struct task_struct* task, lt_t start)
 
 	BUG_ON(!is_realtime(task));
 
-	spin_lock_irqsave(&pfair_lock, flags);
+	raw_spin_lock_irqsave(&pfair_lock, flags);
 	release_at(task, start);
 	release = time2quanta(start, CEIL);
 
@@ -698,7 +699,7 @@ static void pfair_release_at(struct task_struct* task, lt_t start)
 	 */
 	tsk_pfair(task)->sporadic_release = 0;
 
-	spin_unlock_irqrestore(&pfair_lock, flags);
+	raw_spin_unlock_irqrestore(&pfair_lock, flags);
 }
 
 static void init_subtask(struct subtask* sub, unsigned long i,
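Two variants of the raw lock appear in this patch: schedule_next_quantum() and pfair_schedule() take raw_spin_lock()/raw_spin_unlock() because they are entered with interrupts already disabled (the comment in the first hunk says as much), while the task lifecycle callbacks (pfair_task_new(), pfair_task_wake_up(), pfair_task_exit(), pfair_release_at()) use the _irqsave/_irqrestore forms because they may be called with interrupts enabled. A sketch of the distinction, with assumed function and lock names:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);

/* cf. schedule_next_quantum(): the caller guarantees IRQs are
 * already off, so the plain raw lock is sufficient. */
static void path_with_irqs_off(void)
{
	raw_spin_lock(&demo_lock);
	/* ... */
	raw_spin_unlock(&demo_lock);
}

/* cf. pfair_task_new(): may run with IRQs on, so the interrupt
 * state is saved and restored around the critical section. */
static void path_with_irqs_on(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_lock, flags);
	/* ... */
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}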