author    Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-29 23:45:13 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-29 23:57:07 -0400
commit    a66246f9e973a68fb9955a2fa7663a2e02afbd30 (patch)
tree      ebdf77a3cf491c0d0b77af3d9622f33013af5856 /litmus/sched_pfair.c
parent    6ffc1fee98c4b995eb3a0285f4f8fb467cb0306e (diff)
Change most LitmusRT spinlock_t to raw_spinlock_t
Adapt to the new spinlock schema (tglx 20091217):
spinlock - the weakest one, which might sleep in RT
raw_spinlock - a spinlock which always spins, even on RT
arch_spinlock - the hardware-level, architecture-dependent implementation
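In LITMUS this boils down to the conversion pattern sketched below. The lock
name my_lock is only a placeholder for this sketch; the real locks converted
are the scheduler locks touched in the diff:

#include <linux/spinlock.h>

/* was: static DEFINE_SPINLOCK(my_lock); */
static DEFINE_RAW_SPINLOCK(my_lock);

static void critical_section_sketch(void)
{
	unsigned long flags;

	/* raw_spin_lock_* keeps busy-waiting even on PreemptRT */
	raw_spin_lock_irqsave(&my_lock, flags);
	/* ... scheduler state updates ... */
	raw_spin_unlock_irqrestore(&my_lock, flags);
}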
----
Most probably, all the spinlocks changed by this commit will be true
spinning locks (raw_spinlock) in PreemptRT (so hopefully we'll need few
changes when porting Litmus to PreemptRT).
There are a couple of spinlocks that the kernel still defines as
spinlock_t (therefore no changes reported in this commit) and that might
cause us trouble:
- wait_queue_t lock is defined as spinlock_t (see the sketch below); it is used in:
  * fmlp.c -- sem->wait.lock
  * sync.c -- ts_release.wait.lock
- rwlock_t used in the fifo implementation in sched_trace.c
  * this probably needs to be changed to something that always spins in RT,
    at the expense of increased locking time.
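For illustration, the wait-queue case looks roughly like the sketch below.
The struct name and layout are made up for this example (the real one lives
in fmlp.c); the point is that the spinlock_t sits inside the kernel's
wait_queue_head_t, so LITMUS cannot simply redeclare it as raw_spinlock_t:

#include <linux/wait.h>
#include <linux/spinlock.h>

/* hypothetical stand-in for the semaphore used by fmlp.c */
struct fmlp_sem_sketch {
	wait_queue_head_t wait;	/* ->lock is a spinlock_t owned by the kernel */
};

static void fmlp_lock_sketch(struct fmlp_sem_sketch *sem)
{
	unsigned long flags;

	/* on PreemptRT this spinlock_t may become a sleeping lock */
	spin_lock_irqsave(&sem->wait.lock, flags);
	/* ... manipulate the wait queue ... */
	spin_unlock_irqrestore(&sem->wait.lock, flags);
}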
----
This commit also fixes warnings and errors due to the need to include
slab.h when using kmalloc() and friends.
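The slab.h requirement amounts to the following minimal sketch (the function
is illustrative, not the actual allocation site in sched_pfair.c):

#include <linux/slab.h>

/* without <linux/slab.h>, kmalloc()/kfree() now trigger implicit-declaration
 * warnings/errors, since the header is no longer pulled in indirectly */
static void *alloc_sketch(size_t len)
{
	return kmalloc(len, GFP_ATOMIC);
}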
----
This commit does not compile.
Diffstat (limited to 'litmus/sched_pfair.c')
-rw-r--r-- | litmus/sched_pfair.c | 25
1 file changed, 13 insertions, 12 deletions
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c
index 2ea39223e7f0..ea77d3295290 100644
--- a/litmus/sched_pfair.c
+++ b/litmus/sched_pfair.c
@@ -12,6 +12,7 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/list.h>
+#include <linux/slab.h>
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
@@ -415,7 +416,7 @@ static void schedule_next_quantum(quanta_t time)
 	/* called with interrupts disabled */
 	PTRACE("--- Q %lu at %llu PRE-SPIN\n",
 	       time, litmus_clock());
-	spin_lock(&pfair_lock);
+	raw_spin_lock(&pfair_lock);
 	PTRACE("<<< Q %lu at %llu\n",
 	       time, litmus_clock());
 
@@ -448,7 +449,7 @@ static void schedule_next_quantum(quanta_t time)
 	}
 	PTRACE(">>> Q %lu at %llu\n",
 	       time, litmus_clock());
-	spin_unlock(&pfair_lock);
+	raw_spin_unlock(&pfair_lock);
 }
 
 static noinline void wait_for_quantum(quanta_t q, struct pfair_state* state)
@@ -564,7 +565,7 @@ static struct task_struct* pfair_schedule(struct task_struct * prev)
 	int blocks;
 	struct task_struct* next = NULL;
 
-	spin_lock(&pfair_lock);
+	raw_spin_lock(&pfair_lock);
 
 	blocks = is_realtime(prev) && !is_running(prev);
 
@@ -577,7 +578,7 @@ static struct task_struct* pfair_schedule(struct task_struct * prev)
 		tsk_rt(next)->scheduled_on = state->cpu;
 	}
 
-	spin_unlock(&pfair_lock);
+	raw_spin_unlock(&pfair_lock);
 
 	if (next)
 		TRACE_TASK(next, "scheduled rel=%lu at %lu (%llu)\n",
@@ -594,7 +595,7 @@ static void pfair_task_new(struct task_struct * t, int on_rq, int running)
 
 	TRACE("pfair: task new %d state:%d\n", t->pid, t->state);
 
-	spin_lock_irqsave(&pfair_lock, flags);
+	raw_spin_lock_irqsave(&pfair_lock, flags);
 	if (running)
 		t->rt_param.scheduled_on = task_cpu(t);
 	else
@@ -605,7 +606,7 @@ static void pfair_task_new(struct task_struct * t, int on_rq, int running)
 	pfair_add_release(t);
 	check_preempt(t);
 
-	spin_unlock_irqrestore(&pfair_lock, flags);
+	raw_spin_unlock_irqrestore(&pfair_lock, flags);
 }
 
 static void pfair_task_wake_up(struct task_struct *t)
@@ -616,7 +617,7 @@ static void pfair_task_wake_up(struct task_struct *t)
 	TRACE_TASK(t, "wakes at %llu, release=%lu, pfair_time:%lu\n",
 		   litmus_clock(), cur_release(t), pfair_time);
 
-	spin_lock_irqsave(&pfair_lock, flags);
+	raw_spin_lock_irqsave(&pfair_lock, flags);
 
 	/* It is a little unclear how to deal with Pfair
 	 * tasks that block for a while and then wake. For now,
@@ -637,7 +638,7 @@ static void pfair_task_wake_up(struct task_struct *t)
 
 	check_preempt(t);
 
-	spin_unlock_irqrestore(&pfair_lock, flags);
+	raw_spin_unlock_irqrestore(&pfair_lock, flags);
 	TRACE_TASK(t, "wake up done at %llu\n", litmus_clock());
 }
 
@@ -661,12 +662,12 @@ static void pfair_task_exit(struct task_struct * t)
 	 * might not be the same as the CPU that the PFAIR scheduler
 	 * has chosen for it.
 	 */
-	spin_lock_irqsave(&pfair_lock, flags);
+	raw_spin_lock_irqsave(&pfair_lock, flags);
 
 	TRACE_TASK(t, "RIP, state:%d\n", t->state);
 	drop_all_references(t);
 
-	spin_unlock_irqrestore(&pfair_lock, flags);
+	raw_spin_unlock_irqrestore(&pfair_lock, flags);
 
 	kfree(t->rt_param.pfair);
 	t->rt_param.pfair = NULL;
@@ -680,7 +681,7 @@ static void pfair_release_at(struct task_struct* task, lt_t start)
 
 	BUG_ON(!is_realtime(task));
 
-	spin_lock_irqsave(&pfair_lock, flags);
+	raw_spin_lock_irqsave(&pfair_lock, flags);
 	release_at(task, start);
 	release = time2quanta(start, CEIL);
 
@@ -698,7 +699,7 @@ static void pfair_release_at(struct task_struct* task, lt_t start)
 	 */
 	tsk_pfair(task)->sporadic_release = 0;
 
-	spin_unlock_irqrestore(&pfair_lock, flags);
+	raw_spin_unlock_irqrestore(&pfair_lock, flags);
 }
 
 static void init_subtask(struct subtask* sub, unsigned long i,