author     Andrea Bastoni <bastoni@cs.unc.edu>   2010-05-29 23:45:13 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>   2010-05-29 23:57:07 -0400
commit     a66246f9e973a68fb9955a2fa7663a2e02afbd30 (patch)
tree       ebdf77a3cf491c0d0b77af3d9622f33013af5856 /litmus/sched_cedf.c
parent     6ffc1fee98c4b995eb3a0285f4f8fb467cb0306e (diff)
Change most LitmusRT spinlock_t into raw_spinlock_t
Adapt to the new spinlock schema (tglx, 2009-12-17); see the sketch after
this list:

  spinlock      - the weakest one, which might sleep in RT
  raw_spinlock  - a spinlock which always spins, even on RT
  arch_spinlock - the hardware-level, architecture-dependent implementation
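
For reference, a minimal sketch of what the conversion looks like at a
call site (a hypothetical example, not code from this commit; the lock
name is made up):

    #include <linux/spinlock.h>

    /* was: static DEFINE_SPINLOCK(cluster_lock); */
    static DEFINE_RAW_SPINLOCK(cluster_lock);

    static void example_critical_section(void)
    {
            unsigned long flags;

            /* raw_spin_lock_irqsave() always busy-waits, even under
             * PreemptRT, whereas spin_lock_irqsave() may turn into a
             * sleeping lock there. */
            raw_spin_lock_irqsave(&cluster_lock, flags);
            /* ... manipulate scheduler state ... */
            raw_spin_unlock_irqrestore(&cluster_lock, flags);
    }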
----
Most probably, all the spinlocks changed by this commit will be true
spinning locks (raw_spinlock) in PreemptRT (so hopefully we'll need few
changes when porting Litmus to PreemptRT).
There are a couple of spinlocks that the kernel still defines as
spinlock_t (therefore no changes are reported in this commit) that might
cause us trouble (see the sketch after this list):

- the wait_queue_head_t lock is defined as spinlock_t; it is used in:
  * fmlp.c -- sem->wait.lock
  * sync.c -- ts_release.wait.lock
- the rwlock_t used in the FIFO implementation in sched_trace.c
  * this probably needs to be changed to something that always spins in
    RT, at the expense of increased locking time.
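
To make the first concern concrete, here is a hypothetical sketch (the
struct and function names are made up) of the pattern used by fmlp.c and
sync.c; on 2.6.3x kernels the lock embedded in wait_queue_head_t is a
plain spinlock_t, so this commit cannot convert it:

    #include <linux/wait.h>
    #include <linux/spinlock.h>

    /* hypothetical semaphore-like structure, similar to what fmlp.c keeps */
    struct example_sem {
            wait_queue_head_t wait;
    };

    static void example_touch_wait_lock(struct example_sem *sem)
    {
            unsigned long flags;

            /* sem->wait.lock is the spinlock_t inside the kernel's wait
             * queue head; under PreemptRT it may become a sleeping lock,
             * which is a problem in contexts that must not sleep. */
            spin_lock_irqsave(&sem->wait.lock, flags);
            /* ... inspect or manipulate the wait queue ... */
            spin_unlock_irqrestore(&sem->wait.lock, flags);
    }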
----
This commit also fixes warnings and errors caused by the need to include
slab.h explicitly when using kmalloc() and friends.
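
For illustration (a generic sketch, not code from this tree):

    /* now required explicitly for kmalloc()/kfree() */
    #include <linux/slab.h>

    static void *example_alloc_state(size_t size)
    {
            /* without <linux/slab.h>, kmalloc() would trigger an
             * implicit-declaration warning or error */
            return kmalloc(size, GFP_ATOMIC);
    }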
----
This commit does not compile.
Diffstat (limited to 'litmus/sched_cedf.c')

 -rw-r--r--  litmus/sched_cedf.c | 25
 1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index da44b451c9ad..118fbd14fe25 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -24,6 +24,7 @@
 #include <linux/spinlock.h>
 #include <linux/percpu.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
@@ -281,12 +282,12 @@ static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
         cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain);
         unsigned long flags;
 
-        spin_lock_irqsave(&cluster->lock, flags);
+        raw_spin_lock_irqsave(&cluster->lock, flags);
 
         __merge_ready(&cluster->domain, tasks);
         check_for_preemptions(cluster);
 
-        spin_unlock_irqrestore(&cluster->lock, flags);
+        raw_spin_unlock_irqrestore(&cluster->lock, flags);
 }
 
 /* caller holds cedf_lock */
@@ -367,7 +368,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
         int out_of_time, sleep, preempt, np, exists, blocks;
         struct task_struct* next = NULL;
 
-        spin_lock(&cluster->lock);
+        raw_spin_lock(&cluster->lock);
         clear_will_schedule();
 
         /* sanity checking */
@@ -448,7 +449,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
         if (exists)
                 next = prev;
 
-        spin_unlock(&cluster->lock);
+        raw_spin_unlock(&cluster->lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
         TRACE("cedf_lock released, next=0x%p\n", next);
@@ -490,7 +491,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
         /* the cluster doesn't change even if t is running */
         cluster = task_cpu_cluster(t);
 
-        spin_lock_irqsave(&cluster->domain.ready_lock, flags);
+        raw_spin_lock_irqsave(&cluster->domain.ready_lock, flags);
 
         /* setup job params */
         release_at(t, litmus_clock());
@@ -507,7 +508,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
         t->rt_param.linked_on = NO_CPU;
 
         cedf_job_arrival(t);
-        spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags);
+        raw_spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags);
 }
 
 static void cedf_task_wake_up(struct task_struct *task)
@@ -520,7 +521,7 @@ static void cedf_task_wake_up(struct task_struct *task)
 
         cluster = task_cpu_cluster(task);
 
-        spin_lock_irqsave(&cluster->lock, flags);
+        raw_spin_lock_irqsave(&cluster->lock, flags);
         /* We need to take suspensions because of semaphores into
          * account! If a job resumes after being suspended due to acquiring
          * a semaphore, it should never be treated as a new job release.
@@ -543,7 +544,7 @@ static void cedf_task_wake_up(struct task_struct *task)
                 }
         }
         cedf_job_arrival(task);
-        spin_unlock_irqrestore(&cluster->lock, flags);
+        raw_spin_unlock_irqrestore(&cluster->lock, flags);
 }
 
 static void cedf_task_block(struct task_struct *t)
@@ -556,9 +557,9 @@ static void cedf_task_block(struct task_struct *t)
         cluster = task_cpu_cluster(t);
 
         /* unlink if necessary */
-        spin_lock_irqsave(&cluster->lock, flags);
+        raw_spin_lock_irqsave(&cluster->lock, flags);
         unlink(t);
-        spin_unlock_irqrestore(&cluster->lock, flags);
+        raw_spin_unlock_irqrestore(&cluster->lock, flags);
 
         BUG_ON(!is_realtime(t));
 }
@@ -570,13 +571,13 @@ static void cedf_task_exit(struct task_struct * t)
         cedf_domain_t *cluster = task_cpu_cluster(t);
 
         /* unlink if necessary */
-        spin_lock_irqsave(&cluster->lock, flags);
+        raw_spin_lock_irqsave(&cluster->lock, flags);
         unlink(t);
         if (tsk_rt(t)->scheduled_on != NO_CPU) {
                 cluster->cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
                 tsk_rt(t)->scheduled_on = NO_CPU;
         }
-        spin_unlock_irqrestore(&cluster->lock, flags);
+        raw_spin_unlock_irqrestore(&cluster->lock, flags);
 
         BUG_ON(!is_realtime(t));
         TRACE_TASK(t, "RIP\n");