author     Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
commit     ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree       644b88f8a71896307d71438e9b3af49126ffb22b /litmus/sched_cedf.c
parent     43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent     3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master  (archived-private-master)
Diffstat (limited to 'litmus/sched_cedf.c')
-rw-r--r--  litmus/sched_cedf.c  25
1 file changed, 13 insertions(+), 12 deletions(-)
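Note on the change: every hunk below converts the cluster-lock operations from the spin_lock_*() family to the raw_spin_lock_*() family. Mainline 2.6.33 split spinlock_t and raw_spinlock_t into distinct types, and scheduler-internal locks that must always busy-wait moved to the raw_* API; LITMUS^RT's rt_domain and cluster locks follow suit here. A minimal sketch of the resulting pattern; demo_domain is a hypothetical stand-in for cedf_domain_t, not the actual LITMUS^RT definition:

/* Minimal sketch (not the actual LITMUS^RT code) of the raw_spinlock_t
 * pattern this patch converts to. 'demo_domain' is a hypothetical
 * stand-in for cedf_domain_t. */
#include <linux/spinlock.h>

struct demo_domain {
	raw_spinlock_t lock;		/* was: spinlock_t lock; */
};

static struct demo_domain dom = {
	.lock = __RAW_SPIN_LOCK_UNLOCKED(dom.lock),
};

static void demo_critical_section(void)
{
	unsigned long flags;

	/* raw_spin_lock_irqsave() always busy-waits with local
	 * interrupts disabled; scheduler-internal locks must not
	 * sleep, even on PREEMPT_RT. */
	raw_spin_lock_irqsave(&dom.lock, flags);
	/* ... manipulate scheduler state ... */
	raw_spin_unlock_irqrestore(&dom.lock, flags);
}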
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index e57a11afda16..f5b77080cc4f 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -28,6 +28,7 @@
 #include <linux/spinlock.h>
 #include <linux/percpu.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
@@ -285,12 +286,12 @@ static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 	cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain);
 	unsigned long flags;
 
-	spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->lock, flags);
 
 	__merge_ready(&cluster->domain, tasks);
 	check_for_preemptions(cluster);
 
-	spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->lock, flags);
 }
 
 /* caller holds cedf_lock */
@@ -371,7 +372,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 	int out_of_time, sleep, preempt, np, exists, blocks;
 	struct task_struct* next = NULL;
 
-	spin_lock(&cluster->lock);
+	raw_spin_lock(&cluster->lock);
 	clear_will_schedule();
 
 	/* sanity checking */
@@ -454,7 +455,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 	if (exists)
 		next = prev;
 
-	spin_unlock(&cluster->lock);
+	raw_spin_unlock(&cluster->lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
 	TRACE("cedf_lock released, next=0x%p\n", next);
@@ -496,7 +497,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 	/* the cluster doesn't change even if t is running */
 	cluster = task_cpu_cluster(t);
 
-	spin_lock_irqsave(&cluster->domain.ready_lock, flags);
+	raw_spin_lock_irqsave(&cluster->domain.ready_lock, flags);
 
 	/* setup job params */
 	release_at(t, litmus_clock());
@@ -513,7 +514,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 	t->rt_param.linked_on = NO_CPU;
 
 	cedf_job_arrival(t);
-	spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags);
+	raw_spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags);
 }
 
 static void cedf_task_wake_up(struct task_struct *task)
@@ -526,7 +527,7 @@ static void cedf_task_wake_up(struct task_struct *task)
 
 	cluster = task_cpu_cluster(task);
 
-	spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->lock, flags);
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
 	 * a semaphore, it should never be treated as a new job release.
@@ -549,7 +550,7 @@ static void cedf_task_wake_up(struct task_struct *task)
 		}
 	}
 	cedf_job_arrival(task);
-	spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->lock, flags);
 }
 
 static void cedf_task_block(struct task_struct *t)
@@ -562,9 +563,9 @@ static void cedf_task_block(struct task_struct *t)
 	cluster = task_cpu_cluster(t);
 
 	/* unlink if necessary */
-	spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->lock, flags);
 	unlink(t);
-	spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->lock, flags);
 
 	BUG_ON(!is_realtime(t));
 }
@@ -576,13 +577,13 @@ static void cedf_task_exit(struct task_struct * t)
 	cedf_domain_t *cluster = task_cpu_cluster(t);
 
 	/* unlink if necessary */
-	spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->lock, flags);
 	unlink(t);
 	if (tsk_rt(t)->scheduled_on != NO_CPU) {
 		cluster->cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
 		tsk_rt(t)->scheduled_on = NO_CPU;
 	}
-	spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->lock, flags);
 
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
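The <linux/slab.h> hunk at the top reflects the 2.6.34 header cleanup: percpu.h no longer pulls in slab.h implicitly, so files that call kmalloc()/kfree() must include it directly. A minimal sketch of why the explicit include is needed; alloc_state() is a hypothetical helper for illustration, not a function from this file:

#include <linux/slab.h>	/* explicit include required as of 2.6.34 */

/* Hypothetical helper, for illustration only: without the explicit
 * slab.h include above, kmalloc() and GFP_KERNEL would no longer be
 * visible here once percpu.h stopped pulling in slab.h. */
static void *alloc_state(size_t size)
{
	return kmalloc(size, GFP_KERNEL);
}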