author		Bjoern B. Brandenburg <bbb@cs.unc.edu>	2011-07-26 22:03:18 -0400
committer	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2011-11-24 15:21:08 -0500
commit		81b8eb2ae452c241df9b3a1fb2116fa4d5adcb75
tree		8ec7804f346b2e573ce384eb221b9e7a81872b75
parent		71083a7604e93e44536edde032706348f3a752ca
C-EDF: rename lock -> cluster_lock
The macro "lock" conflicts with locking protocols...
-rw-r--r--	litmus/sched_cedf.c	26
1 file changed, 13 insertions(+), 13 deletions(-)
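Because "lock" was defined as an object-like macro, the preprocessor rewrote every later use of the identifier "lock" in the translation unit, including declarations in unrelated code such as the locking-protocol layer. A minimal sketch of that kind of collision (the struct and member names below are illustrative, not taken from the LITMUS^RT tree):

/* Sketch: with the old macro,
 *
 *	#define lock domain.ready_lock
 *
 * an unrelated declaration such as
 *
 *	struct lock_ops { int (*lock)(void); };
 *
 * preprocesses to "int (*domain.ready_lock)(void);" and no longer
 * compiles. The renamed macro does not capture the identifier: */
#define cluster_lock domain.ready_lock

struct lock_ops {
	int (*lock)(void);	/* unaffected: "lock" is no longer a macro */
};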
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 8b3f8a7e2609..480c62bc895b 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -99,7 +99,7 @@ typedef struct clusterdomain {
 	struct bheap_node *heap_node;
 	struct bheap cpu_heap;
 	/* lock for this cluster */
-#define lock domain.ready_lock
+#define cluster_lock domain.ready_lock
 } cedf_domain_t;
 
 /* a cedf_domain per cluster; allocation is done at init/activation time */
@@ -331,12 +331,12 @@ static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 	cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain);
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
 
 	__merge_ready(&cluster->domain, tasks);
 	check_for_preemptions(cluster);
 
-	raw_spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 }
 
 /* caller holds cedf_lock */
@@ -427,7 +427,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 	}
 #endif
 
-	raw_spin_lock(&cluster->lock);
+	raw_spin_lock(&cluster->cluster_lock);
 	clear_will_schedule();
 
 	/* sanity checking */
@@ -511,7 +511,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 		next = prev;
 
 	sched_state_task_picked();
-	raw_spin_unlock(&cluster->lock);
+	raw_spin_unlock(&cluster->cluster_lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
 	TRACE("cedf_lock released, next=0x%p\n", next);
@@ -553,7 +553,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 	/* the cluster doesn't change even if t is running */
 	cluster = task_cpu_cluster(t);
 
-	raw_spin_lock_irqsave(&cluster->domain.ready_lock, flags);
+	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
 
 	/* setup job params */
 	release_at(t, litmus_clock());
@@ -580,7 +580,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 	t->rt_param.linked_on = NO_CPU;
 
 	cedf_job_arrival(t);
-	raw_spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags);
+	raw_spin_unlock_irqrestore(&(cluster->cluster_lock), flags);
 }
 
 static void cedf_task_wake_up(struct task_struct *task)
@@ -593,7 +593,7 @@ static void cedf_task_wake_up(struct task_struct *task)
 
 	cluster = task_cpu_cluster(task);
 
-	raw_spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
 	 * a semaphore, it should never be treated as a new job release.
@@ -616,7 +616,7 @@ static void cedf_task_wake_up(struct task_struct *task)
 		}
 	}
 	cedf_job_arrival(task);
-	raw_spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 }
 
 static void cedf_task_block(struct task_struct *t)
@@ -629,9 +629,9 @@ static void cedf_task_block(struct task_struct *t)
 	cluster = task_cpu_cluster(t);
 
 	/* unlink if necessary */
-	raw_spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
 	unlink(t);
-	raw_spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 }
@@ -643,7 +643,7 @@ static void cedf_task_exit(struct task_struct * t)
 	cedf_domain_t *cluster = task_cpu_cluster(t);
 
 	/* unlink if necessary */
-	raw_spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
 	unlink(t);
 	if (tsk_rt(t)->scheduled_on != NO_CPU) {
 		cpu_entry_t *cpu;
@@ -651,7 +651,7 @@ static void cedf_task_exit(struct task_struct * t)
 		cpu->scheduled = NULL;
 		tsk_rt(t)->scheduled_on = NO_CPU;
 	}
-	raw_spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
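
The rename is purely textual: the new macro still expands to the same domain.ready_lock member, so every raw_spin_lock/raw_spin_unlock call above keeps operating on the cluster's ready-queue lock. A self-contained sketch of the expansion, using stand-in types rather than the real kernel structures:

#include <stdio.h>

/* Stand-ins for rt_domain_t and cedf_domain_t, only to show the expansion. */
typedef struct { int ready_lock; } rt_domain_t;

typedef struct clusterdomain {
	rt_domain_t domain;
	/* same expansion as the old "lock" macro, but a non-colliding name */
#define cluster_lock domain.ready_lock
} cedf_domain_t;

int main(void)
{
	cedf_domain_t c = { { 42 } };
	/* c.cluster_lock preprocesses to c.domain.ready_lock */
	printf("%d\n", c.cluster_lock);
	return 0;
}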