author     Bjoern B. Brandenburg <bbb@cs.unc.edu>    2011-07-26 22:03:18 -0400
committer  Bjoern B. Brandenburg <bbb@cs.unc.edu>    2011-07-26 22:03:18 -0400
commit     e7b660efe777acd863442e5d863d2f681db353ff
tree       91adb8b8badd8ce9676df6a6670c81e98e522a96 /litmus
parent     5d39a47acefbba8b5caebc6b7b8ef94983735aac
C-EDF: rename lock -> cluster_lock
The macro "lock" conflicts with locking protocols...
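The rename matters because the C preprocessor substitutes every occurrence of a macro's name in the translation unit, not just uses inside cedf_domain_t: with the old name, any later code declaring a variable or parameter called "lock" (as locking-protocol code naturally does) would be silently rewritten into "domain.ready_lock" and fail to compile. Below is a minimal user-space sketch of the member-alias idiom the macro implements; pthread_mutex_t stands in for the kernel's raw spinlock, and everything apart from the cluster_lock alias itself is simplified for illustration rather than taken from the LITMUS^RT headers.

    #include <stdio.h>
    #include <pthread.h>

    /* Stand-in for rt_domain_t: the ready queue's lock. */
    typedef struct {
            pthread_mutex_t ready_lock;
    } rt_domain_t;

    typedef struct {
            rt_domain_t domain;
            /* Alias: cluster.cluster_lock expands to cluster.domain.ready_lock.
             * Had this macro been named "lock", the preprocessor would also
             * rewrite every other use of the identifier "lock" in the file. */
    #define cluster_lock domain.ready_lock
    } cedf_domain_t;

    int main(void)
    {
            cedf_domain_t cluster;

            pthread_mutex_init(&cluster.cluster_lock, NULL);
            pthread_mutex_lock(&cluster.cluster_lock);   /* really domain.ready_lock */
            puts("holding cluster.domain.ready_lock via the cluster_lock alias");
            pthread_mutex_unlock(&cluster.cluster_lock);
            pthread_mutex_destroy(&cluster.cluster_lock);
            return 0;
    }

Choosing the distinctive name cluster_lock keeps the plain identifier "lock" free for the locking-protocol code the commit message refers to, which is exactly what the diff below carries out.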
Diffstat (limited to 'litmus')
-rw-r--r--    litmus/sched_cedf.c    26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 1838c6b6fb0c..1950783c0388 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -95,7 +95,7 @@ typedef struct clusterdomain {
 	struct bheap_node *heap_node;
 	struct bheap cpu_heap;
 	/* lock for this cluster */
-#define lock domain.ready_lock
+#define cluster_lock domain.ready_lock
 } cedf_domain_t;
 
 /* a cedf_domain per cluster; allocation is done at init/activation time */
@@ -292,12 +292,12 @@ static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 	cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain);
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
 
 	__merge_ready(&cluster->domain, tasks);
 	check_for_preemptions(cluster);
 
-	raw_spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 }
 
 /* caller holds cedf_lock */
@@ -388,7 +388,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 	}
 #endif
 
-	raw_spin_lock(&cluster->lock);
+	raw_spin_lock(&cluster->cluster_lock);
 	clear_will_schedule();
 
 	/* sanity checking */
@@ -472,7 +472,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 		next = prev;
 
 	sched_state_task_picked();
-	raw_spin_unlock(&cluster->lock);
+	raw_spin_unlock(&cluster->cluster_lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
 	TRACE("cedf_lock released, next=0x%p\n", next);
@@ -514,7 +514,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 	/* the cluster doesn't change even if t is running */
 	cluster = task_cpu_cluster(t);
 
-	raw_spin_lock_irqsave(&cluster->domain.ready_lock, flags);
+	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
 
 	/* setup job params */
 	release_at(t, litmus_clock());
@@ -541,7 +541,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 	t->rt_param.linked_on = NO_CPU;
 
 	cedf_job_arrival(t);
-	raw_spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags);
+	raw_spin_unlock_irqrestore(&(cluster->cluster_lock), flags);
 }
 
 static void cedf_task_wake_up(struct task_struct *task)
@@ -554,7 +554,7 @@ static void cedf_task_wake_up(struct task_struct *task)
 
 	cluster = task_cpu_cluster(task);
 
-	raw_spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
 	 * a semaphore, it should never be treated as a new job release.
@@ -577,7 +577,7 @@ static void cedf_task_wake_up(struct task_struct *task)
 		}
 	}
 	cedf_job_arrival(task);
-	raw_spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 }
 
 static void cedf_task_block(struct task_struct *t)
@@ -590,9 +590,9 @@ static void cedf_task_block(struct task_struct *t)
 	cluster = task_cpu_cluster(t);
 
 	/* unlink if necessary */
-	raw_spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
 	unlink(t);
-	raw_spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 }
@@ -604,7 +604,7 @@ static void cedf_task_exit(struct task_struct * t)
 	cedf_domain_t *cluster = task_cpu_cluster(t);
 
 	/* unlink if necessary */
-	raw_spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
 	unlink(t);
 	if (tsk_rt(t)->scheduled_on != NO_CPU) {
 		cpu_entry_t *cpu;
@@ -612,7 +612,7 @@ static void cedf_task_exit(struct task_struct * t)
 		cpu->scheduled = NULL;
 		tsk_rt(t)->scheduled_on = NO_CPU;
 	}
-	raw_spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");