author    Andrea Bastoni <bastoni@cs.unc.edu>  2009-12-21 12:23:57 -0500
committer Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-29 17:20:00 -0400
commit    ee09f78d8faa0b988088d93142e6f5f8a6e75394
tree      bc1e0b5db121be3de47d967973310d610ad943a2 /litmus/sched_gsn_edf.c
parent    0b28a3122d6917784701377e15a863489aee1c6c
Refactor binomial heap names: heap -> bheap
- The binomial heap "heap" names conflicted with the priority heap used by cgroups in the kernel.
- This patch changes the binomial heap "heap" names to "bheap".
Diffstat (limited to 'litmus/sched_gsn_edf.c')
-rw-r--r--  litmus/sched_gsn_edf.c | 38
1 file changed, 19 insertions(+), 19 deletions(-)
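Since this is a mechanical rename, the bheap API has the same shape as the old heap API. Below is a minimal usage sketch assembled only from calls that appear in the hunks that follow; cpu_entry_t and cpu_lower_prio come from this file, while sketch_nodes, sketch_heap, and setup_and_peek are illustrative names, not part of the patch.

/* Illustrative sketch only -- mirrors the renamed bheap calls used in
 * sched_gsn_edf.c; not part of this commit. */
#include <litmus/bheap.h>

static struct bheap_node sketch_nodes[NR_CPUS];
static struct bheap sketch_heap;

static cpu_entry_t* setup_and_peek(cpu_entry_t *entries)
{
        int cpu;

        bheap_init(&sketch_heap);               /* start with an empty heap */
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                entries[cpu].hn = &sketch_nodes[cpu];
                /* ties node and payload together via node->value */
                bheap_node_init(&entries[cpu].hn, &entries[cpu]);
                bheap_insert(cpu_lower_prio, &sketch_heap, entries[cpu].hn);
        }
        /* peek at the extreme element under cpu_lower_prio: with this
         * comparator the lowest-priority CPU surfaces at the top */
        return bheap_peek(cpu_lower_prio, &sketch_heap)->value;
}

Note that the nodes are statically allocated and linked to their payloads via bheap_node_init, so a CPU's position can later be refreshed with bheap_delete/bheap_insert, exactly as update_cpu_position does below.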
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index a223e69f2efb..9f256be86cf7 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -18,7 +18,7 @@
 #include <litmus/edf_common.h>
 #include <litmus/sched_trace.h>
 
-#include <litmus/heap.h>
+#include <litmus/bheap.h>
 
 #include <linux/module.h>
 
@@ -96,7 +96,7 @@ typedef struct {
 	struct task_struct*	linked;    /* only RT tasks */
 	struct task_struct*	scheduled; /* only RT tasks */
 	atomic_t		will_schedule; /* prevent unneeded IPIs */
-	struct heap_node*	hn;
+	struct bheap_node*	hn;
 } cpu_entry_t;
 DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries);
 
@@ -111,14 +111,14 @@ cpu_entry_t* gsnedf_cpus[NR_CPUS];
 
 
 /* the cpus queue themselves according to priority in here */
-static struct heap_node gsnedf_heap_node[NR_CPUS];
-static struct heap gsnedf_cpu_heap;
+static struct bheap_node gsnedf_heap_node[NR_CPUS];
+static struct bheap gsnedf_cpu_heap;
 
 static rt_domain_t gsnedf;
 #define gsnedf_lock (gsnedf.ready_lock)
 
 
-static int cpu_lower_prio(struct heap_node *_a, struct heap_node *_b)
+static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
 {
 	cpu_entry_t *a, *b;
 	a = _a->value;
@@ -134,16 +134,16 @@ static int cpu_lower_prio(struct heap_node *_a, struct heap_node *_b)
  */
 static void update_cpu_position(cpu_entry_t *entry)
 {
-	if (likely(heap_node_in_heap(entry->hn)))
-		heap_delete(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn);
-	heap_insert(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn);
+	if (likely(bheap_node_in_heap(entry->hn)))
+		bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn);
+	bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn);
 }
 
 /* caller must hold gsnedf lock */
 static cpu_entry_t* lowest_prio_cpu(void)
 {
-	struct heap_node* hn;
-	hn = heap_peek(cpu_lower_prio, &gsnedf_cpu_heap);
+	struct bheap_node* hn;
+	hn = bheap_peek(cpu_lower_prio, &gsnedf_cpu_heap);
 	return hn->value;
 }
 
@@ -304,7 +304,7 @@ static noinline void gsnedf_job_arrival(struct task_struct* task)
 	check_for_preemptions();
 }
 
-static void gsnedf_release_jobs(rt_domain_t* rt, struct heap* tasks)
+static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 {
 	unsigned long flags;
 
@@ -628,9 +628,9 @@ static void update_queue_position(struct task_struct *holder)
 	 * We can't use heap_decrease() here since
 	 * the cpu_heap is ordered in reverse direction, so
 	 * it is actually an increase. */
-		heap_delete(cpu_lower_prio, &gsnedf_cpu_heap,
+		bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap,
 			    gsnedf_cpus[tsk_rt(holder)->linked_on]->hn);
-		heap_insert(cpu_lower_prio, &gsnedf_cpu_heap,
+		bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap,
 			    gsnedf_cpus[tsk_rt(holder)->linked_on]->hn);
 	} else {
 		/* holder may be queued: first stop queue changes */
@@ -642,7 +642,7 @@ static void update_queue_position(struct task_struct *holder)
 		 * of holder in some heap. Note that this
 		 * may be a release heap. */
 		check_preempt =
-			!heap_decrease(edf_ready_order,
+			!bheap_decrease(edf_ready_order,
 				       tsk_rt(holder)->heap_node);
 	} else {
 		/* Nothing to do: if it is not queued and not linked
@@ -664,7 +664,7 @@ static void update_queue_position(struct task_struct *holder)
 		/* heap_decrease() hit the top level of the heap: make
 		 * sure preemption checks get the right task, not the
 		 * potentially stale cache. */
-		heap_uncache_min(edf_ready_order,
+		bheap_uncache_min(edf_ready_order,
 				 &gsnedf.ready_queue);
 		check_for_preemptions();
 	}
@@ -770,12 +770,12 @@ static long gsnedf_activate_plugin(void)
 	int cpu;
 	cpu_entry_t *entry;
 
-	heap_init(&gsnedf_cpu_heap);
+	bheap_init(&gsnedf_cpu_heap);
 	gsnedf.release_master = atomic_read(&release_master_cpu);
 
 	for_each_online_cpu(cpu) {
 		entry = &per_cpu(gsnedf_cpu_entries, cpu);
-		heap_node_init(&entry->hn, entry);
+		bheap_node_init(&entry->hn, entry);
 		atomic_set(&entry->will_schedule, 0);
 		entry->linked = NULL;
 		entry->scheduled = NULL;
@@ -816,7 +816,7 @@ static int __init init_gsn_edf(void)
 	int cpu;
 	cpu_entry_t *entry;
 
-	heap_init(&gsnedf_cpu_heap);
+	bheap_init(&gsnedf_cpu_heap);
 	/* initialize CPU state */
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		entry = &per_cpu(gsnedf_cpu_entries, cpu);
@@ -824,7 +824,7 @@ static int __init init_gsn_edf(void)
 		atomic_set(&entry->will_schedule, 0);
 		entry->cpu = cpu;
 		entry->hn = &gsnedf_heap_node[cpu];
-		heap_node_init(&entry->hn, entry);
+		bheap_node_init(&entry->hn, entry);
 	}
 	edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs);
 	return register_sched_plugin(&gsn_edf_plugin);