author     Glenn Elliott <gelliott@cs.unc.edu>  2012-03-21 16:39:26 -0400
committer  Glenn Elliott <gelliott@cs.unc.edu>  2012-03-21 16:44:54 -0400
commit     ee525fe7ba4edf4da2d293629ffdff2caa9ad02b (patch)
tree       36739b207eea30749ab115e0dc6fc250cadc92f6
parent     bdce67bc2babc2e5b3b2440964e9cf819ac814dc (diff)
C-EDF: Use binary heap instead of binomial heap.
Use a binary heap to order CPUs by priority.
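For context, the old bheap API passed the comparator to every operation and kept a separately allocated bheap_node per CPU, whereas binheap binds the comparator to the heap handle once and embeds the node directly in the entry. Below is a minimal sketch of the new usage pattern, pieced together from the call sites in this patch; the exact litmus/binheap.h signatures are assumed from those calls (including the four-argument binheap_delete()), and the edf_higher_prio() comparison reflects the unchanged body of cpu_lower_prio, which lies outside the hunks shown here.

	#include <litmus/binheap.h>
	#include <litmus/edf_common.h>	/* edf_higher_prio() */

	typedef struct {
		struct task_struct*	linked;
		struct binheap_node	hn;		/* node embedded in the entry itself */
		int			in_heap;	/* membership tracked by hand */
	} cpu_entry_t;

	static struct binheap_handle cpu_heap;

	static int cpu_lower_prio(struct binheap_node *_a, struct binheap_node *_b)
	{
		/* binheap_entry() recovers the enclosing struct from its node */
		cpu_entry_t *a = binheap_entry(_a, cpu_entry_t, hn);
		cpu_entry_t *b = binheap_entry(_b, cpu_entry_t, hn);

		/* inverted so the lowest-priority CPU sits at the top */
		return edf_higher_prio(b->linked, a->linked);
	}

	static void binheap_usage_sketch(cpu_entry_t *entry)
	{
		/* one-time setup: comparator is bound to the handle, not per call */
		INIT_BINHEAP_HANDLE(&cpu_heap, cpu_lower_prio);
		INIT_BINHEAP_NODE(&entry->hn);
		entry->in_heap = 0;

		/* reposition an entry: delete if present, then re-add */
		if (entry->in_heap)
			binheap_delete(&entry->hn, &cpu_heap, cpu_entry_t, hn);
		binheap_add(&entry->hn, &cpu_heap, cpu_entry_t, hn);
		entry->in_heap = 1;

		/* peek at the top, getting the cpu_entry_t back directly */
		(void)binheap_top_entry(&cpu_heap, cpu_entry_t, hn);
	}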
-rw-r--r--	litmus/sched_cedf.c	41
1 file changed, 18 insertions, 23 deletions
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 480c62bc895b..d498bf39d4df 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -42,6 +42,7 @@
 #include <litmus/clustered.h>
 
 #include <litmus/bheap.h>
+#include <litmus/binheap.h>
 
 #ifdef CONFIG_SCHED_CPU_AFFINITY
 #include <litmus/affinity.h>
@@ -70,7 +71,9 @@ typedef struct {
 	struct task_struct*	linked;		/* only RT tasks */
 	struct task_struct*	scheduled;	/* only RT tasks */
 	atomic_t		will_schedule;	/* prevent unneeded IPIs */
-	struct bheap_node*	hn;
+
+	int in_heap; /* == 0/1: not in / in heap*/
+	struct binheap_node hn;
 } cpu_entry_t;
 
 /* one cpu_entry_t per CPU */
@@ -96,8 +99,7 @@ typedef struct clusterdomain {
 	/* map of this cluster cpus */
 	cpumask_var_t	cpu_map;
 	/* the cpus queue themselves according to priority in here */
-	struct bheap_node *heap_node;
-	struct bheap	cpu_heap;
+	struct binheap_handle cpu_heap;
 	/* lock for this cluster */
 #define cluster_lock domain.ready_lock
 } cedf_domain_t;
@@ -115,11 +117,11 @@ cedf_domain_t *cedf;
  */
 #define VERBOSE_INIT
 
-static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
+static int cpu_lower_prio(struct binheap_node *_a, struct binheap_node *_b)
 {
-	cpu_entry_t *a, *b;
-	a = _a->value;
-	b = _b->value;
+	cpu_entry_t *a = binheap_entry(_a, cpu_entry_t, hn);
+	cpu_entry_t *b = binheap_entry(_b, cpu_entry_t, hn);
+
 	/* Note that a and b are inverted: we want the lowest-priority CPU at
 	 * the top of the heap.
 	 */
@@ -133,20 +135,16 @@ static void update_cpu_position(cpu_entry_t *entry)
 {
 	cedf_domain_t *cluster = entry->cluster;
 
-	if (likely(bheap_node_in_heap(entry->hn)))
-		bheap_delete(cpu_lower_prio,
-				&cluster->cpu_heap,
-				entry->hn);
-
-	bheap_insert(cpu_lower_prio, &cluster->cpu_heap, entry->hn);
+	if (likely(entry->in_heap))
+		binheap_delete(&entry->hn, &cluster->cpu_heap, cpu_entry_t, hn);
+	binheap_add(&entry->hn, &cluster->cpu_heap, cpu_entry_t, hn);
+	entry->in_heap = 1;
 }
 
 /* caller must hold cedf lock */
 static cpu_entry_t* lowest_prio_cpu(cedf_domain_t *cluster)
 {
-	struct bheap_node* hn;
-	hn = bheap_peek(cpu_lower_prio, &cluster->cpu_heap);
-	return hn->value;
+	return binheap_top_entry(&cluster->cpu_heap, cpu_entry_t, hn);
 }
 
 
@@ -689,7 +687,6 @@ static void cleanup_cedf(void)
 	if (clusters_allocated) {
 		for (i = 0; i < num_clusters; i++) {
 			kfree(cedf[i].cpus);
-			kfree(cedf[i].heap_node);
 			free_cpumask_var(cedf[i].cpu_map);
 		}
 
@@ -749,10 +746,7 @@ static long cedf_activate_plugin(void)
 
 		cedf[i].cpus = kmalloc(cluster_size * sizeof(cpu_entry_t),
 				GFP_ATOMIC);
-		cedf[i].heap_node = kmalloc(
-				cluster_size * sizeof(struct bheap_node),
-				GFP_ATOMIC);
-		bheap_init(&(cedf[i].cpu_heap));
+		INIT_BINHEAP_HANDLE(&(cedf[i].cpu_heap), cpu_lower_prio);
 		edf_domain_init(&(cedf[i].domain), NULL, cedf_release_jobs);
 
 		if(!zalloc_cpumask_var(&cedf[i].cpu_map, GFP_ATOMIC))
@@ -795,8 +789,9 @@ static long cedf_activate_plugin(void)
 			atomic_set(&entry->will_schedule, 0);
 			entry->cpu = ccpu;
 			entry->cluster = &cedf[i];
-			entry->hn = &(cedf[i].heap_node[cpu_count]);
-			bheap_node_init(&entry->hn, entry);
+
+			INIT_BINHEAP_NODE(&entry->hn);
+			entry->in_heap = 0;
 
 			cpu_count++;
 
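A note on the allocation story that falls out of this change: because struct binheap_node now lives inside cpu_entry_t rather than behind a pointer, the per-cluster heap_node array (a kmalloc in cedf_activate_plugin() and a matching kfree in cleanup_cedf()) disappears entirely, and binheap_entry() recovers the enclosing entry from its node instead of chasing a stored value pointer. The explicit in_heap flag stands in for the old bheap_node_in_heap() membership test, which is presumably why update_cpu_position() now maintains it by hand.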