author    Glenn Elliott <gelliott@cs.unc.edu>    2014-03-13 15:38:38 -0400
committer Glenn Elliott <gelliott@cs.unc.edu>    2014-03-13 15:38:38 -0400
commit    9f09e173b0414f9c80fce7dd9f5324eec1ecc5f1 (patch)
tree      3433ac150c1aa9b74cd5b86c62d14d7dfd3296b4
parent    1debd1dda255e3b6ace78fd14e022b63a329caf0 (diff)
C-EDF: Use sbinheap to track CPU priority
This patch updates C-EDF to use the more efficient sbinheap data structure (instead of binheap) to track CPU priorities.
 litmus/sched_cedf.c | 48 +++++++++++++++++++++++++++++-------------------
 1 file changed, 29 insertions(+), 19 deletions(-)
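
A note for readers unfamiliar with sbinheap: as the diff below shows, sbinheap requires the caller to preallocate a node buffer (cpu_heap.buf) sized to the heap's maximum occupancy, which is presumably where its efficiency advantage over the pointer-based binheap comes from. The following is a minimal sketch of the setup pattern, assuming only the fields and macros this diff exercises (compare, size, max_size, buf, INIT_SBINHEAP); setup_cpu_heap and n are illustrative names, and the error handling is not from the patch.

/* Sketch: initialize a fixed-capacity sbinheap for n CPUs,
 * mirroring the cedf_activate_plugin() changes below. */
static struct sbinheap my_heap;

static long setup_cpu_heap(unsigned int n)
{
	my_heap.compare  = cpu_lower_prio;  /* heap-order predicate */
	my_heap.size     = 0;               /* heap starts empty */
	my_heap.max_size = n;               /* capacity is fixed up front */
	/* the caller owns the node buffer and must kfree() it on teardown */
	my_heap.buf = kmalloc(n * sizeof(struct sbinheap_node), GFP_ATOMIC);
	if (!my_heap.buf)
		return -ENOMEM;
	INIT_SBINHEAP(&my_heap);
	return 0;
}
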
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index b95ff3967633..a6b61926eda1 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -44,6 +44,7 @@
 
 #include <litmus/bheap.h>
 #include <litmus/binheap.h>
+#include <litmus/sbinheap.h>
 #include <litmus/trace.h>
 
 /* to configure the cluster size */
@@ -98,7 +99,7 @@ typedef struct {
 	struct task_struct*	linked;    /* only RT tasks */
 	struct task_struct*	scheduled; /* only RT tasks */
 	atomic_t		will_schedule; /* prevent unneeded IPIs */
-	struct binheap_node	hn;
+	sbinheap_node_t		hn;
 } cpu_entry_t;
 
 /* one cpu_entry_t per CPU */
@@ -124,7 +125,7 @@ typedef struct clusterdomain {
 	/* map of this cluster cpus */
 	cpumask_var_t	cpu_map;
 	/* the cpus queue themselves according to priority in here */
-	struct binheap	cpu_heap;
+	struct sbinheap	cpu_heap;
 
 #define cluster_lock domain.ready_lock
 
@@ -299,10 +300,11 @@ static raw_spinlock_t* cedf_get_dgl_spinlock(struct task_struct *t)
  */
 #define VERBOSE_INIT
 
-static int cpu_lower_prio(const struct binheap_node *_a, const struct binheap_node *_b)
+static int cpu_lower_prio(const struct sbinheap_node *_a,
+		const struct sbinheap_node *_b)
 {
-	const cpu_entry_t *a = binheap_entry(_a, cpu_entry_t, hn);
-	const cpu_entry_t *b = binheap_entry(_b, cpu_entry_t, hn);
+	const cpu_entry_t *a = sbinheap_entry(_a, cpu_entry_t, hn);
+	const cpu_entry_t *b = sbinheap_entry(_b, cpu_entry_t, hn);
 
 	/* Note that a and b are inverted: we want the lowest-priority CPU at
 	 * the top of the heap.
@@ -317,17 +319,17 @@ static void update_cpu_position(cpu_entry_t *entry)
 {
 	cedf_domain_t *cluster = entry->cluster;
 
-	if (likely(binheap_is_in_heap(&entry->hn))) {
-		binheap_delete(&entry->hn, &cluster->cpu_heap);
+	if (likely(sbinheap_is_in_heap(&entry->hn))) {
+		sbinheap_delete(&entry->hn, &cluster->cpu_heap);
 	}
 
-	binheap_add(&entry->hn, &cluster->cpu_heap, cpu_entry_t, hn);
+	sbinheap_add(&entry->hn, &cluster->cpu_heap, cpu_entry_t, hn);
 }
 
 /* caller must hold cedf lock */
 static cpu_entry_t* lowest_prio_cpu(cedf_domain_t *cluster)
 {
-	return binheap_top_entry(&cluster->cpu_heap, cpu_entry_t, hn);
+	return sbinheap_top_entry(&cluster->cpu_heap, cpu_entry_t, hn);
 }
 
 static noinline void unlink(struct task_struct* t);
@@ -852,8 +854,8 @@ static enum hrtimer_restart cedf_simple_io_on_exhausted(struct task_struct *t,
 			if (tsk_rt(to_update)->linked_on != NO_CPU) {
 				cpu_entry_t *entry = &per_cpu(cedf_cpu_entries,
 								tsk_rt(to_update)->linked_on);
-				BUG_ON(!binheap_is_in_heap(&entry->hn));
-				binheap_delete(&entry->hn, &cluster->cpu_heap);
+				BUG_ON(!sbinheap_is_in_heap(&entry->hn));
+				sbinheap_delete(&entry->hn, &cluster->cpu_heap);
 			}
 		}
 		for (i = find_first_bit(&tsk_rt(t)->used_linkback_slots,
@@ -869,7 +871,7 @@ static enum hrtimer_restart cedf_simple_io_on_exhausted(struct task_struct *t,
 			if (tsk_rt(to_update)->linked_on != NO_CPU) {
 				cpu_entry_t *entry = &per_cpu(cedf_cpu_entries,
 								tsk_rt(to_update)->linked_on);
-				binheap_add(&entry->hn, &cluster->cpu_heap,
+				sbinheap_add(&entry->hn, &cluster->cpu_heap,
 						cpu_entry_t, hn);
 			}
 		}
@@ -1067,8 +1069,8 @@ static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t,
 			if (tsk_rt(to_update)->linked_on != NO_CPU) {
 				cpu_entry_t *entry = &per_cpu(cedf_cpu_entries,
 								tsk_rt(to_update)->linked_on);
-				BUG_ON(!binheap_is_in_heap(&entry->hn));
-				binheap_delete(&entry->hn, &cluster->cpu_heap);
+				BUG_ON(!sbinheap_is_in_heap(&entry->hn));
+				sbinheap_delete(&entry->hn, &cluster->cpu_heap);
 			}
 		}
 		for (i = find_first_bit(&tsk_rt(t)->used_linkback_slots,
@@ -1084,7 +1086,7 @@ static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t,
 			if (tsk_rt(to_update)->linked_on != NO_CPU) {
 				cpu_entry_t *entry = &per_cpu(cedf_cpu_entries,
 								tsk_rt(to_update)->linked_on);
-				binheap_add(&entry->hn, &cluster->cpu_heap,
+				sbinheap_add(&entry->hn, &cluster->cpu_heap,
 						cpu_entry_t, hn);
 			}
 		}
@@ -1775,9 +1777,9 @@ static int __increase_priority_inheritance(struct task_struct* t,
 				 * We can't use heap_decrease() here since
 				 * the cpu_heap is ordered in reverse direction, so
 				 * it is actually an increase. */
-				binheap_delete(&per_cpu(cedf_cpu_entries, linked_on).hn,
+				sbinheap_delete(&per_cpu(cedf_cpu_entries, linked_on).hn,
 						&cluster->cpu_heap);
-				binheap_add(&per_cpu(cedf_cpu_entries, linked_on).hn,
+				sbinheap_add(&per_cpu(cedf_cpu_entries, linked_on).hn,
 						&cluster->cpu_heap, cpu_entry_t, hn);
 
 				/* tell prio_inh that we're __running__ with its priority */
@@ -2363,6 +2365,7 @@ static void cleanup_cedf(void)
 	if (clusters_allocated) {
 		for (i = 0; i < num_clusters; i++) {
 			kfree(cedf[i].cpus);
+			kfree(cedf[i].cpu_heap.buf);
 			free_cpumask_var(cedf[i].cpu_map);
 		}
 
@@ -2507,7 +2510,14 @@ static long cedf_activate_plugin(void)
 
 		cedf[i].cpus = kmalloc(cluster_size * sizeof(cpu_entry_t),
 				GFP_ATOMIC);
-		INIT_BINHEAP_HANDLE(&(cedf[i].cpu_heap), cpu_lower_prio);
+
+		cedf[i].cpu_heap.compare = cpu_lower_prio;
+		cedf[i].cpu_heap.size = 0;
+		cedf[i].cpu_heap.max_size = cluster_size;
+		cedf[i].cpu_heap.buf =
+			kmalloc(cluster_size * sizeof(struct sbinheap_node), GFP_ATOMIC);
+		INIT_SBINHEAP(&(cedf[i].cpu_heap));
+
 		edf_domain_init(&(cedf[i].domain), NULL, cedf_release_jobs);
 
 		if(!zalloc_cpumask_var(&cedf[i].cpu_map, GFP_ATOMIC))
@@ -2565,7 +2575,7 @@ static long cedf_activate_plugin(void)
 			memset(entry, 0, sizeof(*entry));
 			entry->cpu = ccpu;
 			entry->cluster = &cedf[i];
-			INIT_BINHEAP_NODE(&entry->hn);
+			INIT_SBINHEAP_NODE(&entry->hn);
 			mb();
 
 			++cpu_count;
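
Taken together, the converted call sites follow a single pattern: delete-then-add to reposition a CPU whose priority changed, and a top-entry read to find the preemption candidate. Below is a minimal consolidated sketch of that pattern, using only the sbinheap operations that appear in this diff; reposition_and_peek is an illustrative name, not from the patch.

/* Sketch: reposition a CPU in its cluster's heap and peek at the
 * new minimum; entry/cluster types are as in sched_cedf.c above. */
static cpu_entry_t* reposition_and_peek(cpu_entry_t *entry,
		cedf_domain_t *cluster)
{
	/* delete first if already enqueued, then re-add at the new position */
	if (sbinheap_is_in_heap(&entry->hn))
		sbinheap_delete(&entry->hn, &cluster->cpu_heap);
	sbinheap_add(&entry->hn, &cluster->cpu_heap, cpu_entry_t, hn);

	/* the comparator is inverted, so the top is the lowest-priority CPU */
	return sbinheap_top_entry(&cluster->cpu_heap, cpu_entry_t, hn);
}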