aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJeremy Erickson <jerickso@cs.unc.edu>2013-05-10 18:22:39 -0400
committerJeremy Erickson <jerickso@cs.unc.edu>2013-05-10 18:22:39 -0400
commit075fd6aa34f6f5dcc535bcc674ce43fb1b50b40f (patch)
tree44567509732611ce0c56b668aad81c0787fca181
parentc999bc50d6f051ec6d3d4e03ee18245e1720f96e (diff)
Lookup table EDF-os (branch: wip-edf-os)
-rw-r--r--include/litmus/rt_param.h8
-rw-r--r--litmus/litmus.c15
-rw-r--r--litmus/sched_edf_os.c45
3 files changed, 44 insertions, 24 deletions
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index bc3bbd07ef26..9208b0eeffef 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -67,17 +67,18 @@ struct edfos_params {
67 unsigned int first_cpu; 67 unsigned int first_cpu;
68 /* Whether this task is a migrating task*/ 68 /* Whether this task is a migrating task*/
69 unsigned int migrat; 69 unsigned int migrat;
70 unsigned int* lookup_table;
70 /* Time of next subtask release or deadline */ 71 /* Time of next subtask release or deadline */
71 int heap_data[NR_CPUS_EDF_OS]; 72 /*int heap_data[NR_CPUS_EDF_OS];*/
72 /* Fraction of this task exec_cost that each CPU should handle. 73 /* Fraction of this task exec_cost that each CPU should handle.
73 * We keep the fraction divided in num/denom : a matrix of 74 * We keep the fraction divided in num/denom : a matrix of
74 * (NR_CPUS_EDF_OS rows) x (2 columns). 75 * (NR_CPUS_EDF_OS rows) x (2 columns).
75 * The first column is the numerator of the fraction. 76 * The first column is the numerator of the fraction.
76 * The second column is the denominator. 77 * The second column is the denominator.
77 */ 78 */
78 lt_t fraction[NR_CPUS_EDF_OS][2]; 79 /*lt_t fraction[NR_CPUS_EDF_OS][2];
79 struct bheap release_queue; 80 struct bheap release_queue;
80 struct bheap ready_queue; 81 struct bheap ready_queue;*/
81}; 82};
82 83
83/* Parameters for NPS-F semi-partitioned scheduling algorithm. 84/* Parameters for NPS-F semi-partitioned scheduling algorithm.
@@ -128,6 +129,7 @@ struct rt_task {
128 unsigned int cpu; 129 unsigned int cpu;
129 task_class_t cls; 130 task_class_t cls;
130 budget_policy_t budget_policy; /* ignored by pfair */ 131 budget_policy_t budget_policy; /* ignored by pfair */
132 unsigned int lookup_len; /* Only used by EDF-os. */
131 133
132 /* parameters used by the semi-partitioned algorithms */ 134 /* parameters used by the semi-partitioned algorithms */
133 union { 135 union {
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 2f780222d8e8..debfc45bb8e6 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -75,6 +75,7 @@ asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param)
75{ 75{
76 struct rt_task tp; 76 struct rt_task tp;
77 struct task_struct *target; 77 struct task_struct *target;
78 int * lookup_table;
78 int retval = -EINVAL; 79 int retval = -EINVAL;
79 80
80 printk("Setting up rt task parameters for process %d.\n", pid); 81 printk("Setting up rt task parameters for process %d.\n", pid);
@@ -124,7 +125,19 @@ asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param)
124 pid, tp.budget_policy); 125 pid, tp.budget_policy);
125 goto out_unlock; 126 goto out_unlock;
126 } 127 }
127 128 if (tp.lookup_len > 0) {
129 lookup_table = kmalloc(tp.lookup_len * sizeof(unsigned int),
130 GFP_KERNEL);
131 if (!lookup_table)
132 goto out_unlock;
133 if (copy_from_user(lookup_table, tp.lookup_table, tp.lookup_len
134 * sizeof(lookup_table))) {
135 retval = -EFAULT;
136 kfree(lookup_table);
137 goto out_unlock;
138 }
139 tp.lookup_table = lookup_table;
140 }
128 target->rt_param.task_params = tp; 141 target->rt_param.task_params = tp;
129 142
130 retval = 0; 143 retval = 0;
diff --git a/litmus/sched_edf_os.c b/litmus/sched_edf_os.c
index e021d22b5129..82e42593f764 100644
--- a/litmus/sched_edf_os.c
+++ b/litmus/sched_edf_os.c
@@ -82,10 +82,10 @@ int edfos_ready_order(struct bheap_node* a, struct bheap_node* b)
82 return edfos_higher_prio(bheap2task(a), bheap2task(b)); 82 return edfos_higher_prio(bheap2task(a), bheap2task(b));
83} 83}
84 84
85static int fakepfair_ready_order(struct bheap_node* a, struct bheap_node* b) 85/*static int fakepfair_ready_order(struct bheap_node* a, struct bheap_node* b)
86{ 86{
87 return *((int*)a->value) < *((int*)b->value); 87 return *((int*)a->value) < *((int*)b->value);
88} 88}*/
89 89
90/* need_to_preempt - check whether the task t needs to be preempted 90/* need_to_preempt - check whether the task t needs to be preempted
91 * call only with irqs disabled and with ready_lock acquired 91 * call only with irqs disabled and with ready_lock acquired
@@ -279,13 +279,16 @@ static int compute_pfair_release(lt_t wt_num, lt_t wt_den,
279 279
280static int next_cpu_for_job(struct task_struct *t) 280static int next_cpu_for_job(struct task_struct *t)
281{ 281{
282 unsigned int cpu; 282 return edfos_params(t).lookup_table[tsk_rt(t)->job_params.job_no %
283 tsk_rt(t)->task_params.lookup_len];
284
285 /*unsigned int cpu;
283 lt_t next_rel; 286 lt_t next_rel;
284 struct bheap_node* node; 287 struct bheap_node* node;
285 BUG_ON(!is_migrat_task(t)); 288 BUG_ON(!is_migrat_task(t));*/
286 289
287 /* Process any new subtask releases. */ 290 /* Process any new subtask releases. */
288 node = bheap_peek(fakepfair_ready_order, 291 /*node = bheap_peek(fakepfair_ready_order,
289 &edfos_params(t).release_queue); 292 &edfos_params(t).release_queue);
290 while (node && *((int*)node->value) <= tsk_rt(t)->job_params.job_no) { 293 while (node && *((int*)node->value) <= tsk_rt(t)->job_params.job_no) {
291 node = bheap_take(fakepfair_ready_order, 294 node = bheap_take(fakepfair_ready_order,
@@ -300,10 +303,10 @@ static int next_cpu_for_job(struct task_struct *t)
300 &edfos_params(t).ready_queue, node); 303 &edfos_params(t).ready_queue, node);
301 node = bheap_peek(fakepfair_ready_order, 304 node = bheap_peek(fakepfair_ready_order,
302 &edfos_params(t).release_queue); 305 &edfos_params(t).release_queue);
303 } 306 }*/
304 307
305 /* Choose the next Pfair subtask. */ 308 /* Choose the next Pfair subtask. */
306 node = bheap_take(fakepfair_ready_order, 309 /*node = bheap_take(fakepfair_ready_order,
307 &edfos_params(t).ready_queue); 310 &edfos_params(t).ready_queue);
308 BUG_ON(!node); 311 BUG_ON(!node);
309 cpu = ((int*)node->value) - edfos_params(t).heap_data; 312 cpu = ((int*)node->value) - edfos_params(t).heap_data;
@@ -313,9 +316,9 @@ static int next_cpu_for_job(struct task_struct *t)
313 tsk_rt(t)->semi_part.cpu_job_no[cpu] 316 tsk_rt(t)->semi_part.cpu_job_no[cpu]
314 + 1); 317 + 1);
315 if (next_rel <= tsk_rt(t)->job_params.job_no) 318 if (next_rel <= tsk_rt(t)->job_params.job_no)
316 { 319 {*/
317 /* Next subtask already released. */ 320 /* Next subtask already released. */
318 *((int*)node->value) = compute_pfair_deadline( 321/* *((int*)node->value) = compute_pfair_deadline(
319 edfos_params(t).fraction[cpu][0], 322 edfos_params(t).fraction[cpu][0],
320 edfos_params(t).fraction[cpu][1], 323 edfos_params(t).fraction[cpu][1],
321 tsk_rt(t)->semi_part.cpu_job_no[cpu] + 324 tsk_rt(t)->semi_part.cpu_job_no[cpu] +
@@ -324,9 +327,9 @@ static int next_cpu_for_job(struct task_struct *t)
324 &edfos_params(t).ready_queue, node); 327 &edfos_params(t).ready_queue, node);
325 } 328 }
326 else 329 else
327 { 330 {*/
328 /* Next subtask not yet released. */ 331 /* Next subtask not yet released. */
329 *((int*)node->value) = next_rel; 332/* *((int*)node->value) = next_rel;
330 bheap_insert(fakepfair_ready_order, 333 bheap_insert(fakepfair_ready_order,
331 &edfos_params(t).release_queue, node); 334 &edfos_params(t).release_queue, node);
332 } 335 }
@@ -334,7 +337,7 @@ static int next_cpu_for_job(struct task_struct *t)
334 TRACE_TASK(t, "%u = %u * %u / %u\n", 337 TRACE_TASK(t, "%u = %u * %u / %u\n",
335 t->rt_param.job_params.job_no, cur_cpu_job_no(t), 338 t->rt_param.job_params.job_no, cur_cpu_job_no(t),
336 cur_cpu_fract_den(t), cur_cpu_fract_num(t)); 339 cur_cpu_fract_den(t), cur_cpu_fract_num(t));
337 return cpu; 340 return cpu;*/
338} 341}
339 342
340/* If needed (the share for task t on this CPU is exhausted), updates 343/* If needed (the share for task t on this CPU is exhausted), updates
@@ -484,13 +487,13 @@ static void edfos_task_new(struct task_struct * t, int on_rq, int running)
484 unsigned long flags; 487 unsigned long flags;
485 unsigned int i; 488 unsigned int i;
486 489
487 if (edfos_params(t).migrat) { 490/* if (edfos_params(t).migrat) {
488 bheap_init(&edfos_params(t).release_queue); 491 bheap_init(&edfos_params(t).release_queue);
489 bheap_init(&edfos_params(t).ready_queue); 492 bheap_init(&edfos_params(t).ready_queue);
490 for (i = 0; i < NR_CPUS_EDF_OS; i++) { 493 for (i = 0; i < NR_CPUS_EDF_OS; i++) {
491 if (i == t->rt_param.task_params.cpu) { 494 if (i == t->rt_param.task_params.cpu) {*/
492 /* Initial CPU - setup next release. */ 495 /* Initial CPU - setup next release. */
493 edfos_params(t).heap_data[i] = 496/* edfos_params(t).heap_data[i] =
494 compute_pfair_release( 497 compute_pfair_release(
495 edfos_params(t).fraction[i][0], 498 edfos_params(t).fraction[i][0],
496 edfos_params(t).fraction[i][1], 2); 499 edfos_params(t).fraction[i][1], 2);
@@ -499,11 +502,11 @@ static void edfos_task_new(struct task_struct * t, int on_rq, int running)
499 &edfos_params(t).heap_data[i], 502 &edfos_params(t).heap_data[i],
500 GFP_ATOMIC); 503 GFP_ATOMIC);
501 } 504 }
502 else if (edfos_params(t).fraction[i][0] > 0) { 505 else if (edfos_params(t).fraction[i][0] > 0) {*/
503 /* Non-initial CPU - already released, setup 506 /* Non-initial CPU - already released, setup
504 * deadline. 507 * deadline.
505 */ 508 */
506 edfos_params(t).heap_data[i] = 509/* edfos_params(t).heap_data[i] =
507 compute_pfair_deadline( 510 compute_pfair_deadline(
508 edfos_params(t).fraction[i][0], 511 edfos_params(t).fraction[i][0],
509 edfos_params(t).fraction[i][1], 1); 512 edfos_params(t).fraction[i][1], 1);
@@ -513,7 +516,7 @@ static void edfos_task_new(struct task_struct * t, int on_rq, int running)
513 GFP_ATOMIC); 516 GFP_ATOMIC);
514 } 517 }
515 } 518 }
516 } 519 }*/
517 520
518 TRACE_TASK(t, "EDF-os: task new, cpu = %d\n", 521 TRACE_TASK(t, "EDF-os: task new, cpu = %d\n",
519 t->rt_param.task_params.cpu); 522 t->rt_param.task_params.cpu);
@@ -610,10 +613,12 @@ static void edfos_task_exit(struct task_struct * t)
610 edfos->scheduled = NULL; 613 edfos->scheduled = NULL;
611 614
612 /* Deallocate heap nodes. */ 615 /* Deallocate heap nodes. */
613 while (bheap_take_del(fakepfair_ready_order, 616/* while (bheap_take_del(fakepfair_ready_order,
614 &edfos_params(t).release_queue)) {} 617 &edfos_params(t).release_queue)) {}
615 while (bheap_take_del(fakepfair_ready_order, 618 while (bheap_take_del(fakepfair_ready_order,
616 &edfos_params(t).ready_queue)) {} 619 &edfos_params(t).ready_queue)) {}*/
620 if (tsk_rt(t)->task_params.lookup_len)
621 kfree(edfos_params(t).lookup_table);
617 622
618 TRACE_TASK(t, "RIP\n"); 623 TRACE_TASK(t, "RIP\n");
619 624