author     Jeremy Erickson <jerickso@cs.unc.edu>  2013-05-02 13:45:01 -0400
committer  Jeremy Erickson <jerickso@cs.unc.edu>  2013-05-02 13:45:01 -0400
commit     474e71a36830d2330f69d26906e004a93c811717
tree       ddba64182e80bf230949e1032c1561c894ee72ef
parent     82dd09f1315957602e537bb747abbb576e0576d4
First version of EDF-os plugin before fixing compile errors
-rw-r--r--  include/litmus/rt_param.h  |  11
-rw-r--r--  litmus/sched_edf_os.c      | 160
2 files changed, 120 insertions(+), 51 deletions(-)
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 49796bc62d0f..b741645c4e59 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -1,4 +1,5 @@
 #include <linux/threads.h>
+#include <litmus/bheap.h>
 
 /*
  * Definition of the scheduler plugin interface.
@@ -63,8 +64,12 @@ struct edffm_params {
 struct edfos_params {
 	/* EDF-os: where can a migratory task execute? */
 	unsigned int cpus[NR_CPUS];
-	/* how many cpus are used by this task? */
-	unsigned int nr_cpus;
+	/* Whether this task is a migrating task */
+	unsigned int migrat;
+	/* ID of the first or only CPU */
+	unsigned int first_cpu;
+	/* Time of next subtask release or deadline */
+	int heap_data[NR_CPUS];
 	/* Fraction of this task exec_cost that each CPU should handle.
 	 * We keep the fraction divided in num/denom : a matrix of
 	 * (NR_CPUS rows) x (2 columns).
@@ -72,6 +77,8 @@ struct edfos_params {
 	 * The second column is the denominator.
 	 */
 	lt_t fraction[2][NR_CPUS];
+	struct bheap release_queue;
+	struct bheap ready_queue;
 };
 
 /* Parameters for NPS-F semi-partitioned scheduling algorithm.
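Annotation: despite the older comment above describing "(NR_CPUS rows) x (2 columns)", the code throughout this patch indexes the share matrix as fraction[row][cpu], with row 0 holding per-CPU numerators and row 1 per-CPU denominators. A minimal userspace sketch of that layout, with purely hypothetical values (a task split 3/5 on CPU 0 and 2/5 on CPU 1); this is illustration only, not part of the patch:

#include <stdio.h>

#define NR_CPUS 4
typedef unsigned long long lt_t;

int main(void)
{
	/* Hypothetical migrating task: 3/5 of its jobs on CPU 0,
	 * 2/5 on CPU 1.  Row 0 = numerators, row 1 = denominators. */
	lt_t fraction[2][NR_CPUS] = {
		{ 3, 2, 0, 0 },
		{ 5, 5, 0, 0 },
	};
	int cpu;

	for (cpu = 0; cpu < 2; cpu++)
		printf("CPU %d share: %llu/%llu\n", cpu,
		       fraction[0][cpu], fraction[1][cpu]);
	return 0;
}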
diff --git a/litmus/sched_edf_os.c b/litmus/sched_edf_os.c
index 984d0678ee5a..c125fd7ee632 100644
--- a/litmus/sched_edf_os.c
+++ b/litmus/sched_edf_os.c
@@ -35,37 +35,16 @@ DEFINE_PER_CPU(edfos_domain_t, edfos_domains);
 #define edfos_params(t) (t->rt_param.task_params.semi_part.os)
 
 /* Is the task a migratory task? */
-#define is_migrat_task(task) (edfos_params(task).nr_cpus)
+#define is_migrat_task(task) (edfos_params(task).migrat)
 /* t is on the wrong CPU (it should be requeued properly) */
-#define wrong_cpu(t) is_migrat_task((t)) && task_cpu((t)) != get_partition((t))
-/* Get next CPU */
-#define migrat_next_cpu(t) \
-	((tsk_rt(t)->task_params.cpu == edfos_params(t).cpus[0]) ? \
-		edfos_params(t).cpus[1] : \
-		edfos_params(t).cpus[0])
-/* Get current cpu */
-#define migrat_cur_cpu(t) \
-	((tsk_rt(t)->task_params.cpu == edfos_params(t).cpus[0]) ? \
-		edfos_params(t).cpus[0] : \
-		edfos_params(t).cpus[1])
+#define wrong_cpu(t) is_migrat_task((t)) \
+	&& task_cpu((t)) != get_partition((t))
 /* Manipulate share for current cpu */
-#define cur_cpu_fract_num(t) \
-	((tsk_rt(t)->task_params.cpu == edfos_params(t).cpus[0]) ? \
-		edfos_params(t).fraction[0][0] : \
-		edfos_params(t).fraction[0][1])
-#define cur_cpu_fract_den(t) \
-	((tsk_rt(t)->task_params.cpu == edfos_params(t).cpus[0]) ? \
-		edfos_params(t).fraction[1][0] : \
-		edfos_params(t).fraction[1][1])
+#define cur_cpu_fract_num(t) edfos_params(t).fraction[0][get_partition(t)]
+#define cur_cpu_fract_den(t) edfos_params(t).fraction[1][get_partition(t)]
 /* Get job number for current cpu */
 #define cur_cpu_job_no(t) \
-	((tsk_rt(t)->task_params.cpu == edfos_params(t).cpus[0]) ? \
-		tsk_rt(t)->semi_part.cpu_job_no[0] : \
-		tsk_rt(t)->semi_part.cpu_job_no[1])
-/* What is the current cpu position in the array? */
-#define edfos_cpu_pos(cpu,t) \
-	((cpu == edfos_params(t).cpus[0]) ? \
-		0 : 1)
+	tsk_rt(t)->semi_part.cpu_job_no[get_partition(t)]
 
 /*
  * EDF-os: migratory tasks have higher prio than fixed, EDF in both classes.
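Annotation: the collapsed macros work because get_partition(t) now indexes the per-CPU arrays directly; under EDF-os a migrating task may run on any of its assigned CPUs, not just the cpus[0]/cpus[1] pair that the removed EDF-fm-style ternaries assumed. A rough standalone rendering of the new form, with get_partition stubbed by a plain variable (names here are hypothetical stand-ins):

#include <stdio.h>

#define NR_CPUS 4

static unsigned long long fraction[2][NR_CPUS];
static int partition;	/* stands in for get_partition(t) */

/* Same shape as the rewritten macros above: index by the current
 * partition instead of testing against cpus[0]/cpus[1]. */
#define cur_cpu_fract_num() fraction[0][partition]
#define cur_cpu_fract_den() fraction[1][partition]

int main(void)
{
	fraction[0][2] = 1;	/* hypothetical: share 1/4 on CPU 2 */
	fraction[1][2] = 4;
	partition = 2;
	printf("%llu/%llu\n", cur_cpu_fract_num(), cur_cpu_fract_den());
	return 0;
}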
@@ -73,14 +52,18 @@ DEFINE_PER_CPU(edfos_domain_t, edfos_domains);
 */
int edfos_higher_prio(struct task_struct* first, struct task_struct* second)
{
-	if ((first && edfos_params(first).nr_cpus) ||
-		(second && edfos_params(second).nr_cpus)) {
-		if ((first && edfos_params(first).nr_cpus) &&
-			(second && edfos_params(second).nr_cpus))
+	if ((first && edfos_params(first).migrat) ||
+		(second && edfos_params(second).migrat)) {
+		if ((first && edfos_params(first).migrat) &&
+			(second && edfos_params(second).migrat))
 			/* both are migrating */
-			return edf_higher_prio(first, second);
+			if (edfos_params(first).first_cpu <
+			    edfos_params(second).first_cpu)
+				return 1;
+			else
+				return 0;
 
-		if (first && edfos_params(first).nr_cpus)
+		if (first && edfos_params(first).migrat)
 			/* first is migrating */
 			return 1;
 		else
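Annotation: the rewritten comparison makes migrating tasks always beat fixed tasks, and orders two migrating tasks statically by first_cpu rather than by deadline. A standalone sketch of that ordering (the task record and the EDF fallback are simplified stand-ins for the kernel types, and the kernel's NULL checks are omitted):

#include <stdio.h>

/* Hypothetical stand-in for the kernel task record; only the two
 * fields the EDF-os comparison needs. */
struct fake_task {
	int migrat;		/* is this a migrating task? */
	unsigned int first_cpu;	/* static tie-breaker */
};

static int higher_prio(const struct fake_task *a, const struct fake_task *b)
{
	if (a->migrat || b->migrat) {
		if (a->migrat && b->migrat)
			/* both migrating: lower first_cpu wins */
			return a->first_cpu < b->first_cpu;
		/* exactly one migrating: it wins */
		return a->migrat;
	}
	return 0;	/* both fixed: would fall back to EDF */
}

int main(void)
{
	struct fake_task m0 = { 1, 0 }, m1 = { 1, 1 }, fixed = { 0, 0 };

	printf("m0 beats m1: %d\n", higher_prio(&m0, &m1));	/* 1 */
	printf("m1 beats fixed: %d\n", higher_prio(&m1, &fixed));	/* 1 */
	printf("fixed beats m0: %d\n", higher_prio(&fixed, &m0));	/* 0 */
	return 0;
}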
@@ -97,6 +80,11 @@ int edfos_ready_order(struct bheap_node* a, struct bheap_node* b)
 	return edfos_higher_prio(bheap2task(a), bheap2task(b));
 }
 
+static int fakepfair_ready_order(struct bheap_node* a, struct bheap_node* b)
+{
+	return *((int*)a->value) < *((int*)b->value);
+}
+
 /* need_to_preempt - check whether the task t needs to be preempted
  * call only with irqs disabled and with ready_lock acquired
  */
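Annotation: since the comparator treats the smaller integer as higher priority, both per-task heaps behave as min-heaps keyed on the value each node points at: the next subtask release time in release_queue, the subtask deadline in ready_queue. A minimal check of that ordering with a simplified node (the in-kernel struct bheap_node is assumed, not reproduced):

#include <stdio.h>

struct node { void *value; };	/* simplified stand-in for bheap_node */

/* Mirrors fakepfair_ready_order(): "higher priority" means the
 * smaller key, i.e. the earlier release time or deadline. */
static int fakepfair_order(struct node *a, struct node *b)
{
	return *((int *)a->value) < *((int *)b->value);
}

int main(void)
{
	int d0 = 3, d1 = 5;
	struct node a = { &d0 }, b = { &d1 };

	printf("%d\n", fakepfair_order(&a, &b));	/* 1: 3 beats 5 */
	return 0;
}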
@@ -260,31 +248,84 @@ static void requeue(struct task_struct* t, rt_domain_t *edf)
 */
static void update_job_counter(struct task_struct *t)
{
-	int cpu_pos;
-
-	/* Which CPU counter should be incremented? */
-	cpu_pos = edfos_cpu_pos(t->rt_param.task_params.cpu, t);
-	t->rt_param.semi_part.cpu_job_no[cpu_pos]++;
+	t->rt_param.semi_part.cpu_job_no[get_partition(t)]++;
 
 	TRACE_TASK(t, "job_no = %d, cpu_job_no(pos %d) = %d, cpu %d\n",
-		t->rt_param.job_params.job_no, cpu_pos, cur_cpu_job_no(t),
-		t->rt_param.task_params.cpu);
+		t->rt_param.job_params.job_no, get_partition(t),
+		cur_cpu_job_no(t), t->rt_param.task_params.cpu);
+}
+
+
+static int compute_pfair_deadline(unsigned int wt_num, unsigned int wt_den,
+				  unsigned int job_no)
+{
+	unsigned int num, den, dead;
+	num = job_no * wt_den;
+	den = wt_num;
+	dead = num / den;
+	if (num % den > 0)
+		dead++;
+	return dead;
+}
+
+static int compute_pfair_release(unsigned int wt_num, unsigned int wt_den,
+				 unsigned int job_no)
+{
+	return ((job_no - 1) * wt_den) / wt_num;
 }
 
 /* What is the next cpu for this job? (eq. 8, in EDF-Fm paper) */
 static int next_cpu_for_job(struct task_struct *t)
 {
+	unsigned int cpu, next_rel;
+	struct bheap_node* node;
 	BUG_ON(!is_migrat_task(t));
+
+	/* Process any new subtask releases. */
+	node = bheap_peek(fakepfair_ready_order,
+			  &edfos_params(t).release_queue);
+	while (node && *((int*)node->value) <= tsk_rt(t)->job_params.job_no) {
+		node = bheap_take(fakepfair_ready_order,
+				  &edfos_params(t).release_queue);
+		cpu = ((int*)node->value) - edfos_params(t).heap_data;
+		*((int*)node->value) = compute_pfair_deadline(
+				edfos_params(t).fraction[0][cpu],
+				edfos_params(t).fraction[1][cpu],
+				tsk_rt(t)->semi_part.cpu_job_no[cpu] + 1);
+		bheap_insert(fakepfair_ready_order,
+			     &edfos_params(t).ready_queue, node);
+		node = bheap_peek(fakepfair_ready_order,
+				  &edfos_params(t).release_queue);
+	}
+
+	/* Choose the next Pfair subtask. */
+	node = bheap_take(fakepfair_ready_order,
+			  &edfos_params(t).ready_queue);
+	cpu = ((int*)node->value) - edfos_params(t).heap_data;
+
+	next_rel = compute_pfair_release(edfos_params(t).fraction[0][cpu],
+			edfos_params(t).fraction[1][cpu],
+			tsk_rt(t)->semi_part.cpu_job_no[cpu] + 1);
+	if (next_rel < tsk_rt(t)->job_params.job_no) {
+		/* Next subtask already released. */
+		*((int*)node->value) = compute_pfair_deadline(
+				edfos_params(t).fraction[0][cpu],
+				edfos_params(t).fraction[1][cpu],
+				tsk_rt(t)->semi_part.cpu_job_no[cpu]);
+		bheap_insert(fakepfair_ready_order,
+			     &edfos_params(t).ready_queue, node);
+	} else {
+		/* Next subtask not yet released. */
+		*((int*)node->value) = next_rel;
+		bheap_insert(fakepfair_ready_order,
+			     &edfos_params(t).release_queue, node);
+	}
 
 	TRACE_TASK(t, "%u = %u * %u / %u\n",
 		t->rt_param.job_params.job_no, cur_cpu_job_no(t),
 		cur_cpu_fract_den(t), cur_cpu_fract_num(t));
-	if ((t->rt_param.job_params.job_no) ==
-		(((lt_t) cur_cpu_job_no(t) * cur_cpu_fract_den(t)) /
-		cur_cpu_fract_num(t)))
-		return edfos_params(t).cpus[0];
-
-	return edfos_params(t).cpus[1];
+	return cpu;
 }
 
 /* If needed (the share for task t on this CPU is exhausted), updates
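Annotation: compute_pfair_deadline() and compute_pfair_release() implement the standard Pfair subtask windows. For a per-CPU weight of wt_num/wt_den, subtask j is released at floor((j-1) * wt_den / wt_num) and due at ceil(j * wt_den / wt_num). A standalone sketch tabulating the windows for a hypothetical weight of 2/5 (same arithmetic, closed-form ceiling instead of the remainder test):

#include <stdio.h>

/* Deadline of subtask j for weight wt_num/wt_den: ceil(j*wt_den/wt_num). */
static unsigned int pf_deadline(unsigned int wt_num, unsigned int wt_den,
				unsigned int j)
{
	return (j * wt_den + wt_num - 1) / wt_num;
}

/* Release of subtask j: floor((j-1)*wt_den/wt_num). */
static unsigned int pf_release(unsigned int wt_num, unsigned int wt_den,
			       unsigned int j)
{
	return ((j - 1) * wt_den) / wt_num;
}

int main(void)
{
	unsigned int j;

	/* Hypothetical weight 2/5: windows [0,3), [2,5), [5,8), [7,10). */
	for (j = 1; j <= 4; j++)
		printf("subtask %u: release %u, deadline %u\n", j,
		       pf_release(2, 5, j), pf_deadline(2, 5, j));
	return 0;
}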
@@ -292,15 +333,16 @@ static int next_cpu_for_job(struct task_struct *t)
 */
static void change_migrat_cpu_if_needed(struct task_struct *t)
{
+	int cpu;
 	BUG_ON(!is_migrat_task(t));
 	/* EDF-os: if it is a migrating task and it has already executed
 	 * the required number of jobs on this CPU, we need to move it
 	 * on its next CPU; changing the cpu here will affect the requeue
 	 * and the next release
 	 */
-	if (unlikely(next_cpu_for_job(t) != migrat_cur_cpu(t))) {
-
-		tsk_rt(t)->task_params.cpu = migrat_next_cpu(t);
+	cpu = next_cpu_for_job(t);
+	if (unlikely(cpu != get_partition(t))) {
+		tsk_rt(t)->task_params.cpu = cpu;
 		TRACE_TASK(t, "EDF-os: will migrate job %d -> %d\n",
 			task_cpu(t), tsk_rt(t)->task_params.cpu);
 		return;
@@ -431,6 +473,19 @@ static void edfos_task_new(struct task_struct * t, int on_rq, int running)
 	rt_domain_t* edf = task_edf(t);
 	edfos_domain_t* edfos = task_edfos(t);
 	unsigned long flags;
+	unsigned int i;
+
+	for (i = 0; i < NR_CPUS; i++) {
+		edfos_params(t).heap_data[i] = compute_pfair_deadline(
+				edfos_params(t).fraction[0][i],
+				edfos_params(t).fraction[1][i],
+				0);
+		bheap_add(fakepfair_ready_order, &edfos_params(t).ready_queue,
+			  &edfos_params(t).heap_data[i], GFP_ATOMIC);
+	}
+	/* Pick the first CPU to execute on. */
+	change_migrat_cpu_if_needed(t);
 
 	TRACE_TASK(t, "EDF-os: task new, cpu = %d\n",
 		t->rt_param.task_params.cpu);
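Annotation: at admission, each CPU's slot in heap_data[] seeds the ready heap with the deadline of that CPU's first fake-Pfair subtask, and change_migrat_cpu_if_needed() then pops the earliest one to pick the initial partition. A sketch of the seeded values under hypothetical shares, numbering the first subtask 1 and skipping zero-share CPUs so the division is defined (the in-kernel loop above visits every NR_CPUS slot):

#include <stdio.h>

#define NR_CPUS 4

/* Same ceiling arithmetic as compute_pfair_deadline() above. */
static unsigned int pf_deadline(unsigned int wt_num, unsigned int wt_den,
				unsigned int job_no)
{
	return (job_no * wt_den + wt_num - 1) / wt_num;
}

int main(void)
{
	/* Hypothetical shares: 3/5 on CPU 0, 2/5 on CPU 1, none elsewhere. */
	unsigned int num[NR_CPUS] = { 3, 2 }, den[NR_CPUS] = { 5, 5 };
	int heap_data[NR_CPUS];
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (!num[i])
			continue;	/* zero share: nothing to seed */
		heap_data[i] = pf_deadline(num[i], den[i], 1);
		printf("cpu %d: first subtask deadline %d\n", i, heap_data[i]);
	}
	return 0;
}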
@@ -516,6 +571,7 @@ static void edfos_task_exit(struct task_struct * t)
 	unsigned long flags;
 	edfos_domain_t* edfos = task_edfos(t);
 	rt_domain_t* edf;
+	unsigned int i;
 
 	raw_spin_lock_irqsave(&edfos->slock, flags);
 	if (is_queued(t)) {
@@ -526,6 +582,12 @@ static void edfos_task_exit(struct task_struct * t)
 	if (edfos->scheduled == t)
 		edfos->scheduled = NULL;
 
+	/* Deallocate heap nodes. */
+	while (bheap_take_del(fakepfair_ready_order,
+			      &edfos_params(t).release_queue)) {}
+	while (bheap_take_del(fakepfair_ready_order,
+			      &edfos_params(t).ready_queue)) {}
+
 	TRACE_TASK(t, "RIP\n");
 
 	preempt(edfos);