author    Clara Hobbs <cghobbs@cs.unc.edu>  2019-04-18 16:06:36 -0400
committer Clara Hobbs <cghobbs@cs.unc.edu>  2019-04-18 16:06:36 -0400
commit    8af58c76f7b38fffaccafbb5385ef7c615cf06ea (patch)
tree      ad37b4886dbf32af41f497fb589a3d5c12fbb51e
parent    3d4334bb0588d40b7067c5b221331fd5055d809b (diff)
Various little clean-up operations
-rw-r--r--  include/litmus/rt_param.h  4
-rw-r--r--  litmus/sched_edfsc.c       98
2 files changed, 47 insertions(+), 55 deletions(-)
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 7baea47d831a..68c58c58190e 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -5,8 +5,6 @@
 #ifndef _LINUX_RT_PARAM_H_
 #define _LINUX_RT_PARAM_H_
 
-#include
-
 /* Litmus time type. */
 typedef unsigned long long lt_t;
 
@@ -112,7 +110,7 @@ struct reservation_config {
 	};
 };
 
-//struct for edfsc_parameters
+// struct for edfsc_parameters
 typedef struct {
 	cont_domain_t* container_domain;
 	struct task_struct* container_task;
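
The hunk cuts the edfsc_params block off after two members; for reference, a sketch of the full layout as the uses later in this diff imply (will_remove, deadline_timer, qnode). Everything beyond the two fields shown above is an assumption.

/* Sketch only: layout inferred from uses in sched_edfsc.c below. */
typedef struct {
	cont_domain_t* container_domain;    /* NULL marks a container task */
	struct task_struct* container_task; /* container a fixed task belongs to */
	int will_remove;                    /* set by edfsc_task_exit */
	struct hrtimer deadline_timer;      /* fires at the deadline on exit */
	struct list_head qnode;             /* links the task on pending_adds */
} edfsc_params;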
diff --git a/litmus/sched_edfsc.c b/litmus/sched_edfsc.c
index 1f6e82a9b954..282ba18ad3f0 100644
--- a/litmus/sched_edfsc.c
+++ b/litmus/sched_edfsc.c
@@ -25,7 +25,6 @@ typedef struct {
 	struct task_struct* scheduled; //container or migrating task
 	atomic_t will_schedule;
 	struct bheap_node* hn;
-	int task_util, future_task_util;
 } cpu_entry_t;
 
 typedef struct {
@@ -33,10 +32,9 @@ typedef struct {
 	struct task_struct* container;
 	struct task_struct* scheduled; //fixed task
 	lt_t scheduled_last_exec_time; //exec_time of the scheduled task when it was last scheduled
 	lt_t changed_budget; //change to scheduled task's exec time due to container budget constraints
-	int task_util, future_task_util;
+	u64 task_util, future_task_util;
 	#define c_lock domain.ready_lock
-
 } cont_domain_t;
 
 struct list_head pending_adds;
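
Widening task_util and future_task_util from int to u64 suggests the utilizations are kept in fixed point rather than as floats, since the kernel cannot use floating point here. A standalone sketch of that representation; the 2^32 scale factor is an assumption, not something this commit specifies.

#include <stdint.h>
#include <stdio.h>

#define UTIL_ONE ((uint64_t)1 << 32)  /* fixed-point 1.0 */

/* utilization = wcet / period, scaled by 2^32; a wcet below ~4 s keeps
 * the intermediate product inside 64 bits */
static uint64_t utilization(uint64_t wcet_ns, uint64_t period_ns)
{
	return wcet_ns * UTIL_ONE / period_ns;
}

int main(void)
{
	uint64_t u = utilization(3000000, 10000000);   /* 3 ms every 10 ms */
	printf("util = %.2f\n", (double)u / UTIL_ONE); /* prints 0.30 */
	return 0;
}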
@@ -52,6 +50,8 @@ static cont_domain_t container_domains[NR_CPUS];
 static rt_domain_t gsched_domain;
 #define g_lock (gsched_domain.ready_lock)
 
+u64 task_util, future_task_util;
+
 static u64 cont_job_id;
 
 #define is_container(task) ((task)->container_domain == NULL)
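
is_fixed and is_migrating are used later in this file, but their definitions fall outside the hunks shown; plausible companions to is_container above, offered as assumptions in the same style.

/* Assumed definitions: a task is exactly one of container, fixed
 * (assigned to a container), or migrating (scheduled globally). */
#define is_fixed(task)     (!is_container(task) && (task)->container_task != NULL)
#define is_migrating(task) (!is_container(task) && (task)->container_task == NULL)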
@@ -95,7 +95,7 @@ static noinline void requeue(struct task_struct* task, rt_domain_t* domain)
 
 static struct bheap_node edfsc_cpu_heap_node[NR_CPUS];
 static struct bheap edfsc_cpu_heap;
-
+
 static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
 {
 	cpu_entry_t *a, *b;
@@ -211,7 +211,7 @@ static void g_preempt_check(void)
 {
 	struct task_struct *task;
 	cpu_entry_t *last;
-
+
 #ifdef CONFIG_PREFER_LOCAL_LINKING
 	cpu_entry_t *local;
 
@@ -247,13 +247,13 @@ static void g_preempt_check(void)
 static int c_preempt_check(cont_domain_t* container)
 {
 	if (edf_preemption_needed(&container->domain, container->scheduled)) {
-		preempt(pedf);
+		preempt(&container->domain);
 		return 1;
 	} else
 		return 0;
 }
 
-//migrating or container task job_completion, called from edfsc_gschedule
+// migrating or container task job_completion, called from edfsc_gschedule
 static noinline void g_job_completion(struct task_struct* t, int forced)
 {
 	BUG_ON(!t);
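
The old preempt(pedf) call apparently survived from the PSN-EDF plugin, where the per-CPU domain variable is named pedf; the fix targets the container's own domain. For context, c_preempt_check would typically be driven from the container domain's release handler, roughly as below; the hook itself is not part of this diff, so the wiring is an assumption.

/* Assumed wiring: invoked by the rt_domain release machinery when jobs
 * in a container become ready. */
static void c_release_jobs(rt_domain_t* rt, struct bheap* tasks)
{
	unsigned long flags;
	cont_domain_t* cont = container_of(rt, cont_domain_t, domain);

	raw_spin_lock_irqsave(&cont->c_lock, flags);
	__merge_ready(rt, tasks); /* move the released jobs to the ready queue */
	c_preempt_check(cont);    /* reschedule if one of them has priority */
	raw_spin_unlock_irqrestore(&cont->c_lock, flags);
}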
@@ -264,18 +264,12 @@ static noinline void g_job_completion(struct task_struct* t, int forced)
 
 	/* set flags */
 	tsk_rt(t)->completed = 0;
 
-	//is at later of job completion and job deadline
-	if (is_migrating(t) && t->rt_param.job_params.lateness > 0 && tsk_rt(t)->edfsc_params.will_remove) {
-		//update task utilizations
-	}
-
 	if (is_migrating(t) && tsk_rt(t)->edfsc_params.will_remove) {
 		if (t->rt_param.job_params.lateness > 0) {
-			//remove the task now
+			// TODO remove the task now
 		}
-	}
-	else {
+	} else {
 		/* prepare for next period */
 		prepare_for_next_period(t);
 		if (is_early_releasing(t) || is_released(t, litmus_clock()))
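
The TODO branch above is still empty; one plausible body, assuming "remove" means returning the departing task's share to the global utilization counters. get_exec_cost and get_rt_period are the standard LITMUS^RT accessors and div64_u64 the standard kernel helper, but the accounting itself is a guess (using the fixed-point scale sketched earlier).

/* Illustrative only: give the task's bandwidth back once its last
 * deadline has passed. */
u64 u = div64_u64(get_exec_cost(t) << 32, get_rt_period(t));
task_util -= u;
future_task_util -= u;
tsk_rt(t)->edfsc_params.will_remove = 0;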
@@ -291,25 +285,24 @@ static noinline void g_job_completion(struct task_struct* t, int forced)
 	}
 }
 
-//fixed task job_completion, called from edfsc_cschedule
+// fixed task job_completion, called from edfsc_cschedule
 static void c_job_completion(struct task_struct* t, int forced)
 {
 	sched_trace_task_completion(t, forced);
 	TRACE_TASK(t, "job_completion(forced=%d).\n", forced);
 
 	tsk_rt(t)->completed = 0;
 
 	if (tsk_rt(t)->edfsc_params.will_remove) {
 		if (t->rt_param.job_params.lateness > 0) {
-			//remove the task now
+			// TODO remove the task now
 		}
-	}
-	else {
+	} else {
 		prepare_for_next_period(t);
 	}
 }
 
-//need to update cpu entries after global scheduling
+// need to update cpu entries after global scheduling
 static void g_finish_switch(struct task_struct *prev)
 {
 	cpu_entry_t* entry = this_cpu_ptr(&gsnedf_cpu_entries);
@@ -321,12 +314,12 @@ static void g_finish_switch(struct task_struct *prev)
 #endif
 }
 
-//takes in the container_domain pointer in container task_struct
-//assuming prev is previous task running on the processor before calling schedule
+// takes in the container_domain pointer in container task_struct
+// assuming prev is previous task running on the processor before calling schedule
 static struct task_struct* edfsc_cschedule(cont_domain_t* cedf, struct task_struct * prev)
 {
 	rt_domain_t* edf = &cedf->domain;
 
 	struct task_struct* next;
 
 	int out_of_time, sleep, preempt,
@@ -373,7 +366,7 @@ static struct task_struct* edfsc_cschedule(cont_domain_t* cedf, struct task_stru
 	 * this.
 	 */
 	if (!np && (out_of_time || sleep)) {
-		job_completion(cedf->scheduled, !sleep);
+		c_job_completion(cedf->scheduled, !sleep);
 		resched = 1;
 	}
 
@@ -424,14 +417,15 @@ static struct task_struct* edfsc_gschedule(struct task_struct * prev)
 	BUG_ON(entry->scheduled && entry->scheduled != prev && entry->scheduled->container_domain != NULL);
 	BUG_ON(entry->scheduled && !is_realtime(prev));
 	BUG_ON(is_realtime(prev) && !entry->scheduled);
 
-	if (prev && is_fixed(prev)) { //update container budget if fixed task
+	// update container budget if fixed task
+	if (prev && is_fixed(prev)) {
 		cont_domain_t* cdomain = tsk_rt(prev)->edfsc_params.container_domain;
 		tsk_rt(prev)->job_params.exec_time -= cdomain->changed_budget;
 		tsk_rt(tsk_rt(prev)->edfsc_params.container_task)->job_params.exec_time +=
 			tsk_rt(prev)->job_params.exec_time - cdomain->scheduled_last_exec_time;
 	}
 
 	/* (0) Determine state */
 	exists = entry->scheduled != NULL;
 	is_cont = exists && entry->scheduled->container_domain == NULL;
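
The adjusted accounting charges the container for exactly the service its fixed task received since it was last put on the CPU, after first backing out any budget clipping. A worked instance of the arithmetic above, with all numbers invented for illustration:

/* All values invented for illustration. */
u64 last    = 4000000; /* scheduled_last_exec_time: 4 ms accrued earlier */
u64 raw     = 7000000; /* the task's exec_time as read now */
u64 clipped = 1000000; /* changed_budget: clipped by the container */

u64 billed = raw - clipped; /* task is charged 6 ms of real service */
u64 charge = billed - last; /* container's budget is debited 2 ms */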
@@ -452,10 +446,10 @@ static struct task_struct* edfsc_gschedule(struct task_struct * prev)
 			"state:%d sig:%d is_cont:%d\n",
 			blocks, out_of_time, np, sleep, preempt,
 			prev->state, signal_pending(prev), is_cont);
-
+
 	if (is_cont && !sleep && !preempt && !out_of_time)
 		return edfsc_cschedule(tsk_rt(entry->scheduled)->edfsc_params.container_domain, prev);
-
+
 	if (entry->linked && preempt)
 		TRACE_TASK(prev, "will be preempted by %s/%d\n",
 			entry->linked->comm, entry->linked->pid);
@@ -529,48 +523,49 @@ static struct task_struct* edfsc_gschedule(struct task_struct * prev)
 	return next;
 }
 
-static void edfsc_task_new(struct task_struct* t, int on_rq, int is_scheduled) {
-
+static void edfsc_task_new(struct task_struct* t, int on_rq, int is_scheduled)
+{
 	unsigned long flags;
 	cpu_entry_t* entry;
 
 	raw_spin_lock_irqsave(&g_lock, flags);
 
 	tsk_rt(t)->edfsc_params.will_remove = 0;
 	tsk_rt(t)->sporadic_release = 0;
 	hrtimer_init(&(tsk_rt(t)->edfsc_params.deadline_timer), CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 
 	list_add_tail(&tsk_rt(t)->edfsc_params.qnode, &pending_adds);
 
 	if (is_scheduled) {
 		entry = &per_cpu(gsnedf_cpu_entries, task_cpu(t));
 		BUG_ON(entry->scheduled);
 		preempt(entry);
 	}
 
 	t->rt_param.scheduled_on = NO_CPU;
 	t->rt_param.linked_on = NO_CPU;
 
 	raw_spin_unlock_irqrestore(&g_lock, flags);
 }
 
-//finds the task_struct of the hrtimer set by task_exit
-static struct task_struct* task_of_timer(struct hrtimer* timer) {
+// finds the task_struct of the hrtimer set by task_exit
+static struct task_struct* task_of_timer(struct hrtimer* timer)
+{
 	edfsc_params* a = container_of(timer, edfsc_params, deadline_timer);
 	struct rt_param* b = container_of(a, struct rt_param, edfsc_params);
 	return container_of(b, struct task_struct, rt_param);
 }
 
-static void edfsc_task_exit(struct task_struct* t) {
-
+static void edfsc_task_exit(struct task_struct* t)
+{
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&g_lock, flags);
 
 	tsk_rt(t)->edfsc_params.will_remove = 1;
 
 	if (is_released(t, litmus_clock())) {
 		hrtimer_start(&(tsk_rt(t)->edfsc_params.deadline_timer),
 				ns_to_ktime(t->rt_param.job_params.deadline),
 				HRTIMER_MODE_ABS_PINNED);
 		tsk_rt(t)->edfsc_params.deadline_timer.function = asdf; //TODO: hook up task removal function
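
task_of_timer above recovers the task_struct from an hrtimer embedded two structs deep by chaining container_of. The same technique in a self-contained, userspace-compilable form, with miniature stand-ins for the kernel types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer  { int armed; };
struct params { struct timer deadline_timer; };
struct task   { int pid; struct params rt_param; };

static struct task* task_of_timer(struct timer* t)
{
	/* step out to the params, then out again to the enclosing task */
	struct params* p = container_of(t, struct params, deadline_timer);
	return container_of(p, struct task, rt_param);
}

int main(void)
{
	struct task tsk = { .pid = 42 };
	printf("pid = %d\n", task_of_timer(&tsk.rt_param.deadline_timer)->pid);
	return 0;
}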
@@ -579,7 +574,6 @@ static void edfsc_task_exit(struct task_struct* t) {
 		//update system utilizations
 		//next job release will detect will_remove and remove the job
 	}
 
 	raw_spin_unlock_irqrestore(&g_lock, flags);
-
 }
\ No newline at end of file
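
The deadline_timer.function assignment above still points at the asdf placeholder. A hypothetical shape for that callback, using only names already in this file plus the standard hrtimer return protocol; the name and body are assumptions.

static enum hrtimer_restart deadline_timer_fired(struct hrtimer* timer)
{
	unsigned long flags;
	struct task_struct* t = task_of_timer(timer);

	raw_spin_lock_irqsave(&g_lock, flags);
	/* the final deadline has passed; the task can now be removed and
	 * its utilization returned to the system */
	tsk_rt(t)->edfsc_params.will_remove = 0;
	raw_spin_unlock_irqrestore(&g_lock, flags);

	return HRTIMER_NORESTART;
}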