Diffstat (limited to 'litmus/sched_gsn_edf_split.c')
-rw-r--r--  litmus/sched_gsn_edf_split.c | 33
1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/litmus/sched_gsn_edf_split.c b/litmus/sched_gsn_edf_split.c
index dac86900fd06..0fe324e2c156 100644
--- a/litmus/sched_gsn_edf_split.c
+++ b/litmus/sched_gsn_edf_split.c
@@ -16,7 +16,7 @@
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
 #include <litmus/sched_plugin.h>
-#include <litmus/edf_common.h>
+#include <litmus/edf_split_common.h>
 #include <litmus/sched_trace.h>
 #include <litmus/trace.h>
 
@@ -65,7 +65,7 @@ inline static int get_slice_num(struct task_struct* t)
 	}
 }
 
-/* Returns the appropriate deadline.*/
+/* Returns the appropriate subjob deadline.*/
 inline static lt_t get_proper_deadline(struct task_struct* t)
 {
 	return t->rt_param.job_params.release +
@@ -76,8 +76,8 @@ inline static lt_t get_proper_deadline(struct task_struct* t)
 /* Tells us if the current deadline is too small.*/
 inline static int needs_deadline_move(struct task_struct* t)
 {
-	BUG_ON(get_proper_deadline(t) < t->rt_param.job_params.deadline);
-	return get_proper_deadline(t) != t->rt_param.job_params.deadline;
+	BUG_ON(get_proper_deadline(t) < t->rt_param.job_params.subjob_deadline);
+	return get_proper_deadline(t) != t->rt_param.job_params.subjob_deadline;
 }
 
 /*Returns execution time until the next deadline move.
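
(Not part of the patch: the body of get_proper_deadline() is truncated by the hunk above, so the following is only a minimal user-space sketch of the usual job-splitting formula, not the LITMUS^RT code. Under job splitting, each job is divided into `split` equal slices, and the scheduler prioritizes a task by the deadline of its current slice rather than by the job deadline; lt_t here is a stand-in for the LITMUS^RT time type, in nanoseconds.)

#include <stdint.h>

typedef uint64_t lt_t;	/* stand-in for the LITMUS^RT time type */

/* Deadline of the slice_num-th subjob (1-based) of a job released at
 * `release`, when the period is cut into `split` equal slices. This
 * mirrors what get_proper_deadline() plausibly computes from rt_param. */
static lt_t subjob_deadline(lt_t release, lt_t period,
			    unsigned int split, unsigned int slice_num)
{
	return release + (lt_t)slice_num * (period / split);
}

With split == 1 this degenerates to the ordinary job deadline. It also suggests why the BUG_ON() in needs_deadline_move() holds: the slice index only grows as the job executes, so the freshly computed deadline can never be earlier than the stored subjob_deadline.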
@@ -172,7 +172,7 @@ static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
 	/* Note that a and b are inverted: we want the lowest-priority CPU at
 	 * the top of the heap.
 	 */
-	return edf_higher_prio(b->linked, a->linked);
+	return edf_split_higher_prio(b->linked, a->linked);
 }
 
 /* update_cpu_position - Move the cpu entry to the correct place to maintain
@@ -324,7 +324,7 @@ static void check_for_preemptions(void)
 	cpu_entry_t *last;
 
 	for (last = lowest_prio_cpu();
-	     edf_preemption_needed(&gsnedfsplit, last->linked);
+	     edf_split_preemption_needed(&gsnedfsplit, last->linked);
 	     last = lowest_prio_cpu()) {
 		/* preemption necessary */
 		task = __take_ready(&gsnedfsplit);
@@ -391,9 +391,10 @@ static noinline void job_completion(struct task_struct *t, int forced)
 	t->rt_param.job_params.deadline = t->rt_param.job_params.release +
 		t->rt_param.task_params.period;
 	prepare_for_next_period(t);
-	/* We now set the deadline to what it should be for scheduling priority.
+	/* We now set the subjob deadline to what it should be for scheduling
+	 * priority.
 	 */
-	t->rt_param.job_params.deadline = get_proper_deadline(t);
+	t->rt_param.job_params.subjob_deadline = get_proper_deadline(t);
 	if (is_released(t, litmus_clock()))
 		sched_trace_task_release(t);
 	/* unlink */
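
(A side note on ordering in job_completion() above: the real job deadline is set first, via release + period and prepare_for_next_period(), and only then is the separate subjob_deadline derived from it for scheduling priority. The model below is ours, reusing the lt_t stand-in from the previous sketch; only release, deadline, and subjob_deadline correspond to fields visible in the patch.)

#include <stdint.h>

typedef uint64_t lt_t;

/* The per-job state this patch splits in two: `deadline` keeps its
 * usual EDF meaning, while `subjob_deadline` is what the edf_split_*
 * comparisons use for priority. */
struct job_state {
	lt_t release;		/* release time of the current job */
	lt_t deadline;		/* job deadline: release + period */
	lt_t subjob_deadline;	/* deadline of the current slice */
};

/* Model of the completion path: advance to the next job first, then
 * derive the first slice's scheduling deadline from it. */
static void complete_job(struct job_state *j, lt_t period, unsigned int split)
{
	j->release += period;			/* next job's release */
	j->deadline = j->release + period;	/* implicit deadline */
	j->subjob_deadline = j->release + period / split;	/* slice 1 */
}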
@@ -413,7 +414,7 @@ static void move_deadline(struct task_struct *t)
 	if (queue){
 		remove(&gsnedfsplit, t);
 	}
-	t->rt_param.job_params.deadline = get_proper_deadline(t);
+	t->rt_param.job_params.subjob_deadline = get_proper_deadline(t);
 	if (on_cpu != NO_CPU){
 		/*If previously scheduled, may need to schedule something
 		else.*/
@@ -623,7 +624,7 @@ static void gsnedfsplit_release_at(struct task_struct *t, lt_t start)
 {
 	t->rt_param.job_params.deadline = start;
 	prepare_for_next_period(t);
-	t->rt_param.job_params.deadline = get_proper_deadline(t);
+	t->rt_param.job_params.subjob_deadline = get_proper_deadline(t);
 	set_rt_flags(t, RT_F_RUNNING);
 }
 
@@ -776,7 +777,7 @@ static void set_priority_inheritance(struct task_struct* t, struct task_struct*
 			 * heap. Note that this could be a release heap if we
 			 * budget enforcement is used and this job overran. */
			check_preempt =
-				!bheap_decrease(edf_ready_order,
+				!bheap_decrease(edf_split_ready_order,
 						tsk_rt(t)->heap_node);
 		} else {
 			/* Nothing to do: if it is not queued and not linked
@@ -799,7 +800,7 @@ static void set_priority_inheritance(struct task_struct* t, struct task_struct*
 		/* heap_decrease() hit the top level of the heap: make
 		 * sure preemption checks get the right task, not the
 		 * potentially stale cache. */
-		bheap_uncache_min(edf_ready_order,
+		bheap_uncache_min(edf_split_ready_order,
 				  &gsnedfsplit.ready_queue);
 		check_for_preemptions();
 	}
@@ -862,7 +863,7 @@ static struct task_struct* find_hp_waiter(struct fmlp_semaphore *sem,
 				   task_list)->private;
 
 		/* Compare task prios, find high prio task. */
-		if (queued != skip && edf_higher_prio(queued, found))
+		if (queued != skip && edf_split_higher_prio(queued, found))
 			found = queued;
 	}
 	return found;
@@ -891,9 +892,9 @@ int gsnedfsplit_fmlp_lock(struct litmus_lock* l)
 		__add_wait_queue_tail_exclusive(&sem->wait, &wait);
 
 		/* check if we need to activate priority inheritance */
-		if (edf_higher_prio(t, sem->hp_waiter)) {
+		if (edf_split_higher_prio(t, sem->hp_waiter)) {
 			sem->hp_waiter = t;
-			if (edf_higher_prio(t, sem->owner))
+			if (edf_split_higher_prio(t, sem->owner))
 				set_priority_inheritance(sem->owner, sem->hp_waiter);
 		}
 
@@ -1123,7 +1124,7 @@ static int __init init_gsn_edf(void)
 		entry->split_timer.function = on_split_timeout;
 		bheap_node_init(&entry->hn, entry);
 	}
-	edf_domain_init(&gsnedfsplit, NULL, gsnedfsplit_release_jobs);
+	edf_split_domain_init(&gsnedfsplit, NULL, gsnedfsplit_release_jobs);
 	return register_sched_plugin(&gsn_edf_plugin);
 }
 
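
(The renamed edf_split_* helpers live in edf_split_common.c, which this page does not show. The sketch below is a guess at the comparison they presumably implement: standard EDF ordering, but on the subjob deadline, with a PID tie-break as in the stock edf_higher_prio(); the task type is a simplified stand-in, not task_struct.)

#include <stdint.h>

typedef uint64_t lt_t;

struct task {			/* simplified stand-in for task_struct */
	int pid;
	lt_t subjob_deadline;
};

/* Earlier subjob deadline wins; NULL counts as lowest priority; PID
 * breaks ties deterministically. */
static int edf_split_higher_prio(const struct task *a, const struct task *b)
{
	if (!a || !b)
		return a != NULL;
	if (a->subjob_deadline != b->subjob_deadline)
		return a->subjob_deadline < b->subjob_deadline;
	return a->pid < b->pid;
}

This reading is consistent with the cpu_lower_prio() hunk above, where the arguments are deliberately inverted so that the lowest-priority CPU ends up at the top of the heap.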