aboutsummaryrefslogtreecommitdiffstats
path: root/litmus
diff options
context:
space:
mode:
Diffstat (limited to 'litmus')
-rw-r--r--  litmus/sched_edf_wm.c | 77
1 file changed, 74 insertions(+), 3 deletions(-)
diff --git a/litmus/sched_edf_wm.c b/litmus/sched_edf_wm.c
index 324ca8f28ab7..0098a3aab5b5 100644
--- a/litmus/sched_edf_wm.c
+++ b/litmus/sched_edf_wm.c
@@ -41,6 +41,80 @@ DEFINE_PER_CPU(wm_domain_t, wm_domains);
 #define domain_of_task(task) (remote_domain(get_partition(task)))
 #define domain_from_timer(t) (container_of((t), wm_domain_t, enforcement_timer))
 
44static int is_sliced_task(struct task_struct* t)
45{
46 return tsk_rt(t)->task_params.semi_part.wm.count;
47}
48
49static void compute_slice_params(struct task_struct* t)
50{
51 struct rt_param* p = tsk_rt(t);
52 /* Here we do a little trick to make the generic EDF code
53 * play well with job slices. We overwrite the job-level
54 * release and deadline fields with the slice-specific values
55 * so that we can enqueue this task in an EDF rt_domain_t
56 * without issue. The actual values are cached in the semi_part.wm
57 * structure. */
58 p->job_params.deadline = p->semi_part.wm.job_release +
59 p->semi_part.wm.slice->deadline;
60 p->job_params.release = p->semi_part.wm.job_release +
61 p->semi_part.wm.slice->offset;
62
63 /* Similarly, we play a trick on the cpu field. */
64 p->task_params.cpu = p->semi_part.wm.slice->cpu;
65
66 /* update the per-slice budget reference */
67 p->semi_part.wm.exec_time = p->job_params.exec_time;
68}
69
70static void complete_sliced_job(struct task_struct* t)
71{
72 struct rt_param* p = tsk_rt(t);
73
74 /* We need to undo our trickery to the
75 * job parameters (see above). */
76 p->job_params.release = p->semi_part.wm.job_release;
77 p->job_params.deadline = p->semi_part.wm.job_deadline;
78
79 /* Ok, now let generic code do the actual work. */
80 prepare_for_next_period(t);
81
82 /* And finally cache the updated parameters. */
83 p->semi_part.wm.job_release = p->job_params.release;
84 p->semi_part.wm.job_deadline = p->job_params.deadline;
85}
86
87static void advance_next_slice(struct task_struct* t, int completion_signaled)
88{
89 int idx;
90 struct rt_param* p = tsk_rt(t);
91
92 /* make sure this is actually a sliced job */
93 BUG_ON(!is_sliced_task(t));
94
95 /* determine index of current slice */
96 idx = p->semi_part.wm.slice -
97 p->task_params.semi_part.wm.slices;
98
99 if (completion_signaled)
100 idx = 0;
101 else
102 /* increment and wrap around, if necessary */
103 idx = (idx + 1) % p->task_params.semi_part.wm.count;
104
105 /* point to next slice */
106 p->semi_part.wm.slice =
107 p->task_params.semi_part.wm.slices + idx;
108
109 /* Check if we need to update essential job parameters. */
110 if (!idx)
111 /* job completion */
112 complete_sliced_job(t);
113
114 /* Update job parameters for new slice. */
115 compute_slice_params(t);
116}
117
44/* we assume the lock is being held */ 118/* we assume the lock is being held */
45static void preempt(wm_domain_t *dom) 119static void preempt(wm_domain_t *dom)
46{ 120{
@@ -91,9 +165,6 @@ static void requeue(struct task_struct* t, rt_domain_t *edf)
91 add_release(edf, t); /* it has got to wait */ 165 add_release(edf, t); /* it has got to wait */
92} 166}
93 167
94/* This check is trivial in partioned systems as we only have to consider
95 * the CPU of the partition.
96 */
97static int wm_check_resched(rt_domain_t *edf) 168static int wm_check_resched(rt_domain_t *edf)
98{ 169{
99 wm_domain_t *dom = container_of(edf, wm_domain_t, domain); 170 wm_domain_t *dom = container_of(edf, wm_domain_t, domain);