author	Guruprasad Aphale <gurua@cs.unc.edu>	2010-07-24 20:59:25 -0400
committer	Guruprasad Aphale <gurua@cs.unc.edu>	2010-07-24 20:59:25 -0400
commit	8debcda9cbe6b85029c7efa2a257dd761e254f7d (patch)
tree	fab34b114d49db759902835e9afce114da2e1d29
parent	643adc7a65b7e1bf851dd8dd64549fb60750ec37 (diff)
Added timer functionality to the edf-wm scheduler
Added the functions required for timer control to the current scheduler. It does not yet support migrations and still behaves the same as PSN-EDF.
-rw-r--r--	litmus/sched_edf_wm.c	138
1 file changed, 76 insertions(+), 62 deletions(-)
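The patch below adds a per-CPU hrtimer, a timer_flag, and an end_timer field to edfwm_domain_t together with the on_release_timer() callback, but the code that actually arms the timer is not part of this change. As a rough illustration of how those fields fit together, the following sketch arms the timer for an absolute expiry time; the helper name and the 'when' argument are assumptions, while hrtimer_start(), ns_to_ktime() and HRTIMER_MODE_ABS are standard kernel APIs from <linux/hrtimer.h>.

/* Illustrative sketch only -- not part of this commit. */
static void edfwm_arm_subjob_timer(edfwm_domain_t *edfwm, lt_t when)
{
	/* Record when the timer is due and mark it as armed, so the tick
	 * path can tell a timer-driven scheduler_tick() apart from an
	 * ordinary Linux tick and knows whether it has to re-arm. */
	edfwm->end_timer  = when;
	edfwm->timer_flag = 1;

	/* 'when' is an absolute time in nanoseconds (lt_t), matching the
	 * CLOCK_MONOTONIC/HRTIMER_MODE_ABS setup in edfwm_domain_init(). */
	hrtimer_start(&edfwm->timer, ns_to_ktime(when), HRTIMER_MODE_ABS);
}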
diff --git a/litmus/sched_edf_wm.c b/litmus/sched_edf_wm.c
index c6fa1d3d30bf..5bfe5df0de90 100644
--- a/litmus/sched_edf_wm.c
+++ b/litmus/sched_edf_wm.c
@@ -30,27 +30,41 @@ typedef struct {
  * protects the domain and serializes scheduling decisions
  */
 #define slock domain.ready_lock
-
+/* HRTIMER: set for a subjob. We also need to set a flag, as tick() may be called by the Linux scheduler, and the timer might go off before the subjob has finished execution, in which case we need to set the timer again */
+	struct hrtimer		timer;
+	int			timer_flag;
+	lt_t			end_timer;
 } edfwm_domain_t;
 
 DEFINE_PER_CPU(edfwm_domain_t, edfwm_domains);
 
 #define local_edf		(&__get_cpu_var(edfwm_domains).domain)
-#define local_pedf		(&__get_cpu_var(edfwm_domains))
+#define local_edfwm		(&__get_cpu_var(edfwm_domains))
 #define remote_edf(cpu)		(&per_cpu(edfwm_domains, cpu).domain)
-#define remote_pedf(cpu)	(&per_cpu(edfwm_domains, cpu))
+#define remote_edfwm(cpu)	(&per_cpu(edfwm_domains, cpu))
 #define task_edf(task)		remote_edf(get_partition(task))
-#define task_pedf(task)		remote_pedf(get_partition(task))
+#define task_edfwm(task)	remote_edfwm(get_partition(task))
 
+static enum hrtimer_restart on_release_timer(struct hrtimer *timer)
+{
+	scheduler_tick();
+	return HRTIMER_NORESTART;
+}
 
-static void edfwm_domain_init(edfwm_domain_t* pedf,
+static void edfwm_domain_init(edfwm_domain_t* edfwm,
 			      check_resched_needed_t check,
 			      release_jobs_t release,
 			      int cpu)
 {
-	edf_domain_init(&pedf->domain, check, release);
-	pedf->cpu       = cpu;
-	pedf->scheduled = NULL;
+	edf_domain_init(&edfwm->domain, check, release);
+	edfwm->cpu       = cpu;
+	edfwm->scheduled = NULL;
+// 	edfwm->block = 0;
+
+	hrtimer_init(&edfwm->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	edfwm->timer.function = on_release_timer;
+	edfwm->end_timer  = 0;
+	edfwm->timer_flag = 0;
 }
 
 static void requeue(struct task_struct* t, rt_domain_t *edf)
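The HRTIMER comment in the hunk above motivates the flag: scheduler_tick() may be invoked by the regular Linux tick as well as by this timer, and the timer may go off before the subjob has finished, in which case it has to be set again. A minimal sketch of that re-arm check is given below; the subjob_done and rearm_delta parameters and the control flow are assumptions, only the struct fields, litmus_clock() and the hrtimer calls come from this file or the stock kernel API.

/* Illustrative sketch only -- not part of this commit. */
static void edfwm_check_release_timer(edfwm_domain_t *edfwm, int subjob_done,
				      lt_t rearm_delta)
{
	if (!edfwm->timer_flag)
		return;	/* the timer was never armed for this subjob */

	if (subjob_done) {
		/* subjob completed in time; nothing left to do */
		edfwm->timer_flag = 0;
	} else if (litmus_clock() >= edfwm->end_timer) {
		/* the timer went off before the subjob finished:
		 * push the expiry out and set the timer again */
		edfwm->end_timer += rearm_delta;
		hrtimer_start(&edfwm->timer, ns_to_ktime(edfwm->end_timer),
			      HRTIMER_MODE_ABS);
	}
}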
@@ -66,9 +80,9 @@ static void requeue(struct task_struct* t, rt_domain_t *edf)
 }
 
 /* we assume the lock is being held */
-static void preempt(edfwm_domain_t *pedf)
+static void preempt(edfwm_domain_t *edfwm)
 {
-	preempt_if_preemptable(pedf->scheduled, pedf->cpu);
+	preempt_if_preemptable(edfwm->scheduled, edfwm->cpu);
 }
 
 /* This check is trivial in partioned systems as we only have to consider
@@ -76,13 +90,13 @@ static void preempt(edfwm_domain_t *pedf)
  */
 static int edfwm_check_resched(rt_domain_t *edf)
 {
-	edfwm_domain_t *pedf = container_of(edf, edfwm_domain_t, domain);
+	edfwm_domain_t *edfwm = container_of(edf, edfwm_domain_t, domain);
 
 	/* because this is a callback from rt_domain_t we already hold
 	 * the necessary lock for the ready queue
 	 */
-	if (edf_preemption_needed(edf, pedf->scheduled)) {
-		preempt(pedf);
+	if (edf_preemption_needed(edf, edfwm->scheduled)) {
+		preempt(edfwm);
 		return 1;
 	} else
 		return 0;
@@ -99,13 +113,13 @@ static void job_completion(struct task_struct* t, int forced)
 
 static void edfwm_tick(struct task_struct *t)
 {
-	edfwm_domain_t *pedf = local_pedf;
+	edfwm_domain_t *edfwm = local_edfwm;
 
 	/* Check for inconsistency. We don't need the lock for this since
 	 * ->scheduled is only changed in schedule, which obviously is not
 	 * executing in parallel on this CPU
 	 */
-	BUG_ON(is_realtime(t) && t != pedf->scheduled);
+	BUG_ON(is_realtime(t) && t != edfwm->scheduled);
 
 	if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
 		if (!is_np(t)) {
@@ -124,30 +138,30 @@ static void edfwm_tick(struct task_struct *t)
 
 static struct task_struct* edfwm_schedule(struct task_struct * prev)
 {
-	edfwm_domain_t*		pedf = local_pedf;
-	rt_domain_t*		edf  = &pedf->domain;
+	edfwm_domain_t*		edfwm = local_edfwm;
+	rt_domain_t*		edf   = &edfwm->domain;
 	struct task_struct*	next;
 
 	int			out_of_time, sleep, preempt,
 				np, exists, blocks, resched;
 
-	raw_spin_lock(&pedf->slock);
+	raw_spin_lock(&edfwm->slock);
 
 	/* sanity checking
 	 * differently from gedf, when a task exits (dead)
-	 * pedf->schedule may be null and prev _is_ realtime
+	 * edfwm->schedule may be null and prev _is_ realtime
 	 */
-	BUG_ON(pedf->scheduled && pedf->scheduled != prev);
-	BUG_ON(pedf->scheduled && !is_realtime(prev));
+	BUG_ON(edfwm->scheduled && edfwm->scheduled != prev);
+	BUG_ON(edfwm->scheduled && !is_realtime(prev));
 
 	/* (0) Determine state */
-	exists      = pedf->scheduled != NULL;
-	blocks      = exists && !is_running(pedf->scheduled);
+	exists      = edfwm->scheduled != NULL;
+	blocks      = exists && !is_running(edfwm->scheduled);
 	out_of_time = exists &&
-		      budget_enforced(pedf->scheduled) &&
-		      budget_exhausted(pedf->scheduled);
-	np          = exists && is_np(pedf->scheduled);
-	sleep       = exists && get_rt_flags(pedf->scheduled) == RT_F_SLEEP;
+		      budget_enforced(edfwm->scheduled) &&
+		      budget_exhausted(edfwm->scheduled);
+	np          = exists && is_np(edfwm->scheduled);
+	sleep       = exists && get_rt_flags(edfwm->scheduled) == RT_F_SLEEP;
 	preempt     = edf_preemption_needed(edf, prev);
 
 	/* If we need to preempt do so.
@@ -165,14 +179,14 @@ static struct task_struct* edfwm_schedule(struct task_struct * prev)
 	 * Multiple calls to request_exit_np() don't hurt.
 	 */
 	if (np && (out_of_time || preempt || sleep))
-		request_exit_np(pedf->scheduled);
+		request_exit_np(edfwm->scheduled);
 
 	/* Any task that is preemptable and either exhausts its execution
 	 * budget or wants to sleep completes. We may have to reschedule after
 	 * this.
 	 */
 	if (!np && (out_of_time || sleep) && !blocks) {
-		job_completion(pedf->scheduled, !sleep);
+		job_completion(edfwm->scheduled, !sleep);
 		resched = 1;
 	}
 
@@ -185,8 +199,8 @@ static struct task_struct* edfwm_schedule(struct task_struct * prev)
 		/* Take care of a previously scheduled
 		 * job by taking it out of the Linux runqueue.
 		 */
-		if (pedf->scheduled && !blocks)
-			requeue(pedf->scheduled, edf);
+		if (edfwm->scheduled && !blocks)
+			requeue(edfwm->scheduled, edf);
 		next = __take_ready(edf);
 	} else
 		/* Only override Linux scheduler if we have a real-time task
@@ -202,8 +216,8 @@ static struct task_struct* edfwm_schedule(struct task_struct * prev)
 		TRACE("becoming idle at %llu\n", litmus_clock());
 	}
 
-	pedf->scheduled = next;
-	raw_spin_unlock(&pedf->slock);
+	edfwm->scheduled = next;
+	raw_spin_unlock(&edfwm->slock);
 
 	return next;
 }
@@ -214,7 +228,7 @@ static struct task_struct* edfwm_schedule(struct task_struct * prev)
 static void edfwm_task_new(struct task_struct * t, int on_rq, int running)
 {
 	rt_domain_t*		edf  = task_edf(t);
-	edfwm_domain_t*		pedf = task_pedf(t);
+	edfwm_domain_t*		edfwm = task_edfwm(t);
 	unsigned long		flags;
 
 	TRACE_TASK(t, "edf-wm: task new, cpu = %d\n",
@@ -226,28 +240,28 @@ static void edfwm_task_new(struct task_struct * t, int on_rq, int running)
 	/* The task should be running in the queue, otherwise signal
 	 * code will try to wake it up with fatal consequences.
 	 */
-	raw_spin_lock_irqsave(&pedf->slock, flags);
+	raw_spin_lock_irqsave(&edfwm->slock, flags);
 	if (running) {
 		/* there shouldn't be anything else running at the time */
-		BUG_ON(pedf->scheduled);
-		pedf->scheduled = t;
+		BUG_ON(edfwm->scheduled);
+		edfwm->scheduled = t;
 	} else {
 		requeue(t, edf);
 		/* maybe we have to reschedule */
-		preempt(pedf);
+		preempt(edfwm);
 	}
-	raw_spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_spin_unlock_irqrestore(&edfwm->slock, flags);
 }
 
 static void edfwm_task_wake_up(struct task_struct *task)
 {
 	unsigned long		flags;
-	edfwm_domain_t*		pedf = task_pedf(task);
+	edfwm_domain_t*		edfwm = task_edfwm(task);
 	rt_domain_t*		edf  = task_edf(task);
 	lt_t			now;
 
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
-	raw_spin_lock_irqsave(&pedf->slock, flags);
+	raw_spin_lock_irqsave(&edfwm->slock, flags);
 	BUG_ON(is_queued(task));
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
@@ -269,10 +283,10 @@ static void edfwm_task_wake_up(struct task_struct *task)
 	 * de-scheduling the task, i.e., wake_up() raced with schedule()
 	 * and won.
 	 */
-	if (pedf->scheduled != task)
+	if (edfwm->scheduled != task)
 		requeue(task, edf);
 
-	raw_spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_spin_unlock_irqrestore(&edfwm->slock, flags);
 	TRACE_TASK(task, "wake up done\n");
 }
 
@@ -288,29 +302,29 @@ static void edfwm_task_block(struct task_struct *t)
 static void edfwm_task_exit(struct task_struct * t)
 {
 	unsigned long flags;
-	edfwm_domain_t*		pedf = task_pedf(t);
+	edfwm_domain_t*		edfwm = task_edfwm(t);
 	rt_domain_t*		edf;
 
-	raw_spin_lock_irqsave(&pedf->slock, flags);
+	raw_spin_lock_irqsave(&edfwm->slock, flags);
 	if (is_queued(t)) {
 		/* dequeue */
 		edf = task_edf(t);
 		remove(edf, t);
 	}
-	if (pedf->scheduled == t)
-		pedf->scheduled = NULL;
+	if (edfwm->scheduled == t)
+		edfwm->scheduled = NULL;
 
 	TRACE_TASK(t, "RIP, now reschedule\n");
 
-	preempt(pedf);
-	raw_spin_unlock_irqrestore(&pedf->slock, flags);
+	preempt(edfwm);
+	raw_spin_unlock_irqrestore(&edfwm->slock, flags);
 }
 
 #ifdef CONFIG_FMLP
 static long edfwm_pi_block(struct pi_semaphore *sem,
 			   struct task_struct *new_waiter)
 {
-	edfwm_domain_t*		pedf;
+	edfwm_domain_t*		edfwm;
 	rt_domain_t*		edf;
 	struct task_struct*	t;
 	int cpu = get_partition(new_waiter);
@@ -319,11 +333,11 @@ static long edfwm_pi_block(struct pi_semaphore *sem,
 
 	if (edf_higher_prio(new_waiter, sem->hp.cpu_task[cpu])) {
 		TRACE_TASK(new_waiter, " boosts priority\n");
-		pedf = task_pedf(new_waiter);
+		edfwm = task_edfwm(new_waiter);
 		edf = task_edf(new_waiter);
 
 		/* interrupts already disabled */
-		raw_spin_lock(&pedf->slock);
+		raw_spin_lock(&edfwm->slock);
 
 		/* store new highest-priority task */
 		sem->hp.cpu_task[cpu] = new_waiter;
@@ -346,9 +360,9 @@ static long edfwm_pi_block(struct pi_semaphore *sem,
 
 		/* check if we need to reschedule */
 		if (edf_preemption_needed(edf, current))
-			preempt(pedf);
+			preempt(edfwm);
 
-		raw_spin_unlock(&pedf->slock);
+		raw_spin_unlock(&edfwm->slock);
 	}
 
 	return 0;
@@ -383,7 +397,7 @@ static long edfwm_inherit_priority(struct pi_semaphore *sem,
 static long edfwm_return_priority(struct pi_semaphore *sem)
 {
 	struct task_struct*	t    = current;
-	edfwm_domain_t*		pedf = task_pedf(t);
+	edfwm_domain_t*		edfwm = task_edfwm(t);
 	rt_domain_t*		edf  = task_edf(t);
 	int			ret  = 0;
 	int			cpu  = get_partition(current);
@@ -415,16 +429,16 @@ static long edfwm_return_priority(struct pi_semaphore *sem)
 	/* Always check for delayed preemptions that might have become
 	 * necessary due to non-preemptive execution.
 	 */
-	raw_spin_lock(&pedf->slock);
+	raw_spin_lock(&edfwm->slock);
 
 	/* Reset inh_task to NULL. */
 	current->rt_param.inh_task = NULL;
 
 	/* check if we need to reschedule */
 	if (edf_preemption_needed(edf, current))
-		preempt(pedf);
+		preempt(edfwm);
 
-	raw_spin_unlock(&pedf->slock);
+	raw_spin_unlock(&edfwm->slock);
 
 
 	return ret;
@@ -469,7 +483,7 @@ static int __init init_edf_wm(void)
 	 * we cannot use num_online_cpu()
 	 */
 	for (i = 0; i < num_online_cpus(); i++) {
-		edfwm_domain_init(remote_pedf(i),
+		edfwm_domain_init(remote_edfwm(i),
 				  edfwm_check_resched,
 				  NULL, i);
 	}