author	Glenn Elliott <gelliott@cs.unc.edu>	2013-03-21 18:43:43 -0400
committer	Glenn Elliott <gelliott@cs.unc.edu>	2013-03-21 18:47:18 -0400
commit	7bbf3205ae1979cb41fd2a0dfdd103656bf8e84e (patch)
tree	b7ffd5eacbd6f11c42ec1e4cee0bc2041bcabd79
parent	469aaad39c956446b8a31d351ee36bedd87ac18a (diff)

SOBLIV draining support for C-EDF. (wip-2012.3-gpu)
Adds support for suspension-oblivious budget draining to C-EDF. Also changes how jobs with exhausted budgets are treated in C-EDF: such jobs are early-released until they catch up.
-rw-r--r--	include/litmus/budget.h       |  27
-rw-r--r--	include/litmus/litmus.h       |   6
-rw-r--r--	include/litmus/locking.h      |   8
-rw-r--r--	include/litmus/rt_param.h     |  30
-rw-r--r--	include/litmus/sched_trace.h  |   9
-rw-r--r--	litmus/budget.c               |  77
-rw-r--r--	litmus/edf_common.c           |   2
-rw-r--r--	litmus/litmus.c               |   2
-rw-r--r--	litmus/sched_cedf.c           | 196
-rw-r--r--	litmus/sched_gsn_edf.c        |  13
-rw-r--r--	litmus/sched_pfp.c            |  13
-rw-r--r--	litmus/sched_psn_edf.c        |  13
-rw-r--r--	litmus/sched_task_trace.c     |   4
13 files changed, 337 insertions(+), 63 deletions(-)
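
The catch-up rule, in brief: a forced completion (budget exhaustion) increments a per-task backlog counter, and each subsequent job is early-released on the old deadline until the counter drains. A minimal sketch, condensed from the job_completion() changes to litmus/sched_cedf.c below (the helper name track_backlog is illustrative; get_backlog() and is_backlogged_job are added by this patch):

	/* Sketch: the backlog bookkeeping rule, condensed from sched_cedf.c. */
	static void track_backlog(struct task_struct *t, int forced)
	{
		if (forced) {
			/* budget exhausted: a "make-up" job is now owed */
			++get_backlog(t);
		} else if (tsk_rt(t)->job_params.is_backlogged_job) {
			/* a make-up job completed on leftover budget */
			--get_backlog(t);
		}
		/* While get_backlog(t) > 0, the next job is early-released and
		 * prepare_for_next_period() is skipped, so the deadline is not
		 * pushed back until the task has caught up. */
	}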
diff --git a/include/litmus/budget.h b/include/litmus/budget.h
index 2a3511245f7a..72f04777e0b0 100644
--- a/include/litmus/budget.h
+++ b/include/litmus/budget.h
@@ -54,15 +54,17 @@ struct enforcement_timer
 
 typedef void (*scheduled_t)(struct task_struct* t);
 typedef void (*blocked_t)(struct task_struct* t);
-typedef void (*preempt_or_sleep_t)(struct task_struct* t);
-typedef void (*exhausted_t)(struct task_struct* t);
+typedef void (*preempt_t)(struct task_struct* t);
+typedef void (*sleep_t)(struct task_struct* t);
+typedef enum hrtimer_restart (*exhausted_t)(struct task_struct* t);
 typedef void (*exit_t)(struct task_struct* t);
 
 struct budget_tracker_ops
 {
 	scheduled_t on_scheduled;	/* called from litmus_schedule(). */
 	blocked_t on_blocked;		/* called from plugin::schedule() */
-	preempt_or_sleep_t on_preempt_or_sleep;	/* called from plugin::schedule() */
+	preempt_t on_preempt;		/* called from plugin::schedule() */
+	sleep_t on_sleep;		/* called from plugin::schedule() */
 
 	exit_t on_exit;			/* task exiting rt mode */
 
@@ -84,15 +86,30 @@ enum BT_FLAGS
 };
 
 /* Functions for simple DRAIN_SIMPLE policy common
- * to every scheduler. Scheduler must provided
+ * to every scheduler. Scheduler must provide
  * implementation for simple_on_exhausted().
  */
 void simple_on_scheduled(struct task_struct* t);
 void simple_on_blocked(struct task_struct* t);
-void simple_on_preempt_or_sleep(struct task_struct* t);
+void simple_on_preempt(struct task_struct* t);
+void simple_on_sleep(struct task_struct* t);
 void simple_on_exit(struct task_struct* t);
 
 
+/* Functions for DRAIN_SOBLIV policy common
+ * to every scheduler. Scheduler must provide
+ * implementation for sobliv_on_exhausted().
+ *
+ * Limitation: Quantum budget tracking is unsupported.
+ */
+void sobliv_on_scheduled(struct task_struct* t);
+void sobliv_on_blocked(struct task_struct* t);
+void sobliv_on_sleep(struct task_struct* t);
+/* Use the DRAIN_SIMPLE implementations */
+#define sobliv_on_preempt simple_on_preempt
+#define sobliv_on_exit simple_on_exit
+
+
 void init_budget_tracker(struct budget_tracker* bt,
 	const struct budget_tracker_ops* ops);
 
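
With on_preempt_or_sleep split into separate on_preempt and on_sleep hooks, and exhausted_t now returning an enum hrtimer_restart (so the exhaustion handler can ask for the budget timer to be re-armed), a plugin's ops table takes the shape below. This sketch mirrors cedf_drain_simple_ops later in this patch; the name example_drain_ops is illustrative:

	static struct budget_tracker_ops example_drain_ops = {
		.on_scheduled = simple_on_scheduled,
		.on_blocked   = simple_on_blocked,
		.on_preempt   = simple_on_preempt,	/* formerly one on_preempt_or_sleep hook */
		.on_sleep     = simple_on_sleep,	/* now a distinct event */
		.on_exit      = simple_on_exit,
		/* returns HRTIMER_RESTART to keep the budget timer armed */
		.on_exhausted = cedf_simple_on_exhausted,
	};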
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index f6ea5f6e80ee..ce24e62eee81 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -70,7 +70,11 @@ void litmus_exit_task(struct task_struct *tsk);
 #define get_period(t)		(tsk_rt(t)->task_params.period)
 #define get_release(t)		(tsk_rt(t)->job_params.release)
 #define get_lateness(t)	(tsk_rt(t)->job_params.lateness)
-#define get_budget_timer(t)	(tsk_rt(t)->job_params.budget_timer)
+#define get_backlog(t)		(tsk_rt(t)->job_params.backlog)
+
+#define has_backlog(t)		(get_backlog(t) != 0)
+
+#define get_budget_timer(t)	(tsk_rt(t)->budget)
 
 #define effective_priority(t) ((!(tsk_rt(t)->inh_task)) ? t : tsk_rt(t)->inh_task)
 #define base_priority(t) (t)
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index 3ae6692dbe95..962ad5e6726a 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -229,5 +229,13 @@ struct litmus_lock_ops {
 void suspend_for_lock(void);
 int wake_up_for_lock(struct task_struct* t);
 
+/* thread safe?? */
+#ifndef CONFIG_LITMUS_NESTED_LOCKING
+#define holds_locks(tsk) \
+	(tsk_rt(tsk)->num_locks_held || tsk_rt(tsk)->num_local_locks_held)
+#else
+#define holds_locks(tsk) \
+	(tsk_rt(tsk)->num_locks_held || tsk_rt(tsk)->num_local_locks_held || !binheap_empty(&tsk_rt(tsk)->hp_blocked_tasks))
 #endif
 
+#endif
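
The holds_locks() predicate is what lets the SOBLIV exhaustion path distinguish a blocked lock holder from an ordinary blocked task. Roughly, following cedf_sobliv_on_exhausted() later in this patch:

	if (holds_locks(t)) {
		/* a blocked lock holder must not silently postpone its deadline */
		TRACE_TASK(t, "prevented lock holder from postponing deadline.\n");
	} else {
		/* safe to force job completion and re-arm the budget timer */
	}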
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 887075b908ca..499ecd899fcd 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -257,6 +257,19 @@ struct rt_job {
 	 * Increase this sequence number when a job is released.
 	 */
 	unsigned int	job_no;
+
+	/* Increments each time a job is forced to complete by
+	 * budget exhaustion. If a job completes without remaining
+	 * budget, the next job will be early-released _without_
+	 * pushing back its deadline. backlog is decremented once
+	 * per early release. This behavior continues until
+	 * backlog == 0.
+	 */
+	unsigned int	backlog;
+
+	/* denotes whether the current job is a backlogged job caused
+	 * by an earlier budget exhaustion */
+	unsigned int	is_backlogged_job:1;
 };
 
 struct pfair_param;
@@ -387,6 +400,14 @@ struct rt_param {
 	unsigned int	num_local_locks_held;
 #endif
 
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	raw_spinlock_t		hp_blocked_tasks_lock;
+	struct binheap		hp_blocked_tasks;
+
+	/* pointer to the lock upon which this task is currently blocked */
+	struct litmus_lock*	blocked_lock;
+#endif
+
 	/* user controlled parameters */
 	struct rt_task	task_params;
 
@@ -401,15 +422,6 @@ struct rt_param {
 	 */
 	struct task_struct*	inh_task;
 
-#ifdef CONFIG_LITMUS_NESTED_LOCKING
-	raw_spinlock_t		hp_blocked_tasks_lock;
-	struct binheap		hp_blocked_tasks;
-
-	/* pointer to lock upon which is currently blocked */
-	struct litmus_lock*	blocked_lock;
-#endif
-
-
 #ifdef CONFIG_REALTIME_AUX_TASKS
 	unsigned int	is_aux_task:1;
 	unsigned int	aux_ready:1;
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index 0785db39b2fc..9a7e6fa1e6b6 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -52,11 +52,10 @@ struct st_switch_away_data { /* A process was switched away from on a given CPU.
 
 struct st_completion_data {	/* A job completed. */
 	u64	when;
-	u8	forced:1;	/* Set to 1 if job overran and kernel advanced to the
-				 * next task automatically; set to 0 otherwise.
-				 */
-	u8	__uflags:7;
-	u8	__unused[7];
+	u64	backlog_remaining:62;
+	u8	was_backlog_job:1;
+	u8	forced:1;	/* Set to 1 if job overran and kernel advanced to the
+				 * next task automatically; set to 0 otherwise. */
 } __attribute__((packed));
 
 struct st_block_data {	/* A task blocks. */
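
The record stays 16 bytes: the former __uflags/__unused padding is repurposed as a 62-bit backlog count plus two flag bits. A trace parser would unpack the second 64-bit word of the record roughly as follows (a sketch that assumes GCC's packed-bitfield layout on a little-endian machine):

	uint64_t word;	/* bytes 8..15 of st_completion_data */
	uint64_t backlog_remaining = word & ((1ULL << 62) - 1);
	int was_backlog_job = (int)((word >> 62) & 1);
	int forced          = (int)((word >> 63) & 1);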
diff --git a/litmus/budget.c b/litmus/budget.c
index 559c54709acc..15de83bc584e 100644
--- a/litmus/budget.c
+++ b/litmus/budget.c
@@ -38,10 +38,13 @@ inline static void cancel_enforcement_timer(struct task_struct* t)
 	}
 }
 
+
+
 inline static void arm_enforcement_timer(struct task_struct* t)
 {
 	struct enforcement_timer* et;
-	lt_t when_to_fire;
+	lt_t when_to_fire, remaining_budget;
+	lt_t now;
 	unsigned long flags;
 
 	BUG_ON(!t);
@@ -80,9 +83,11 @@ inline static void arm_enforcement_timer(struct task_struct* t)
 		goto out;
 	}
 
-	when_to_fire = litmus_clock() + budget_remaining(t);
+	now = litmus_clock();
+	remaining_budget = budget_remaining(t);
+	when_to_fire = now + remaining_budget;
 
-	TRACE_TASK(t, "bremaining: %ld, when_to_fire: %ld\n", budget_remaining(t), when_to_fire);
+	TRACE_TASK(t, "budget remaining: %ld, when_to_fire: %ld\n", remaining_budget, when_to_fire);
 
 	__hrtimer_start_range_ns(&et->timer,
 			ns_to_ktime(when_to_fire),
@@ -107,6 +112,9 @@ void send_sigbudget(struct task_struct* t)
 	}
 }
 
+/*
+ * DRAIN_SIMPLE
+ */
 
 void simple_on_scheduled(struct task_struct* t)
 {
@@ -118,7 +126,7 @@ void simple_on_scheduled(struct task_struct* t)
 	}
 }
 
-static void __simple_on_unscheduled(struct task_struct* t)
+inline static void __simple_on_unscheduled(struct task_struct* t)
 {
 	BUG_ON(!t);
 
@@ -132,7 +140,12 @@ void simple_on_blocked(struct task_struct* t)
 	__simple_on_unscheduled(t);
 }
 
-void simple_on_preempt_or_sleep(struct task_struct* t)
+void simple_on_preempt(struct task_struct* t)
+{
+	__simple_on_unscheduled(t);
+}
+
+void simple_on_sleep(struct task_struct* t)
 {
 	__simple_on_unscheduled(t);
 }
@@ -142,12 +155,53 @@ void simple_on_exit(struct task_struct* t)
 	__simple_on_unscheduled(t);
 }
 
+/*
+ * DRAIN_SOBLIV
+ */
 
+void sobliv_on_scheduled(struct task_struct* t)
+{
+	BUG_ON(!t);
+
+	if (!bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) {
+		if (tsk_rt(t)->budget.timer.armed) {
+			TRACE_TASK(t, "budget timer already armed.\n");
+		}
+		else {
+			arm_enforcement_timer(t);
+		}
+	}
+}
+
+void sobliv_on_blocked(struct task_struct* t)
+{
+	/* NOOP */
+	TRACE_TASK(t, "sobliv: budget drains while suspended.\n");
+}
+
+void sobliv_on_sleep(struct task_struct* t)
+{
+	if (budget_precisely_tracked(t)) {
+		/* Kludge: this callback runs before the job_completion logic,
+		 * so we must work out ourselves whether a backlog will remain
+		 * after this job (which is completing, since sleep is
+		 * asserted) finishes. */
+		int no_backlog = (!has_backlog(t) || /* no backlog */
+			/* the last backlogged job is completing */
+			(get_backlog(t) == 1 && tsk_rt(t)->job_params.is_backlogged_job));
+		if (no_backlog)
+			cancel_enforcement_timer(t);
+		else
+			TRACE_TASK(t, "not cancelling timer because there is time for backlogged work.\n");
+	}
+}
 
 
 static enum hrtimer_restart __on_timeout(struct hrtimer *timer)
 {
+	enum hrtimer_restart restart;
 	unsigned long flags;
+
 	struct budget_tracker* bt =
 		container_of(
 			container_of(timer,
@@ -168,9 +222,13 @@ static enum hrtimer_restart __on_timeout(struct hrtimer *timer)
 	tsk_rt(t)->budget.timer.armed = 0;
 	raw_spin_unlock_irqrestore(&bt->timer.lock, flags);
 
-	bt->ops->on_exhausted(t);
+	restart = bt->ops->on_exhausted(t);
+
+	raw_spin_lock_irqsave(&bt->timer.lock, flags);
+	tsk_rt(t)->budget.timer.armed = (restart == HRTIMER_RESTART);
+	raw_spin_unlock_irqrestore(&bt->timer.lock, flags);
 
-	return HRTIMER_NORESTART;
+	return restart;
 }
 
 
@@ -181,7 +239,8 @@ void init_budget_tracker(struct budget_tracker* bt, const struct budget_tracker_
 
 	BUG_ON(!ops->on_scheduled);
 	BUG_ON(!ops->on_blocked);
-	BUG_ON(!ops->on_preempt_or_sleep);
+	BUG_ON(!ops->on_preempt);
+	BUG_ON(!ops->on_sleep);
 	BUG_ON(!ops->on_exhausted);
 
 	memset(bt, 0, sizeof(*bt));
@@ -189,4 +248,4 @@ void init_budget_tracker(struct budget_tracker* bt, const struct budget_tracker_
 	hrtimer_init(&bt->timer.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	bt->timer.timer.function = __on_timeout;
 	bt->ops = ops;
-}
\ No newline at end of file
+}
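
__on_timeout() now propagates the exhaustion callback's return value and records whether the timer stays armed. The underlying hrtimer contract, which cedf_sobliv_on_exhausted() below relies on, is the standard re-arm idiom (a sketch; work_remains() and next_budget_ns() are hypothetical helpers):

	static enum hrtimer_restart example_timeout(struct hrtimer *timer)
	{
		if (work_remains()) {
			/* push our own expiry forward by the newly granted budget */
			hrtimer_forward_now(timer, ns_to_ktime(next_budget_ns()));
			return HRTIMER_RESTART;	/* hrtimer core keeps the timer armed */
		}
		return HRTIMER_NORESTART;	/* timer is disarmed */
	}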
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index 52ccac998142..76ed1056ef6f 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -214,6 +214,8 @@ klmirqd_tie_break:
 	 */
 	int pid_break;
 
+	/* TODO: INCORPORATE job_params::backlog INTO TIE-BREAKING */
+
 #if defined(CONFIG_EDF_TIE_BREAK_LATENESS)
 	/* Tie break by lateness. Jobs with greater lateness get
 	 * priority. This should spread tardiness across all tasks,
diff --git a/litmus/litmus.c b/litmus/litmus.c
index e8130e362c84..10d9e545a831 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -442,8 +442,6 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
 	binheap_order_t prio_order = NULL;
 #endif
 
-	TRACE_TASK(p, "reinit_litmus_state: restore = %d\n", restore);
-
 	if (restore) {
 		/* Safe user-space provided configuration data.
 		 * and allocated page. */
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 8fe646f1f0c5..fd1b80ac6090 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -315,7 +315,8 @@ static noinline void requeue(struct task_struct* task)
 	/* sanity check before insertion */
 	BUG_ON(is_queued(task));
 
-	if (is_early_releasing(task) || is_released(task, litmus_clock())) {
+	if (is_early_releasing(task) || is_released(task, litmus_clock()) ||
+	    tsk_rt(task)->job_params.is_backlogged_job) {
 #ifdef CONFIG_REALTIME_AUX_TASKS
 		if (unlikely(tsk_rt(task)->is_aux_task && task->state != TASK_RUNNING && !tsk_rt(task)->aux_ready)) {
 			/* aux_task probably transitioned to real-time while it was blocked */
@@ -327,7 +328,7 @@ static noinline void requeue(struct task_struct* task)
 		__add_ready(&cluster->domain, task);
 	}
 	else {
-		TRACE_TASK(task, "not requeueing non-yet-released job\n");
+		TRACE_TASK(task, "not requeueing not-yet-released job\n");
 	}
 }
 
@@ -413,18 +414,59 @@ static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 static noinline void job_completion(struct task_struct *t, int forced)
 {
 	int do_release = 0;
+	int do_backlogged_job = 0;
 	lt_t now;
+
 	BUG_ON(!t);
 
+	now = litmus_clock();
+
+	/* DO BACKLOG TRACKING */
+
+	/* job completed with budget remaining */
+	if (get_release_policy(t) != SPORADIC) {
+		/* only jobs we know will call sleep_next_job() can use backlogging */
+		if (!forced) {
+			/* was it a backlogged job that completed? */
+			if (tsk_rt(t)->job_params.is_backlogged_job) {
+				BUG_ON(!get_backlog(t));
+				--get_backlog(t);
+
+				TRACE_TASK(t, "completed backlogged job\n");
+			}
+		}
+		else {
+			/* budget was exhausted - force early release */
+			++get_backlog(t);
+			TRACE_TASK(t, "adding backlogged job\n");
+		}
+		do_backlogged_job = has_backlog(t);
+		TRACE_TASK(t, "number of backlogged jobs: %u\n",
+				get_backlog(t));
+	}
+
+
+	/* SETUP FOR THE NEXT JOB */
+
 	sched_trace_task_completion(t, forced);
 
-	now = litmus_clock();
-	TRACE_TASK(t, "job_completion() at %llu.\n", now);
+	TRACE_TASK(t, "job_completion() at %llu (forced = %d).\n",
+			now, forced);
 
 	/* set flags */
 	tsk_rt(t)->completed = 1;
-	/* prepare for next period */
-	prepare_for_next_period(t);
+
+	if (!forced && do_backlogged_job) {
+		/* don't advance deadline/refresh budget. use the remaining budget
+		 * for the backlogged job. */
+	}
+	else {
+		if (do_backlogged_job) {
+			TRACE_TASK(t, "refreshing budget with early release for backlogged job.\n");
+		}
+
+		prepare_for_next_period(t);
+	}
 
 	do_release = (is_early_releasing(t) || is_released(t, now));
 	if (do_release) {
@@ -437,14 +479,30 @@ static noinline void job_completion(struct task_struct *t, int forced)
 	/* release or arm next job */
 	tsk_rt(t)->completed = 0;
 	if (is_running(t)) {
-		if (!do_release)
-			add_release(&task_cpu_cluster(t)->domain, t);
-		else
+		/* is our next job a backlogged job? */
+		if (do_backlogged_job) {
+			TRACE_TASK(t, "next job is a backlogged job.\n");
+			tsk_rt(t)->job_params.is_backlogged_job = 1;
+		}
+		else {
+			TRACE_TASK(t, "next job is a regular job.\n");
+			tsk_rt(t)->job_params.is_backlogged_job = 0;
+		}
+
+		if (do_release || do_backlogged_job) {
 			cedf_job_arrival(t);
+		}
+		else {
+			add_release(&task_cpu_cluster(t)->domain, t);
+		}
+	}
+	else {
+		BUG_ON(!forced);
+		TRACE_TASK(t, "job exhausted budget while sleeping\n");
 	}
 }
 
-static void cedf_simple_on_exhausted(struct task_struct *t)
+static enum hrtimer_restart cedf_simple_on_exhausted(struct task_struct *t)
 {
 	/* Assumption: t is scheduled on the CPU executing this callback */
 
@@ -458,18 +516,94 @@ static void cedf_simple_on_exhausted(struct task_struct *t)
 			/* np tasks will be preempted when they become
 			 * preemptable again
 			 */
+			TRACE_TASK(t, "is preemptable => FORCE_RESCHED\n");
+
 			litmus_reschedule_local();
 			set_will_schedule();
-			TRACE("cedf_scheduler_tick: "
-				"%d is preemptable "
-				" => FORCE_RESCHED\n", t->pid);
 		} else if (is_user_np(t)) {
-			TRACE("cedf_scheduler_tick: "
-				"%d is non-preemptable, "
-				"preemption delayed.\n", t->pid);
+			TRACE_TASK(t, "is non-preemptable, preemption delayed.\n");
+
 			request_exit_np(t);
 		}
 	}
+
+	return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t)
+{
+	enum hrtimer_restart restart = HRTIMER_NORESTART;
+
+	/* t may or may not be scheduled */
+
+	if (budget_signalled(t) && !bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) {
+		/* signal exhaustion */
+
+		/* Tasks should block SIG_BUDGET if they cannot gracefully respond to
+		 * the signal while suspended. SIG_BUDGET is an rt-signal, so it will
+		 * be queued and received when SIG_BUDGET is unblocked. */
+		send_sigbudget(t); /* will set BTF_SIG_BUDGET_SENT */
+	}
+
+	if (budget_enforced(t) && !bt_flag_is_set(t, BTF_BUDGET_EXHAUSTED)) {
+		if (is_np(t) && is_user_np(t)) {
+			TRACE_TASK(t, "is non-preemptable, preemption delayed.\n");
+
+			bt_flag_set(t, BTF_BUDGET_EXHAUSTED);
+			request_exit_np(t);
+		}
+		else {
+			/* where do we need to call resched? */
+			int cpu = (tsk_rt(t)->linked_on != NO_CPU) ?
+				tsk_rt(t)->linked_on : tsk_rt(t)->scheduled_on;
+			if (cpu == smp_processor_id()) {
+				TRACE_TASK(t, "is preemptable => FORCE_RESCHED\n");
+
+				bt_flag_set(t, BTF_BUDGET_EXHAUSTED);
+				litmus_reschedule_local();
+				set_will_schedule();
+			}
+			else if (cpu != NO_CPU) {
+				TRACE_TASK(t, "is preemptable on remote cpu (%d) => FORCE_RESCHED\n", cpu);
+
+				bt_flag_set(t, BTF_BUDGET_EXHAUSTED);
+				litmus_reschedule(cpu);
+			}
+			else {
+				BUG_ON(cpu != NO_CPU);
+#ifdef CONFIG_LITMUS_LOCKING
+				if (holds_locks(t)) {
+					/* TODO: Integration with Litmus locking protocols */
+					TRACE_TASK(t, "prevented lock holder from postponing deadline.\n");
+				}
+				else {
+#endif
+					/* force job completion */
+					cedf_domain_t* cluster = task_cpu_cluster(t);
+					unsigned long flags;
+					lt_t remaining;
+
+					TRACE_TASK(t, "blocked, postponing deadline\n");
+
+					raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
+					job_completion(t, 1);	/* refreshes budget */
+
+					hrtimer_forward_now(&get_budget_timer(t).timer.timer,
+							ns_to_ktime(budget_remaining(t)));
+					remaining = hrtimer_get_expires_ns(&get_budget_timer(t).timer.timer);
+
+					raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
+
+					TRACE_TASK(t, "rearmed timer to %ld\n", remaining);
+					restart = HRTIMER_RESTART;
+#ifdef CONFIG_LITMUS_LOCKING
+				}
+#endif
+			}
+		}
+	}
+
+	return restart;
 }
 
 
@@ -873,8 +1007,10 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 	if (tsk_rt(prev)->budget.ops) {
 		if (blocks)
 			tsk_rt(prev)->budget.ops->on_blocked(prev);
-		else if (preempt || sleep)
-			tsk_rt(prev)->budget.ops->on_preempt_or_sleep(prev);
+		else if (sleep)
+			tsk_rt(prev)->budget.ops->on_sleep(prev);
+		else if (preempt)
+			tsk_rt(prev)->budget.ops->on_preempt(prev);
 	}
 
 	/* If a task blocks we have no choice but to reschedule.
@@ -1156,12 +1292,23 @@ static struct budget_tracker_ops cedf_drain_simple_ops =
 {
 	.on_scheduled = simple_on_scheduled,
 	.on_blocked = simple_on_blocked,
-	.on_preempt_or_sleep = simple_on_preempt_or_sleep,
+	.on_preempt = simple_on_preempt,
+	.on_sleep = simple_on_sleep,
 	.on_exit = simple_on_exit,
 
 	.on_exhausted = cedf_simple_on_exhausted,
 };
 
+static struct budget_tracker_ops cedf_drain_sobliv_ops =
+{
+	.on_scheduled = sobliv_on_scheduled,
+	.on_blocked = sobliv_on_blocked,
+	.on_preempt = sobliv_on_preempt,
+	.on_sleep = sobliv_on_sleep,
+	.on_exit = sobliv_on_exit,
+
+	.on_exhausted = cedf_sobliv_on_exhausted,
+};
 
 static long cedf_admit_task(struct task_struct* tsk)
 {
@@ -1173,6 +1320,17 @@ static long cedf_admit_task(struct task_struct* tsk)
 	case DRAIN_SIMPLE:
 		init_budget_tracker(&tsk_rt(tsk)->budget, &cedf_drain_simple_ops);
 		break;
+	case DRAIN_SOBLIV:
+		/* budget_policy and budget_signal_policy cannot be quantum-based */
+		if (!budget_quantum_tracked(tsk) && budget_precisely_tracked(tsk)) {
+			init_budget_tracker(&tsk_rt(tsk)->budget, &cedf_drain_sobliv_ops);
+		}
+		else {
+			TRACE_TASK(tsk, "QUANTUM_ENFORCEMENT and QUANTUM_SIGNALS are "
+					"unsupported with DRAIN_SOBLIV.\n");
+			return -EINVAL;
+		}
+		break;
 	default:
 		TRACE_TASK(tsk, "Unsupported budget draining mode.\n");
 		return -EINVAL;
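
Admission thus accepts DRAIN_SOBLIV only with precise, non-quantum budget tracking. From user space, a task configuration that should pass this check looks roughly like the following (a sketch; exact liblitmus field and constant names may differ from this branch):

	struct rt_task params;
	memset(&params, 0, sizeof(params));
	params.exec_cost            = ms2ns(10);
	params.period               = ms2ns(100);
	params.budget_policy        = PRECISE_ENFORCEMENT;	/* not QUANTUM_ENFORCEMENT */
	params.budget_signal_policy = NO_SIGNALS;		/* not QUANTUM_SIGNALS */
	params.drain_policy         = DRAIN_SOBLIV;		/* new draining mode */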
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 15ac94038702..2950e39b054e 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -421,7 +421,7 @@ static noinline void job_completion(struct task_struct *t, int forced)
 	gsnedf_job_arrival(t);
 }
 
-static void gsnedf_simple_on_exhausted(struct task_struct *t)
+static enum hrtimer_restart gsnedf_simple_on_exhausted(struct task_struct *t)
 {
 	/* Assumption: t is scheduled on the CPU executing this callback */
 
@@ -446,6 +446,8 @@ static void gsnedf_simple_on_exhausted(struct task_struct *t)
 			request_exit_np(t);
 		}
 	}
+
+	return HRTIMER_NORESTART;
 }
 
 /* gsnedf_tick - this function is called for every local timer
@@ -849,8 +851,10 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 	if (tsk_rt(prev)->budget.ops) {
 		if (blocks)
 			tsk_rt(prev)->budget.ops->on_blocked(prev);
-		else if (preempt || sleep)
-			tsk_rt(prev)->budget.ops->on_preempt_or_sleep(prev);
+		else if (sleep)
+			tsk_rt(prev)->budget.ops->on_sleep(prev);
+		else if (preempt)
+			tsk_rt(prev)->budget.ops->on_preempt(prev);
 	}
 
 	/* If a task blocks we have no choice but to reschedule.
@@ -1122,7 +1126,8 @@ static struct budget_tracker_ops gsnedf_drain_simple_ops =
 {
 	.on_scheduled = simple_on_scheduled,
 	.on_blocked = simple_on_blocked,
-	.on_preempt_or_sleep = simple_on_preempt_or_sleep,
+	.on_preempt = simple_on_preempt,
+	.on_sleep = simple_on_sleep,
 	.on_exit = simple_on_exit,
 
 	.on_exhausted = gsnedf_simple_on_exhausted,
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
index 4a8b8e084f6e..33f861ab0056 100644
--- a/litmus/sched_pfp.c
+++ b/litmus/sched_pfp.c
@@ -132,7 +132,7 @@ static void job_completion(struct task_struct* t, int forced)
 	sched_trace_task_release(t);
 }
 
-static void pfp_simple_on_exhausted(struct task_struct *t)
+static enum hrtimer_restart pfp_simple_on_exhausted(struct task_struct *t)
 {
 	/* Assumption: t is scheduled on the CPU executing this callback */
 
@@ -157,6 +157,8 @@ static void pfp_simple_on_exhausted(struct task_struct *t)
 			request_exit_np(t);
 		}
 	}
+
+	return HRTIMER_NORESTART;
 }
 
 static void pfp_tick(struct task_struct *t)
@@ -214,8 +216,10 @@ static struct task_struct* pfp_schedule(struct task_struct * prev)
 	if (tsk_rt(prev)->budget.ops) {
 		if (blocks)
 			tsk_rt(prev)->budget.ops->on_blocked(prev);
-		else if (preempt || sleep)
-			tsk_rt(prev)->budget.ops->on_preempt_or_sleep(prev);
+		else if (sleep)
+			tsk_rt(prev)->budget.ops->on_sleep(prev);
+		else if (preempt)
+			tsk_rt(prev)->budget.ops->on_preempt(prev);
 	}
 
 	/* If a task blocks we have no choice but to reschedule.
@@ -1713,7 +1717,8 @@ static struct budget_tracker_ops pfp_drain_simple_ops =
 {
 	.on_scheduled = simple_on_scheduled,
 	.on_blocked = simple_on_blocked,
-	.on_preempt_or_sleep = simple_on_preempt_or_sleep,
+	.on_preempt = simple_on_preempt,
+	.on_sleep = simple_on_sleep,
 	.on_exit = simple_on_exit,
 
 	.on_exhausted = pfp_simple_on_exhausted,
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 3b3edfe908ff..c06db8b434cd 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -164,7 +164,7 @@ static void job_completion(struct task_struct* t, int forced)
 	prepare_for_next_period(t);
 }
 
-static void psnedf_simple_on_exhausted(struct task_struct *t)
+static enum hrtimer_restart psnedf_simple_on_exhausted(struct task_struct *t)
 {
 	/* Assumption: t is scheduled on the CPU executing this callback */
 
@@ -189,6 +189,8 @@ static void psnedf_simple_on_exhausted(struct task_struct *t)
 			request_exit_np(t);
 		}
 	}
+
+	return HRTIMER_NORESTART;
 }
 
 static void psnedf_tick(struct task_struct *t)
@@ -246,8 +248,10 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 	if (tsk_rt(prev)->budget.ops) {
 		if (blocks)
 			tsk_rt(prev)->budget.ops->on_blocked(prev);
-		else if (preempt || sleep)
-			tsk_rt(prev)->budget.ops->on_preempt_or_sleep(prev);
+		else if (sleep)
+			tsk_rt(prev)->budget.ops->on_sleep(prev);
+		else if (preempt)
+			tsk_rt(prev)->budget.ops->on_preempt(prev);
 	}
 
 	/* If a task blocks we have no choice but to reschedule.
@@ -647,7 +651,8 @@ static struct budget_tracker_ops psnedf_drain_simple_ops =
 {
 	.on_scheduled = simple_on_scheduled,
 	.on_blocked = simple_on_blocked,
-	.on_preempt_or_sleep = simple_on_preempt_or_sleep,
+	.on_preempt = simple_on_preempt,
+	.on_sleep = simple_on_sleep,
 	.on_exit = simple_on_exit,
 
 	.on_exhausted = psnedf_simple_on_exhausted,
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
index 8d75437e7771..e243b8007826 100644
--- a/litmus/sched_task_trace.c
+++ b/litmus/sched_task_trace.c
@@ -192,7 +192,9 @@ feather_callback void do_sched_trace_task_completion(unsigned long id,
 	struct task_struct *t = (struct task_struct*) _task;
 	struct st_event_record* rec = get_record(ST_COMPLETION, t);
 	if (rec) {
 		rec->data.completion.when = now();
+		rec->data.completion.backlog_remaining = tsk_rt(t)->job_params.backlog;
+		rec->data.completion.was_backlog_job = tsk_rt(t)->job_params.is_backlogged_job;
 		rec->data.completion.forced = forced;
 		put_record(rec);
 	}