author	Glenn Elliott <gelliott@cs.unc.edu>	2013-04-01 18:58:46 -0400
committer	Glenn Elliott <gelliott@cs.unc.edu>	2013-04-01 18:58:46 -0400
commit	3324865fc5792b9d755d46cafa42c74b5037bba5 (patch)
tree	3093b97b7ece695d0bc64f7f92d8083f2fbf9c95
parent	699737644d64e88bceafb9c2d39bd587057c732a (diff)
SOBLIV: Drain budget while task is in top-m only.
Also fixed numerous bugs...
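
Note on the policy: under SOBLIV (suspension-oblivious) draining, a task consumes budget whenever it is among the m highest base-priority tasks of its cluster (the "top-m"), even while it is suspended; outside the top-m its budget is frozen. A minimal user-space model of the wake-up accounting this patch implements (all names and values hypothetical, not part of the patch):

/* sketch, not kernel code: a top-m task keeps draining budget across a
 * suspension; the time spent asleep is charged to exec_time on wake-up. */
#include <stdbool.h>
#include <stdint.h>

struct model_task {
	uint64_t exec_time;   /* budget consumed so far (ns) */
	uint64_t suspend_ts;  /* when the current suspension began (ns) */
	bool is_top_m;
};

static void model_block(struct model_task *t, uint64_t now)
{
	if (t->is_top_m)
		t->suspend_ts = now;  /* budget keeps draining while asleep */
}

static void model_wakeup(struct model_task *t, uint64_t now)
{
	if (t->is_top_m)
		t->exec_time += now - t->suspend_ts;  /* charge the sleep time */
}

int main(void)
{
	struct model_task t = { .exec_time = 0, .is_top_m = true };
	model_block(&t, 10000000);   /* blocks at t = 10 ms */
	model_wakeup(&t, 14000000);  /* wakes at t = 14 ms */
	return t.exec_time == 4000000 ? 0 : 1;  /* 4 ms charged */
}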
-rw-r--r--	include/litmus/bheap.h		|   4
-rw-r--r--	include/litmus/budget.h		|  46
-rw-r--r--	include/litmus/litmus.h		|   2
-rw-r--r--	include/litmus/nvidia_info.h	|   4
-rw-r--r--	include/litmus/rt_param.h	|   1
-rw-r--r--	include/litmus/sched_plugin.h	|   5
-rw-r--r--	litmus/bheap.c			|  23
-rw-r--r--	litmus/budget.c			| 149
-rw-r--r--	litmus/nvidia_info.c		|  17
-rw-r--r--	litmus/sched_cedf.c		| 454
-rw-r--r--	litmus/sched_gsn_edf.c		|   8
-rw-r--r--	litmus/sched_litmus.c		|   4
-rw-r--r--	litmus/sched_plugin.c		|   6
13 files changed, 501 insertions(+), 222 deletions(-)
diff --git a/include/litmus/bheap.h b/include/litmus/bheap.h
index 49f7e44bc0a5..4fded5724b28 100644
--- a/include/litmus/bheap.h
+++ b/include/litmus/bheap.h
@@ -48,6 +48,10 @@ static inline int bheap_empty(struct bheap* heap)
 //	return heap->size;
 //}
 
+typedef void (*bheap_for_all_t)(struct bheap_node* node, void* args);
+
+void bheap_for_all(struct bheap* heap, bheap_for_all_t fn, void* args);
+
 /* insert (and reinitialize) a node into the heap */
 void bheap_insert(bheap_prio_t higher_prio,
 		struct bheap* heap,
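
Note: the new iterator is exercised later in this patch by litmus/sched_cedf.c; condensed, the usage pattern is (comments added here):

/* condensed from litmus/sched_cedf.c below; fn runs once per heap node */
static void cedf_track_on_release(struct bheap_node* n, void* dummy)
{
	cedf_track_in_top_m(bheap2task(n));  /* begin budget tracking on release */
}

	/* with the cluster lock held: */
	bheap_for_all(tasks, cedf_track_on_release, NULL);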
diff --git a/include/litmus/budget.h b/include/litmus/budget.h
index 8e426a71f03d..08d5e0970d1d 100644
--- a/include/litmus/budget.h
+++ b/include/litmus/budget.h
@@ -4,6 +4,8 @@
 #include <linux/hrtimer.h>
 #include <linux/semaphore.h>
 
+#include <litmus/binheap.h>
+
 struct enforcement_timer
 {
 	raw_spinlock_t lock;
@@ -15,17 +17,22 @@ typedef void (*scheduled_t)(struct task_struct* t);
 typedef void (*blocked_t)(struct task_struct* t);
 typedef void (*preempt_t)(struct task_struct* t);
 typedef void (*sleep_t)(struct task_struct* t);
+typedef void (*wakeup_t)(struct task_struct* t);
 typedef enum hrtimer_restart (*exhausted_t)(struct task_struct* t);
 typedef void (*exit_t)(struct task_struct* t);
 typedef void (*inherit_t)(struct task_struct* t, struct task_struct* prio_inh);
 typedef void (*disinherit_t)(struct task_struct* t, struct task_struct* prio_inh);
 
+typedef void (*enter_top_m_t)(struct task_struct* t);
+typedef void (*exit_top_m_t)(struct task_struct* t);
+
 struct budget_tracker_ops
 {
 	scheduled_t on_scheduled;	/* called from litmus_schedule(). */
 	blocked_t on_blocked;		/* called from plugin::schedule() */
 	preempt_t on_preempt;		/* called from plugin::schedule() */
 	sleep_t on_sleep;		/* called from plugin::schedule() */
+	wakeup_t on_wakeup;
 
 	exit_t on_exit;			/* task exiting rt mode */
 
@@ -33,6 +40,9 @@ struct budget_tracker_ops
 
 	inherit_t on_inherit;
 	disinherit_t on_disinherit;
+
+	enter_top_m_t on_enter_top_m;
+	exit_top_m_t on_exit_top_m;
 };
 
 struct budget_tracker
@@ -40,13 +50,17 @@ struct budget_tracker
 	struct enforcement_timer timer;
 	const struct budget_tracker_ops* ops;
 	unsigned long flags;
+
+	struct binheap_node top_m_node;
+	lt_t suspend_timestamp;
 };
 
 /* budget tracker flags */
 enum BT_FLAGS
 {
 	BTF_BUDGET_EXHAUSTED	= 0,
 	BTF_SIG_BUDGET_SENT	= 1,
+	BTF_IS_TOP_M		= 2,
 };
 
 /* Functions for simple DRAIN_SIMPLE policy common
@@ -66,16 +80,38 @@ void simple_on_exit(struct task_struct* t);
  *
  * Limitation: Quantum budget tracking is unsupported.
  */
-void sobliv_on_scheduled(struct task_struct* t);
+//void sobliv_on_scheduled(struct task_struct* t);
 void sobliv_on_blocked(struct task_struct* t);
-void sobliv_on_sleep(struct task_struct* t);
+void sobliv_on_wakeup(struct task_struct* t);
+//void sobliv_on_sleep(struct task_struct* t);
+//void sobliv_on_preempt(struct task_struct* t);
 /* Use the DRAIN_SIMPLE implementations */
-#define sobliv_on_preempt simple_on_preempt
 #define sobliv_on_exit simple_on_exit
 void sobliv_on_inherit(struct task_struct* t, struct task_struct* prio_inh);
 void sobliv_on_disinherit(struct task_struct* t, struct task_struct* prio_inh);
+void sobliv_on_enter_top_m(struct task_struct* t);
+void sobliv_on_exit_top_m(struct task_struct* t);
+
 void sobliv_revaluate_task(struct task_struct* t);
 
+#define budget_state_machine(t, evt) \
+	do { \
+		if (get_budget_timer(t).ops && \
+		    get_budget_timer(t).ops->evt != NULL) { \
+			get_budget_timer(t).ops->evt(t); \
+		} \
+	} while (0)
+
+#define budget_state_machine2(a, b, evt) \
+	do { \
+		if (get_budget_timer(a).ops && \
+		    get_budget_timer(b).ops && \
+		    get_budget_timer(a).ops->evt != NULL && \
+		    get_budget_timer(b).ops->evt != NULL) { \
+			get_budget_timer(a).ops->evt(a, b); \
+		} \
+	} while (0)
+
 
 void init_budget_tracker(struct budget_tracker* bt,
 	const struct budget_tracker_ops* ops);
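
Note: budget_state_machine() centralizes the NULL checks that were previously open-coded at each call site, and tolerates both a NULL ops table and a NULL hook. For instance, the schedule-path invocation in litmus/sched_cedf.c (see below) changes shape like this:

/* before: caller checks the ops table by hand */
if (tsk_rt(prev)->budget.ops)
	tsk_rt(prev)->budget.ops->on_blocked(prev);

/* after: the macro skips the call when ops or the hook is NULL */
budget_state_machine(prev,on_blocked);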
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 4fa705e65f0c..4e74101a5619 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -166,7 +166,7 @@ static inline void set_inh_task_linkback(struct task_struct* t, struct task_stru
 
 	while(!success) {
 		int b = find_first_zero_bit(&tsk_rt(linkto)->used_linkback_slots,
-				sizeof(tsk_rt(linkto)->used_linkback_slots));
+				BITS_PER_BYTE*sizeof(tsk_rt(linkto)->used_linkback_slots));
 
 		BUG_ON(b > MAX_IDX);
 
diff --git a/include/litmus/nvidia_info.h b/include/litmus/nvidia_info.h
index 7db4a32af734..f1477fb9dc33 100644
--- a/include/litmus/nvidia_info.h
+++ b/include/litmus/nvidia_info.h
@@ -45,6 +45,10 @@ long enable_gpu_owner(struct task_struct *t);
 /* call when the GPU-holding task, t, resumes */
 long disable_gpu_owner(struct task_struct *t);
 
+/* call when the GPU-holding task, t, had a priority change due to budget
+ * exhaustion */
+long recheck_gpu_owner(struct task_struct* t);
+
 /* call when the GPU-holding task, t, increases its priority */
 int gpu_owner_increase_priority(struct task_struct *t);
 
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 43a7e2126bf4..3f3aa240778f 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -427,7 +427,6 @@ struct rt_param {
 	struct task_struct** inh_task_linkbacks;	/* array. BITS_PER_LONG elements. */
 	unsigned long used_linkback_slots;
 
-
 #ifdef CONFIG_REALTIME_AUX_TASKS
 	unsigned int is_aux_task:1;
 	unsigned int aux_ready:1;
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index d9e3a46129f4..82e62e8283e9 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -35,9 +35,6 @@ typedef struct task_struct* (*schedule_t)(struct task_struct * prev);
  */
 typedef void (*finish_switch_t)(struct task_struct *prev);
 
-/* trigger a reschedule of 't' if 't' is running. */
-typedef void (*check_schedule_t)(struct task_struct *t);
-
 /********************* task state changes ********************/
 
 /* Called to setup a new real-time task.
@@ -132,8 +129,6 @@ struct sched_plugin {
 	scheduler_tick_t	tick;
 	schedule_t		schedule;
 	finish_switch_t		finish_switch;
-	check_schedule_t	check_schedule;
-
 
 	/* syscall backend */
 	complete_job_t		complete_job;
diff --git a/litmus/bheap.c b/litmus/bheap.c
index 45e1db36fa36..403c09cc9e81 100644
--- a/litmus/bheap.c
+++ b/litmus/bheap.c
@@ -21,6 +21,29 @@ void bheap_node_init(struct bheap_node** _h, void* value)
 }
 
 
+static void __bheap_for_all(struct bheap_node *h, bheap_for_all_t fn, void* args)
+{
+	/* pre-order */
+	fn(h, args);
+
+	/* depth-first */
+	if (h->child)
+		__bheap_for_all(h->child, fn, args);
+	if (h->next)
+		__bheap_for_all(h->next, fn, args);
+}
+
+void bheap_for_all(struct bheap* heap, bheap_for_all_t fn, void* args)
+{
+	struct bheap_node *head;
+
+	BUG_ON(!heap);
+	BUG_ON(!fn);
+
+	head = heap->head;
+	__bheap_for_all(head, fn, args);
+}
+
 /* make child a subtree of root */
 static void __bheap_link(struct bheap_node* root,
 			 struct bheap_node* child)
diff --git a/litmus/budget.c b/litmus/budget.c
index 779506abf119..718458925fb4 100644
--- a/litmus/budget.c
+++ b/litmus/budget.c
@@ -159,63 +159,112 @@ void simple_on_exit(struct task_struct* t)
  * DRAIN_SOBLIV
  */
 
-void sobliv_on_scheduled(struct task_struct* t)
-{
-	BUG_ON(!t);
-
-	if (!bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) {
-		if (tsk_rt(t)->budget.timer.armed)
-			TRACE_TASK(t, "budget timer already armed.\n");
-		else
-			arm_enforcement_timer(t);
-	}
-
-	if (tsk_rt(t)->inh_task)
-		BUG_ON(is_running(tsk_rt(t)->inh_task));
-}
-
 void sobliv_on_blocked(struct task_struct* t)
 {
-	/* NOOP */
-	TRACE_TASK(t, "sobliv: budget drains while suspended.\n");
-}
-
-void sobliv_on_sleep(struct task_struct* t)
-{
-	if (budget_precisely_tracked(t)) {
-		/* kludge. callback called before job_completion logic runs, so
-		 * we need to do some logic of our own to figure out if there is a
-		 * backlog after this job (it is completing since sleep is asserted)
-		 * completes. */
-		int no_backlog = (!has_backlog(t) || /* no backlog */
-			/* the last backlogged job is completing */
-			(get_backlog(t) == 1 && tsk_rt(t)->job_params.is_backlogged_job));
-		if (no_backlog)
-			cancel_enforcement_timer(t);
-		else
-			TRACE_TASK(t, "not cancelling timer because there is time for backlogged work.\n");
+	if (bt_flag_is_set(t, BTF_IS_TOP_M)) {
+		if (tsk_rt(t)->budget.timer.armed) {
+			/* there is a fraction of time where we're double-counting the
+			 * time tracked by the rq and suspension time.
+			 * TODO: Do this recording closer to suspension time. */
+			tsk_rt(t)->budget.suspend_timestamp = litmus_clock();
+
+			TRACE_TASK(t, "budget drains while suspended.\n");
+		}
+		else {
+			TRACE_TASK(t, "budget timer not armed?\n");
+			WARN_ON(1);
+		}
+	}
+}
+
+void sobliv_on_wakeup(struct task_struct* t)
+{
+	if (bt_flag_is_set(t, BTF_IS_TOP_M)) {
+		/* we're waking up while in top-m. record the time spent
+		 * suspended while draining in exec_time. suspend_timestamp was
+		 * either set when we entered top-m while asleep, or when we
+		 * blocked. */
+		lt_t suspend_cost;
+		BUG_ON(!tsk_rt(t)->budget.suspend_timestamp);
+		suspend_cost = litmus_clock() - tsk_rt(t)->budget.suspend_timestamp;
+		TRACE_TASK(t, "budget consumed while suspended: %llu\n", suspend_cost);
+		get_exec_time(t) += suspend_cost;
 	}
 }
 
 void sobliv_on_inherit(struct task_struct* t, struct task_struct* prio_inh)
 {
-//	BUG_ON(!prio_inh);
-//
-//	if (budget_precisely_tracked(t)) {
-//		TRACE_TASK(t, "inheriting from %s/%d. stop draining own budget.\n",
-//				prio_inh->comm, prio_inh->pid);
-//		cancel_enforcement_timer(t);
-//	}
+	/* TODO: Budget credit accounting. */
+
+	BUG_ON(!prio_inh);
+	TRACE_TASK(t, "called %s\n", __FUNCTION__);
 }
 
 void sobliv_on_disinherit(struct task_struct* t, struct task_struct* prio_inh)
 {
-//	if (!prio_inh && budget_precisely_tracked(t)) {
-//		TRACE_TASK(t, "assuming base priority. start draining own budget.\n");
-//		arm_enforcement_timer(t);
-//	}
+	/* TODO: Budget credit accounting. */
+	TRACE_TASK(t, "called %s\n", __FUNCTION__);
+}
+
+void sobliv_on_enter_top_m(struct task_struct* t)
+{
+	if (!bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) {
+		if (tsk_rt(t)->budget.timer.armed)
+			TRACE_TASK(t, "budget timer already armed.\n");
+		else {
+			/* if we're blocked, then record the time at which we started measuring */
+			if (!is_running(t))
+				tsk_rt(t)->budget.suspend_timestamp = litmus_clock();
+
+			/* the callback will handle it if it is executing */
+			if (!hrtimer_callback_running(&tsk_rt(t)->budget.timer.timer))
+				arm_enforcement_timer(t);
+			else
+				TRACE_TASK(t, "within callback context. deferring timer arm.\n");
+		}
+	}
+}
+
+void sobliv_on_exit_top_m(struct task_struct* t)
+{
+	if (budget_precisely_tracked(t)) {
+		if (tsk_rt(t)->budget.timer.armed) {
+
+			if (!is_running(t)) {
+				/* the time at which we started draining budget while
+				 * suspended is recorded in suspend_timestamp. it was set
+				 * either when 't' entered the top-m while suspended or
+				 * when 't' blocked. */
+				lt_t suspend_cost;
+				BUG_ON(!tsk_rt(t)->budget.suspend_timestamp);
+				suspend_cost = litmus_clock() - tsk_rt(t)->budget.suspend_timestamp;
+				TRACE_TASK(t, "budget consumed while suspended: %llu\n", suspend_cost);
+				get_exec_time(t) += suspend_cost;
+
+				/* timer should have fired before now */
+				if (get_exec_time(t) + 1000000/10 > get_exec_cost(t)) {
+					TRACE_TASK(t, "budget overrun while suspended by over 1/10 "
+							"millisecond! timer should have already fired!\n");
+					WARN_ON(1);
+				}
+			}
+
+			TRACE_TASK(t, "stops draining budget\n");
+			/* the callback will handle it if it is executing */
+			if (!hrtimer_callback_running(&tsk_rt(t)->budget.timer.timer)) {
+				/* TODO: record a timestamp if the task isn't running */
+				cancel_enforcement_timer(t);
+			}
+			else
+				TRACE_TASK(t, "within callback context. skipping operation.\n");
+		}
+		else {
+			TRACE_TASK(t, "was not draining budget\n");
+		}
+	}
 }
 
+
 void sobliv_revaluate_task(struct task_struct* t)
 {
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
@@ -256,18 +305,6 @@ void sobliv_revaluate_task(struct task_struct* t)
 
 	/* TODO: If we hold an OMLP-family outmost lock, then we may
 	 * need to move a task into a fifo queue */
-
-
-
-//	/* anyone who inherits from me may need to be rescheduled */
-//	linkback = tsk_rt(t)->inh_task_linkback;
-//	if (linkback) {
-//		/* TODO: IS THIS THREAD SAFE???? */
-//		TRACE_TASK(t, "Checking if inheritor %s/%d needs to be rescheduled.\n",
-//				linkback->comm,
-//				linkback->pid);
-//		litmus->check_schedule(linkback);
-//	}
 }
 
 
@@ -311,17 +348,11 @@ static enum hrtimer_restart __on_timeout(struct hrtimer *timer)
 void init_budget_tracker(struct budget_tracker* bt, const struct budget_tracker_ops* ops)
 {
 	BUG_ON(!bt);
-	BUG_ON(!ops);
-
-	BUG_ON(!ops->on_scheduled);
-	BUG_ON(!ops->on_blocked);
-	BUG_ON(!ops->on_preempt);
-	BUG_ON(!ops->on_sleep);
-	BUG_ON(!ops->on_exhausted);
 
 	memset(bt, 0, sizeof(*bt));
 	raw_spin_lock_init(&bt->timer.lock);
 	hrtimer_init(&bt->timer.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	bt->timer.timer.function = __on_timeout;
 	bt->ops = ops;
+	INIT_BINHEAP_NODE(&bt->top_m_node);
 }
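
Note: a worked example of the sobliv accounting implemented above (values illustrative):

/* Illustrative timeline (hypothetical values):
 *   t = 10 ms: task blocks while in top-m -> suspend_timestamp = 10 ms;
 *              the enforcement timer stays armed, so budget exhaustion
 *              can fire while the task sleeps.
 *   t = 14 ms: task wakes, still in top-m -> exec_time += 14 - 10 = 4 ms.
 *   If instead the task leaves the top-m at t = 12 ms while still asleep,
 *   sobliv_on_exit_top_m() charges 12 - 10 = 2 ms and cancels the timer;
 *   draining stays frozen until the task re-enters the top-m. */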
diff --git a/litmus/nvidia_info.c b/litmus/nvidia_info.c
index e87e56542a23..c96a209231a2 100644
--- a/litmus/nvidia_info.c
+++ b/litmus/nvidia_info.c
@@ -785,7 +785,7 @@ long enable_gpu_owner(struct task_struct *t)
 
 	BUG_ON(!is_realtime(t));
 
-	gpu = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus));
+	gpu = find_first_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus));
 
 	if (binheap_is_in_heap(&tsk_rt(t)->gpu_owner_node)) {
 		TRACE_CUR("task %s/%d is already active on GPU %d\n", t->comm, t->pid, gpu);
@@ -853,7 +853,7 @@ long disable_gpu_owner(struct task_struct *t)
 
 	BUG_ON(!is_realtime(t));
 
-	gpu = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus));
+	gpu = find_first_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus));
 
 	if (!binheap_is_in_heap(&tsk_rt(t)->gpu_owner_node)) {
 //		TRACE_CUR("task %s/%d is not active on GPU %d\n", t->comm, t->pid, gpu);
@@ -916,7 +916,14 @@ out:
 }
 
 
-
+long recheck_gpu_owner(struct task_struct* t)
+{
+	/* TODO: blend implementation of disable/enable */
+	int retval = disable_gpu_owner(t);
+	if (!retval)
+		retval = enable_gpu_owner(t);
+	return retval;
+}
 
 
 
@@ -940,7 +947,7 @@ int gpu_owner_increase_priority(struct task_struct *t)
 	BUG_ON(!is_realtime(t));
 	BUG_ON(!tsk_rt(t)->held_gpus);
 
-	gpu = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus));
+	gpu = find_first_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus));
 
 	if (!binheap_is_in_heap(&tsk_rt(t)->gpu_owner_node)) {
 		TRACE_CUR("nv klmirqd may not inherit from %s/%d on GPU %d\n",
@@ -1013,7 +1020,7 @@ int gpu_owner_decrease_priority(struct task_struct *t)
 	BUG_ON(!is_realtime(t));
 	BUG_ON(!tsk_rt(t)->held_gpus);
 
-	gpu = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus));
+	gpu = find_first_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus));
 
 	if (!binheap_is_in_heap(&tsk_rt(t)->gpu_owner_node)) {
 		TRACE_CUR("nv klmirqd may not inherit from %s/%d on GPU %d\n",
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 4551fb851dbd..fc174c464a17 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -143,6 +143,11 @@ typedef struct clusterdomain {
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 	raw_spinlock_t dgl_lock;
 #endif
+
+	int top_m_size;
+	struct binheap top_m;
+	struct binheap not_top_m;
+
 } cedf_domain_t;
 
 /* a cedf_domain per cluster; allocation is done at init/activation time */
@@ -164,6 +169,141 @@ static int num_gpu_clusters;
 static unsigned int gpu_cluster_size;
 #endif
 
+inline static struct task_struct* binheap_node_to_task(struct binheap_node *bn)
+{
+	struct budget_tracker *bt = binheap_entry(bn, struct budget_tracker, top_m_node);
+	struct task_struct *t =
+		container_of(
+			container_of(bt, struct rt_param, budget),
+			struct task_struct,
+			rt_param);
+	return t;
+}
+
+static int cedf_max_heap_base_priority_order(struct binheap_node *a,
+				struct binheap_node *b)
+{
+	struct task_struct* t_a = binheap_node_to_task(a);
+	struct task_struct* t_b = binheap_node_to_task(b);
+	return __edf_higher_prio(t_a, BASE, t_b, BASE);
+}
+
+static int cedf_min_heap_base_priority_order(struct binheap_node *a,
+				struct binheap_node *b)
+{
+	struct task_struct* t_a = binheap_node_to_task(a);
+	struct task_struct* t_b = binheap_node_to_task(b);
+	return __edf_higher_prio(t_b, BASE, t_a, BASE);
+}
+
+static void cedf_track_in_top_m(struct task_struct *t)
+{
+	/* cluster lock must be held */
+	cedf_domain_t *cluster = task_cpu_cluster(t);
+	struct budget_tracker *bt;
+	struct task_struct *mth_highest;
+
+	//BUG_ON(binheap_is_in_heap(&tsk_rt(t)->budget.top_m_node));
+	if (binheap_is_in_heap(&tsk_rt(t)->budget.top_m_node)) {
+		TRACE_TASK(t, "apparently already being tracked. top-m?: %s\n",
+				(bt_flag_is_set(t, BTF_IS_TOP_M)) ? "Yes" : "No");
+		return;
+	}
+
+	/* TODO: do cluster_size-1 if release master is in this cluster */
+	if (cluster->top_m_size < cluster_size) {
+		TRACE_TASK(t, "unconditionally adding task to top-m.\n");
+		binheap_add(&tsk_rt(t)->budget.top_m_node, &cluster->top_m,
+				struct budget_tracker, top_m_node);
+		++cluster->top_m_size;
+		bt_flag_set(t, BTF_IS_TOP_M);
+		budget_state_machine(t,on_enter_top_m);
+
+		return;
+	}
+	bt = binheap_top_entry(&cluster->top_m, struct budget_tracker, top_m_node);
+	mth_highest =
+		container_of(
+			container_of(bt, struct rt_param, budget),
+			struct task_struct,
+			rt_param);
+
+	if (__edf_higher_prio(t, BASE, mth_highest, BASE)) {
+
+		TRACE_TASK(t, "adding to top-m (evicting %s/%d)\n",
+				mth_highest->comm, mth_highest->pid);
+
+		binheap_delete_root(&cluster->top_m, struct budget_tracker, top_m_node);
+		INIT_BINHEAP_NODE(&tsk_rt(mth_highest)->budget.top_m_node);
+		binheap_add(&tsk_rt(mth_highest)->budget.top_m_node,
+				&cluster->not_top_m,
+				struct budget_tracker, top_m_node);
+		budget_state_machine(mth_highest,on_exit_top_m);
+		bt_flag_clear(mth_highest, BTF_IS_TOP_M);
+
+		binheap_add(&tsk_rt(t)->budget.top_m_node, &cluster->top_m,
+				struct budget_tracker, top_m_node);
+		bt_flag_set(t, BTF_IS_TOP_M);
+		budget_state_machine(t,on_enter_top_m);
+	}
+	else {
+		TRACE_TASK(t, "adding to not-top-m\n");
+		binheap_add(&tsk_rt(t)->budget.top_m_node,
+				&cluster->not_top_m,
+				struct budget_tracker, top_m_node);
+	}
+}
+
+static void cedf_untrack_in_top_m(struct task_struct *t)
+{
+	/* cluster lock must be held */
+	cedf_domain_t *cluster = task_cpu_cluster(t);
+
+	if (!binheap_is_in_heap(&tsk_rt(t)->budget.top_m_node)) {
+		TRACE_TASK(t, "is not being tracked\n");  /* BUG() on this case? */
+		return;
+	}
+
+	if (bt_flag_is_set(t, BTF_IS_TOP_M)) {
+
+		TRACE_TASK(t, "removing task from top-m\n");
+
+		/* delete t's entry */
+		binheap_delete(&tsk_rt(t)->budget.top_m_node, &cluster->top_m);
+		budget_state_machine(t,on_exit_top_m);
+		bt_flag_clear(t, BTF_IS_TOP_M);
+
+		/* move a task over from the overflow heap */
+		if(!binheap_empty(&cluster->not_top_m)) {
+			struct budget_tracker *bt =
+				binheap_top_entry(&cluster->not_top_m, struct budget_tracker, top_m_node);
+			struct task_struct *to_move =
+				container_of(
+					container_of(bt, struct rt_param, budget),
+					struct task_struct,
+					rt_param);
+
+			TRACE_TASK(to_move, "being promoted to top-m\n");
+
+			binheap_delete_root(&cluster->not_top_m, struct budget_tracker, top_m_node);
+			INIT_BINHEAP_NODE(&tsk_rt(to_move)->budget.top_m_node);
+
+			binheap_add(&tsk_rt(to_move)->budget.top_m_node,
+					&cluster->top_m,
+					struct budget_tracker, top_m_node);
+			bt_flag_set(to_move, BTF_IS_TOP_M);
+			budget_state_machine(to_move,on_enter_top_m);
+		}
+		else {
+			--cluster->top_m_size;
+		}
+	}
+	else {
+		TRACE_TASK(t, "removing task from not-top-m\n");
+		binheap_delete(&tsk_rt(t)->budget.top_m_node, &cluster->not_top_m);
+	}
+}
+
 
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 static raw_spinlock_t* cedf_get_dgl_spinlock(struct task_struct *t)
@@ -230,6 +370,11 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 	/* Currently linked task is set to be unlinked. */
 	if (entry->linked) {
 		entry->linked->rt_param.linked_on = NO_CPU;
+
+#ifdef CONFIG_LITMUS_LOCKING
+		if (tsk_rt(entry->linked)->inh_task)
+			clear_inh_task_linkback(entry->linked, tsk_rt(entry->linked)->inh_task);
+#endif
 	}
 
 	/* Link new task to CPU. */
@@ -258,8 +403,14 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 				linked = tmp;
 			}
 		}
-		if (linked) /* might be NULL due to swap */
+		if (linked) { /* might be NULL due to swap */
 			linked->rt_param.linked_on = entry->cpu;
+
+#ifdef CONFIG_LITMUS_LOCKING
+			if (tsk_rt(linked)->inh_task)
+				set_inh_task_linkback(linked, tsk_rt(linked)->inh_task);
+#endif
+		}
 	}
 	entry->linked = linked;
 #ifdef WANT_ALL_SCHED_EVENTS
@@ -397,6 +548,14 @@ static noinline void cedf_job_arrival(struct task_struct* task)
 	check_for_preemptions(cluster);
 }
 
+static void cedf_track_on_release(struct bheap_node* n, void* dummy)
+{
+	struct task_struct* t = bheap2task(n);
+	TRACE_TASK(t, "released\n");
+
+	cedf_track_in_top_m(t);
+}
+
 static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 {
 	cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain);
@@ -404,6 +563,8 @@ static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 
 	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
 
+	bheap_for_all(tasks, cedf_track_on_release, NULL);
+
 	__merge_ready(&cluster->domain, tasks);
 	check_for_preemptions(cluster);
 
@@ -447,6 +608,7 @@ static noinline void job_completion(struct task_struct *t, int forced)
 	}
 
 
+
 	/* SETUP FOR THE NEXT JOB */
 
 	sched_trace_task_completion(t, forced);
@@ -466,6 +628,7 @@ static noinline void job_completion(struct task_struct *t, int forced)
 		 */
 	}
 	else {
+		cedf_untrack_in_top_m(t);
 		prepare_for_next_period(t);
 
 		if (do_backlogged_job) {
@@ -496,6 +659,7 @@ static noinline void job_completion(struct task_struct *t, int forced)
 		}
 
 		if (do_release || do_backlogged_job) {
+			cedf_track_in_top_m(t);
 			cedf_job_arrival(t);
 		}
 		else {
@@ -504,7 +668,9 @@ static noinline void job_completion(struct task_struct *t, int forced)
 	}
 	else {
 		BUG_ON(!forced);
+		/* budget was refreshed and job early released */
 		TRACE_TASK(t, "job exhausted budget while sleeping\n");
+		cedf_track_in_top_m(t);
 	}
 }
 
@@ -536,26 +702,6 @@ static enum hrtimer_restart cedf_simple_on_exhausted(struct task_struct *t)
 	return HRTIMER_NORESTART;
 }
 
-static void cedf_check_schedule(struct task_struct* t)
-{
-	int cpu;
-
-	cpu = (tsk_rt(t)->linked_on != NO_CPU) ?
-		tsk_rt(t)->linked_on : tsk_rt(t)->scheduled_on;
-	if (cpu == smp_processor_id()) {
-		TRACE_TASK(t, "is preemptable => FORCE_RESCHED\n");
-		litmus_reschedule_local();
-		set_will_schedule();
-	}
-	else if (cpu != NO_CPU) {
-		TRACE_TASK(t, "is preemptable on remote cpu (%d) => FORCE_RESCHED\n", cpu);
-		litmus_reschedule(cpu);
-	}
-	else {
-		TRACE_TASK(t, "is not running, so no rescheduling necessary.\n");
-	}
-}
-
 static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t)
 {
 	enum hrtimer_restart restart = HRTIMER_NORESTART;
561 enum hrtimer_restart restart = HRTIMER_NORESTART; 707 enum hrtimer_restart restart = HRTIMER_NORESTART;
@@ -598,7 +744,6 @@ static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t)
 	else {
 		lt_t remaining;
 		cedf_domain_t* cluster = task_cpu_cluster(t);
-		int do_prio_reeval = 0;
 		unsigned long flags;
 
 		BUG_ON(cpu != NO_CPU);
@@ -611,47 +756,74 @@ static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t)
 			TRACE_TASK(t, "blocked, postponing deadline\n");
 
 			raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
-			job_completion(t, 1); /* refreshes budget */
+			job_completion(t, 1); /* refreshes budget and pushes out deadline */
 
 #ifdef CONFIG_LITMUS_LOCKING
-			/* Decrease in base-priority is masked by inheritance, so
-			 * we do not need to recheck any prior scheduling decisions
-			 * or established inheritance relations. */
-			do_prio_reeval = (tsk_rt(t)->inh_task == NULL);
-
-			/* drop the lock to make prio propagation easy... may need to
-			 * do this all within cluster lock if there are races... */
-			raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
-
-			if (do_prio_reeval)
-				sobliv_revaluate_task(t);
-			else
-				TRACE_TASK(t, "skipping reevaluation since inheritance "
-						"masks change in base-priority.\n");
-
-
-			/* push any changed state... */
-			if (do_prio_reeval && tsk_rt(t)->used_linkback_slots) {
+			{
 				int i;
-				raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
-				/* any running task that inherits from t may need to be rescheduled */
-				for (i = find_first_bit(&tsk_rt(t)->used_linkback_slots,
-						sizeof(tsk_rt(t)->used_linkback_slots));
+				/* any linked task that inherits from 't' needs to have their
+				 * cpu-position re-evaluated. we have to do this in two passes.
+				 * pass 1: remove nodes from heap s.t. heap is in known good state.
+				 * pass 2: re-add nodes.
+				 */
+				for (i = find_first_bit(&tsk_rt(t)->used_linkback_slots, BITS_PER_BYTE*sizeof(tsk_rt(t)->used_linkback_slots));
+					i < BITS_PER_LONG;
+					i = find_next_bit(&tsk_rt(t)->used_linkback_slots, BITS_PER_BYTE*sizeof(tsk_rt(t)->used_linkback_slots), i+1))
+				{
+					struct task_struct *to_update = tsk_rt(t)->inh_task_linkbacks[i];
+					BUG_ON(!to_update);
+					if (tsk_rt(to_update)->linked_on != NO_CPU) {
+						cpu_entry_t *entry = &per_cpu(cedf_cpu_entries, tsk_rt(to_update)->linked_on);
+						BUG_ON(!binheap_is_in_heap(&entry->hn));
+						binheap_delete(&entry->hn, &cluster->cpu_heap);
+					}
+				}
+				for (i = find_first_bit(&tsk_rt(t)->used_linkback_slots, BITS_PER_BYTE*sizeof(tsk_rt(t)->used_linkback_slots));
 					i < BITS_PER_LONG;
-					i = find_next_bit(&tsk_rt(t)->used_linkback_slots,
-						sizeof(tsk_rt(t)->used_linkback_slots), i+1)) {
-					cedf_check_schedule(tsk_rt(t)->inh_task_linkbacks[i]);
+					i = find_next_bit(&tsk_rt(t)->used_linkback_slots, BITS_PER_BYTE*sizeof(tsk_rt(t)->used_linkback_slots), i+1))
+				{
+					struct task_struct *to_update = tsk_rt(t)->inh_task_linkbacks[i];
+					BUG_ON(!to_update);
+					if (tsk_rt(to_update)->linked_on != NO_CPU) {
+						cpu_entry_t *entry = &per_cpu(cedf_cpu_entries, tsk_rt(to_update)->linked_on);
+						binheap_add(&entry->hn, &cluster->cpu_heap, cpu_entry_t, hn);
+					}
 				}
-				raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 			}
 #endif
+			raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 
-			hrtimer_forward_now(&get_budget_timer(t).timer.timer,
-					ns_to_ktime(budget_remaining(t)));
-			remaining = hrtimer_get_expires_ns(&get_budget_timer(t).timer.timer);
+#ifdef CONFIG_LITMUS_LOCKING
+			/* Check our inheritance and propagate any changes forward. */
+			sobliv_revaluate_task(t);
+#endif
+			/* No need to recheck priority of AUX tasks. They will always
+			 * inherit from 't' if they are enabled. Their prio change was
+			 * captured by the cpu-heap operations above. */
 
-			TRACE_TASK(t, "rearmed timer to %ld\n", remaining);
-			restart = HRTIMER_RESTART;
+#ifdef CONFIG_LITMUS_NVIDIA
+			/* Re-eval priority of GPU interrupt threads. */
+			if(tsk_rt(t)->held_gpus && !tsk_rt(t)->hide_from_gpu)
+				recheck_gpu_owner(t);
+#endif
+
+#ifdef CONFIG_LITMUS_LOCKING
+			/* double-check that everything is okay */
+			raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
+			check_for_preemptions(cluster);
+			raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
+#endif
+
+			/* we need to set up the budget timer since we're within the callback. */
+			if (bt_flag_is_set(t, BTF_IS_TOP_M)) {
+				hrtimer_forward_now(&get_budget_timer(t).timer.timer,
+						ns_to_ktime(budget_remaining(t)));
+				remaining = hrtimer_get_expires_ns(&get_budget_timer(t).timer.timer);
+
+				TRACE_TASK(t, "rearmed timer to %ld\n", remaining);
+				restart = HRTIMER_RESTART;
+			}
 		}
 	}
 }
@@ -672,7 +844,7 @@ static void cedf_tick(struct task_struct* t)
 		tsk_rt(t)->budget.ops && budget_quantum_tracked(t) &&
 		budget_exhausted(t)) {
 		TRACE_TASK(t, "budget exhausted\n");
-		tsk_rt(t)->budget.ops->on_exhausted(t);
+		budget_state_machine(t,on_exhausted);
 	}
 }
 
@@ -1057,14 +1229,12 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 #endif
 
 	/* Do budget stuff */
-	if (tsk_rt(prev)->budget.ops) {
-		if (blocks)
-			tsk_rt(prev)->budget.ops->on_blocked(prev);
-		else if (sleep)
-			tsk_rt(prev)->budget.ops->on_sleep(prev);
-		else if (preempt)
-			tsk_rt(prev)->budget.ops->on_preempt(prev);
-	}
+	if (blocks)
+		budget_state_machine(prev,on_blocked);
+	else if (sleep)
+		budget_state_machine(prev,on_sleep);
+	else if (preempt)
+		budget_state_machine(prev,on_preempt);
 
 	/* If a task blocks we have no choice but to reschedule.
 	 */
@@ -1137,24 +1307,24 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 out_set_state:
 #endif
 
-#ifdef CONFIG_LITMUS_LOCKING
-	/* Update priority inheritance linkbacks.
-	 * A blocked task may have multiple tasks that inherit from it, but only
-	 * one of those tasks should be runnable. Provide a link-back between the
-	 * blocked task and the one that inherits from it. */
-
-	/* TODO: Support klmirqd and aux tasks */
-	/* TODO: MOVE THESE CALLS TO __increase AND __decrease TO CATCH ALL CASES.
-	   PAY ATTENTION TO RUN-STATE OF INHERITOR & INHERITEE */
-	if (next != prev) {
-		if (prev && tsk_rt(prev)->inh_task) {
-			clear_inh_task_linkback(prev, tsk_rt(prev)->inh_task);
-		}
-		if (next && tsk_rt(next)->inh_task) {
-			set_inh_task_linkback(next, tsk_rt(next)->inh_task);
-		}
-	}
-#endif
+//#ifdef CONFIG_LITMUS_LOCKING
+//	/* Update priority inheritance linkbacks.
+//	 * A blocked task may have multiple tasks that inherit from it, but only
+//	 * one of those tasks should be runnable. Provide a link-back between the
+//	 * blocked task and the one that inherits from it. */
+//
+//	/* TODO: Support klmirqd and aux tasks */
+//	/* TODO: MOVE THESE CALLS TO __increase AND __decrease TO CATCH ALL CASES.
+//	   PAY ATTENTION TO RUN-STATE OF INHERITOR & INHERITEE */
+//	if (next != prev) {
+//		if (prev && tsk_rt(prev)->inh_task) {
+//			clear_inh_task_linkback(prev, tsk_rt(prev)->inh_task);
+//		}
+//		if (next && tsk_rt(next)->inh_task) {
+//			set_inh_task_linkback(next, tsk_rt(next)->inh_task);
+//		}
+//	}
+//#endif
 
 	sched_state_task_picked();
 	raw_spin_unlock(&cluster->cluster_lock);
@@ -1226,50 +1396,53 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 	}
 
 	if (is_running(t)) {
+		cedf_track_in_top_m(t);
 		cedf_job_arrival(t);
 	}
 
 	raw_spin_unlock_irqrestore(&(cluster->cluster_lock), flags);
 }
 
-static void cedf_task_wake_up(struct task_struct *task)
+static void cedf_task_wake_up(struct task_struct *t)
 {
 	unsigned long flags;
 	cedf_domain_t *cluster;
 	lt_t now;
 
-	cluster = task_cpu_cluster(task);
+	cluster = task_cpu_cluster(t);
 
 	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
 
 	now = litmus_clock();
-	TRACE_TASK(task, "wake_up at %llu\n", now);
+	TRACE_TASK(t, "wake_up at %llu\n", now);
 
-	if (is_sporadic(task) && is_tardy(task, now)) {
-		release_at(task, now);
-		sched_trace_task_release(task);
+	if (is_sporadic(t) && is_tardy(t, now)) {
+		release_at(t, now);
+		sched_trace_task_release(t);
 	}
 	else {
 		/* periodic task model. don't force job to end.
 		 * rely on user to say when jobs complete or when budget expires. */
-		tsk_rt(task)->completed = 0;
+		tsk_rt(t)->completed = 0;
 	}
 
 #ifdef CONFIG_REALTIME_AUX_TASKS
-	if (tsk_rt(task)->has_aux_tasks && !tsk_rt(task)->hide_from_aux_tasks) {
-		TRACE_CUR("%s/%d is ready so aux tasks may not inherit.\n", task->comm, task->pid);
-		disable_aux_task_owner(task);
+	if (tsk_rt(t)->has_aux_tasks && !tsk_rt(t)->hide_from_aux_tasks) {
+		TRACE_CUR("%s/%d is ready so aux tasks may not inherit.\n", t->comm, t->pid);
+		disable_aux_task_owner(t);
 	}
 #endif
 
 #ifdef CONFIG_LITMUS_NVIDIA
-	if (tsk_rt(task)->held_gpus && !tsk_rt(task)->hide_from_gpu) {
-		TRACE_CUR("%s/%d is ready so gpu klmirqd tasks may not inherit.\n", task->comm, task->pid);
-		disable_gpu_owner(task);
+	if (tsk_rt(t)->held_gpus && !tsk_rt(t)->hide_from_gpu) {
+		TRACE_CUR("%s/%d is ready so gpu klmirqd tasks may not inherit.\n", t->comm, t->pid);
+		disable_gpu_owner(t);
 	}
 #endif
 
-	cedf_job_arrival(task);
+	budget_state_machine(t,on_wakeup);
+	cedf_job_arrival(t);
+
 	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 }
 
@@ -1321,9 +1494,15 @@ static void cedf_task_exit(struct task_struct * t)
 	/* unlink if necessary */
 	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
 
+	if (tsk_rt(t)->inh_task) {
+		WARN_ON(1);
+		clear_inh_task_linkback(t, tsk_rt(t)->inh_task);
+	}
+
 	/* disable budget enforcement */
+	cedf_untrack_in_top_m(t);
 	if (tsk_rt(t)->budget.ops)
-		tsk_rt(t)->budget.ops->on_exit(t);
+		budget_state_machine(t,on_exit);
 
 #ifdef CONFIG_REALTIME_AUX_TASKS
 	/* make sure we clean up on our way out */
@@ -1368,40 +1547,48 @@ static struct budget_tracker_ops cedf_drain_simple_ops =
 	.on_sleep = simple_on_sleep,
 	.on_exit = simple_on_exit,
 
-	.on_exhausted = cedf_simple_on_exhausted,
-
+	.on_wakeup = NULL,
 	.on_inherit = NULL,
 	.on_disinherit = NULL,
+	.on_enter_top_m = NULL,
+	.on_exit_top_m = NULL,
+
+	.on_exhausted = cedf_simple_on_exhausted,
 };
 
 static struct budget_tracker_ops cedf_drain_sobliv_ops =
 {
-	.on_scheduled = sobliv_on_scheduled,
+	.on_scheduled = NULL,
+	.on_preempt = NULL,
+	.on_sleep = NULL,
+
 	.on_blocked = sobliv_on_blocked,
-	.on_preempt = sobliv_on_preempt,
-	.on_sleep = sobliv_on_sleep,
+	.on_wakeup = sobliv_on_wakeup,
 	.on_exit = sobliv_on_exit,
-
-	.on_exhausted = cedf_sobliv_on_exhausted,
-
 	.on_inherit = sobliv_on_inherit,
 	.on_disinherit = sobliv_on_disinherit,
+	.on_enter_top_m = sobliv_on_enter_top_m,
+	.on_exit_top_m = sobliv_on_exit_top_m,
+
+	.on_exhausted = cedf_sobliv_on_exhausted,
 };
 
 static long cedf_admit_task(struct task_struct* tsk)
 {
+	struct budget_tracker_ops* ops = NULL;
+
 	if (remote_cluster(task_cpu(tsk)) != task_cpu_cluster(tsk))
 		return -EINVAL;
 
 	if (budget_enforced(tsk) || budget_signalled(tsk)) {
 		switch(get_drain_policy(tsk)) {
 			case DRAIN_SIMPLE:
-				init_budget_tracker(&tsk_rt(tsk)->budget, &cedf_drain_simple_ops);
+				ops = &cedf_drain_simple_ops;
 				break;
 			case DRAIN_SOBLIV:
 				/* budget_policy and budget_signal_policy cannot be quantum-based */
 				if (!budget_quantum_tracked(tsk) && budget_precisely_tracked(tsk)) {
-					init_budget_tracker(&tsk_rt(tsk)->budget, &cedf_drain_sobliv_ops);
+					ops = &cedf_drain_sobliv_ops;
 				}
 				else {
 					TRACE_TASK(tsk, "QUANTUM_ENFORCEMENT and QUANTUM_SIGNALS is "
@@ -1415,6 +1602,8 @@ static long cedf_admit_task(struct task_struct* tsk)
 		}
 	}
 
+	init_budget_tracker(&tsk_rt(tsk)->budget, ops);
+
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	INIT_BINHEAP_HANDLE(&tsk_rt(tsk)->hp_blocked_tasks,
 			edf_max_heap_base_priority_order);
@@ -1491,24 +1680,18 @@ static int __increase_priority_inheritance(struct task_struct* t,
 		sched_trace_eff_prio_change(t, prio_inh);
 
 		/* clear out old inheritance relation */
-		if (NULL != old_prio_inh &&
-			NULL != get_budget_timer(t).ops->on_disinherit &&
-			NULL != get_budget_timer(old_prio_inh).ops->on_disinherit) {
-			get_budget_timer(t).ops->on_disinherit(t, old_prio_inh);
-		}
-		if (old_prio_inh)
+		if (old_prio_inh) {
+			budget_state_machine2(t,old_prio_inh,on_disinherit);
 			clear_inh_task_linkback(t, old_prio_inh);
+		}
 
 		TRACE_TASK(t, "inherits priority from %s/%d\n",
 				prio_inh->comm, prio_inh->pid);
 		tsk_rt(t)->inh_task = prio_inh;
 
 		/* update inheritance relation */
-		if (prio_inh &&
-			NULL != get_budget_timer(t).ops->on_inherit &&
-			NULL != get_budget_timer(prio_inh).ops->on_inherit) {
-			get_budget_timer(t).ops->on_inherit(t, prio_inh);
-		}
+		if (prio_inh)
+			budget_state_machine2(t,prio_inh,on_inherit);
 
 		linked_on = tsk_rt(t)->linked_on;
 
@@ -1582,8 +1765,14 @@ static int __increase_priority_inheritance(struct task_struct* t,
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	}
 	else {
+		/* Occurrence is okay under two scenarios:
+		 * 1. Fine-grain nested locks (no compiled DGL support): Concurrent
+		 *    updates are chasing each other through the wait-for chain.
+		 * 2. Budget exhaustion caused the HP waiter to lose its priority, but
+		 *    the lock structure hasn't yet been updated (but soon will be).
+		 */
 		TRACE_TASK(t, "Spurious invalid priority increase. "
-				"Inheritance request: %s/%d [eff_prio = %s/%d] to inherit from %s/%d\n"
+				"Inheritance request: %s/%d [eff_prio = %s/%d] to inherit from %s/%d"
 				"Occurance is likely okay: probably due to (hopefully safe) concurrent priority updates.\n",
 				t->comm, t->pid,
 				effective_priority(t)->comm, effective_priority(t)->pid,
@@ -1614,9 +1803,9 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str
 #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
 	if(tsk_rt(t)->held_gpus) {
 		int i;
-		for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus));
+		for(i = find_first_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus));
 			i < NV_DEVICE_NUM;
-			i = find_next_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus), i+1)) {
+			i = find_next_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus), i+1)) {
 			pai_check_priority_increase(t, i);
 		}
 	}
@@ -1688,13 +1877,10 @@ static int __decrease_priority_inheritance(struct task_struct* t,
 	}
 
 	/* clear out old inheritance relation */
-	if (NULL != old_prio_inh &&
-		NULL != get_budget_timer(t).ops->on_disinherit &&
-		NULL != get_budget_timer(old_prio_inh).ops->on_disinherit) {
-		get_budget_timer(t).ops->on_disinherit(t, old_prio_inh);
-	}
-	if (old_prio_inh)
+	if (old_prio_inh) {
+		budget_state_machine2(t,old_prio_inh,on_disinherit);
 		clear_inh_task_linkback(t, old_prio_inh);
+	}
 
 	/* A job only stops inheriting a priority when it releases a
 	 * resource. Thus we can make the following assumption.*/
@@ -1707,11 +1893,8 @@ static int __decrease_priority_inheritance(struct task_struct* t,
 	/* set up new inheritance relation */
 	tsk_rt(t)->inh_task = prio_inh;
 
-	if (prio_inh &&
-		NULL != get_budget_timer(t).ops->on_inherit &&
-		NULL != get_budget_timer(prio_inh).ops->on_inherit) {
-		get_budget_timer(t).ops->on_inherit(t, prio_inh);
-	}
+	if (prio_inh)
+		budget_state_machine2(t,prio_inh,on_inherit);
 
 	if(tsk_rt(t)->scheduled_on != NO_CPU) {
 		TRACE_TASK(t, "is scheduled.\n");
@@ -1792,9 +1975,9 @@ static void decrease_priority_inheritance(struct task_struct* t,
 #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
 	if(tsk_rt(t)->held_gpus) {
 		int i;
-		for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus));
+		for(i = find_first_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus));
 			i < NV_DEVICE_NUM;
-			i = find_next_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus), i+1)) {
+			i = find_next_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus), i+1)) {
 			pai_check_priority_decrease(t, i);
 		}
 	}
@@ -2255,6 +2438,10 @@ static long cedf_activate_plugin(void)
 		raw_spin_lock_init(&cedf[i].dgl_lock);
 #endif
 
+		cedf[i].top_m_size = 0;
+		INIT_BINHEAP_HANDLE(&cedf[i].top_m, cedf_min_heap_base_priority_order);
+		INIT_BINHEAP_HANDLE(&cedf[i].not_top_m, cedf_max_heap_base_priority_order);
+
 		for_each_online_cpu(cpu) {
 			/* check if the cpu is already in a cluster */
 			for (j = 0; j < num_clusters; j++)
@@ -2320,7 +2507,6 @@ static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = {
 	.plugin_name = "C-EDF",
 	.finish_switch = cedf_finish_switch,
 	.tick = cedf_tick,
-	.check_schedule = cedf_check_schedule,
 	.task_new = cedf_task_new,
 	.complete_job = complete_job,
 	.task_exit = cedf_task_exit,
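
Note: cedf_track_in_top_m()/cedf_untrack_in_top_m() above maintain two heaps keyed on base priority: top_m is ordered so its root is the mth-highest (weakest) member, i.e. the eviction candidate, while not_top_m exposes its strongest member as the promotion candidate. A self-contained sketch of the insertion rule, with linear scans standing in for the binheap operations (all names hypothetical, not part of the patch):

/* standalone model of the top-m insert path; compile with any C compiler */
#include <stdio.h>

#define M 2  /* cluster size, illustrative */

static int top_m[M];      /* priorities of top-m members */
static int top_m_size;

static int weakest_idx(void)  /* index of the mth-highest member */
{
	int i, w = 0;
	for (i = 1; i < top_m_size; i++)
		if (top_m[i] < top_m[w])
			w = i;
	return w;
}

/* returns the priority pushed to not-top-m, or -1 if none */
static int track_in_top_m(int prio)
{
	int w;
	if (top_m_size < M) {         /* room: add unconditionally */
		top_m[top_m_size++] = prio;
		return -1;
	}
	w = weakest_idx();
	if (prio > top_m[w]) {        /* evict the mth-highest member */
		int evicted = top_m[w];
		top_m[w] = prio;
		return evicted;
	}
	return prio;                  /* goes straight to not-top-m */
}

int main(void)
{
	printf("%d\n", track_in_top_m(10)); /* -1: added           */
	printf("%d\n", track_in_top_m(30)); /* -1: added           */
	printf("%d\n", track_in_top_m(20)); /* 10: evicts weakest  */
	printf("%d\n", track_in_top_m(5));  /*  5: not top-m       */
	return 0;
}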
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 0756aaddb390..ab97d59c9587 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -1284,9 +1284,9 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str
 #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
 	if(tsk_rt(t)->held_gpus) {
 		int i;
-		for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus));
+		for(i = find_first_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus));
 			i < NV_DEVICE_NUM;
-			i = find_next_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus), i+1)) {
+			i = find_next_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus), i+1)) {
 			pai_check_priority_increase(t, i);
 		}
 	}
@@ -1394,9 +1394,9 @@ static void decrease_priority_inheritance(struct task_struct* t,
 #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
 	if(tsk_rt(t)->held_gpus) {
 		int i;
-		for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus));
+		for(i = find_first_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus));
 			i < NV_DEVICE_NUM;
-			i = find_next_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus), i+1)) {
+			i = find_next_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus), i+1)) {
 			pai_check_priority_decrease(t, i);
 		}
 	}
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c
index 60b58bb29ac4..eadd4fb8e5a4 100644
--- a/litmus/sched_litmus.c
+++ b/litmus/sched_litmus.c
@@ -151,8 +151,8 @@ litmus_schedule(struct rq *rq, struct task_struct *prev)
 		next->rt_param.stack_in_use = rq->cpu;
 		next->se.exec_start = rq->clock;
 
-		if (is_realtime(next) && tsk_rt(next)->budget.ops)
-			tsk_rt(next)->budget.ops->on_scheduled(next);
+		if (is_realtime(next))
+			budget_state_machine(next,on_scheduled);
 	}
 
 	return next;
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
index 74bf6b1d2ce4..0d6cb534be8b 100644
--- a/litmus/sched_plugin.c
+++ b/litmus/sched_plugin.c
@@ -92,10 +92,6 @@ static void litmus_dummy_tick(struct task_struct* tsk)
 {
 }
 
-static void litmus_dummy_check_schedule(struct task_struct* tsk)
-{
-}
-
 static long litmus_dummy_admit_task(struct task_struct* tsk)
 {
 	printk(KERN_CRIT "LITMUS^RT: Linux plugin rejects %s/%d.\n",
@@ -247,7 +243,6 @@ struct sched_plugin linux_sched_plugin = {
 	.complete_job = litmus_dummy_complete_job,
 	.schedule = litmus_dummy_schedule,
 	.finish_switch = litmus_dummy_finish_switch,
-	.check_schedule = litmus_dummy_check_schedule,
 	.activate_plugin = litmus_dummy_activate_plugin,
 	.deactivate_plugin = litmus_dummy_deactivate_plugin,
 	.compare = litmus_dummy_compare,
@@ -305,7 +300,6 @@ int register_sched_plugin(struct sched_plugin* plugin)
 	CHECK(finish_switch);
 	CHECK(schedule);
 	CHECK(tick);
-	CHECK(check_schedule);
 	CHECK(task_wake_up);
 	CHECK(task_exit);
 	CHECK(task_block);