diff options
author | Bjoern B. Brandenburg <bbb@jupiter-cs.cs.unc.edu> | 2007-02-01 18:04:10 -0500 |
---|---|---|
committer | Bjoern B. Brandenburg <bbb@jupiter-cs.cs.unc.edu> | 2007-02-01 18:04:10 -0500 |
commit | d850b453e3969eb1c611345d9f46c4a50bb4057b (patch) | |
tree | bad8d5677d3f2b30e437e4dedc42372cdcf141c6 /kernel | |
parent | c72efb7abd520758e86317c8eb451ed607042ad9 (diff) |
got some cruft out of litmus.c
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/litmus.c | 290 | ||||
-rw-r--r-- | kernel/sched_plugin.c | 17 |
2 files changed, 96 insertions, 211 deletions
diff --git a/kernel/litmus.c b/kernel/litmus.c index 7e97df948a..26471c3e31 100644 --- a/kernel/litmus.c +++ b/kernel/litmus.c | |||
@@ -1,11 +1,6 @@ | |||
1 | /* | 1 | /* litmus.c -- Implementation of the LITMUS syscalls, the LITMUS intialization, |
2 | * Variable and function definitions | 2 | * and the common tick function. |
3 | * that help alter the underlying scheduler | ||
4 | * on the fly. | ||
5 | * | ||
6 | */ | 3 | */ |
7 | |||
8 | |||
9 | #include <asm/uaccess.h> | 4 | #include <asm/uaccess.h> |
10 | #include <linux/sysrq.h> | 5 | #include <linux/sysrq.h> |
11 | 6 | ||
@@ -19,77 +14,72 @@ | |||
19 | */ | 14 | */ |
20 | 15 | ||
21 | 16 | ||
22 | /* External variables declared in plugin driver */ | ||
23 | extern sched_plugin_t *curr_sched_plugin; | ||
24 | extern sched_plugin_t linux_sched_plugin; | ||
25 | |||
26 | /* Variables that govern the scheduling process */ | 17 | /* Variables that govern the scheduling process */ |
27 | spolicy sched_policy = SCHED_DEFAULT; | 18 | spolicy sched_policy = SCHED_DEFAULT; |
28 | int sched_options = 0; | 19 | int sched_options = 0; |
29 | unsigned long slot_size = DEFAULT_SLOT_SIZE; | ||
30 | unsigned long stagger_offset = DEFAULT_STAGGER; /* TODO: get rid of software stagger */ | ||
31 | 20 | ||
21 | /* avoid races with multiple task wake-ups */ | ||
32 | spinlock_t litmus_task_set_lock = SPIN_LOCK_UNLOCKED; | 22 | spinlock_t litmus_task_set_lock = SPIN_LOCK_UNLOCKED; |
33 | 23 | ||
34 | /* Use linux runqueues to store currently running CPU task by default */ | 24 | /* Use linux runqueues to store currently running CPU task by default */ |
35 | int sched_type = INDIRECT_SWITCH; | 25 | int sched_type = INDIRECT_SWITCH; |
36 | 26 | ||
37 | 27 | ||
38 | /* This is a flag for switching the system into RT mode when it is booted up | 28 | /* This is a flag for switching the system into RT mode when it is booted up |
39 | * In RT-mode non-realtime tasks are shut down and scheduled as spare time available | 29 | * In RT-mode non-realtime tasks are shut down and scheduled as spare |
40 | * Even though the system may switch scheduling plugin on the fly | 30 | * time available |
41 | * it must continue to mimic linux scheduler until rt_mode is explicitly | 31 | */ |
42 | * enabled. | ||
43 | */ | ||
44 | 32 | ||
45 | /* The system is booting in non-realtime mode */ | 33 | /* The system is booting in non-realtime mode */ |
46 | atomic_t rt_mode = ATOMIC_INIT(MODE_NON_RT); | 34 | atomic_t rt_mode = ATOMIC_INIT(MODE_NON_RT); |
47 | /* Here we specify a mode change to be made */ | 35 | /* Here we specify a mode change to be made */ |
48 | atomic_t new_mode = ATOMIC_INIT(MODE_NON_RT); | 36 | atomic_t new_mode = ATOMIC_INIT(MODE_NON_RT); |
49 | /* Number of RT tasks that exist in the system */ | 37 | /* Number of RT tasks that exist in the system */ |
50 | atomic_t n_rt_tasks = ATOMIC_INIT(0); | 38 | atomic_t n_rt_tasks = ATOMIC_INIT(0); |
51 | atomic_t *n_rt_tasks_ptr = &n_rt_tasks; | 39 | |
52 | /* Only one process can perform mode change */ | 40 | /* Only one process can perform mode change */ |
53 | static queuelock_t mode_change_lock; | 41 | static queuelock_t mode_change_lock; |
42 | |||
54 | /* A time instant when we switched to RT mode */ | 43 | /* A time instant when we switched to RT mode */ |
55 | volatile unsigned long rt_start_time = 0; | 44 | volatile jiffie_t rt_start_time = 0; |
45 | |||
46 | |||
47 | |||
56 | /** | 48 | /** |
57 | * sys_set_rt_mode | 49 | * sys_set_rt_mode |
58 | * @newmode: new mode the scheduler must be switched to | 50 | * @newmode: new mode the scheduler must be switched to |
59 | * External syscall for setting the RT mode flag | 51 | * External syscall for setting the RT mode flag |
60 | * Returns EINVAL if mode is not recognized or mode transition is not permitted | 52 | * Returns EINVAL if mode is not recognized or mode transition is |
61 | * On success 0 is returned | 53 | * not permitted |
62 | * TODO: implement transition table | 54 | * On success 0 is returned |
63 | */ | 55 | * |
56 | * FIXME: In a "real" OS we cannot just let any user switch the mode... | ||
57 | */ | ||
64 | asmlinkage long sys_set_rt_mode(int newmode) | 58 | asmlinkage long sys_set_rt_mode(int newmode) |
65 | { | 59 | { |
66 | |||
67 | if ((newmode == MODE_NON_RT) || (newmode == MODE_RT_RUN)) { | 60 | if ((newmode == MODE_NON_RT) || (newmode == MODE_RT_RUN)) { |
68 | unsigned long flags; | ||
69 | local_irq_save(flags); | ||
70 | printk(KERN_INFO "real-time mode switch to %s\n", | 61 | printk(KERN_INFO "real-time mode switch to %s\n", |
71 | (newmode == MODE_RT_RUN ? "rt" : "non-rt")); | 62 | (newmode == MODE_RT_RUN ? "rt" : "non-rt")); |
72 | local_irq_restore(flags); | ||
73 | atomic_set(&new_mode, newmode); | 63 | atomic_set(&new_mode, newmode); |
74 | return 0; | 64 | return 0; |
75 | } | 65 | } |
76 | return -EINVAL; | 66 | return -EINVAL; |
77 | } | 67 | } |
78 | 68 | ||
79 | /** | 69 | /* |
80 | * sys_set_task_rt_param | 70 | * sys_set_task_rt_param |
81 | * @pid: Pid of the task which scheduling parameters must be changed | 71 | * @pid: Pid of the task which scheduling parameters must be changed |
82 | * @param: New real-time extension parameters such as the execution cost and period | 72 | * @param: New real-time extension parameters such as the execution cost and |
83 | * Syscall for manipulating with task rt extension params | 73 | * period |
84 | * Returns EFAULT if param is NULL. | 74 | * Syscall for manipulating with task rt extension params |
85 | * ESRCH if pid is not corresponding | 75 | Returns EFAULT if param is NULL. |
86 | * to a valid task. | 76 | ESRCH if pid is not corresponding |
87 | * EINVAL if either period or execution cost is <=0 | 77 | * to a valid task. |
88 | * 0 if success | 78 | * EINVAL if either period or execution cost is <=0 |
89 | * | 79 | * 0 if success |
90 | * | 80 | * |
91 | * FIXME: This code is racy during real-time mode. | 81 | * FIXME: This code is racy during real-time mode. |
92 | */ | 82 | */ |
93 | asmlinkage long sys_set_rt_task_param(pid_t pid, rt_param_t __user * param) | 83 | asmlinkage long sys_set_rt_task_param(pid_t pid, rt_param_t __user * param) |
94 | { | 84 | { |
95 | rt_param_t tp; | 85 | rt_param_t tp; |
@@ -103,9 +93,8 @@ asmlinkage long sys_set_rt_task_param(pid_t pid, rt_param_t __user * param) | |||
103 | retval = -EFAULT; | 93 | retval = -EFAULT; |
104 | goto out; | 94 | goto out; |
105 | } | 95 | } |
106 | /* Task search and manipulation must be protected */ | ||
107 | 96 | ||
108 | 97 | /* Task search and manipulation must be protected */ | |
109 | read_lock_irq(&tasklist_lock); | 98 | read_lock_irq(&tasklist_lock); |
110 | if (!(target = find_task_by_pid(pid))) { | 99 | if (!(target = find_task_by_pid(pid))) { |
111 | retval = -ESRCH; | 100 | retval = -ESRCH; |
@@ -134,11 +123,11 @@ asmlinkage long sys_set_rt_task_param(pid_t pid, rt_param_t __user * param) | |||
134 | return retval; | 123 | return retval; |
135 | } | 124 | } |
136 | 125 | ||
137 | /** Getter of task's RT params | 126 | /* Getter of task's RT params |
138 | * returns EINVAL if param or pid is NULL | 127 | * returns EINVAL if param or pid is NULL |
139 | * returns ESRCH if pid does not correspond to a valid task | 128 | * returns ESRCH if pid does not correspond to a valid task |
140 | * returns EFAULT if copying of parameters has failed. | 129 | * returns EFAULT if copying of parameters has failed. |
141 | */ | 130 | */ |
142 | asmlinkage long sys_get_rt_task_param(pid_t pid, rt_param_t __user * param) | 131 | asmlinkage long sys_get_rt_task_param(pid_t pid, rt_param_t __user * param) |
143 | { | 132 | { |
144 | int retval = -EINVAL; | 133 | int retval = -EINVAL; |
@@ -164,50 +153,25 @@ asmlinkage long sys_get_rt_task_param(pid_t pid, rt_param_t __user * param) | |||
164 | 153 | ||
165 | } | 154 | } |
166 | 155 | ||
167 | /*static inline int get_ok_sched(int cpu) | 156 | /* |
168 | { | 157 | * sys_prepare_rt_task |
169 | return atomic_read(cpu_okay_to_sched(cpu)); | 158 | * @pid: Pid of the task we want to prepare for RT mode |
170 | } | 159 | * Syscall for adding a task to RT queue, plugin dependent. |
171 | static inline void set_ok_sched(int cpu, int val) | 160 | * Must be called before RT tasks are going to start up. |
172 | { | 161 | * Returns EPERM if current plugin does not define prepare operation |
173 | atomic_set(cpu_okay_to_sched(cpu), val); | 162 | * or scheduling policy does not allow the operation. |
174 | } | 163 | * ESRCH if pid does not correspond to a valid task. |
175 | */ | 164 | * EINVAL if a task is non-realtime or in invalid state |
176 | 165 | * from underlying plugin function | |
177 | /* Returns true if okay_to_sched for current CPU is set to 1 */ | 166 | * EAGAIN if a task is not in the right state |
178 | 167 | * ENOMEM if there is no memory space to handle this task | |
179 | /*inline int get_this_ok_sched(void) | 168 | * 0 if success |
180 | { | 169 | */ |
181 | return atomic_dec_and_test(thisoksched()); | 170 | asmlinkage long sys_prepare_rt_task(pid_t pid) |
182 | } | ||
183 | inline void set_this_ok_sched(int val) | ||
184 | { | ||
185 | atomic_set(thisoksched(), val); | ||
186 | } | ||
187 | */ | ||
188 | /** | ||
189 | * sys_prepare_rt_task | ||
190 | * @pid: Pid of the task we want to prepare for RT mode | ||
191 | * Syscall for adding a task to RT queue, plugin dependent. | ||
192 | * Must be called before RT tasks are going to start up. | ||
193 | * Returns EPERM if current plugin does not define prepare operation | ||
194 | * or scheduling policy does not allow the operation. | ||
195 | * ESRCH if pid does not correspond to a valid task. | ||
196 | * EINVAL if a task is non-realtime or in invalid state | ||
197 | * from underlying plugin function | ||
198 | * EAGAIN if a task is not in the right state | ||
199 | * ENOMEM if there is no memory space to handle this task | ||
200 | * 0 if success | ||
201 | * | ||
202 | * | ||
203 | * FIXME: The cpu parameter is currently ignored. It should be removed | ||
204 | * altogether | ||
205 | */ | ||
206 | asmlinkage long sys_prepare_rt_task(pid_t pid, int dummy) | ||
207 | { | 171 | { |
208 | int retval = -EINVAL; | 172 | int retval = -EINVAL; |
209 | struct task_struct *target = 0; | 173 | struct task_struct *target = 0; |
210 | /* If a plugin does not define preparation mode then nothing to do */ | 174 | /* If a plugin does not define preparation mode then nothing to do */ |
211 | if (curr_sched_plugin->prepare_task == 0 | 175 | if (curr_sched_plugin->prepare_task == 0 |
212 | || sched_policy == SCHED_DEFAULT) { | 176 | || sched_policy == SCHED_DEFAULT) { |
213 | retval = -EPERM; | 177 | retval = -EPERM; |
@@ -229,90 +193,35 @@ asmlinkage long sys_prepare_rt_task(pid_t pid, int dummy) | |||
229 | atomic_inc(&n_rt_tasks); | 193 | atomic_inc(&n_rt_tasks); |
230 | target->rt_param.is_realtime = 1; | 194 | target->rt_param.is_realtime = 1; |
231 | } | 195 | } |
232 | out_prepare_unlock: | 196 | out_prepare_unlock: |
233 | read_unlock_irq(&tasklist_lock); | 197 | read_unlock_irq(&tasklist_lock); |
234 | out_prepare: | 198 | out_prepare: |
235 | return retval; | 199 | return retval; |
236 | } | 200 | } |
237 | 201 | ||
238 | /* External reference to kill, probably need to move at the header */ | 202 | /* |
239 | extern long sys_kill(pid_t, int); | 203 | * This is the crucial function for periodic task implementation, |
240 | /** | 204 | * It checks if a task is periodic, checks if such kind of sleep |
241 | * Task tear down, removes a task from plugin structures, sets its | 205 | * is permitted and calls plugin-specific sleep, which puts the |
242 | * properties to non-realtime and sends a signal (SIGKILL suggested, so that the task shuts down immediately) | 206 | * task into a wait array. |
243 | * returns EINVAL if the task is not a realtime task | 207 | * returns 0 on successful wakeup |
244 | * returns EPERM if the task is current, or current kernel policy does not permit | 208 | * returns EPERM if current conditions do not permit such sleep |
245 | * realtime operations | 209 | * returns EINVAL if current task is not able to go to sleep |
246 | * returns ESRCH if the pid does not correspond to a valid task | 210 | */ |
247 | * Otherwise the result of sys_kill is returned | ||
248 | */ | ||
249 | asmlinkage long sys_tear_down_task(pid_t pid, int sig) | ||
250 | { | ||
251 | int retval = -EINVAL; | ||
252 | struct task_struct *target = 0; | ||
253 | if (curr_sched_plugin->tear_down == 0 | ||
254 | || sched_policy == SCHED_DEFAULT) { | ||
255 | retval = -EPERM; | ||
256 | goto out; | ||
257 | } | ||
258 | read_lock_irq(&tasklist_lock); | ||
259 | if (!(target = find_task_by_pid(pid))) { | ||
260 | retval = -ESRCH; | ||
261 | goto out_unlock; | ||
262 | } | ||
263 | if (target == current) { | ||
264 | retval = -EPERM; | ||
265 | goto out_unlock; | ||
266 | } | ||
267 | printk(KERN_WARNING "task %d: use of sys_tear_down_task is deprecated! " | ||
268 | "Just use kill instead.\n", current->pid); | ||
269 | /* if (is_realtime(target)) { | ||
270 | // the task is not realtime now, there is no harm | ||
271 | // because it is going to be killed | ||
272 | retval = curr_sched_plugin->tear_down(target); | ||
273 | // drop all the RT params of the task | ||
274 | clear_task(target); | ||
275 | target->state = TASK_STOPPED; | ||
276 | } | ||
277 | */ | ||
278 | read_unlock_irq(&tasklist_lock); | ||
279 | |||
280 | // call syskill | ||
281 | return sys_kill(pid, sig); | ||
282 | out_unlock: | ||
283 | read_unlock_irq(&tasklist_lock); | ||
284 | out: | ||
285 | return retval; | ||
286 | } | ||
287 | |||
288 | /** | ||
289 | * This is the crucial function for periodic task implementation, | ||
290 | * It checks if a task is periodic, checks if such kind of sleep is permitted | ||
291 | * and calls plugin-specific sleep, which puts the task into a wait array. | ||
292 | * returns 0 on successful wakeup | ||
293 | * returns EPERM if current conditions do not permit such sleep | ||
294 | * returns EINVAL if current task is not able to go to sleep | ||
295 | */ | ||
296 | asmlinkage long sys_sleep_next_period(void) | 211 | asmlinkage long sys_sleep_next_period(void) |
297 | { | 212 | { |
298 | int retval = -EPERM; | 213 | int retval = -EPERM; |
299 | // Periodic sleep is not permitted for linux scheduler | ||
300 | // because quantum runqueues are not merged | ||
301 | if (curr_sched_plugin->sleep_next_period == 0 | ||
302 | || sched_policy == SCHED_DEFAULT) { | ||
303 | goto out; | ||
304 | } | ||
305 | if (!is_realtime(current)) { | 214 | if (!is_realtime(current)) { |
306 | retval = -EINVAL; | 215 | retval = -EINVAL; |
307 | goto out; | 216 | goto out; |
308 | } | 217 | } |
309 | // Task with negative or zero period cannot sleep | 218 | /* Task with negative or zero period cannot sleep */ |
310 | if (get_rt_period(current) <= 0) { | 219 | if (get_rt_period(current) <= 0) { |
311 | retval = -EINVAL; | 220 | retval = -EINVAL; |
312 | goto out; | 221 | goto out; |
313 | } | 222 | } |
314 | /* Basically the plugin have to put the task into | 223 | /* The plugin has to put the task into an |
315 | * appropriate quantum queue and call schedule | 224 | * appropriate queue and call schedule |
316 | */ | 225 | */ |
317 | retval = curr_sched_plugin->sleep_next_period(); | 226 | retval = curr_sched_plugin->sleep_next_period(); |
318 | out: | 227 | out: |
@@ -325,12 +234,10 @@ void set_sched_options(int options) | |||
325 | sched_options = options; | 234 | sched_options = options; |
326 | } | 235 | } |
327 | 236 | ||
328 | 237 | /* The LITMUS tick function. It manages the change to and from real-time mode | |
329 | /* Wrapper code for tick function - called by all schedulers before | 238 | * and then calls the plugin's tick function. |
330 | entering their own code. | 239 | */ |
331 | */ | 240 | int __sched rt_scheduler_tick(void) |
332 | |||
333 | int rt_scheduler_tick(void) | ||
334 | { | 241 | { |
335 | /* Check for mode change */ | 242 | /* Check for mode change */ |
336 | if ((get_rt_mode() != atomic_read(&new_mode))) { | 243 | if ((get_rt_mode() != atomic_read(&new_mode))) { |
@@ -368,40 +275,9 @@ int rt_scheduler_tick(void) | |||
368 | return curr_sched_plugin->algo_scheduler_tick(); | 275 | return curr_sched_plugin->algo_scheduler_tick(); |
369 | } | 276 | } |
370 | 277 | ||
371 | inline int get_sched_options(void) | ||
372 | { | ||
373 | return sched_options; | ||
374 | } | ||
375 | |||
376 | /* This code must be non-preemptable */ | ||
377 | asmlinkage spolicy sys_sched_setpolicy(spolicy newpolicy) | 278 | asmlinkage spolicy sys_sched_setpolicy(spolicy newpolicy) |
378 | { | 279 | { |
379 | /* Dynamic policy change is disabled at the moment */ | 280 | /* Dynamic policy change is disabled at the moment */ |
380 | /* | ||
381 | spolicy cpolicy = sched_policy; | ||
382 | |||
383 | if (newpolicy >= SCHED_BEG && newpolicy <= SCHED_END) { | ||
384 | sched_policy = newpolicy; | ||
385 | switch (sched_policy) { | ||
386 | case SCHED_GLOBAL_EDF: | ||
387 | initialize_global_edf(); | ||
388 | break; | ||
389 | case SCHED_PART_EDF: | ||
390 | initialize_part_edf(); | ||
391 | break; | ||
392 | case SCHED_PFAIR: | ||
393 | initialize_scheduler_pfair(); | ||
394 | break; | ||
395 | case SCHED_PFAIR_STAGGER: | ||
396 | initialize_scheduler_pfair_stagger(); | ||
397 | break; | ||
398 | default: | ||
399 | initialize_scheduler_linux(); | ||
400 | break; | ||
401 | } | ||
402 | return cpolicy; | ||
403 | } else | ||
404 | */ | ||
405 | return SCHED_INVALID; | 281 | return SCHED_INVALID; |
406 | } | 282 | } |
407 | 283 | ||
@@ -425,6 +301,8 @@ asmlinkage int sys_scheduler_setup(int cmd, void __user *parameter) | |||
425 | * always preempted (if it is not a real-time task). | 301 | * always preempted (if it is not a real-time task). |
426 | */ | 302 | */ |
427 | 303 | ||
304 | int sys_kill(int pid, int sig); | ||
305 | |||
428 | static void sysrq_handle_toGgle_rt_mode(int key, struct tty_struct *tty) | 306 | static void sysrq_handle_toGgle_rt_mode(int key, struct tty_struct *tty) |
429 | { | 307 | { |
430 | sys_set_rt_mode(get_rt_mode() == MODE_NON_RT); | 308 | sys_set_rt_mode(get_rt_mode() == MODE_NON_RT); |
@@ -524,6 +402,7 @@ void litmus_dummy_wake_up_task(struct task_struct *task); | |||
524 | void litmus_dummy_task_blocks(struct task_struct *task); | 402 | void litmus_dummy_task_blocks(struct task_struct *task); |
525 | long litmus_dummy_tear_down(struct task_struct *task); | 403 | long litmus_dummy_tear_down(struct task_struct *task); |
526 | int litmus_dummy_scheduler_setup(int cmd, void __user *parameter); | 404 | int litmus_dummy_scheduler_setup(int cmd, void __user *parameter); |
405 | long litmus_dummy_sleep_next_period(void); | ||
527 | 406 | ||
528 | #define CHECK(func) {\ | 407 | #define CHECK(func) {\ |
529 | if (!curr_sched_plugin->func) \ | 408 | if (!curr_sched_plugin->func) \ |
@@ -568,6 +447,7 @@ out: | |||
568 | CHECK(task_blocks); | 447 | CHECK(task_blocks); |
569 | CHECK(prepare_task); | 448 | CHECK(prepare_task); |
570 | CHECK(scheduler_setup); | 449 | CHECK(scheduler_setup); |
450 | CHECK(sleep_next_period); | ||
571 | 451 | ||
572 | #ifdef CONFIG_MAGIC_SYSRQ | 452 | #ifdef CONFIG_MAGIC_SYSRQ |
573 | /* offer some debugging help */ | 453 | /* offer some debugging help */ |
diff --git a/kernel/sched_plugin.c b/kernel/sched_plugin.c index 3e7e90b767..31771f7c51 100644 --- a/kernel/sched_plugin.c +++ b/kernel/sched_plugin.c | |||
@@ -20,7 +20,7 @@ | |||
20 | */ | 20 | */ |
21 | void linux_sched_init(void); | 21 | void linux_sched_init(void); |
22 | void linux_sched_init_smp(void); | 22 | void linux_sched_init_smp(void); |
23 | int linux_migration_init(void); | 23 | int linux_migration_init(void); |
24 | 24 | ||
25 | /************************************************************* | 25 | /************************************************************* |
26 | * Dummy plugin functions * | 26 | * Dummy plugin functions * |
@@ -69,6 +69,11 @@ int litmus_dummy_scheduler_setup(int cmd, void __user *parameter) | |||
69 | return -EPERM; | 69 | return -EPERM; |
70 | } | 70 | } |
71 | 71 | ||
72 | long litmus_dummy_sleep_next_period(void) | ||
73 | { | ||
74 | return -EPERM; | ||
75 | } | ||
76 | |||
72 | 77 | ||
73 | /* The default scheduler plugin. It doesn't do anything and lets Linux do its | 78 | /* The default scheduler plugin. It doesn't do anything and lets Linux do its |
74 | * job. | 79 | * job. |
@@ -83,7 +88,7 @@ sched_plugin_t linux_sched_plugin = { | |||
83 | .tear_down = litmus_dummy_tear_down, | 88 | .tear_down = litmus_dummy_tear_down, |
84 | .wake_up_task = litmus_dummy_wake_up_task, | 89 | .wake_up_task = litmus_dummy_wake_up_task, |
85 | .task_blocks = litmus_dummy_task_blocks, | 90 | .task_blocks = litmus_dummy_task_blocks, |
86 | .sleep_next_period = 0, /* Linux does not have periodic tasks */ | 91 | .sleep_next_period = litmus_dummy_sleep_next_period, |
87 | .shutdown_hook = 0, | 92 | .shutdown_hook = 0, |
88 | .schedule = litmus_dummy_schedule, | 93 | .schedule = litmus_dummy_schedule, |
89 | .finish_switch = litmus_dummy_finish_switch, | 94 | .finish_switch = litmus_dummy_finish_switch, |
@@ -91,10 +96,10 @@ sched_plugin_t linux_sched_plugin = { | |||
91 | }; | 96 | }; |
92 | 97 | ||
93 | /* | 98 | /* |
94 | * The reference to current plugin that is used to schedule tasks within | 99 | * The reference to current plugin that is used to schedule tasks within |
95 | * the system. It stores references to actual function implementations | 100 | * the system. It stores references to actual function implementations |
96 | * Should be initialized by calling "init_***_plugin()" | 101 | * Should be initialized by calling "init_***_plugin()" |
97 | */ | 102 | */ |
98 | sched_plugin_t *curr_sched_plugin = &linux_sched_plugin; | 103 | sched_plugin_t *curr_sched_plugin = &linux_sched_plugin; |
99 | 104 | ||
100 | 105 | ||