author		Bjoern Brandenburg <bbb@Serenity.local>	2008-12-01 05:32:56 -0500
committer	Bjoern Brandenburg <bbb@Serenity.local>	2008-12-01 05:32:56 -0500
commit		d29554984f16f27a5580be243928fa76fe6209f6 (patch)
tree		88cc4350c793ad66ea384046f83c106cb97fcc97
parent		ae9099d7f34b26e19493be2bfca646bec5998e54 (diff)
release 2008.2 version
-rw-r--r--	download/2008.2/SHA256SUMS		2
-rw-r--r--	download/2008.2/liblitmus-2008.2.tgz	bin 0 -> 12100 bytes
-rw-r--r--	download/2008.2/litmus-rt-2008.2.patch	9880
-rw-r--r--	download/2008.2/qemu-config		1419
-rw-r--r--	index.html				63
5 files changed, 11348 insertions, 16 deletions
diff --git a/download/2008.2/SHA256SUMS b/download/2008.2/SHA256SUMS
new file mode 100644
index 0000000..3cceb4d
--- /dev/null
+++ b/download/2008.2/SHA256SUMS
@@ -0,0 +1,2 @@
390e753978110ec01e4f16e64ffb5306af0b4387c860ecf51a8a7c48264ef21e  liblitmus-2008.2.tgz
56e363ff586b86423950d4cb4de744050c5f63adaff52ef8ff0edbd30202d005  litmus-rt-2008.2.patch
diff --git a/download/2008.2/liblitmus-2008.2.tgz b/download/2008.2/liblitmus-2008.2.tgz
new file mode 100644
index 0000000..58687b4
--- /dev/null
+++ b/download/2008.2/liblitmus-2008.2.tgz
Binary files differ
diff --git a/download/2008.2/litmus-rt-2008.2.patch b/download/2008.2/litmus-rt-2008.2.patch
new file mode 100644
index 0000000..d81050a
--- /dev/null
+++ b/download/2008.2/litmus-rt-2008.2.patch
@@ -0,0 +1,9880 @@
 Makefile | 2 +-
 arch/sparc64/Kconfig | 2 +
 arch/sparc64/kernel/smp.c | 1 +
 arch/sparc64/kernel/systbls.S | 20 +-
 arch/x86/Kconfig | 2 +
 arch/x86/kernel/Makefile_32 | 3 +
 arch/x86/kernel/ft_event.c | 104 ++++
 arch/x86/kernel/smp_32.c | 1 +
 arch/x86/kernel/syscall_table_32.S | 16 +
 fs/exec.c | 3 +
 fs/inode.c | 2 +
 include/asm-sparc64/feather_trace.h | 22 +
 include/asm-sparc64/spinlock.h | 113 ++---
 include/asm-sparc64/spinlock_types.h | 5 +-
 include/asm-sparc64/unistd.h | 6 +-
 include/asm-x86/feather_trace.h | 104 ++++
 include/asm-x86/unistd_32.h | 6 +-
 include/linux/completion.h | 2 +-
 include/linux/fs.h | 5 +
 include/linux/sched.h | 11 +
 include/linux/tick.h | 3 +
 include/linux/time.h | 4 +
 include/linux/uaccess.h | 16 +
 include/litmus/edf_common.h | 25 +
 include/litmus/fdso.h | 69 +++
 include/litmus/feather_buffer.h | 94 ++++
 include/litmus/feather_trace.h | 37 ++
 include/litmus/ftdev.h | 49 ++
 include/litmus/heap.h | 327 +++++++++++++
 include/litmus/jobs.h | 9 +
 include/litmus/litmus.h | 227 +++++++++
 include/litmus/norqlock.h | 26 +
 include/litmus/rt_domain.h | 174 +++++++
 include/litmus/rt_param.h | 167 +++++++
 include/litmus/sched_plugin.h | 159 ++++++
 include/litmus/sched_trace.h | 168 +++++++
 include/litmus/trace.h | 103 ++++
 include/litmus/unistd.h | 20 +
 kernel/exit.c | 4 +
 kernel/fork.c | 8 +
 kernel/printk.c | 10 +-
 kernel/sched.c | 96 ++++-
 kernel/sched_fair.c | 2 +-
 kernel/sched_rt.c | 2 +-
 kernel/time/tick-sched.c | 44 ++-
 litmus/Kconfig | 78 +++
 litmus/Makefile | 16 +
 litmus/edf_common.c | 95 ++++
 litmus/fdso.c | 282 +++++++++++
 litmus/fmlp.c | 262 ++++++++++
 litmus/ft_event.c | 43 ++
 litmus/ftdev.c | 352 +++++++++++++
 litmus/jobs.c | 43 ++
 litmus/litmus.c | 851 ++++++++++++++++++++++++++++++++
 litmus/norqlock.c | 56 +++
 litmus/rt_domain.c | 289 +++++++++++
 litmus/sched_cedf.c | 705 ++++++++++++++++++++++++++
 litmus/sched_gsn_edf.c | 728 +++++++++++++++++++++++++++
 litmus/sched_litmus.c | 230 +++++++++
 litmus/sched_pfair.c | 895 ++++++++++++++++++++++++++++++++++
 litmus/sched_plugin.c | 199 ++++++++
 litmus/sched_psn_edf.c | 454 +++++++++++++++++
 litmus/sched_task_trace.c | 192 ++++++++
 litmus/sched_trace.c | 462 ++++++++++++++++++
 litmus/srp.c | 318 ++++++++++++
 litmus/sync.c | 90 ++++
 litmus/trace.c | 83 ++++
 67 files changed, 8910 insertions(+), 86 deletions(-)

diff --git a/Makefile b/Makefile
index 189d8ef..d9e4495 100644
--- a/Makefile
+++ b/Makefile
@@ -597,7 +597,7 @@ export mod_strip_cmd


ifeq ($(KBUILD_EXTMOD),)
-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ litmus/

vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
$(core-y) $(core-m) $(drivers-y) $(drivers-m) \
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 10b212a..8d90b5a 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -471,3 +471,5 @@ source "security/Kconfig"
source "crypto/Kconfig"

source "lib/Kconfig"
+
+source "litmus/Kconfig"
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index c399449..cd2bc7e 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -1033,6 +1033,7 @@ void smp_receive_signal(int cpu)
void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
clear_softint(1 << irq);
+ set_tsk_need_resched(current);
}

void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S
index 06d1090..7fc7615 100644
--- a/arch/sparc64/kernel/systbls.S
+++ b/arch/sparc64/kernel/systbls.S
@@ -82,6 +82,13 @@ sys_call_table32:
.word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait
/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, compat_sys_timerfd, sys_eventfd, compat_sys_fallocate

+/*LITMUS, 315*/
+ .word sys_set_rt_task_param, sys_get_rt_task_param, sys_complete_job, sys_register_np_flag, sys_exit_np
+/*320*/
+ .word sys_od_open, sys_od_close, sys_fmlp_down, sys_fmlp_up, sys_srp_down
+/*325*/ .word sys_srp_up, sys_query_job_no, sys_wait_for_job_release, sys_wait_for_ts_release, sys_release_ts
+
+
#endif /* CONFIG_COMPAT */

/* Now the 64-bit native Linux syscall table. */
@@ -154,6 +161,12 @@ sys_call_table:
.word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
/*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd, sys_eventfd, sys_fallocate

+/*LITMUS, 315*/
+ .word sys_set_rt_task_param, sys_get_rt_task_param, sys_complete_job, sys_register_np_flag, sys_exit_np
+/*320*/
+ .word sys_od_open, sys_od_close, sys_fmlp_down, sys_fmlp_up, sys_srp_down
+/*325*/ .word sys_srp_up, sys_query_job_no, sys_wait_for_job_release, sys_wait_for_ts_release, sys_release_ts
+
#if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \
defined(CONFIG_SOLARIS_EMUL_MODULE)
/* Now the 32-bit SunOS syscall table. */
@@ -271,6 +284,11 @@ sunos_sys_table:
.word sunos_nosys, sunos_nosys, sunos_nosys
.word sunos_nosys
/*310*/ .word sunos_nosys, sunos_nosys, sunos_nosys
- .word sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys
+/*320*/ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys

#endif
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 80b7ba4..f99330f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1620,3 +1620,5 @@ source "security/Kconfig"
source "crypto/Kconfig"

source "lib/Kconfig"
+
+source "litmus/Kconfig"
diff --git a/arch/x86/kernel/Makefile_32 b/arch/x86/kernel/Makefile_32
index a7bc93c..5f87f32 100644
--- a/arch/x86/kernel/Makefile_32
+++ b/arch/x86/kernel/Makefile_32
@@ -49,6 +49,9 @@ obj-y += pcspeaker.o

obj-$(CONFIG_SCx200) += scx200_32.o

+obj-$(CONFIG_FEATHER_TRACE) += ft_event.o
+
+
# vsyscall_32.o contains the vsyscall DSO images as __initdata.
# We must build both images before we can assemble it.
# Note: kbuild does not track this dependency due to usage of .incbin
diff --git a/arch/x86/kernel/ft_event.c b/arch/x86/kernel/ft_event.c
new file mode 100644
index 0000000..b1d80c5
--- /dev/null
+++ b/arch/x86/kernel/ft_event.c
@@ -0,0 +1,104 @@
+#include <linux/types.h>
+
+#include <litmus/feather_trace.h>
+
+/* the feather trace management functions assume
+ * exclusive access to the event table
+ */
+
+
+#define BYTE_JUMP 0xeb
+#define BYTE_JUMP_LEN 0x02
+
+/* for each event, there is an entry in the event table */
+struct trace_event {
+ long id;
+ long count;
+ long start_addr;
+ long end_addr;
+};
+
+extern struct trace_event __start___event_table[];
+extern struct trace_event __stop___event_table[];
+
+int ft_enable_event(unsigned long id)
+{
+ struct trace_event* te = __start___event_table;
+ int count = 0;
+ char* delta;
+ unsigned char* instr;
+
+ while (te < __stop___event_table) {
+ if (te->id == id && ++te->count == 1) {
+ instr = (unsigned char*) te->start_addr;
+ /* make sure we don't clobber something wrong */
+ if (*instr == BYTE_JUMP) {
+ delta = (((unsigned char*) te->start_addr) + 1);
+ *delta = 0;
+ }
+ }
+ if (te->id == id)
+ count++;
+ te++;
+ }
+ return count;
+}
+
+int ft_disable_event(unsigned long id)
+{
+ struct trace_event* te = __start___event_table;
+ int count = 0;
+ char* delta;
+ unsigned char* instr;
+
+ while (te < __stop___event_table) {
+ if (te->id == id && --te->count == 0) {
+ instr = (unsigned char*) te->start_addr;
+ if (*instr == BYTE_JUMP) {
+ delta = (((unsigned char*) te->start_addr) + 1);
+ *delta = te->end_addr - te->start_addr -
+ BYTE_JUMP_LEN;
+ }
+ }
+ if (te->id == id)
+ count++;
+ te++;
+ }
+ return count;
+}
+
+int ft_disable_all_events(void)
+{
+ struct trace_event* te = __start___event_table;
+ int count = 0;
+ char* delta;
+ unsigned char* instr;
+
+ while (te < __stop___event_table) {
+ if (te->count) {
+ instr = (unsigned char*) te->start_addr;
+ if (*instr == BYTE_JUMP) {
+ delta = (((unsigned char*) te->start_addr)
+ + 1);
+ *delta = te->end_addr - te->start_addr -
+ BYTE_JUMP_LEN;
+ te->count = 0;
+ count++;
+ }
+ }
+ te++;
+ }
+ return count;
+}
+
+int ft_is_event_enabled(unsigned long id)
+{
+ struct trace_event* te = __start___event_table;
+
+ while (te < __stop___event_table) {
+ if (te->id == id)
+ return te->count;
+ te++;
+ }
+ return 0;
+}
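Feather-Trace on x86 enables an event by rewriting the displacement byte of the short jmp (opcode 0xeb) recorded for each instrumented site: a displacement of 0 falls through into the call of the callback, while end_addr - start_addr - BYTE_JUMP_LEN skips it again. The per-site count makes enabling reference-counted, so only the 0 -> 1 and 1 -> 0 transitions patch code. A minimal userspace sketch of just the counting scheme (the binary patching is simulated by a printf, and the three-entry table is invented for illustration):

#include <stdio.h>

/* Stand-in for the kernel's __event_table section: one entry per
 * instrumented site; several sites may share the same event id. */
struct trace_event {
        long id;
        long count; /* outstanding enable requests */
};

static struct trace_event table[] = { {100, 0}, {101, 0}, {100, 0} };
static struct trace_event *const table_end =
        table + sizeof(table) / sizeof(table[0]);

/* Mirrors ft_enable_event(): only the 0 -> 1 transition of the
 * per-site count would trigger the jump patching. */
static int enable_event(long id)
{
        struct trace_event *te;
        int count = 0;
        for (te = table; te < table_end; te++) {
                if (te->id == id && ++te->count == 1)
                        printf("site %d: patch jmp displacement to 0\n",
                               (int) (te - table));
                if (te->id == id)
                        count++;
        }
        return count; /* number of matching sites, as in the patch */
}

int main(void)
{
        enable_event(100); /* patches sites 0 and 2 */
        enable_event(100); /* only bumps the count; already patched */
        return 0;
}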
diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c
index fcaa026..1063dfc 100644
--- a/arch/x86/kernel/smp_32.c
+++ b/arch/x86/kernel/smp_32.c
@@ -641,6 +641,7 @@ static void native_smp_send_stop(void)
fastcall void smp_reschedule_interrupt(struct pt_regs *regs)
{
ack_APIC_irq();
+ set_tsk_need_resched(current);
__get_cpu_var(irq_stat).irq_resched_count++;
}

diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index 8344c70..f6fdb0a 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -324,3 +324,19 @@ ENTRY(sys_call_table)
.long sys_timerfd
.long sys_eventfd
.long sys_fallocate
+ /* LITMUS */
+ .long sys_set_rt_task_param /* 325 */
+ .long sys_get_rt_task_param
+ .long sys_complete_job
+ .long sys_register_np_flag
+ .long sys_exit_np
+ .long sys_od_open /* 330 */
+ .long sys_od_close
+ .long sys_fmlp_down
+ .long sys_fmlp_up
+ .long sys_srp_down
+ .long sys_srp_up /* 335 */
+ .long sys_query_job_no
+ .long sys_wait_for_job_release
+ .long sys_wait_for_ts_release
+ .long sys_release_ts /* 339 */
diff --git a/fs/exec.c b/fs/exec.c
index 282240a..6f47786 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -56,6 +56,8 @@
#include <asm/mmu_context.h>
#include <asm/tlb.h>

+#include <litmus/litmus.h>
+
#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif
@@ -1309,6 +1311,7 @@ int do_execve(char * filename,
goto out_kfree;

sched_exec();
+ litmus_exec();

bprm->file = file;
bprm->filename = filename;
diff --git a/fs/inode.c b/fs/inode.c
index ed35383..ef71ea0 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -220,6 +220,8 @@ void inode_init_once(struct inode *inode)
INIT_LIST_HEAD(&inode->inotify_watches);
mutex_init(&inode->inotify_mutex);
#endif
+ INIT_LIST_HEAD(&inode->i_obj_list);
+ mutex_init(&inode->i_obj_mutex);
}

EXPORT_SYMBOL(inode_init_once);
diff --git a/include/asm-sparc64/feather_trace.h b/include/asm-sparc64/feather_trace.h
new file mode 100644
index 0000000..35ec70f
--- /dev/null
+++ b/include/asm-sparc64/feather_trace.h
@@ -0,0 +1,22 @@
+#ifndef _ARCH_FEATHER_TRACE_H
+#define _ARCH_FEATHER_TRACE_H
+
+#include <asm/atomic.h>
+#include <asm/timex.h>
+
+static inline int fetch_and_inc(int *val)
+{
+ return atomic_add_ret(1, (atomic_t*) val) - 1;
+}
+
+static inline int fetch_and_dec(int *val)
+{
+ return atomic_sub_ret(1, (atomic_t*) val) + 1;
+}
+
+static inline unsigned long long ft_timestamp(void)
+{
+ return get_cycles();
+}
+
+#endif
diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h
index 0006fe9..16931d4 100644
--- a/include/asm-sparc64/spinlock.h
+++ b/include/asm-sparc64/spinlock.h
@@ -15,93 +15,80 @@
 * and rebuild your kernel.
 */

-/* All of these locking primitives are expected to work properly
- * even in an RMO memory model, which currently is what the kernel
- * runs in.
- *
- * There is another issue. Because we play games to save cycles
- * in the non-contention case, we need to be extra careful about
- * branch targets into the "spinning" code. They live in their
- * own section, but the newer V9 branches have a shorter range
- * than the traditional 32-bit sparc branch variants. The rule
- * is that the branches that go into and out of the spinner sections
- * must be pre-V9 branches.
- */
-
-#define __raw_spin_is_locked(lp) ((lp)->lock != 0)
+#define __raw_spin_is_locked(lp) ((lp)->tail != (lp)->head)

#define __raw_spin_unlock_wait(lp) \
do { rmb(); \
- } while((lp)->lock)
+ } while((lp)->tail != (lp)->head)
+
+

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
- unsigned long tmp;
-
+ int ticket, tmp;
__asm__ __volatile__(
-"1: ldstub [%1], %0\n"
-" membar #StoreLoad | #StoreStore\n"
-" brnz,pn %0, 2f\n"
-" nop\n"
-" .subsection 2\n"
-"2: ldub [%1], %0\n"
-" membar #LoadLoad\n"
-" brnz,pt %0, 2b\n"
-" nop\n"
-" ba,a,pt %%xcc, 1b\n"
-" .previous"
- : "=&r" (tmp)
- : "r" (lock)
+"1: lduw [%2], %0 \n" /* read ticket */
+" add %0, 1, %1 \n"
+" cas [%2], %0, %1 \n"
+" cmp %0, %1 \n"
+" be,a,pt %%icc, 2f \n"
+" nop \n"
+" membar #LoadLoad | #StoreLoad | #LoadStore\n"
+" ba 1b\n"
+" nop \n"
+"2: lduw [%3], %1 \n"
+" cmp %0, %1 \n"
+" be,a,pt %%icc, 3f \n"
+" nop \n"
+" membar #LoadLoad | #StoreLoad | #LoadStore\n"
+" ba 2b\n"
+"3: membar #StoreStore | #StoreLoad"
+ : "=&r" (ticket), "=&r" (tmp)
+ : "r" (&lock->tail), "r" (&lock->head)
 : "memory");
}

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
- unsigned long result;
-
+ int tail, head;
__asm__ __volatile__(
-" ldstub [%1], %0\n"
-" membar #StoreLoad | #StoreStore"
- : "=r" (result)
- : "r" (lock)
+" lduw [%2], %0 \n" /* read tail */
+" lduw [%3], %1 \n" /* read head */
+" cmp %0, %1 \n"
+" bne,a,pn %%icc, 1f \n"
+" nop \n"
+" inc %1 \n"
+" cas [%2], %0, %1 \n" /* try to inc ticket */
+" membar #StoreStore | #StoreLoad \n"
+"1: "
+ : "=&r" (tail), "=&r" (head)
+ : "r" (&lock->tail), "r" (&lock->head)
 : "memory");

- return (result == 0UL);
+ return tail == head;
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
+ int tmp;
__asm__ __volatile__(
-" membar #StoreStore | #LoadStore\n"
-" stb %%g0, [%0]"
- : /* No outputs */
- : "r" (lock)
+" membar #StoreStore | #LoadStore \n"
+" lduw [%1], %0 \n"
+" inc %0 \n"
+" st %0, [%1] \n"
+" membar #StoreStore | #StoreLoad"
+ : "=&r" (tmp)
+ : "r" (&lock->head)
 : "memory");
}

-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
-{
- unsigned long tmp1, tmp2;
+/* We don't handle this yet, but it looks like not re-enabling the interrupts
+ * works fine, too. For example, lockdep also does it like this.
+ */
+#define __raw_spin_lock_flags(l, f) __raw_spin_lock(l)
+
+

- __asm__ __volatile__(
-"1: ldstub [%2], %0\n"
-" membar #StoreLoad | #StoreStore\n"
-" brnz,pn %0, 2f\n"
-" nop\n"
-" .subsection 2\n"
-"2: rdpr %%pil, %1\n"
-" wrpr %3, %%pil\n"
-"3: ldub [%2], %0\n"
-" membar #LoadLoad\n"
-" brnz,pt %0, 3b\n"
-" nop\n"
-" ba,pt %%xcc, 1b\n"
-" wrpr %1, %%pil\n"
-" .previous"
- : "=&r" (tmp1), "=&r" (tmp2)
- : "r"(lock), "r"(flags)
- : "memory");
-}

/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */

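The rewritten sparc64 spinlock is a ticket lock: tail hands out tickets via cas, head announces whose turn it is, and __raw_spin_unlock() simply increments head, so contending CPUs are served in FIFO order instead of racing on ldstub. FIFO spinning gives bounded per-lock waiting, presumably the motivation in a real-time kernel. A rough equivalent of the asm above in portable C11 atomics (the memory orderings only approximate the membar pairs; this is an illustrative sketch, not the kernel code):

#include <stdatomic.h>

/* tail/head ticket lock, cf. raw_spinlock_t in the patch */
struct ticket_lock {
        atomic_int tail; /* next ticket to hand out */
        atomic_int head; /* ticket currently allowed to enter */
};

static void ticket_lock(struct ticket_lock *l)
{
        /* atomically take a ticket (the cas retry loop in the asm) */
        int ticket = atomic_fetch_add_explicit(&l->tail, 1,
                                               memory_order_relaxed);
        /* spin until our ticket is served (the lduw/cmp loop) */
        while (atomic_load_explicit(&l->head, memory_order_acquire) != ticket)
                ;
}

static void ticket_unlock(struct ticket_lock *l)
{
        /* advancing head releases the next waiter; release ordering
         * stands in for membar #StoreStore | #LoadStore */
        atomic_fetch_add_explicit(&l->head, 1, memory_order_release);
}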
diff --git a/include/asm-sparc64/spinlock_types.h b/include/asm-sparc64/spinlock_types.h
index e128112..1a2e24b 100644
--- a/include/asm-sparc64/spinlock_types.h
+++ b/include/asm-sparc64/spinlock_types.h
@@ -6,10 +6,11 @@
#endif

typedef struct {
- volatile unsigned char lock;
+ int tail;
+ int head;
} raw_spinlock_t;

-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __RAW_SPIN_LOCK_UNLOCKED { 0, 0 }

typedef struct {
volatile unsigned int lock;
diff --git a/include/asm-sparc64/unistd.h b/include/asm-sparc64/unistd.h
index cb751b4..ebebde6 100644
--- a/include/asm-sparc64/unistd.h
+++ b/include/asm-sparc64/unistd.h
@@ -333,7 +333,11 @@
#define __NR_eventfd 313
#define __NR_fallocate 314

-#define NR_SYSCALLS 315
+#define __NR_LITMUS 315
+
+#include "litmus/unistd.h"
+
+#define NR_SYSCALLS 315 + NR_litmus_syscalls

#ifdef __KERNEL__
/* sysconf options, for SunOS compatibility */
diff --git a/include/asm-x86/feather_trace.h b/include/asm-x86/feather_trace.h
new file mode 100644
index 0000000..253067e
--- /dev/null
+++ b/include/asm-x86/feather_trace.h
@@ -0,0 +1,104 @@
+#ifndef _ARCH_FEATHER_TRACE_H
+#define _ARCH_FEATHER_TRACE_H
+
+static inline int fetch_and_inc(int *val)
+{
+ int ret = 1;
+ __asm__ __volatile__("lock; xaddl %0, %1" : "+r" (ret), "+m" (*val) : : "memory" );
+ return ret;
+}
+
+static inline int fetch_and_dec(int *val)
+{
+ int ret = -1;
+ __asm__ __volatile__("lock; xaddl %0, %1" : "+r" (ret), "+m" (*val) : : "memory" );
+ return ret;
+}
+
+#define feather_callback __attribute__((regparm(0)))
+
+/* make the compiler reload any register that is not saved in
+ * a cdecl function call
+ */
+#define CLOBBER_LIST "memory", "cc", "eax", "ecx", "edx"
+
+#define ft_event(id, callback) \
+ __asm__ __volatile__( \
+ "1: jmp 2f \n\t" \
+ " call " #callback " \n\t" \
+ ".section __event_table, \"aw\" \n\t" \
+ ".long " #id ", 0, 1b, 2f \n\t" \
+ ".previous \n\t" \
+ "2: \n\t" \
+ : : : CLOBBER_LIST)
+
+#define ft_event0(id, callback) \
+ __asm__ __volatile__( \
+ "1: jmp 2f \n\t" \
+ " subl $4, %%esp \n\t" \
+ " movl $" #id ", (%%esp) \n\t" \
+ " call " #callback " \n\t" \
+ " addl $4, %%esp \n\t" \
+ ".section __event_table, \"aw\" \n\t" \
+ ".long " #id ", 0, 1b, 2f \n\t" \
+ ".previous \n\t" \
+ "2: \n\t" \
+ : : : CLOBBER_LIST)
+
+#define ft_event1(id, callback, param) \
+ __asm__ __volatile__( \
+ "1: jmp 2f \n\t" \
+ " subl $8, %%esp \n\t" \
+ " movl %0, 4(%%esp) \n\t" \
+ " movl $" #id ", (%%esp) \n\t" \
+ " call " #callback " \n\t" \
+ " addl $8, %%esp \n\t" \
+ ".section __event_table, \"aw\" \n\t" \
+ ".long " #id ", 0, 1b, 2f \n\t" \
+ ".previous \n\t" \
+ "2: \n\t" \
+ : : "r" (param) : CLOBBER_LIST)
+
+#define ft_event2(id, callback, param, param2) \
+ __asm__ __volatile__( \
+ "1: jmp 2f \n\t" \
+ " subl $12, %%esp \n\t" \
+ " movl %1, 8(%%esp) \n\t" \
+ " movl %0, 4(%%esp) \n\t" \
+ " movl $" #id ", (%%esp) \n\t" \
+ " call " #callback " \n\t" \
+ " addl $12, %%esp \n\t" \
+ ".section __event_table, \"aw\" \n\t" \
+ ".long " #id ", 0, 1b, 2f \n\t" \
+ ".previous \n\t" \
+ "2: \n\t" \
+ : : "r" (param), "r" (param2) : CLOBBER_LIST)
+
+
+#define ft_event3(id, callback, p, p2, p3) \
+ __asm__ __volatile__( \
+ "1: jmp 2f \n\t" \
+ " subl $16, %%esp \n\t" \
651 | + " movl %1, 12(%%esp) \n\t" \ | ||
652 | + " movl %1, 8(%%esp) \n\t" \ | ||
653 | + " movl %0, 4(%%esp) \n\t" \ | ||
654 | + " movl $" #id ", (%%esp) \n\t" \ | ||
655 | + " call " #callback " \n\t" \ | ||
656 | + " addl $16, %%esp \n\t" \ | ||
657 | + ".section __event_table, \"aw\" \n\t" \ | ||
658 | + ".long " #id ", 0, 1b, 2f \n\t" \ | ||
659 | + ".previous \n\t" \ | ||
660 | + "2: \n\t" \ | ||
661 | + : : "r" (p), "r" (p2), "r" (p3) : CLOBBER_LIST) | ||
662 | + | ||
663 | + | ||
664 | +static inline unsigned long long ft_timestamp(void) | ||
665 | +{ | ||
666 | + unsigned long long ret; | ||
667 | + __asm__ __volatile__("rdtsc" : "=A" (ret)); | ||
668 | + return ret; | ||
669 | +} | ||
670 | + | ||
671 | +#define __ARCH_HAS_FEATHER_TRACE | ||
672 | + | ||
673 | +#endif | ||
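Each ft_event* site therefore costs a single two-byte jmp when the event is disabled; when enabled, the patched jump falls through to code that pushes the event id (and up to three parameters) on the stack and calls the handler. feather_callback forces regparm(0) so the handler reads those stack arguments regardless of the kernel's register-argument convention. A hypothetical usage sketch (the id 42 and the names save_timestamp and some_hot_path are invented; the real event ids and callbacks are presumably defined in litmus/trace.h and litmus/trace.c):

#include <litmus/feather_trace.h>

/* handlers must be regparm(0), hence the feather_callback marker */
feather_callback void save_timestamp(unsigned long event_id)
{
        /* a real handler would record ft_timestamp() tagged with event_id */
}

void some_hot_path(void)
{
        /* disabled: executes only the short jmp over the call;
         * enabled: pushes 42 and calls save_timestamp() */
        ft_event0(42, save_timestamp);
}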
diff --git a/include/asm-x86/unistd_32.h b/include/asm-x86/unistd_32.h
index 9b15545..36fec84 100644
--- a/include/asm-x86/unistd_32.h
+++ b/include/asm-x86/unistd_32.h
@@ -331,9 +331,13 @@
#define __NR_eventfd 323
#define __NR_fallocate 324

+#define __NR_LITMUS 325
+
+#include "litmus/unistd.h"
+
#ifdef __KERNEL__

-#define NR_syscalls 325
+#define NR_syscalls 324 + NR_litmus_syscalls

#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 33d6aaf..5b55e97 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -51,7 +51,7 @@ extern unsigned long wait_for_completion_interruptible_timeout(

extern void complete(struct completion *);
extern void complete_all(struct completion *);
-
+extern void complete_n(struct completion *, int n);
#define INIT_COMPLETION(x) ((x).done = 0)

#endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b3ec4a4..22f856c 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -588,6 +588,8 @@ static inline int mapping_writably_mapped(struct address_space *mapping)
#define i_size_ordered_init(inode) do { } while (0)
#endif

+struct inode_obj_id_table;
+
struct inode {
struct hlist_node i_hash;
struct list_head i_list;
@@ -653,6 +655,9 @@ struct inode {
void *i_security;
#endif
void *i_private; /* fs or device private pointer */
+
+ struct list_head i_obj_list;
+ struct mutex i_obj_mutex;
};

/*
diff --git a/include/linux/sched.h b/include/linux/sched.h
index cc14656..76e28f1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -37,6 +37,7 @@
#define SCHED_BATCH 3
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE 5
+#define SCHED_LITMUS 6

#ifdef __KERNEL__

@@ -91,6 +92,8 @@ struct sched_param {

#include <asm/processor.h>

+#include <litmus/rt_param.h>
+
struct exec_domain;
struct futex_pi_state;
struct bio;
@@ -914,6 +917,8 @@ struct sched_entity {
#endif
};

+struct od_table_entry;
+
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
void *stack;
@@ -1178,6 +1183,12 @@ struct task_struct {
int make_it_fail;
#endif
struct prop_local_single dirties;
+
+ /* litmus parameters and state */
+ struct rt_param rt_param;
+
+ /* references to PI semaphores, etc. */
+ struct od_table_entry* od_table;
};

/*
diff --git a/include/linux/tick.h b/include/linux/tick.h
index f4a1395..7eae358 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -64,6 +64,9 @@ extern int tick_is_oneshot_available(void);
extern struct tick_device *tick_get_device(int cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
+#define LINUX_DEFAULT_TICKS 0
+#define LITMUS_ALIGNED_TICKS 1
+#define LITMUS_STAGGERED_TICKS 2
extern int tick_init_highres(void);
extern int tick_program_event(ktime_t expires, int force);
extern void tick_setup_sched_timer(void);
diff --git a/include/linux/time.h b/include/linux/time.h
index b04136d..3e8fd9e 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -173,6 +173,10 @@ static inline void timespec_add_ns(struct timespec *a, u64 ns)
{
ns += a->tv_nsec;
while(unlikely(ns >= NSEC_PER_SEC)) {
+ /* The following asm() prevents the compiler from
+ * optimising this loop into a modulo operation. */
+ asm("" : "+r"(ns));
+
ns -= NSEC_PER_SEC;
a->tv_sec++;
}
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 975c963..6ae0ff9 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -84,4 +84,20 @@ static inline unsigned long __copy_from_user_nocache(void *to,
ret; \
})

+/* This is a naive attempt at a write version of the above native Linux macro.
+ */
+#define poke_kernel_address(val, addr) \
+ ({ \
+ long ret; \
+ mm_segment_t old_fs = get_fs(); \
+ \
+ set_fs(KERNEL_DS); \
+ pagefault_disable(); \
+ ret = __put_user(val, (__force typeof(val) __user *)(addr)); \
+ pagefault_enable(); \
+ set_fs(old_fs); \
+ ret; \
+ })
+
+
#endif /* __LINUX_UACCESS_H__ */
diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h
new file mode 100644
index 0000000..32dcf9b
--- /dev/null
+++ b/include/litmus/edf_common.h
@@ -0,0 +1,25 @@
+/* EDF common data structures and utility functions shared by all EDF
+ * based scheduler plugins
+ */
+
+/* CLEANUP: Add comments and make it less messy.
+ *
+ */
+
+#ifndef __UNC_EDF_COMMON_H__
+#define __UNC_EDF_COMMON_H__
+
+#include <litmus/rt_domain.h>
+
+
+void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
+ release_jobs_t release);
+
+int edf_higher_prio(struct task_struct* first,
+ struct task_struct* second);
+
+int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t);
+
+int edf_set_hp_task(struct pi_semaphore *sem);
+int edf_set_hp_cpu_task(struct pi_semaphore *sem, int cpu);
+#endif
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
new file mode 100644
index 0000000..286e10f
--- /dev/null
+++ b/include/litmus/fdso.h
@@ -0,0 +1,69 @@
+/* fdso.h - file descriptor attached shared objects
+ *
+ * (c) 2007 B. Brandenburg, LITMUS^RT project
+ */
+
+#ifndef _LINUX_FDSO_H_
+#define _LINUX_FDSO_H_
+
+#include <linux/list.h>
+#include <asm/atomic.h>
+
+#include <linux/fs.h>
+
+#define MAX_OBJECT_DESCRIPTORS 32
+
+typedef enum {
+ MIN_OBJ_TYPE = 0,
+
+ FMLP_SEM = 0,
+ SRP_SEM = 1,
+
+ MAX_OBJ_TYPE = 1
+} obj_type_t;
+
+struct inode_obj_id {
+ struct list_head list;
+ atomic_t count;
+ struct inode* inode;
+
+ obj_type_t type;
+ void* obj;
+ unsigned int id;
+};
+
+
+struct od_table_entry {
+ unsigned int used;
+
+ struct inode_obj_id* obj;
+ void* extra;
+};
+
+struct fdso_ops {
+ void* (*create) (void);
+ void (*destroy)(void*);
+ int (*open) (struct od_table_entry*, void* __user);
+ int (*close) (struct od_table_entry*);
+};
+
+/* translate a userspace supplied od into the raw table entry
+ * returns NULL if od is invalid
+ */
+struct od_table_entry* __od_lookup(int od);
+
+/* translate a userspace supplied od into the associated object
+ * returns NULL if od is invalid
+ */
+static inline void* od_lookup(int od, obj_type_t type)
+{
+ struct od_table_entry* e = __od_lookup(od);
+ return e && e->obj->type == type ? e->obj->obj : NULL;
+}
+
+#define lookup_fmlp_sem(od)((struct pi_semaphore*) od_lookup(od, FMLP_SEM))
+#define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM))
+#define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID))
+
+
+#endif
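An object descriptor (od) is to a shared LITMUS^RT object what a file descriptor is to a file: userspace passes a small integer, and the kernel resolves it through the task's od_table with a type check, getting NULL back for a stale or mistyped descriptor. A sketch of the resolution step as a system call might perform it, plausibly the pattern used by sys_fmlp_down() in litmus/fmlp.c, though the function below (example_fmlp_op) is invented for illustration:

#include <linux/errno.h>
#include <litmus/fdso.h>

/* hypothetical kernel-side helper, not part of the patch */
long example_fmlp_op(int od)
{
        /* expands to a type-checked od_lookup(od, FMLP_SEM) */
        struct pi_semaphore* sem = lookup_fmlp_sem(od);
        if (!sem)
                return -EINVAL; /* bad od, or od of the wrong type */
        /* ... suspend on sem under the FMLP protocol ... */
        return 0;
}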
diff --git a/include/litmus/feather_buffer.h b/include/litmus/feather_buffer.h
new file mode 100644
index 0000000..6c18277
--- /dev/null
+++ b/include/litmus/feather_buffer.h
@@ -0,0 +1,94 @@
+#ifndef _FEATHER_BUFFER_H_
+#define _FEATHER_BUFFER_H_
+
+/* requires UINT_MAX and memcpy */
+
+#define SLOT_FREE 0
+#define SLOT_BUSY 1
+#define SLOT_READY 2
+
+struct ft_buffer {
+ unsigned int slot_count;
+ unsigned int slot_size;
+
+ int free_count;
+ unsigned int write_idx;
+ unsigned int read_idx;
+
+ char* slots;
+ void* buffer_mem;
+ unsigned int failed_writes;
+};
+
+static inline int init_ft_buffer(struct ft_buffer* buf,
+ unsigned int slot_count,
+ unsigned int slot_size,
+ char* slots,
+ void* buffer_mem)
+{
+ int i = 0;
+ if (!slot_count || UINT_MAX % slot_count != slot_count - 1) {
+ /* The slot count must divide UINT_MAX + 1 so that when it
+ * wraps around, the index correctly points to 0.
+ */
+ return 0;
+ } else {
+ buf->slot_count = slot_count;
+ buf->slot_size = slot_size;
+ buf->slots = slots;
+ buf->buffer_mem = buffer_mem;
+ buf->free_count = slot_count;
+ buf->write_idx = 0;
+ buf->read_idx = 0;
+ buf->failed_writes = 0;
+ for (i = 0; i < slot_count; i++)
+ buf->slots[i] = SLOT_FREE;
+ return 1;
+ }
+}
+
+static inline int ft_buffer_start_write(struct ft_buffer* buf, void **ptr)
+{
+ int free = fetch_and_dec(&buf->free_count);
+ unsigned int idx;
+ if (free <= 0) {
+ fetch_and_inc(&buf->free_count);
+ *ptr = 0;
+ fetch_and_inc(&buf->failed_writes);
+ return 0;
+ } else {
+ idx = fetch_and_inc((int*) &buf->write_idx) % buf->slot_count;
+ buf->slots[idx] = SLOT_BUSY;
+ *ptr = ((char*) buf->buffer_mem) + idx * buf->slot_size;
+ return 1;
+ }
+}
+
+static inline void ft_buffer_finish_write(struct ft_buffer* buf, void *ptr)
+{
+ unsigned int idx = ((char*) ptr - (char*) buf->buffer_mem) / buf->slot_size;
+ buf->slots[idx] = SLOT_READY;
+}
+
+
+/* exclusive reader access is assumed */
+static inline int ft_buffer_read(struct ft_buffer* buf, void* dest)
+{
+ unsigned int idx;
+ if (buf->free_count == buf->slot_count)
+ /* nothing available */
+ return 0;
+ idx = buf->read_idx % buf->slot_count;
+ if (buf->slots[idx] == SLOT_READY) {
+ memcpy(dest, ((char*) buf->buffer_mem) + idx * buf->slot_size,
+ buf->slot_size);
+ buf->slots[idx] = SLOT_FREE;
+ buf->read_idx++;
+ fetch_and_inc(&buf->free_count);
+ return 1;
+ } else
+ return 0;
+}
+
+
+#endif
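Writers claim a slot with fetch_and_dec/fetch_and_inc and march it from FREE through BUSY to READY, so several CPUs can log records concurrently while a single reader (note the "exclusive reader access is assumed" comment) drains slots in order. Since the header only needs UINT_MAX, memcpy, and the two atomic primitives, it can be smoke-tested in userspace; a sketch under those assumptions (the GCC __sync builtins and the local copy of the header are stand-ins):

#include <limits.h>
#include <string.h>
#include <stdio.h>

/* the kernel gets these from asm/feather_trace.h; GCC builtins
 * stand in for a userspace test */
static inline int fetch_and_inc(int *val) { return __sync_fetch_and_add(val, 1); }
static inline int fetch_and_dec(int *val) { return __sync_fetch_and_sub(val, 1); }

#include "feather_buffer.h" /* assumed local copy of the header above */

#define SLOTS 4 /* must divide UINT_MAX + 1, i.e. be a power of two */

static char slot_states[SLOTS];
static unsigned int slot_mem[SLOTS];

int main(void)
{
        struct ft_buffer buf;
        unsigned int *slot, out;

        init_ft_buffer(&buf, SLOTS, sizeof(unsigned int),
                       slot_states, slot_mem);

        if (ft_buffer_start_write(&buf, (void **) &slot)) {
                *slot = 1234; /* fill the claimed record */
                ft_buffer_finish_write(&buf, slot);
        }
        if (ft_buffer_read(&buf, &out))
                printf("read back %u\n", out); /* prints 1234 */
        return 0;
}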
diff --git a/include/litmus/feather_trace.h b/include/litmus/feather_trace.h
new file mode 100644
index 0000000..f8fb7ba
--- /dev/null
+++ b/include/litmus/feather_trace.h
@@ -0,0 +1,37 @@
+#ifndef _FEATHER_TRACE_H_
+#define _FEATHER_TRACE_H_
+
+#include <asm/feather_trace.h>
+
+int ft_enable_event(unsigned long id);
+int ft_disable_event(unsigned long id);
+int ft_is_event_enabled(unsigned long id);
+int ft_disable_all_events(void);
+
+#ifndef __ARCH_HAS_FEATHER_TRACE
+/* provide default implementation */
+
+#define feather_callback
+
+#define MAX_EVENTS 1024
+
+extern int ft_events[MAX_EVENTS];
+
+#define ft_event(id, callback) \
+ if (ft_events[id]) callback();
+
+#define ft_event0(id, callback) \
+ if (ft_events[id]) callback(id);
+
+#define ft_event1(id, callback, param) \
+ if (ft_events[id]) callback(id, param);
+
+#define ft_event2(id, callback, param, param2) \
+ if (ft_events[id]) callback(id, param, param2);
+
+#define ft_event3(id, callback, p, p2, p3) \
+ if (ft_events[id]) callback(id, p, p2, p3);
+#endif
+
+
+#endif
diff --git a/include/litmus/ftdev.h b/include/litmus/ftdev.h
new file mode 100644
index 0000000..7697b46
--- /dev/null
+++ b/include/litmus/ftdev.h
@@ -0,0 +1,49 @@
+#ifndef _LITMUS_FTDEV_H_
+#define _LITMUS_FTDEV_H_
+
+#include <litmus/feather_trace.h>
+#include <litmus/feather_buffer.h>
+#include <linux/mutex.h>
+#include <linux/cdev.h>
+
+#define MAX_FTDEV_MINORS NR_CPUS
+
+#define FTDEV_ENABLE_CMD 0
+#define FTDEV_DISABLE_CMD 1
+
+struct ftdev;
+
+/* return 0 if buffer can be opened, otherwise -$REASON */
+typedef int (*ftdev_can_open_t)(struct ftdev* dev, unsigned int buf_no);
+/* return 0 on success, otherwise -$REASON */
+typedef int (*ftdev_alloc_t)(struct ftdev* dev, unsigned int buf_no);
+typedef void (*ftdev_free_t)(struct ftdev* dev, unsigned int buf_no);
+
+
+struct ftdev_event;
+
+struct ftdev_minor {
+ struct ft_buffer* buf;
+ unsigned int readers;
+ struct mutex lock;
+ /* FIXME: filter for authorized events */
+ struct ftdev_event* events;
+};
+
+struct ftdev {
+ struct cdev cdev;
+ /* FIXME: don't waste memory, allocate dynamically */
+ struct ftdev_minor minor[MAX_FTDEV_MINORS];
+ unsigned int minor_cnt;
+ ftdev_alloc_t alloc;
+ ftdev_free_t free;
+ ftdev_can_open_t can_open;
+};
+
+struct ft_buffer* alloc_ft_buffer(unsigned int count, size_t size);
+void free_ft_buffer(struct ft_buffer* buf);
+
+void ftdev_init(struct ftdev* ftdev, struct module* owner);
+int register_ftdev(struct ftdev* ftdev, const char* name, int major);
+
+#endif
diff --git a/include/litmus/heap.h b/include/litmus/heap.h
new file mode 100644
index 0000000..e5b4746
--- /dev/null
+++ b/include/litmus/heap.h
@@ -0,0 +1,327 @@
+/* heaps.h -- Binomial Heaps
+ *
+ * (c) 2008 Bjoern Brandenburg
+ */
+
+#ifndef HEAP_H
+#define HEAP_H
+
+#define NOT_IN_HEAP UINT_MAX
+
+struct heap_node {
+ struct heap_node* parent;
+ struct heap_node* next;
+ struct heap_node* child;
+
+ unsigned int degree;
+ void* value;
+ struct heap_node** ref;
+};
+
+struct heap {
+ struct heap_node* head;
+ /* We cache the minimum of the heap.
+ * This speeds up repeated peek operations.
+ */
+ struct heap_node* min;
+};
+
+typedef int (*heap_prio_t)(struct heap_node* a, struct heap_node* b);
+
+static inline void heap_init(struct heap* heap)
+{
+ heap->head = NULL;
+ heap->min = NULL;
+}
+
+static inline void heap_node_init(struct heap_node** _h, void* value)
+{
+ struct heap_node* h = *_h;
+ h->parent = NULL;
+ h->next = NULL;
+ h->child = NULL;
+ h->degree = NOT_IN_HEAP;
+ h->value = value;
+ h->ref = _h;
+}
+
+struct heap_node* heap_node_alloc(int gfp_flags);
+void heap_node_free(struct heap_node* hn);
+
+static inline int heap_node_in_heap(struct heap_node* h)
+{
+ return h->degree != NOT_IN_HEAP;
+}
+
+static inline int heap_empty(struct heap* heap)
+{
+ return heap->head == NULL && heap->min == NULL;
+}
+
+/* make child a subtree of root */
+static inline void __heap_link(struct heap_node* root,
+ struct heap_node* child)
+{
+ child->parent = root;
+ child->next = root->child;
+ root->child = child;
+ root->degree++;
+}
+
+/* merge root lists */
+static inline struct heap_node* __heap_merge(struct heap_node* a,
+ struct heap_node* b)
+{
+ struct heap_node* head = NULL;
+ struct heap_node** pos = &head;
+
+ while (a && b) {
+ if (a->degree < b->degree) {
+ *pos = a;
+ a = a->next;
+ } else {
+ *pos = b;
+ b = b->next;
+ }
+ pos = &(*pos)->next;
+ }
+ if (a)
+ *pos = a;
+ else
+ *pos = b;
+ return head;
+}
+
+/* reverse a linked list of nodes. also clears parent pointer */
+static inline struct heap_node* __heap_reverse(struct heap_node* h)
+{
+ struct heap_node* tail = NULL;
+ struct heap_node* next;
+
+ if (!h)
+ return h;
+
+ h->parent = NULL;
+ while (h->next) {
+ next = h->next;
+ h->next = tail;
+ tail = h;
+ h = next;
+ h->parent = NULL;
+ }
+ h->next = tail;
+ return h;
+}
+
+static inline void __heap_min(heap_prio_t higher_prio, struct heap* heap,
+ struct heap_node** prev, struct heap_node** node)
+{
+ struct heap_node *_prev, *cur;
+ *prev = NULL;
+
+ if (!heap->head) {
+ *node = NULL;
+ return;
+ }
+
+ *node = heap->head;
+ _prev = heap->head;
+ cur = heap->head->next;
+ while (cur) {
+ if (higher_prio(cur, *node)) {
+ *node = cur;
+ *prev = _prev;
+ }
+ _prev = cur;
+ cur = cur->next;
+ }
+}
+
+static inline void __heap_union(heap_prio_t higher_prio, struct heap* heap,
+ struct heap_node* h2)
+{
+ struct heap_node* h1;
+ struct heap_node *prev, *x, *next;
+ if (!h2)
+ return;
+ h1 = heap->head;
+ if (!h1) {
+ heap->head = h2;
+ return;
+ }
+ h1 = __heap_merge(h1, h2);
+ prev = NULL;
+ x = h1;
+ next = x->next;
+ while (next) {
+ if (x->degree != next->degree ||
+ (next->next && next->next->degree == x->degree)) {
+ /* nothing to do, advance */
+ prev = x;
+ x = next;
+ } else if (higher_prio(x, next)) {
+ /* x becomes the root of next */
+ x->next = next->next;
+ __heap_link(x, next);
+ } else {
+ /* next becomes the root of x */
+ if (prev)
+ prev->next = next;
+ else
+ h1 = next;
+ __heap_link(next, x);
+ x = next;
+ }
+ next = x->next;
+ }
+ heap->head = h1;
+}
+
+static inline struct heap_node* __heap_extract_min(heap_prio_t higher_prio,
+ struct heap* heap)
+{
+ struct heap_node *prev, *node;
+ __heap_min(higher_prio, heap, &prev, &node);
+ if (!node)
+ return NULL;
+ if (prev)
+ prev->next = node->next;
+ else
+ heap->head = node->next;
+ __heap_union(higher_prio, heap, __heap_reverse(node->child));
+ return node;
+}
+
+/* insert (and reinitialize) a node into the heap */
+static inline void heap_insert(heap_prio_t higher_prio, struct heap* heap,
+ struct heap_node* node)
+{
+ struct heap_node *min;
+ node->child = NULL;
+ node->parent = NULL;
+ node->next = NULL;
+ node->degree = 0;
+ if (heap->min && higher_prio(node, heap->min)) {
+ /* swap min cache */
+ min = heap->min;
+ min->child = NULL;
+ min->parent = NULL;
+ min->next = NULL;
+ min->degree = 0;
+ __heap_union(higher_prio, heap, min);
+ heap->min = node;
+ } else
+ __heap_union(higher_prio, heap, node);
+}
+
+static inline void __uncache_min(heap_prio_t higher_prio, struct heap* heap)
+{
+ struct heap_node* min;
+ if (heap->min) {
+ min = heap->min;
+ heap->min = NULL;
+ heap_insert(higher_prio, heap, min);
+ }
+}
+
+/* merge addition into target */
+static inline void heap_union(heap_prio_t higher_prio,
+ struct heap* target, struct heap* addition)
+{
+ /* first insert any cached minima, if necessary */
+ __uncache_min(higher_prio, target);
+ __uncache_min(higher_prio, addition);
+ __heap_union(higher_prio, target, addition->head);
+ /* this is a destructive merge */
+ addition->head = NULL;
+}
+
+static inline struct heap_node* heap_peek(heap_prio_t higher_prio,
+ struct heap* heap)
+{
+ if (!heap->min)
+ heap->min = __heap_extract_min(higher_prio, heap);
+ return heap->min;
+}
+
+static inline struct heap_node* heap_take(heap_prio_t higher_prio,
+ struct heap* heap)
+{
+ struct heap_node *node;
+ if (!heap->min)
+ heap->min = __heap_extract_min(higher_prio, heap);
+ node = heap->min;
+ heap->min = NULL;
+ if (node)
+ node->degree = NOT_IN_HEAP;
+ return node;
+}
+
+static inline void heap_delete(heap_prio_t higher_prio, struct heap* heap,
+ struct heap_node* node)
+{
+ struct heap_node *parent, *prev, *pos;
+ struct heap_node** tmp_ref;
+ void* tmp;
+
+ if (heap->min != node) {
+ /* bubble up */
+ parent = node->parent;
+ while (parent) {
+ /* swap parent and node */
+ tmp = parent->value;
+ parent->value = node->value;
+ node->value = tmp;
+ /* swap references */
+ *(parent->ref) = node;
+ *(node->ref) = parent;
+ tmp_ref = parent->ref;
+ parent->ref = node->ref;
+ node->ref = tmp_ref;
+ /* step up */
+ node = parent;
+ parent = node->parent;
+ }
+ /* now delete:
+ * first find prev */
+ prev = NULL;
+ pos = heap->head;
+ while (pos != node) {
+ prev = pos;
+ pos = pos->next;
+ }
+ /* we have prev, now remove node */
+ if (prev)
+ prev->next = node->next;
+ else
+ heap->head = node->next;
+ __heap_union(higher_prio, heap, __heap_reverse(node->child));
+ } else
+ heap->min = NULL;
+ node->degree = NOT_IN_HEAP;
+}
+
+/* allocate a heap node for value and insert into the heap */
+static inline int heap_add(heap_prio_t higher_prio, struct heap* heap,
+ void* value, int gfp_flags)
+{
+ struct heap_node* hn = heap_node_alloc(gfp_flags);
+ if (likely(hn)) {
+ heap_node_init(&hn, value);
+ heap_insert(higher_prio, heap, hn);
+ }
+ return hn != NULL;
+}
+
+static inline void* heap_take_del(heap_prio_t higher_prio,
+ struct heap* heap)
+{
+ struct heap_node* hn = heap_take(higher_prio, heap);
+ void* ret = NULL;
+ if (hn) {
+ ret = hn->value;
+ heap_node_free(hn);
+ }
+ return ret;
+}
+#endif
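The binomial heap keeps insert and extract at O(log n) and, per the comment above, caches the minimum so repeated peeks are O(1). heap_node_alloc()/heap_node_free() are only declared here and defined elsewhere in the patch, so a userspace test needs stand-ins; a sketch ordering plain integers, with a smaller value meaning higher priority, much as an EDF plugin would compare deadlines:

#include <limits.h>
#include <stdlib.h>
#include <stdio.h>

#define likely(x) __builtin_expect(!!(x), 1) /* kernel macro stand-in */

#include "heap.h" /* assumed local copy of the header above */

/* malloc-backed stand-ins for the kernel allocator hooks */
struct heap_node* heap_node_alloc(int gfp_flags)
{
        (void) gfp_flags;
        return malloc(sizeof(struct heap_node));
}
void heap_node_free(struct heap_node* hn) { free(hn); }

/* order by the int behind `value`: smaller = higher priority */
static int less(struct heap_node* a, struct heap_node* b)
{
        return *(int*) a->value < *(int*) b->value;
}

int main(void)
{
        struct heap h;
        int d[] = {30, 10, 20};
        int i;

        heap_init(&h);
        for (i = 0; i < 3; i++)
                heap_add(less, &h, &d[i], 0);
        /* drains in priority order: prints 10, 20, 30 */
        while (!heap_empty(&h))
                printf("%d\n", *(int*) heap_take_del(less, &h));
        return 0;
}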
diff --git a/include/litmus/jobs.h b/include/litmus/jobs.h
new file mode 100644
index 0000000..9bd361e
--- /dev/null
+++ b/include/litmus/jobs.h
@@ -0,0 +1,9 @@
+#ifndef __LITMUS_JOBS_H__
+#define __LITMUS_JOBS_H__
+
+void prepare_for_next_period(struct task_struct *t);
+void release_at(struct task_struct *t, lt_t start);
+long complete_job(void);
+
+#endif
+
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
new file mode 100644
index 0000000..7ef5a62
--- /dev/null
+++ b/include/litmus/litmus.h
@@ -0,0 +1,227 @@
+/*
+ * Constant definitions related to
+ * scheduling policy.
+ */
+
+#ifndef _LINUX_LITMUS_H_
+#define _LINUX_LITMUS_H_
+
+#include <linux/jiffies.h>
+#include <litmus/sched_trace.h>
+
+/* RT mode start time */
+extern volatile unsigned long rt_start_time;
+
+extern atomic_t __log_seq_no;
+
+#define TRACE(fmt, args...) \
+ sched_trace_log_message("%d P%d: " fmt, atomic_add_return(1, &__log_seq_no), \
+ raw_smp_processor_id(), ## args)
+
+#define TRACE_TASK(t, fmt, args...) \
+ TRACE("(%s/%d) " fmt, (t)->comm, (t)->pid, ##args)
+
+#define TRACE_CUR(fmt, args...) \
+ TRACE_TASK(current, fmt, ## args)
+
+#define TRACE_BUG_ON(cond) \
+ do { if (cond) TRACE("BUG_ON(%s) at %s:%d " \
+ "called from %p current=%s/%d state=%d " \
+ "flags=%x partition=%d cpu=%d rtflags=%d"\
+ " job=%u knp=%d timeslice=%u\n", \
+ #cond, __FILE__, __LINE__, __builtin_return_address(0), current->comm, \
+ current->pid, current->state, current->flags, \
+ get_partition(current), smp_processor_id(), get_rt_flags(current), \
+ current->rt_param.job_params.job_no, current->rt_param.kernel_np, \
+ current->time_slice\
+ ); } while(0);
+
+
+/* in_list - is a given list_head queued on some list?
+ */
+static inline int in_list(struct list_head* list)
+{
+ return !( /* case 1: deleted */
+ (list->next == LIST_POISON1 &&
+ list->prev == LIST_POISON2)
+ ||
+ /* case 2: initialized */
+ (list->next == list &&
+ list->prev == list)
+ );
+}
+
+typedef int (*list_cmp_t)(struct list_head*, struct list_head*);
+
+static inline unsigned int list_insert(struct list_head* new,
+ struct list_head* head,
+ list_cmp_t order_before)
+{
+ struct list_head *pos;
+ unsigned int passed = 0;
+
+ BUG_ON(!new);
+
+ /* find a spot where the new entry is less than the next */
+ list_for_each(pos, head) {
+ if (unlikely(order_before(new, pos))) {
+ /* pos is not less than new, thus insert here */
+ __list_add(new, pos->prev, pos);
+ goto out;
+ }
+ passed++;
+ }
1557 | + /* if we get to this point, either the list is empty or every | ||
1558 | + * queued element is less than new. | ||
1559 | + * Let's add new to the end. */ | ||
1560 | + list_add_tail(new, head); | ||
1561 | + out: | ||
1562 | + return passed; | ||
1563 | +} | ||
1564 | + | ||
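For illustration, a list_cmp_t comparator for a hypothetical element type (struct item is not part of the patch) that keeps a list sorted by ascending key:

struct item {
        int key;
        struct list_head link;
};

static int item_order_before(struct list_head *a, struct list_head *b)
{
        return list_entry(a, struct item, link)->key <
               list_entry(b, struct item, link)->key;
}

/* list_insert(&it->link, &my_list, item_order_before) inserts it in
 * sorted position and returns the number of entries that precede it. */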
1565 | +void list_qsort(struct list_head* list, list_cmp_t less_than); | ||
1566 | + | ||
1567 | + | ||
1568 | +#define RT_PREEMPTIVE 0x2050 /* = NP */ | ||
1569 | +#define RT_NON_PREEMPTIVE 0x4e50 /* = P */ | ||
1570 | +#define RT_EXIT_NP_REQUESTED 0x5251 /* = RQ */ | ||
1571 | + | ||
1572 | + | ||
1573 | +/* kill naughty tasks | ||
1574 | + */ | ||
1575 | +void scheduler_signal(struct task_struct *t, unsigned int signal); | ||
1576 | +void send_scheduler_signals(void); | ||
1577 | +void np_mem_kill(struct task_struct *t); | ||
1578 | + | ||
1579 | +void litmus_fork(struct task_struct *tsk); | ||
1580 | +void litmus_exec(void); | ||
1581 | +/* clean up real-time state of a task */ | ||
1582 | +void exit_litmus(struct task_struct *dead_tsk); | ||
1583 | + | ||
1584 | +long litmus_admit_task(struct task_struct *tsk); | ||
1585 | +void litmus_exit_task(struct task_struct *tsk); | ||
1586 | + | ||
1587 | +#define is_realtime(t) ((t)->policy == SCHED_LITMUS) | ||
1588 | +#define rt_transition_pending(t) \ | ||
1589 | + ((t)->rt_param.transition_pending) | ||
1590 | + | ||
1591 | +#define tsk_rt(t) (&(t)->rt_param) | ||
1592 | + | ||
1593 | +/* Realtime utility macros */ | ||
1594 | +#define get_rt_flags(t) (tsk_rt(t)->flags) | ||
1595 | +#define set_rt_flags(t,f) (tsk_rt(t)->flags=(f)) | ||
1596 | +#define get_exec_cost(t) (tsk_rt(t)->task_params.exec_cost) | ||
1597 | +#define get_exec_time(t) (tsk_rt(t)->job_params.exec_time) | ||
1598 | +#define get_rt_period(t) (tsk_rt(t)->task_params.period) | ||
1599 | +#define get_rt_phase(t) (tsk_rt(t)->task_params.phase) | ||
1600 | +#define get_partition(t) (tsk_rt(t)->task_params.cpu) | ||
1601 | +#define get_deadline(t) (tsk_rt(t)->job_params.deadline) | ||
1602 | +#define get_class(t) (tsk_rt(t)->task_params.cls) | ||
1603 | + | ||
1604 | +static inline int budget_exhausted(struct task_struct* t) | ||
1605 | +{ | ||
1606 | + return get_exec_time(t) >= get_exec_cost(t); | ||
1607 | +} | ||
1608 | + | ||
1609 | + | ||
1610 | +#define is_hrt(t) \ | ||
1611 | + (tsk_rt(t)->task_params.cls == RT_CLASS_HARD) | ||
1612 | +#define is_srt(t) \ | ||
1613 | + (tsk_rt(t)->task_params.cls == RT_CLASS_SOFT) | ||
1614 | +#define is_be(t) \ | ||
1615 | + (tsk_rt(t)->task_params.cls == RT_CLASS_BEST_EFFORT) | ||
1616 | + | ||
1617 | +#define get_release(t) (tsk_rt(t)->job_params.release) | ||
1618 | + | ||
1619 | +/* Our notion of time within LITMUS: kernel monotonic time. */ | ||
1620 | +static inline lt_t litmus_clock(void) | ||
1621 | +{ | ||
1622 | + return ktime_to_ns(ktime_get()); | ||
1623 | +} | ||
1624 | + | ||
1625 | +/* A macro to convert from nanoseconds to ktime_t. */ | ||
1626 | +#define ns_to_ktime(t) ktime_add_ns(ktime_set(0, 0), t) | ||
1627 | + | ||
1628 | +#define get_domain(t) (tsk_rt(t)->domain) | ||
1629 | + | ||
1630 | +/* Honor the flag in the preempt_count variable that is set | ||
1631 | + * when scheduling is in progress. | ||
1632 | + */ | ||
1633 | +#define is_running(t) \ | ||
1634 | + ((t)->state == TASK_RUNNING || \ | ||
1635 | + task_thread_info(t)->preempt_count & PREEMPT_ACTIVE) | ||
1636 | + | ||
1637 | +#define is_blocked(t) \ | ||
1638 | + (!is_running(t)) | ||
1639 | +#define is_released(t, now) \ | ||
1640 | + (lt_before_eq(get_release(t), now)) | ||
1641 | +#define is_tardy(t, now) \ | ||
1642 | + (lt_before_eq(tsk_rt(t)->job_params.deadline, now)) | ||
1643 | + | ||
1644 | +/* real-time comparison macros */ | ||
1645 | +#define earlier_deadline(a, b) (lt_before(\ | ||
1646 | + (a)->rt_param.job_params.deadline,\ | ||
1647 | + (b)->rt_param.job_params.deadline)) | ||
1648 | +#define earlier_release(a, b) (lt_before(\ | ||
1649 | + (a)->rt_param.job_params.release,\ | ||
1650 | + (b)->rt_param.job_params.release)) | ||
1651 | + | ||
1652 | +#define make_np(t) do {t->rt_param.kernel_np++;} while(0) | ||
1653 | +#define take_np(t) do {t->rt_param.kernel_np--;} while(0) | ||
1654 | + | ||
1655 | +#ifdef CONFIG_SRP | ||
1656 | +void srp_ceiling_block(void); | ||
1657 | +#else | ||
1658 | +#define srp_ceiling_block() /* nothing */ | ||
1659 | +#endif | ||
1660 | + | ||
1661 | +#define heap2task(hn) ((struct task_struct*) hn->value) | ||
1662 | + | ||
1663 | + | ||
1664 | +#ifdef CONFIG_NP_SECTION | ||
1665 | +/* returns 1 if task t has registered an np flag and set it to RT_NON_PREEMPTIVE | ||
1666 | + */ | ||
1667 | +int is_np(struct task_struct *t); | ||
1668 | + | ||
1669 | +/* request that the task should call sys_exit_np() | ||
1670 | + */ | ||
1671 | +void request_exit_np(struct task_struct *t); | ||
1672 | + | ||
1673 | +#else | ||
1674 | + | ||
1675 | +static inline int is_np(struct task_struct *t) | ||
1676 | +{ | ||
1677 | + return tsk_rt(t)->kernel_np; | ||
1678 | +} | ||
1679 | + | ||
1680 | +#define request_exit_np(t) | ||
1681 | + | ||
1682 | +#endif | ||
1683 | + | ||
1684 | +/* make the unit explicit */ | ||
1685 | +typedef unsigned long quanta_t; | ||
1686 | + | ||
1687 | +enum round { | ||
1688 | + FLOOR, | ||
1689 | + CEIL | ||
1690 | +}; | ||
1691 | + | ||
1692 | + | ||
1693 | +/* Tick period is used to convert ns-specified execution | ||
1694 | + * costs and periods into tick-based equivalents. | ||
1695 | + */ | ||
1696 | +extern ktime_t tick_period; | ||
1697 | + | ||
1698 | +static inline quanta_t time2quanta(lt_t time, enum round round) | ||
1699 | +{ | ||
1700 | + s64 quantum_length = ktime_to_ns(tick_period); | ||
1701 | + | ||
1702 | + if (do_div(time, quantum_length) && round == CEIL) | ||
1703 | + time++; | ||
1704 | + return (quanta_t) time; | ||
1705 | +} | ||
1706 | + | ||
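A worked example of the rounding behavior (values illustrative):

/* With a 1 ms quantum (quantum_length = 1000000 ns):
 *   time2quanta(2500000, FLOOR) == 2
 *   time2quanta(2500000, CEIL)  == 3
 * do_div() leaves the quotient in 'time' and returns the remainder
 * (here 500000 != 0), which triggers the CEIL increment.
 */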
1707 | +/* By how much is cpu staggered behind CPU 0? */ | ||
1708 | +u64 cpu_stagger_offset(int cpu); | ||
1709 | + | ||
1710 | +#endif | ||
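How the helpers defined above are typically combined (an illustrative fragment, not code from the patch):

static void trace_example(struct task_struct *t)
{
        TRACE("rescheduling on P%d\n", smp_processor_id());
        TRACE_TASK(t, "released, deadline=%llu\n", get_deadline(t));
        if (budget_exhausted(t))
                TRACE_CUR("budget exhausted\n");
}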
1711 | diff --git a/include/litmus/norqlock.h b/include/litmus/norqlock.h | ||
1712 | new file mode 100644 | ||
1713 | index 0000000..e4c1d06 | ||
1714 | --- /dev/null | ||
1715 | +++ b/include/litmus/norqlock.h | ||
1716 | @@ -0,0 +1,26 @@ | ||
1717 | +#ifndef NORQLOCK_H | ||
1718 | +#define NORQLOCK_H | ||
1719 | + | ||
1720 | +typedef void (*work_t)(unsigned long arg); | ||
1721 | + | ||
1722 | +struct no_rqlock_work { | ||
1723 | + int active; | ||
1724 | + work_t work; | ||
1725 | + unsigned long arg; | ||
1726 | + struct no_rqlock_work* next; | ||
1727 | +}; | ||
1728 | + | ||
1729 | +void init_no_rqlock_work(struct no_rqlock_work* w, work_t work, | ||
1730 | + unsigned long arg); | ||
1731 | + | ||
1732 | +void __do_without_rqlock(struct no_rqlock_work *work); | ||
1733 | + | ||
1734 | +static inline void do_without_rqlock(struct no_rqlock_work *work) | ||
1735 | +{ | ||
1736 | + if (!test_and_set_bit(0, (void*)&work->active)) | ||
1737 | + __do_without_rqlock(work); | ||
1738 | +} | ||
1739 | + | ||
1740 | +void tick_no_rqlock(void); | ||
1741 | + | ||
1742 | +#endif | ||
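A usage sketch (the callback body is an assumption): work queued while rq->lock is held runs later from tick_no_rqlock(), which the kernel/sched.c hunks below call after the lock is dropped.

static void my_deferred_cb(unsigned long arg)
{
        /* runs without rq->lock held; safe to arm an hrtimer here */
}

static struct no_rqlock_work my_work;

/* once, at init time: */
/* init_no_rqlock_work(&my_work, my_deferred_cb, 0); */

/* later, possibly while holding rq->lock: */
/* do_without_rqlock(&my_work); */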
1743 | diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h | ||
1744 | new file mode 100644 | ||
1745 | index 0000000..7356ec7 | ||
1746 | --- /dev/null | ||
1747 | +++ b/include/litmus/rt_domain.h | ||
1748 | @@ -0,0 +1,174 @@ | ||
1749 | +/* CLEANUP: Add comments and make it less messy. | ||
1750 | + * | ||
1751 | + */ | ||
1752 | + | ||
1753 | +#ifndef __UNC_RT_DOMAIN_H__ | ||
1754 | +#define __UNC_RT_DOMAIN_H__ | ||
1755 | + | ||
1756 | +#include <litmus/norqlock.h> | ||
1757 | +#include <litmus/heap.h> | ||
1758 | + | ||
1759 | +#define RELEASE_QUEUE_SLOTS 127 /* prime */ | ||
1760 | + | ||
1761 | +struct _rt_domain; | ||
1762 | + | ||
1763 | +typedef int (*check_resched_needed_t)(struct _rt_domain *rt); | ||
1764 | +typedef void (*release_jobs_t)(struct _rt_domain *rt, struct heap* tasks); | ||
1765 | + | ||
1766 | +int heap_earlier_release(struct heap_node *_a, struct heap_node *_b); | ||
1767 | + | ||
1768 | +struct release_heap { | ||
1769 | + struct list_head list; | ||
1770 | + lt_t release_time; | ||
1771 | + struct heap heap; | ||
1772 | +}; | ||
1773 | + | ||
1774 | +struct release_queue { | ||
1775 | + /* each slot maintains a list of release heaps sorted | ||
1776 | + * by release time */ | ||
1777 | + struct list_head slot[RELEASE_QUEUE_SLOTS]; | ||
1778 | + /* the heap of heaps ordered by release time */ | ||
1779 | + struct heap rel_heap; | ||
1780 | + /* the actual timer used to trigger releases */ | ||
1781 | + struct hrtimer timer; | ||
1782 | + /* used to determine when to start the timer */ | ||
1783 | + int timer_armed; | ||
1784 | + /* when will it go off? */ | ||
1785 | + lt_t timer_time; | ||
1786 | +}; | ||
1787 | + | ||
1788 | +typedef struct _rt_domain { | ||
1789 | + struct no_rqlock_work arm_timer; | ||
1790 | + | ||
1791 | + /* runnable rt tasks are in here */ | ||
1792 | + spinlock_t ready_lock; | ||
1793 | + struct heap ready_queue; | ||
1794 | + | ||
1795 | + /* real-time tasks waiting for release are in here */ | ||
1796 | + spinlock_t release_lock; | ||
1797 | + struct release_queue release_queue; | ||
1798 | + | ||
1799 | + /* for moving tasks to the release queue */ | ||
1800 | + spinlock_t tobe_lock; | ||
1801 | + struct list_head tobe_released; | ||
1802 | + | ||
1803 | + /* how do we check if we need to kick another CPU? */ | ||
1804 | + check_resched_needed_t check_resched; | ||
1805 | + | ||
1806 | + /* how do we release a job? */ | ||
1807 | + release_jobs_t release_jobs; | ||
1808 | + | ||
1809 | + /* how are tasks ordered in the ready queue? */ | ||
1810 | + heap_prio_t order; | ||
1811 | +} rt_domain_t; | ||
1812 | + | ||
1813 | +/* caller must hold release_lock */ | ||
1814 | +static inline int next_release(rt_domain_t *rt, lt_t *time) | ||
1815 | +{ | ||
1816 | + struct heap_node* top = heap_peek(heap_earlier_release, | ||
1817 | + &rt->release_queue.rel_heap); | ||
1818 | + if (top) | ||
1819 | + *time = ((struct release_heap*) top->value)->release_time; | ||
1820 | + return top != NULL; | ||
1821 | +} | ||
1822 | + | ||
1823 | +static inline struct task_struct* __next_ready(rt_domain_t* rt) | ||
1824 | +{ | ||
1825 | + struct heap_node *hn = heap_peek(rt->order, &rt->ready_queue); | ||
1826 | + if (hn) | ||
1827 | + return heap2task(hn); | ||
1828 | + else | ||
1829 | + return NULL; | ||
1830 | +} | ||
1831 | + | ||
1832 | +void rt_domain_init(rt_domain_t *rt, heap_prio_t order, | ||
1833 | + check_resched_needed_t check, | ||
1834 | + release_jobs_t release); | ||
1835 | + | ||
1836 | +void __add_ready(rt_domain_t* rt, struct task_struct *new); | ||
1837 | +void __merge_ready(rt_domain_t* rt, struct heap *tasks); | ||
1838 | +void __add_release(rt_domain_t* rt, struct task_struct *task); | ||
1839 | + | ||
1840 | +static inline struct task_struct* __take_ready(rt_domain_t* rt) | ||
1841 | +{ | ||
1842 | + struct heap_node* hn = heap_take(rt->order, &rt->ready_queue); | ||
1843 | + if (hn) | ||
1844 | + return heap2task(hn); | ||
1845 | + else | ||
1846 | + return NULL; | ||
1847 | +} | ||
1848 | + | ||
1849 | +static inline struct task_struct* __peek_ready(rt_domain_t* rt) | ||
1850 | +{ | ||
1851 | + struct heap_node* hn = heap_peek(rt->order, &rt->ready_queue); | ||
1852 | + if (hn) | ||
1853 | + return heap2task(hn); | ||
1854 | + else | ||
1855 | + return NULL; | ||
1856 | +} | ||
1857 | + | ||
1858 | +static inline int is_queued(struct task_struct *t) | ||
1859 | +{ | ||
1860 | + return heap_node_in_heap(tsk_rt(t)->heap_node); | ||
1861 | +} | ||
1862 | + | ||
1863 | +static inline void remove(rt_domain_t* rt, struct task_struct *t) | ||
1864 | +{ | ||
1865 | + heap_delete(rt->order, &rt->ready_queue, tsk_rt(t)->heap_node); | ||
1866 | +} | ||
1867 | + | ||
1868 | +static inline void add_ready(rt_domain_t* rt, struct task_struct *new) | ||
1869 | +{ | ||
1870 | + unsigned long flags; | ||
1871 | + /* first we need the write lock for rt_ready_queue */ | ||
1872 | + spin_lock_irqsave(&rt->ready_lock, flags); | ||
1873 | + __add_ready(rt, new); | ||
1874 | + spin_unlock_irqrestore(&rt->ready_lock, flags); | ||
1875 | +} | ||
1876 | + | ||
1877 | +static inline void merge_ready(rt_domain_t* rt, struct heap* tasks) | ||
1878 | +{ | ||
1879 | + unsigned long flags; | ||
1880 | + spin_lock_irqsave(&rt->ready_lock, flags); | ||
1881 | + __merge_ready(rt, tasks); | ||
1882 | + spin_unlock_irqrestore(&rt->ready_lock, flags); | ||
1883 | +} | ||
1884 | + | ||
1885 | +static inline struct task_struct* take_ready(rt_domain_t* rt) | ||
1886 | +{ | ||
1887 | + unsigned long flags; | ||
1888 | + struct task_struct* ret; | ||
1889 | + /* first we need the write lock for rt_ready_queue */ | ||
1890 | + spin_lock_irqsave(&rt->ready_lock, flags); | ||
1891 | + ret = __take_ready(rt); | ||
1892 | + spin_unlock_irqrestore(&rt->ready_lock, flags); | ||
1893 | + return ret; | ||
1894 | +} | ||
1895 | + | ||
1896 | + | ||
1897 | +static inline void add_release(rt_domain_t* rt, struct task_struct *task) | ||
1898 | +{ | ||
1899 | + unsigned long flags; | ||
1900 | + /* tobe_lock protects the tobe_released list */ | ||
1901 | + spin_lock_irqsave(&rt->tobe_lock, flags); | ||
1902 | + __add_release(rt, task); | ||
1903 | + spin_unlock_irqrestore(&rt->tobe_lock, flags); | ||
1904 | +} | ||
1905 | + | ||
1906 | +static inline int __jobs_pending(rt_domain_t* rt) | ||
1907 | +{ | ||
1908 | + return !heap_empty(&rt->ready_queue); | ||
1909 | +} | ||
1910 | + | ||
1911 | +static inline int jobs_pending(rt_domain_t* rt) | ||
1912 | +{ | ||
1913 | + unsigned long flags; | ||
1914 | + int ret; | ||
1915 | + /* first we need the write lock for rt_ready_queue */ | ||
1916 | + spin_lock_irqsave(&rt->ready_lock, flags); | ||
1917 | + ret = !heap_empty(&rt->ready_queue); | ||
1918 | + spin_unlock_irqrestore(&rt->ready_lock, flags); | ||
1919 | + return ret; | ||
1920 | +} | ||
1921 | + | ||
1922 | +#endif | ||
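A sketch of how a plugin might instantiate a domain; the callbacks are illustrative, and edf_ready_order is assumed to be the heap order declared in include/litmus/edf_common.h:

static rt_domain_t demo_domain;

static int demo_check_resched(rt_domain_t *rt)
{
        /* plugin-specific: e.g. resched if the queue head should
         * preempt the currently scheduled task */
        return 0;
}

static void demo_release_jobs(rt_domain_t *rt, struct heap *tasks)
{
        merge_ready(rt, tasks); /* move released jobs to the ready queue */
}

/* rt_domain_init(&demo_domain, edf_ready_order,
 *                demo_check_resched, demo_release_jobs); */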
1923 | diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h | ||
1924 | new file mode 100644 | ||
1925 | index 0000000..403ebc8 | ||
1926 | --- /dev/null | ||
1927 | +++ b/include/litmus/rt_param.h | ||
1928 | @@ -0,0 +1,167 @@ | ||
1929 | +/* | ||
1930 | + * Definition of the LITMUS^RT real-time task parameters. | ||
1931 | + * | ||
1932 | + */ | ||
1933 | +#ifndef _LINUX_RT_PARAM_H_ | ||
1934 | +#define _LINUX_RT_PARAM_H_ | ||
1935 | + | ||
1936 | +/* Litmus time type. */ | ||
1937 | +typedef unsigned long long lt_t; | ||
1938 | + | ||
1939 | +static inline int lt_after(lt_t a, lt_t b) | ||
1940 | +{ | ||
1941 | + return ((long long) b) - ((long long) a) < 0; | ||
1942 | +} | ||
1943 | +#define lt_before(a, b) lt_after(b, a) | ||
1944 | + | ||
1945 | +static inline int lt_after_eq(lt_t a, lt_t b) | ||
1946 | +{ | ||
1947 | + return ((long long) a) - ((long long) b) >= 0; | ||
1948 | +} | ||
1949 | +#define lt_before_eq(a, b) lt_after_eq(b, a) | ||
1950 | + | ||
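For plain (non-wrapped) values the comparisons behave as expected; the signed subtraction keeps them robust if lt_t values ever wrap:

/*   lt_after(10, 5)    == 1      (10 ns is later than 5 ns)
 *   lt_before(5, 10)   == 1
 *   lt_after_eq(7, 7)  == 1
 *   lt_before_eq(7, 7) == 1
 */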
1951 | +/* different types of clients */ | ||
1952 | +typedef enum { | ||
1953 | + RT_CLASS_HARD, | ||
1954 | + RT_CLASS_SOFT, | ||
1955 | + RT_CLASS_BEST_EFFORT | ||
1956 | +} task_class_t; | ||
1957 | + | ||
1958 | +struct rt_task { | ||
1959 | + lt_t exec_cost; | ||
1960 | + lt_t period; | ||
1961 | + lt_t phase; | ||
1962 | + unsigned int cpu; | ||
1963 | + task_class_t cls; | ||
1964 | +}; | ||
1965 | + | ||
1966 | +/* don't export internal data structures to user space (liblitmus) */ | ||
1967 | +#ifdef __KERNEL__ | ||
1968 | + | ||
1969 | +struct _rt_domain; | ||
1970 | +struct heap_node; | ||
1971 | + | ||
1972 | +struct rt_job { | ||
1973 | + /* Time instant the job was or will be released. */ | ||
1974 | + lt_t release; | ||
1975 | + /* What is the current deadline? */ | ||
1976 | + lt_t deadline; | ||
1977 | + | ||
1978 | + /* How much service has this job received so far? */ | ||
1979 | + lt_t exec_time; | ||
1980 | + | ||
1981 | + /* Which job is this? This is used to let user space | ||
1982 | + * specify which job to wait for, which is important if jobs | ||
1983 | + * overrun. If we just call sys_sleep_next_period() then we | ||
1984 | + * will unintentionally miss jobs after an overrun. | ||
1985 | + * | ||
1986 | + * Increase this sequence number when a job is released. | ||
1987 | + */ | ||
1988 | + unsigned int job_no; | ||
1989 | + | ||
1990 | + /* when did this job start executing? */ | ||
1991 | + lt_t exec_start; | ||
1992 | +}; | ||
1993 | + | ||
1994 | + | ||
1995 | +struct pfair_param; | ||
1996 | + | ||
1997 | +/* RT task parameters for scheduling extensions | ||
1998 | + * These parameters are inherited during clone and therefore must | ||
1999 | + * be explicitly set up before the task set is launched. | ||
2000 | + */ | ||
2001 | +struct rt_param { | ||
2002 | + /* is the task sleeping? */ | ||
2003 | + unsigned int flags:8; | ||
2004 | + | ||
2005 | + /* do we need to check for srp blocking? */ | ||
2006 | + unsigned int srp_non_recurse:1; | ||
2007 | + | ||
2008 | + /* user controlled parameters */ | ||
2009 | + struct rt_task task_params; | ||
2010 | + | ||
2011 | + /* timing parameters */ | ||
2012 | + struct rt_job job_params; | ||
2013 | + | ||
2014 | + /* task representing the current "inherited" task | ||
2015 | + * priority, assigned by the inherit_priority and | ||
2016 | + * return_priority callbacks in the scheduler plugins. | ||
2017 | + * May point to self if PI does not result in | ||
2018 | + * an increased task priority. | ||
2019 | + */ | ||
2020 | + struct task_struct* inh_task; | ||
2021 | + | ||
2022 | + /* Don't just dereference this pointer in kernel space! | ||
2023 | + * It might very well point to junk or nothing at all. | ||
2024 | + * NULL indicates that the task has not requested any non-preemptable | ||
2025 | + * section support. | ||
2026 | + * Not inherited upon fork. | ||
2027 | + */ | ||
2028 | + short* np_flag; | ||
2029 | + | ||
2030 | + /* For the FMLP under PSN-EDF, it is required to make the task | ||
2031 | + * non-preemptive from kernel space. In order not to interfere with | ||
2032 | + * user space, this counter indicates the kernel space np setting. | ||
2033 | + * kernel_np > 0 => task is non-preemptive | ||
2034 | + */ | ||
2035 | + unsigned int kernel_np; | ||
2036 | + | ||
2037 | + /* This field can be used by plugins to store where the task | ||
2038 | + * is currently scheduled. It is the responsibility of the | ||
2039 | + * plugin to avoid race conditions. | ||
2040 | + * | ||
2041 | + * This is used by GSN-EDF and PFAIR. | ||
2042 | + */ | ||
2043 | + volatile int scheduled_on; | ||
2044 | + | ||
2045 | + /* Is the stack of the task currently in use? This is updated by | ||
2046 | + * the LITMUS core. | ||
2047 | + * | ||
2048 | + * Be careful to avoid deadlocks! | ||
2049 | + */ | ||
2050 | + volatile int stack_in_use; | ||
2051 | + | ||
2052 | + /* This field can be used by plugins to store where the task | ||
2053 | + * is currently linked. It is the responsibility of the plugin | ||
2054 | + * to avoid race conditions. | ||
2055 | + * | ||
2056 | + * Used by GSN-EDF. | ||
2057 | + */ | ||
2058 | + volatile int linked_on; | ||
2059 | + | ||
2060 | + /* PFAIR/PD^2 state. Allocated on demand. */ | ||
2061 | + struct pfair_param* pfair; | ||
2062 | + | ||
2063 | + /* Fields saved before BE->RT transition. | ||
2064 | + */ | ||
2065 | + int old_policy; | ||
2066 | + int old_prio; | ||
2067 | + | ||
2068 | + /* ready queue for this task */ | ||
2069 | + struct _rt_domain* domain; | ||
2070 | + | ||
2071 | + /* heap element for this task | ||
2072 | + * | ||
2073 | + * Warning: Don't statically allocate this node. The heap | ||
2074 | + * implementation swaps these between tasks, thus after | ||
2075 | + * dequeuing from a heap you may end up with a different node | ||
2076 | + * than the one you had when enqueuing the task. For the same | ||
2077 | + * reason, don't obtain and store references to this node | ||
2078 | + * other than this pointer (which is updated by the heap | ||
2079 | + * implementation). | ||
2080 | + */ | ||
2081 | + struct heap_node* heap_node; | ||
2082 | + | ||
2083 | + /* Used by rt_domain to queue task in release list. | ||
2084 | + */ | ||
2085 | + struct list_head list; | ||
2086 | +}; | ||
2087 | + | ||
2088 | +/* Possible RT flags */ | ||
2089 | +#define RT_F_RUNNING 0x00000000 | ||
2090 | +#define RT_F_SLEEP 0x00000001 | ||
2091 | +#define RT_F_EXIT_SEM 0x00000008 | ||
2092 | + | ||
2093 | +#endif | ||
2094 | + | ||
2095 | +#endif | ||
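For reference, user space (liblitmus) is expected to fill in struct rt_task before a task is admitted; a sketch with arbitrary 10 ms / 100 ms values:

void fill_params_example(struct rt_task *p)
{
        p->exec_cost = 10000000ULL;     /* 10 ms WCET, in ns */
        p->period    = 100000000ULL;    /* 100 ms period, in ns */
        p->phase     = 0;               /* release offset */
        p->cpu       = 0;               /* partition, if the plugin uses one */
        p->cls       = RT_CLASS_SOFT;
}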
2096 | diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h | ||
2097 | new file mode 100644 | ||
2098 | index 0000000..94952f6 | ||
2099 | --- /dev/null | ||
2100 | +++ b/include/litmus/sched_plugin.h | ||
2101 | @@ -0,0 +1,159 @@ | ||
2102 | +/* | ||
2103 | + * Definition of the scheduler plugin interface. | ||
2104 | + * | ||
2105 | + */ | ||
2106 | +#ifndef _LINUX_SCHED_PLUGIN_H_ | ||
2107 | +#define _LINUX_SCHED_PLUGIN_H_ | ||
2108 | + | ||
2109 | +#include <linux/sched.h> | ||
2110 | + | ||
2111 | +/* struct for semaphore with priority inheritance */ | ||
2112 | +struct pi_semaphore { | ||
2113 | + atomic_t count; | ||
2114 | + int sleepers; | ||
2115 | + wait_queue_head_t wait; | ||
2116 | + union { | ||
2117 | + /* highest-prio holder/waiter */ | ||
2118 | + struct task_struct *task; | ||
2119 | + struct task_struct* cpu_task[NR_CPUS]; | ||
2120 | + } hp; | ||
2121 | + /* current lock holder */ | ||
2122 | + struct task_struct *holder; | ||
2123 | +}; | ||
2124 | + | ||
2125 | +/************************ setup/tear down ********************/ | ||
2126 | + | ||
2127 | +typedef long (*activate_plugin_t) (void); | ||
2128 | +typedef long (*deactivate_plugin_t) (void); | ||
2129 | + | ||
2130 | + | ||
2131 | + | ||
2132 | +/********************* scheduler invocation ******************/ | ||
2133 | + | ||
2134 | +/* Plugin-specific realtime tick handler */ | ||
2135 | +typedef void (*scheduler_tick_t) (struct task_struct *cur); | ||
2136 | +/* Plugin-specific scheduling decision function */ | ||
2137 | +typedef struct task_struct* (*schedule_t)(struct task_struct * prev); | ||
2138 | +/* Clean up after the task switch has occurred. | ||
2139 | + * This function is called after every (even non-rt) task switch. | ||
2140 | + */ | ||
2141 | +typedef void (*finish_switch_t)(struct task_struct *prev); | ||
2142 | + | ||
2143 | + | ||
2144 | +/********************* task state changes ********************/ | ||
2145 | + | ||
2146 | +/* Called to setup a new real-time task. | ||
2147 | + * Release the first job, enqueue, etc. | ||
2148 | + * Task may already be running. | ||
2149 | + */ | ||
2150 | +typedef void (*task_new_t) (struct task_struct *task, | ||
2151 | + int on_rq, | ||
2152 | + int running); | ||
2153 | + | ||
2154 | +/* Called to re-introduce a task after blocking. | ||
2155 | + * Can potentially be called multiple times. | ||
2156 | + */ | ||
2157 | +typedef void (*task_wake_up_t) (struct task_struct *task); | ||
2158 | +/* called to notify the plugin of a blocking real-time task; | ||
2159 | + * it will only be called for real-time tasks and before schedule is called */ | ||
2160 | +typedef void (*task_block_t) (struct task_struct *task); | ||
2161 | +/* Called when a real-time task exits or changes to a different scheduling | ||
2162 | + * class. | ||
2163 | + * Free any allocated resources | ||
2164 | + */ | ||
2165 | +typedef void (*task_exit_t) (struct task_struct *); | ||
2166 | + | ||
2167 | +/* Called when the new_owner is released from the wait queue; | ||
2168 | + * it should now inherit the priority from sem, _before_ it gets | ||
2169 | + * re-added to any queue. | ||
2170 | + */ | ||
2171 | +typedef long (*inherit_priority_t) (struct pi_semaphore *sem, | ||
2172 | + struct task_struct *new_owner); | ||
2173 | + | ||
2174 | +/* Called when the current task releases a semaphore from which it | ||
2175 | + * might have inherited a priority. | ||
2176 | + */ | ||
2177 | +typedef long (*return_priority_t) (struct pi_semaphore *sem); | ||
2178 | + | ||
2179 | +/* Called when a task tries to acquire a semaphore and fails. Check if its | ||
2180 | + * priority is higher than that of the current holder. | ||
2181 | + */ | ||
2182 | +typedef long (*pi_block_t) (struct pi_semaphore *sem, struct task_struct *t); | ||
2183 | + | ||
2184 | + | ||
2185 | + | ||
2186 | + | ||
2187 | +/********************* sys call backends ********************/ | ||
2188 | +/* This function causes the caller to sleep until the next release */ | ||
2189 | +typedef long (*complete_job_t) (void); | ||
2190 | + | ||
2191 | +typedef long (*admit_task_t)(struct task_struct* tsk); | ||
2192 | + | ||
2193 | +typedef void (*release_at_t)(struct task_struct *t, lt_t start); | ||
2194 | + | ||
2195 | +struct sched_plugin { | ||
2196 | + struct list_head list; | ||
2197 | + /* basic info */ | ||
2198 | + char *plugin_name; | ||
2199 | + | ||
2200 | + /* setup */ | ||
2201 | + activate_plugin_t activate_plugin; | ||
2202 | + deactivate_plugin_t deactivate_plugin; | ||
2203 | + | ||
2204 | +#ifdef CONFIG_SRP | ||
2205 | + unsigned int srp_active; | ||
2206 | +#endif | ||
2207 | + | ||
2208 | + /* scheduler invocation */ | ||
2209 | + scheduler_tick_t tick; | ||
2210 | + schedule_t schedule; | ||
2211 | + finish_switch_t finish_switch; | ||
2212 | + | ||
2213 | + /* syscall backend */ | ||
2214 | + complete_job_t complete_job; | ||
2215 | + release_at_t release_at; | ||
2216 | + | ||
2217 | + /* task state changes */ | ||
2218 | + admit_task_t admit_task; | ||
2219 | + | ||
2220 | + task_new_t task_new; | ||
2221 | + task_wake_up_t task_wake_up; | ||
2222 | + task_block_t task_block; | ||
2223 | + task_exit_t task_exit; | ||
2224 | + | ||
2225 | +#ifdef CONFIG_FMLP | ||
2226 | + /* priority inheritance */ | ||
2227 | + unsigned int fmlp_active; | ||
2228 | + inherit_priority_t inherit_priority; | ||
2229 | + return_priority_t return_priority; | ||
2230 | + pi_block_t pi_block; | ||
2231 | +#endif | ||
2232 | +} __attribute__ ((__aligned__(SMP_CACHE_BYTES))); | ||
2233 | + | ||
2234 | + | ||
2235 | +extern struct sched_plugin *litmus; | ||
2236 | + | ||
2237 | +int register_sched_plugin(struct sched_plugin* plugin); | ||
2238 | +struct sched_plugin* find_sched_plugin(const char* name); | ||
2239 | +int print_sched_plugins(char* buf, int max); | ||
2240 | + | ||
2241 | +static inline int srp_active(void) | ||
2242 | +{ | ||
2243 | +#ifdef CONFIG_SRP | ||
2244 | + return litmus->srp_active; | ||
2245 | +#else | ||
2246 | + return 0; | ||
2247 | +#endif | ||
2248 | +} | ||
2249 | +static inline int fmlp_active(void) | ||
2250 | +{ | ||
2251 | +#ifdef CONFIG_FMLP | ||
2252 | + return litmus->fmlp_active; | ||
2253 | +#else | ||
2254 | + return 0; | ||
2255 | +#endif | ||
2256 | +} | ||
2257 | + | ||
2258 | +extern struct sched_plugin linux_sched_plugin; | ||
2259 | + | ||
2260 | +#endif | ||
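An illustrative, non-functional plugin skeleton; real plugins (see litmus/Makefile below) fill in the task-state callbacks, and a NULL return from schedule() presumably defers to the stock Linux scheduling classes:

static struct task_struct* demo_schedule(struct task_struct *prev)
{
        return NULL;    /* nothing real-time to run */
}

static long demo_admit_task(struct task_struct *tsk)
{
        return 0;       /* accept every task */
}

static struct sched_plugin demo_plugin = {
        .plugin_name    = "DEMO",
        .schedule       = demo_schedule,
        .admit_task     = demo_admit_task,
};

/* register_sched_plugin(&demo_plugin); */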
2261 | diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h | ||
2262 | new file mode 100644 | ||
2263 | index 0000000..06e1aaa | ||
2264 | --- /dev/null | ||
2265 | +++ b/include/litmus/sched_trace.h | ||
2266 | @@ -0,0 +1,168 @@ | ||
2267 | +/* sched_trace.h -- record scheduler events to a byte stream for offline analysis. | ||
2268 | + */ | ||
2269 | +#ifndef _LINUX_SCHED_TRACE_H_ | ||
2270 | +#define _LINUX_SCHED_TRACE_H_ | ||
2271 | + | ||
2272 | +/* all times in nanoseconds */ | ||
2273 | + | ||
2274 | +struct st_trace_header { | ||
2275 | + u8 type; /* Of what type is this record? */ | ||
2276 | + u8 cpu; /* On which CPU was it recorded? */ | ||
2277 | + u16 pid; /* PID of the task. */ | ||
2278 | + u32 job; /* The job sequence number. */ | ||
2279 | +}; | ||
2280 | + | ||
2281 | +#define ST_NAME_LEN 16 | ||
2282 | +struct st_name_data { | ||
2283 | + char cmd[ST_NAME_LEN]; /* The name of the executable of this process. */ | ||
2284 | +}; | ||
2285 | + | ||
2286 | +struct st_param_data { /* regular params */ | ||
2287 | + u32 wcet; | ||
2288 | + u32 period; | ||
2289 | + u32 phase; | ||
2290 | + u8 partition; | ||
2291 | + u8 __unused[3]; | ||
2292 | +}; | ||
2293 | + | ||
2294 | +struct st_release_data { /* A job was/is going to be released. */ | ||
2295 | + u64 release; /* What's the release time? */ | ||
2296 | + u64 deadline; /* By when must it finish? */ | ||
2297 | +}; | ||
2298 | + | ||
2299 | +struct st_assigned_data { /* A job was assigned to a CPU. */ | ||
2300 | + u64 when; | ||
2301 | + u8 target; /* Where should it execute? */ | ||
2302 | + u8 __unused[3]; | ||
2303 | +}; | ||
2304 | + | ||
2305 | +struct st_switch_to_data { /* A process was switched to on a given CPU. */ | ||
2306 | + u64 when; /* When did this occur? */ | ||
2307 | + u32 exec_time; /* Time the current job has executed. */ | ||
2308 | + | ||
2309 | +}; | ||
2310 | + | ||
2311 | +struct st_switch_away_data { /* A process was switched away from on a given CPU. */ | ||
2312 | + u64 when; | ||
2313 | + u64 exec_time; | ||
2314 | +}; | ||
2315 | + | ||
2316 | +struct st_completion_data { /* A job completed. */ | ||
2317 | + u64 when; | ||
2318 | + u8 forced:1; /* Set to 1 if job overran and kernel advanced to the | ||
2319 | + * next task automatically; set to 0 otherwise. | ||
2320 | + */ | ||
2321 | + u8 __uflags:7; | ||
2322 | + u8 __unused[3]; | ||
2323 | +}; | ||
2324 | + | ||
2325 | +struct st_block_data { /* A task blocks. */ | ||
2326 | + u64 when; | ||
2327 | + u64 __unused; | ||
2328 | +}; | ||
2329 | + | ||
2330 | +struct st_resume_data { /* A task resumes. */ | ||
2331 | + u64 when; | ||
2332 | + u64 __unused; | ||
2333 | +}; | ||
2334 | + | ||
2335 | +struct st_sys_release_data { | ||
2336 | + u64 when; | ||
2337 | + u64 release; | ||
2338 | +}; | ||
2339 | + | ||
2340 | +#define DATA(x) struct st_ ## x ## _data x; | ||
2341 | + | ||
2342 | +typedef enum { | ||
2343 | + ST_NAME = 1, /* Start at one, so that we can spot | ||
2344 | + * uninitialized records. */ | ||
2345 | + ST_PARAM, | ||
2346 | + ST_RELEASE, | ||
2347 | + ST_ASSIGNED, | ||
2348 | + ST_SWITCH_TO, | ||
2349 | + ST_SWITCH_AWAY, | ||
2350 | + ST_COMPLETION, | ||
2351 | + ST_BLOCK, | ||
2352 | + ST_RESUME, | ||
2353 | + ST_SYS_RELEASE, | ||
2354 | +} st_event_record_type_t; | ||
2355 | + | ||
2356 | +struct st_event_record { | ||
2357 | + struct st_trace_header hdr; | ||
2358 | + union { | ||
2359 | + u64 raw[2]; | ||
2360 | + | ||
2361 | + DATA(name); | ||
2362 | + DATA(param); | ||
2363 | + DATA(release); | ||
2364 | + DATA(assigned); | ||
2365 | + DATA(switch_to); | ||
2366 | + DATA(switch_away); | ||
2367 | + DATA(completion); | ||
2368 | + DATA(block); | ||
2369 | + DATA(resume); | ||
2370 | + DATA(sys_release); | ||
2371 | + | ||
2372 | + } data; | ||
2373 | +}; | ||
2374 | + | ||
2375 | +#undef DATA | ||
2376 | + | ||
2377 | +#ifdef __KERNEL__ | ||
2378 | + | ||
2379 | +#include <linux/sched.h> | ||
2380 | +#include <litmus/feather_trace.h> | ||
2381 | + | ||
2382 | +#ifdef CONFIG_SCHED_TASK_TRACE | ||
2383 | + | ||
2384 | +#define SCHED_TRACE(id, callback, task) \ | ||
2385 | + ft_event1(id, callback, task) | ||
2386 | +#define SCHED_TRACE2(id, callback, task, xtra) \ | ||
2387 | + ft_event2(id, callback, task, xtra) | ||
2388 | + | ||
2389 | +#else | ||
2390 | + | ||
2391 | +#define SCHED_TRACE(id, callback, task) /* no tracing */ | ||
2392 | +#define SCHED_TRACE2(id, callback, task, xtra) /* no tracing */ | ||
2393 | + | ||
2394 | +#endif | ||
2395 | + | ||
2396 | + | ||
2397 | +#define SCHED_TRACE_BASE_ID 500 | ||
2398 | + | ||
2399 | + | ||
2400 | +#define sched_trace_task_name(t) \ | ||
2401 | + SCHED_TRACE(SCHED_TRACE_BASE_ID + 1, do_sched_trace_task_name, t) | ||
2402 | +#define sched_trace_task_param(t) \ | ||
2403 | + SCHED_TRACE(SCHED_TRACE_BASE_ID + 2, do_sched_trace_task_param, t) | ||
2404 | +#define sched_trace_task_release(t) \ | ||
2405 | + SCHED_TRACE(SCHED_TRACE_BASE_ID + 3, do_sched_trace_task_release, t) | ||
2406 | +#define sched_trace_task_switch_to(t) \ | ||
2407 | + SCHED_TRACE(SCHED_TRACE_BASE_ID + 4, do_sched_trace_task_switch_to, t) | ||
2408 | +#define sched_trace_task_switch_away(t) \ | ||
2409 | + SCHED_TRACE(SCHED_TRACE_BASE_ID + 5, do_sched_trace_task_switch_away, t) | ||
2410 | +#define sched_trace_task_completion(t, forced) \ | ||
2411 | + SCHED_TRACE2(SCHED_TRACE_BASE_ID + 6, do_sched_trace_task_completion, t, \ | ||
2412 | + forced) | ||
2413 | +#define sched_trace_task_block(t) \ | ||
2414 | + SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, do_sched_trace_task_block, t) | ||
2415 | +#define sched_trace_task_resume(t) \ | ||
2416 | + SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, do_sched_trace_task_resume, t) | ||
2417 | + | ||
2418 | +#define sched_trace_sys_release(when) \ | ||
2419 | + SCHED_TRACE(SCHED_TRACE_BASE_ID + 9, do_sched_trace_sys_release, when) | ||
2420 | + | ||
2421 | +#define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */ | ||
2422 | + | ||
2423 | +#ifdef CONFIG_SCHED_DEBUG_TRACE | ||
2424 | +void sched_trace_log_message(const char* fmt, ...); | ||
2425 | + | ||
2426 | +#else | ||
2427 | + | ||
2428 | +#define sched_trace_log_message(fmt, ...) | ||
2429 | + | ||
2430 | +#endif | ||
2431 | + | ||
2432 | +#endif /* __KERNEL__ */ | ||
2433 | + | ||
2434 | +#endif | ||
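A hedged user-space sketch of decoding records; it assumes the u8/u16/u32/u64 kernel types are mapped to their stdint.h equivalents and that records arrive from some liblitmus-style reader:

#include <stdio.h>

static void print_record(struct st_event_record *rec)
{
        switch (rec->hdr.type) {
        case ST_RELEASE:
                printf("pid %u job %u released at %llu, deadline %llu\n",
                       rec->hdr.pid, rec->hdr.job,
                       (unsigned long long) rec->data.release.release,
                       (unsigned long long) rec->data.release.deadline);
                break;
        case ST_COMPLETION:
                printf("pid %u job %u completed at %llu%s\n",
                       rec->hdr.pid, rec->hdr.job,
                       (unsigned long long) rec->data.completion.when,
                       rec->data.completion.forced ? " (forced)" : "");
                break;
        default:
                break;
        }
}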
2435 | diff --git a/include/litmus/trace.h b/include/litmus/trace.h | ||
2436 | new file mode 100644 | ||
2437 | index 0000000..b8157e8 | ||
2438 | --- /dev/null | ||
2439 | +++ b/include/litmus/trace.h | ||
2440 | @@ -0,0 +1,103 @@ | ||
2441 | +#ifndef _SYS_TRACE_H_ | ||
2442 | +#define _SYS_TRACE_H_ | ||
2443 | + | ||
2444 | +#ifdef CONFIG_FEATHER_TRACE | ||
2445 | + | ||
2446 | +#include <litmus/feather_trace.h> | ||
2447 | +#include <litmus/feather_buffer.h> | ||
2448 | + | ||
2449 | + | ||
2450 | +/*********************** TIMESTAMPS ************************/ | ||
2451 | + | ||
2452 | +enum task_type_marker { | ||
2453 | + TSK_BE, | ||
2454 | + TSK_RT, | ||
2455 | + TSK_UNKNOWN | ||
2456 | +}; | ||
2457 | + | ||
2458 | +struct timestamp { | ||
2459 | + uint64_t timestamp; | ||
2460 | + uint32_t seq_no; | ||
2461 | + uint8_t cpu; | ||
2462 | + uint8_t event; | ||
2463 | + uint8_t task_type; | ||
2464 | +}; | ||
2465 | + | ||
2466 | +/* tracing callbacks */ | ||
2467 | +feather_callback void save_timestamp(unsigned long event); | ||
2468 | +feather_callback void save_timestamp_def(unsigned long event, unsigned long type); | ||
2469 | +feather_callback void save_timestamp_task(unsigned long event, unsigned long t_ptr); | ||
2470 | + | ||
2471 | +#define TIMESTAMP(id) ft_event0(id, save_timestamp) | ||
2472 | + | ||
2473 | +#define DTIMESTAMP(id, def) ft_event1(id, save_timestamp_def, def) | ||
2474 | + | ||
2475 | +#define TTIMESTAMP(id, task) ft_event1(id, save_timestamp_task, (unsigned long) task) | ||
2476 | + | ||
2477 | +#else /* !CONFIG_FEATHER_TRACE */ | ||
2478 | + | ||
2479 | +#define TIMESTAMP(id) /* no tracing */ | ||
2480 | + | ||
2481 | +#define DTIMESTAMP(id, def) /* no tracing */ | ||
2482 | + | ||
2483 | +#define TTIMESTAMP(id, task) /* no tracing */ | ||
2484 | + | ||
2485 | +#endif | ||
2486 | + | ||
2487 | + | ||
2488 | +/* Convention for timestamps | ||
2489 | + * ========================= | ||
2490 | + * | ||
2491 | + * In order to process the trace files with a common tool, we use the following | ||
2492 | + * convention to measure execution times: The end time id of a code segment is | ||
2493 | + * always the next number after the start time event id. | ||
2494 | + */ | ||
2495 | + | ||
2496 | +#define TS_SCHED_START DTIMESTAMP(100, TSK_UNKNOWN) /* we only | ||
2497 | + * care | ||
2498 | + * about | ||
2499 | + * next */ | ||
2500 | +#define TS_SCHED_END(t) TTIMESTAMP(101, t) | ||
2501 | +#define TS_SCHED2_START(t) TTIMESTAMP(102, t) | ||
2502 | +#define TS_SCHED2_END(t) TTIMESTAMP(103, t) | ||
2503 | + | ||
2504 | +#define TS_CXS_START(t) TTIMESTAMP(104, t) | ||
2505 | +#define TS_CXS_END(t) TTIMESTAMP(105, t) | ||
2506 | + | ||
2507 | +#define TS_RELEASE_START DTIMESTAMP(106, TSK_RT) | ||
2508 | +#define TS_RELEASE_END DTIMESTAMP(107, TSK_RT) | ||
2509 | + | ||
2510 | +#define TS_TICK_START(t) TTIMESTAMP(110, t) | ||
2511 | +#define TS_TICK_END(t) TTIMESTAMP(111, t) | ||
2512 | + | ||
2513 | + | ||
2514 | +#define TS_PLUGIN_SCHED_START /* TIMESTAMP(120) */ /* currently unused */ | ||
2515 | +#define TS_PLUGIN_SCHED_END /* TIMESTAMP(121) */ | ||
2516 | + | ||
2517 | +#define TS_PLUGIN_TICK_START /* TIMESTAMP(130) */ | ||
2518 | +#define TS_PLUGIN_TICK_END /* TIMESTAMP(131) */ | ||
2519 | + | ||
2520 | +#define TS_ENTER_NP_START TIMESTAMP(140) | ||
2521 | +#define TS_ENTER_NP_END TIMESTAMP(141) | ||
2522 | + | ||
2523 | +#define TS_EXIT_NP_START TIMESTAMP(150) | ||
2524 | +#define TS_EXIT_NP_END TIMESTAMP(151) | ||
2525 | + | ||
2526 | +#define TS_SRP_UP_START TIMESTAMP(160) | ||
2527 | +#define TS_SRP_UP_END TIMESTAMP(161) | ||
2528 | +#define TS_SRP_DOWN_START TIMESTAMP(162) | ||
2529 | +#define TS_SRP_DOWN_END TIMESTAMP(163) | ||
2530 | + | ||
2531 | +#define TS_PI_UP_START TIMESTAMP(170) | ||
2532 | +#define TS_PI_UP_END TIMESTAMP(171) | ||
2533 | +#define TS_PI_DOWN_START TIMESTAMP(172) | ||
2534 | +#define TS_PI_DOWN_END TIMESTAMP(173) | ||
2535 | + | ||
2536 | +#define TS_FIFO_UP_START TIMESTAMP(180) | ||
2537 | +#define TS_FIFO_UP_END TIMESTAMP(181) | ||
2538 | +#define TS_FIFO_DOWN_START TIMESTAMP(182) | ||
2539 | +#define TS_FIFO_DOWN_END TIMESTAMP(183) | ||
2540 | + | ||
2541 | + | ||
2542 | + | ||
2543 | +#endif /* !_SYS_TRACE_H_ */ | ||
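To make the convention concrete:

/* A tool pairs event id n with n+1: e.g. ids 104/105
 * (TS_CXS_START/TS_CXS_END) bracket context_switch() in schedule(),
 * as the kernel/sched.c hunk below shows, so the context-switch cost
 * is the difference between the two timestamps.
 */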
2544 | diff --git a/include/litmus/unistd.h b/include/litmus/unistd.h | ||
2545 | new file mode 100644 | ||
2546 | index 0000000..8224235 | ||
2547 | --- /dev/null | ||
2548 | +++ b/include/litmus/unistd.h | ||
2549 | @@ -0,0 +1,20 @@ | ||
2550 | + | ||
2551 | +#define __LSC(x) (__NR_LITMUS + x) | ||
2552 | + | ||
2553 | +#define __NR_set_rt_task_param __LSC(0) | ||
2554 | +#define __NR_get_rt_task_param __LSC(1) | ||
2555 | +#define __NR_sleep_next_period __LSC(2) | ||
2556 | +#define __NR_register_np_flag __LSC(3) | ||
2557 | +#define __NR_exit_np __LSC(4) | ||
2558 | +#define __NR_od_open __LSC(5) | ||
2559 | +#define __NR_od_close __LSC(6) | ||
2560 | +#define __NR_fmlp_down __LSC(7) | ||
2561 | +#define __NR_fmlp_up __LSC(8) | ||
2562 | +#define __NR_srp_down __LSC(9) | ||
2563 | +#define __NR_srp_up __LSC(10) | ||
2564 | +#define __NR_query_job_no __LSC(11) | ||
2565 | +#define __NR_wait_for_job_release __LSC(12) | ||
2566 | +#define __NR_wait_for_ts_release __LSC(13) | ||
2567 | +#define __NR_release_ts __LSC(14) | ||
2568 | + | ||
2569 | +#define NR_litmus_syscalls 15 | ||
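An illustrative user-space invocation; it assumes __NR_LITMUS is the arch-specific base that this patch adds to asm-x86/unistd_32.h and asm-sparc64/unistd.h:

#include <unistd.h>
#include <sys/syscall.h>

static long sleep_next_period(void)
{
        return syscall(__NR_sleep_next_period);
}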
2570 | diff --git a/kernel/exit.c b/kernel/exit.c | ||
2571 | index 549c055..bc313b7 100644 | ||
2572 | --- a/kernel/exit.c | ||
2573 | +++ b/kernel/exit.c | ||
2574 | @@ -52,6 +52,8 @@ | ||
2575 | |||
2576 | extern void sem_exit (void); | ||
2577 | |||
2578 | +extern void exit_od_table(struct task_struct* t); | ||
2579 | + | ||
2580 | static void exit_mm(struct task_struct * tsk); | ||
2581 | |||
2582 | static void __unhash_process(struct task_struct *p) | ||
2583 | @@ -987,6 +989,8 @@ fastcall NORET_TYPE void do_exit(long code) | ||
2584 | if (unlikely(tsk->audit_context)) | ||
2585 | audit_free(tsk); | ||
2586 | |||
2587 | + exit_od_table(tsk); | ||
2588 | + | ||
2589 | tsk->exit_code = code; | ||
2590 | taskstats_exit(tsk, group_dead); | ||
2591 | |||
2592 | diff --git a/kernel/fork.c b/kernel/fork.c | ||
2593 | index 8dd8ff2..4c322d4 100644 | ||
2594 | --- a/kernel/fork.c | ||
2595 | +++ b/kernel/fork.c | ||
2596 | @@ -59,6 +59,9 @@ | ||
2597 | #include <asm/cacheflush.h> | ||
2598 | #include <asm/tlbflush.h> | ||
2599 | |||
2600 | +#include <litmus/litmus.h> | ||
2601 | +#include <litmus/sched_plugin.h> | ||
2602 | + | ||
2603 | /* | ||
2604 | * Protected counters by write_lock_irq(&tasklist_lock) | ||
2605 | */ | ||
2606 | @@ -121,6 +124,8 @@ void __put_task_struct(struct task_struct *tsk) | ||
2607 | WARN_ON(atomic_read(&tsk->usage)); | ||
2608 | WARN_ON(tsk == current); | ||
2609 | |||
2610 | + exit_litmus(tsk); | ||
2611 | + | ||
2612 | security_task_free(tsk); | ||
2613 | free_uid(tsk->user); | ||
2614 | put_group_info(tsk->group_info); | ||
2615 | @@ -182,6 +187,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) | ||
2616 | *tsk = *orig; | ||
2617 | tsk->stack = ti; | ||
2618 | |||
2619 | + /* Don't let the new task be a real-time task. */ | ||
2620 | + memset(&tsk->rt_param, 0, sizeof(tsk->rt_param)); | ||
2621 | + | ||
2622 | err = prop_local_init_single(&tsk->dirties); | ||
2623 | if (err) { | ||
2624 | free_thread_info(ti); | ||
2625 | diff --git a/kernel/printk.c b/kernel/printk.c | ||
2626 | index 89011bf..9eb2dc5 100644 | ||
2627 | --- a/kernel/printk.c | ||
2628 | +++ b/kernel/printk.c | ||
2629 | @@ -54,6 +54,12 @@ int console_printk[4] = { | ||
2630 | DEFAULT_CONSOLE_LOGLEVEL, /* default_console_loglevel */ | ||
2631 | }; | ||
2632 | |||
2633 | +/* divert printk() messages when we have a LITMUS^RT | ||
2634 | + * debug listener | ||
2635 | + */ | ||
2636 | +#include <litmus/litmus.h> | ||
2637 | +int trace_override = 0; | ||
2638 | + | ||
2639 | /* | ||
2640 | * Low level drivers may need that to know if they can schedule in | ||
2641 | * their unblank() callback or not. So let's export it. | ||
2642 | @@ -652,6 +658,8 @@ asmlinkage int vprintk(const char *fmt, va_list args) | ||
2643 | |||
2644 | /* Emit the output into the temporary buffer */ | ||
2645 | printed_len = vscnprintf(printk_buf, sizeof(printk_buf), fmt, args); | ||
2646 | + if (trace_override) | ||
2647 | + TRACE("%s", printk_buf); | ||
2648 | |||
2649 | /* | ||
2650 | * Copy the output into log_buf. If the caller didn't provide | ||
2651 | @@ -932,7 +940,7 @@ int is_console_locked(void) | ||
2652 | |||
2653 | void wake_up_klogd(void) | ||
2654 | { | ||
2655 | - if (!oops_in_progress && waitqueue_active(&log_wait)) | ||
2656 | + if (!trace_override && !oops_in_progress && waitqueue_active(&log_wait)) | ||
2657 | wake_up_interruptible(&log_wait); | ||
2658 | } | ||
2659 | |||
2660 | diff --git a/kernel/sched.c b/kernel/sched.c | ||
2661 | index e76b11c..fdeced2 100644 | ||
2662 | --- a/kernel/sched.c | ||
2663 | +++ b/kernel/sched.c | ||
2664 | @@ -67,6 +67,11 @@ | ||
2665 | #include <asm/tlb.h> | ||
2666 | #include <asm/irq_regs.h> | ||
2667 | |||
2668 | +#include <litmus/sched_trace.h> | ||
2669 | +#include <litmus/trace.h> | ||
2670 | + | ||
2671 | +#include <litmus/norqlock.h> | ||
2672 | + | ||
2673 | /* | ||
2674 | * Scheduler clock - returns current time in nanosec units. | ||
2675 | * This is default implementation. | ||
2676 | @@ -324,6 +329,8 @@ struct rq { | ||
2677 | |||
2678 | atomic_t nr_iowait; | ||
2679 | |||
2680 | + struct task_struct* litmus_next; | ||
2681 | + | ||
2682 | #ifdef CONFIG_SMP | ||
2683 | struct sched_domain *sd; | ||
2684 | |||
2685 | @@ -875,11 +882,12 @@ static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {} | ||
2686 | #include "sched_idletask.c" | ||
2687 | #include "sched_fair.c" | ||
2688 | #include "sched_rt.c" | ||
2689 | +#include "../litmus/sched_litmus.c" | ||
2690 | #ifdef CONFIG_SCHED_DEBUG | ||
2691 | # include "sched_debug.c" | ||
2692 | #endif | ||
2693 | |||
2694 | -#define sched_class_highest (&rt_sched_class) | ||
2695 | +#define sched_class_highest (&litmus_sched_class) | ||
2696 | |||
2697 | /* | ||
2698 | * Update delta_exec, delta_fair fields for rq. | ||
2699 | @@ -1516,6 +1524,8 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) | ||
2700 | int new_cpu; | ||
2701 | #endif | ||
2702 | |||
2703 | + if (is_realtime(p)) | ||
2704 | + TRACE_TASK(p, "try_to_wake_up()\n"); | ||
2705 | rq = task_rq_lock(p, &flags); | ||
2706 | old_state = p->state; | ||
2707 | if (!(old_state & state)) | ||
2708 | @@ -1529,7 +1539,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) | ||
2709 | this_cpu = smp_processor_id(); | ||
2710 | |||
2711 | #ifdef CONFIG_SMP | ||
2712 | - if (unlikely(task_running(rq, p))) | ||
2713 | + if (unlikely(task_running(rq, p) || is_realtime(p))) | ||
2714 | goto out_activate; | ||
2715 | |||
2716 | new_cpu = cpu; | ||
2717 | @@ -1650,8 +1660,10 @@ out_activate: | ||
2718 | out_running: | ||
2719 | p->state = TASK_RUNNING; | ||
2720 | out: | ||
2721 | + if (is_realtime(p)) | ||
2722 | + TRACE_TASK(p, "try_to_wake_up() done, p->state=%d\n", p->state); | ||
2723 | task_rq_unlock(rq, &flags); | ||
2724 | - | ||
2725 | + tick_no_rqlock(); | ||
2726 | return success; | ||
2727 | } | ||
2728 | |||
2729 | @@ -1890,6 +1902,8 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) | ||
2730 | */ | ||
2731 | prev_state = prev->state; | ||
2732 | finish_arch_switch(prev); | ||
2733 | + litmus->finish_switch(prev); | ||
2734 | + prev->rt_param.stack_in_use = NO_CPU; | ||
2735 | finish_lock_switch(rq, prev); | ||
2736 | fire_sched_in_preempt_notifiers(current); | ||
2737 | if (mm) | ||
2738 | @@ -3480,6 +3494,7 @@ void scheduler_tick(void) | ||
2739 | struct task_struct *curr = rq->curr; | ||
2740 | u64 next_tick = rq->tick_timestamp + TICK_NSEC; | ||
2741 | |||
2742 | + TS_TICK_START(current); | ||
2743 | spin_lock(&rq->lock); | ||
2744 | __update_rq_clock(rq); | ||
2745 | /* | ||
2746 | @@ -3491,12 +3506,17 @@ void scheduler_tick(void) | ||
2747 | update_cpu_load(rq); | ||
2748 | if (curr != rq->idle) /* FIXME: needed? */ | ||
2749 | curr->sched_class->task_tick(rq, curr); | ||
2750 | + TS_PLUGIN_TICK_START; | ||
2751 | + litmus_tick(rq, curr); | ||
2752 | + TS_PLUGIN_TICK_END; | ||
2753 | spin_unlock(&rq->lock); | ||
2754 | |||
2755 | #ifdef CONFIG_SMP | ||
2756 | rq->idle_at_tick = idle_cpu(cpu); | ||
2757 | - trigger_load_balance(rq, cpu); | ||
2758 | + if (!is_realtime(current)) | ||
2759 | + trigger_load_balance(rq, cpu); | ||
2760 | #endif | ||
2761 | + TS_TICK_END(current); | ||
2762 | } | ||
2763 | |||
2764 | #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT) | ||
2765 | @@ -3594,11 +3614,13 @@ pick_next_task(struct rq *rq, struct task_struct *prev) | ||
2766 | * Optimization: we know that if all tasks are in | ||
2767 | * the fair class we can call that function directly: | ||
2768 | */ | ||
2769 | - if (likely(rq->nr_running == rq->cfs.nr_running)) { | ||
2770 | + /* Don't do that for LITMUS. | ||
2771 | + if (likely(rq->nr_running == rq->cfs.nr_running)) { | ||
2772 | p = fair_sched_class.pick_next_task(rq); | ||
2773 | if (likely(p)) | ||
2774 | return p; | ||
2775 | } | ||
2776 | + */ | ||
2777 | |||
2778 | class = sched_class_highest; | ||
2779 | for ( ; ; ) { | ||
2780 | @@ -3633,6 +3655,7 @@ need_resched: | ||
2781 | |||
2782 | release_kernel_lock(prev); | ||
2783 | need_resched_nonpreemptible: | ||
2784 | + TS_SCHED_START; | ||
2785 | |||
2786 | schedule_debug(prev); | ||
2787 | |||
2788 | @@ -3643,6 +3666,9 @@ need_resched_nonpreemptible: | ||
2789 | __update_rq_clock(rq); | ||
2790 | spin_lock(&rq->lock); | ||
2791 | clear_tsk_need_resched(prev); | ||
2792 | + TS_PLUGIN_SCHED_START; | ||
2793 | + litmus_schedule(rq, prev); | ||
2794 | + TS_PLUGIN_SCHED_END; | ||
2795 | |||
2796 | if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { | ||
2797 | if (unlikely((prev->state & TASK_INTERRUPTIBLE) && | ||
2798 | @@ -3666,19 +3692,35 @@ need_resched_nonpreemptible: | ||
2799 | rq->nr_switches++; | ||
2800 | rq->curr = next; | ||
2801 | ++*switch_count; | ||
2802 | + sched_trace_task_switch_away(prev); | ||
2803 | + sched_trace_task_switch_to(next); | ||
2804 | |||
2805 | + TS_SCHED_END(next); | ||
2806 | + TS_CXS_START(next); | ||
2807 | context_switch(rq, prev, next); /* unlocks the rq */ | ||
2808 | - } else | ||
2809 | + TS_CXS_END(current); | ||
2810 | + } else { | ||
2811 | + TS_SCHED_END(prev); | ||
2812 | spin_unlock_irq(&rq->lock); | ||
2813 | + } | ||
2814 | + TS_SCHED2_START(current); | ||
2815 | + | ||
2816 | + tick_no_rqlock(); | ||
2817 | |||
2818 | if (unlikely(reacquire_kernel_lock(current) < 0)) { | ||
2819 | cpu = smp_processor_id(); | ||
2820 | rq = cpu_rq(cpu); | ||
2821 | + TS_SCHED2_END(current); | ||
2822 | goto need_resched_nonpreemptible; | ||
2823 | } | ||
2824 | preempt_enable_no_resched(); | ||
2825 | - if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) | ||
2826 | + if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) { | ||
2827 | + TS_SCHED2_END(current); | ||
2828 | goto need_resched; | ||
2829 | + } | ||
2830 | + TS_SCHED2_END(current); | ||
2831 | + if (srp_active()) | ||
2832 | + srp_ceiling_block(); | ||
2833 | } | ||
2834 | EXPORT_SYMBOL(schedule); | ||
2835 | |||
2836 | @@ -3886,6 +3928,18 @@ void complete_all(struct completion *x) | ||
2837 | } | ||
2838 | EXPORT_SYMBOL(complete_all); | ||
2839 | |||
2840 | +void complete_n(struct completion *x, int n) | ||
2841 | +{ | ||
2842 | + unsigned long flags; | ||
2843 | + | ||
2844 | + spin_lock_irqsave(&x->wait.lock, flags); | ||
2845 | + x->done += n; | ||
2846 | + __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, | ||
2847 | + n, 0, NULL); | ||
2848 | + spin_unlock_irqrestore(&x->wait.lock, flags); | ||
2849 | +} | ||
2850 | +EXPORT_SYMBOL(complete_n); | ||
2851 | + | ||
2852 | static inline long __sched | ||
2853 | do_wait_for_common(struct completion *x, long timeout, int state) | ||
2854 | { | ||
2855 | @@ -4236,6 +4290,9 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) | ||
2856 | case SCHED_RR: | ||
2857 | p->sched_class = &rt_sched_class; | ||
2858 | break; | ||
2859 | + case SCHED_LITMUS: | ||
2860 | + p->sched_class = &litmus_sched_class; | ||
2861 | + break; | ||
2862 | } | ||
2863 | |||
2864 | p->rt_priority = prio; | ||
2865 | @@ -4268,7 +4325,7 @@ recheck: | ||
2866 | policy = oldpolicy = p->policy; | ||
2867 | else if (policy != SCHED_FIFO && policy != SCHED_RR && | ||
2868 | policy != SCHED_NORMAL && policy != SCHED_BATCH && | ||
2869 | - policy != SCHED_IDLE) | ||
2870 | + policy != SCHED_IDLE && policy != SCHED_LITMUS) | ||
2871 | return -EINVAL; | ||
2872 | /* | ||
2873 | * Valid priorities for SCHED_FIFO and SCHED_RR are | ||
2874 | @@ -4282,6 +4339,9 @@ recheck: | ||
2875 | if (rt_policy(policy) != (param->sched_priority != 0)) | ||
2876 | return -EINVAL; | ||
2877 | |||
2878 | + if (policy == SCHED_LITMUS && policy == p->policy) | ||
2879 | + return -EINVAL; | ||
2880 | + | ||
2881 | /* | ||
2882 | * Allow unprivileged RT tasks to decrease priority: | ||
2883 | */ | ||
2884 | @@ -4316,6 +4376,12 @@ recheck: | ||
2885 | return -EPERM; | ||
2886 | } | ||
2887 | |||
2888 | + if (policy == SCHED_LITMUS) { | ||
2889 | + retval = litmus_admit_task(p); | ||
2890 | + if (retval) | ||
2891 | + return retval; | ||
2892 | + } | ||
2893 | + | ||
2894 | retval = security_task_setscheduler(p, policy, param); | ||
2895 | if (retval) | ||
2896 | return retval; | ||
2897 | @@ -4345,9 +4411,17 @@ recheck: | ||
2898 | p->sched_class->put_prev_task(rq, p); | ||
2899 | } | ||
2900 | |||
2901 | + if (p->policy == SCHED_LITMUS) | ||
2902 | + litmus_exit_task(p); | ||
2903 | + | ||
2904 | oldprio = p->prio; | ||
2905 | __setscheduler(rq, p, policy, param->sched_priority); | ||
2906 | |||
2907 | + if (policy == SCHED_LITMUS) { | ||
2908 | + p->rt_param.stack_in_use = running ? rq->cpu : NO_CPU; | ||
2909 | + litmus->task_new(p, on_rq, running); | ||
2910 | + } | ||
2911 | + | ||
2912 | if (on_rq) { | ||
2913 | if (running) | ||
2914 | p->sched_class->set_curr_task(rq); | ||
2915 | @@ -4364,6 +4438,7 @@ recheck: | ||
2916 | check_preempt_curr(rq, p); | ||
2917 | } | ||
2918 | } | ||
2919 | + | ||
2920 | __task_rq_unlock(rq); | ||
2921 | spin_unlock_irqrestore(&p->pi_lock, flags); | ||
2922 | |||
2923 | @@ -4494,10 +4569,11 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask) | ||
2924 | read_lock(&tasklist_lock); | ||
2925 | |||
2926 | p = find_process_by_pid(pid); | ||
2927 | - if (!p) { | ||
2928 | + if (!p || is_realtime(p)) { | ||
2929 | + /* LITMUS tasks don't get to do this, transition to BE first */ | ||
2930 | read_unlock(&tasklist_lock); | ||
2931 | mutex_unlock(&sched_hotcpu_mutex); | ||
2932 | - return -ESRCH; | ||
2933 | + return p ? -EPERM : -ESRCH; | ||
2934 | } | ||
2935 | |||
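The sched_setscheduler() hunks above imply the following user-space admission sequence (a sketch; SCHED_LITMUS must be visible to user space, e.g. via liblitmus, and sched_priority must be 0 because SCHED_LITMUS is not an rt_policy()):

#include <sched.h>

static int become_litmus_task(pid_t pid)
{
        struct sched_param param = { .sched_priority = 0 };
        /* triggers litmus_admit_task() and, on success, task_new() */
        return sched_setscheduler(pid, SCHED_LITMUS, &param);
}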
2936 | /* | ||
2937 | diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c | ||
2938 | index da7c061..de30496 100644 | ||
2939 | --- a/kernel/sched_fair.c | ||
2940 | +++ b/kernel/sched_fair.c | ||
2941 | @@ -845,7 +845,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p) | ||
2942 | struct sched_entity *se = &curr->se, *pse = &p->se; | ||
2943 | unsigned long gran; | ||
2944 | |||
2945 | - if (unlikely(rt_prio(p->prio))) { | ||
2946 | + if (unlikely(rt_prio(p->prio) || p->policy == SCHED_LITMUS)) { | ||
2947 | update_rq_clock(rq); | ||
2948 | update_curr(cfs_rq); | ||
2949 | resched_task(curr); | ||
2950 | diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c | ||
2951 | index 9ba3daa..c7c938c 100644 | ||
2952 | --- a/kernel/sched_rt.c | ||
2953 | +++ b/kernel/sched_rt.c | ||
2954 | @@ -70,7 +70,7 @@ yield_task_rt(struct rq *rq) | ||
2955 | */ | ||
2956 | static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p) | ||
2957 | { | ||
2958 | - if (p->prio < rq->curr->prio) | ||
2959 | + if (p->prio < rq->curr->prio || p->policy == SCHED_LITMUS) | ||
2960 | resched_task(rq->curr); | ||
2961 | } | ||
2962 | |||
2963 | diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c | ||
2964 | index cb89fa8..d6dad22 100644 | ||
2965 | --- a/kernel/time/tick-sched.c | ||
2966 | +++ b/kernel/time/tick-sched.c | ||
2967 | @@ -568,6 +568,42 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) | ||
2968 | } | ||
2969 | |||
2970 | /** | ||
2971 | + * tick_set_quanta_type - get the quanta type as a boot option | ||
2972 | + * Default is the standard setup with ticks staggered over the | ||
2973 | + * first half of the tick period. | ||
2974 | + */ | ||
2975 | +int quanta_type = LINUX_DEFAULT_TICKS; | ||
2976 | +static int __init tick_set_quanta_type(char *str) | ||
2977 | +{ | ||
2978 | + if (strcmp("aligned", str) == 0) | ||
2979 | + quanta_type = LITMUS_ALIGNED_TICKS; | ||
2980 | + else if (strcmp("staggered", str) == 0) | ||
2981 | + quanta_type = LITMUS_STAGGERED_TICKS; | ||
2982 | + return 1; | ||
2983 | +} | ||
2984 | +__setup("quanta=", tick_set_quanta_type); | ||
2985 | + | ||
2986 | +u64 cpu_stagger_offset(int cpu) | ||
2987 | +{ | ||
2988 | + u64 offset = 0; | ||
2989 | + switch (quanta_type) { | ||
2990 | + case LITMUS_ALIGNED_TICKS: | ||
2991 | + offset = 0; | ||
2992 | + break; | ||
2993 | + case LITMUS_STAGGERED_TICKS: | ||
2994 | + offset = ktime_to_ns(tick_period); | ||
2995 | + do_div(offset, num_possible_cpus()); | ||
2996 | + offset *= cpu; | ||
2997 | + break; | ||
2998 | + default: | ||
2999 | + offset = ktime_to_ns(tick_period) >> 1; | ||
3000 | + do_div(offset, num_possible_cpus()); | ||
3001 | + offset *= cpu; | ||
3002 | + } | ||
3003 | + return offset; | ||
3004 | +} | ||
3005 | + | ||
3006 | +/** | ||
3007 | * tick_setup_sched_timer - setup the tick emulation timer | ||
3008 | */ | ||
3009 | void tick_setup_sched_timer(void) | ||
3010 | @@ -585,9 +621,11 @@ void tick_setup_sched_timer(void) | ||
3011 | |||
3012 | /* Get the next period (per cpu) */ | ||
3013 | ts->sched_timer.expires = tick_init_jiffy_update(); | ||
3014 | - offset = ktime_to_ns(tick_period) >> 1; | ||
3015 | - do_div(offset, num_possible_cpus()); | ||
3016 | - offset *= smp_processor_id(); | ||
3017 | + | ||
3018 | + /* Offset must be set correctly to achieve desired quanta type. */ | ||
3019 | + offset = cpu_stagger_offset(smp_processor_id()); | ||
3020 | + | ||
3021 | + /* Add correct offset to expiration time. */ | ||
3022 | ts->sched_timer.expires = ktime_add_ns(ts->sched_timer.expires, offset); | ||
3023 | |||
3024 | for (;;) { | ||
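Worked example: with HZ=1000 (tick_period = 1 ms) on 4 CPUs, quanta=aligned gives every CPU offset 0; quanta=staggered spreads ticks over the full period (0, 250, 500, 750 us); the default spreads them over the first half (0, 125, 250, 375 us). A user-space sketch of the same arithmetic as cpu_stagger_offset():

#include <stdio.h>
#include <stdint.h>

enum { ALIGNED, STAGGERED, DEFAULT };

/* Mirrors cpu_stagger_offset(): divide the period first, then scale
 * by the CPU index, exactly as the do_div() sequence above does. */
static uint64_t stagger_offset(int type, int cpu, int ncpus, uint64_t tick_ns)
{
	switch (type) {
	case ALIGNED:   return 0;
	case STAGGERED: return tick_ns / ncpus * cpu;
	default:        return (tick_ns >> 1) / ncpus * cpu;
	}
}

int main(void)
{
	for (int cpu = 0; cpu < 4; cpu++)
		printf("cpu%d: staggered=%llu ns, default=%llu ns\n", cpu,
		       (unsigned long long) stagger_offset(STAGGERED, cpu, 4, 1000000),
		       (unsigned long long) stagger_offset(DEFAULT, cpu, 4, 1000000));
	return 0;
}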
3025 | diff --git a/litmus/Kconfig b/litmus/Kconfig | ||
3026 | new file mode 100644 | ||
3027 | index 0000000..9a2ab90 | ||
3028 | --- /dev/null | ||
3029 | +++ b/litmus/Kconfig | ||
3030 | @@ -0,0 +1,78 @@ | ||
3031 | +menu "LITMUS^RT" | ||
3032 | + | ||
3033 | +menu "Real-Time Synchronization" | ||
3034 | + | ||
3035 | +config NP_SECTION | ||
3036 | + bool "Non-preemptive section support" | ||
3037 | + depends on !SPARC64 | ||
3038 | + default n | ||
3039 | + help | ||
3040 | + Include support for flag-based non-preemptive section signaling | ||
3041 | + from userspace. | ||
3042 | + | ||
3043 | + (currently broken on SPARC64) | ||
3044 | + | ||
3045 | + Say Yes if you want FMLP short critical section synchronization support. | ||
3046 | + | ||
3047 | + | ||
3048 | +config SRP | ||
3049 | + bool "Stack Resource Policy (SRP)" | ||
3050 | + default n | ||
3051 | + help | ||
3052 | + Include support for Baker's Stack Resource Policy. | ||
3053 | + | ||
3054 | + Say Yes if you want FMLP local long critical section synchronization support. | ||
3055 | + | ||
3056 | +config FMLP | ||
3057 | + bool "FMLP support" | ||
3058 | + depends on NP_SECTION | ||
3059 | + default n | ||
3060 | + help | ||
3061 | + Include support for deterministic multiprocessor real-time | ||
3062 | + synchronization. | ||
3063 | + | ||
3064 | + Say Yes if you want FMLP long critical section synchronization support. | ||
3065 | + | ||
3066 | +endmenu | ||
3067 | + | ||
3068 | +menu "Tracing" | ||
3069 | + | ||
3070 | +config SCHED_TASK_TRACE | ||
3071 | + bool "Trace real-time tasks" | ||
3072 | + default y | ||
3073 | + help | ||
3074 | + Include support for the sched_trace_XXX() tracing functions. This | ||
3075 | + allows the collection of real-time task events such as job | ||
3076 | + completions, job releases, early completions, etc. This results in a | ||
3077 | + small overhead in the scheduling code. Disable if the overhead is not | ||
3078 | + acceptable (e.g., benchmarking). | ||
3079 | + | ||
3080 | + Say Yes for debugging. | ||
3081 | + Say No for overhead tracing. | ||
3082 | + | ||
3083 | +config SCHED_DEBUG_TRACE | ||
3084 | + bool "TRACE() debugging" | ||
3085 | + default y | ||
3086 | + help | ||
3087 | + Include support for sched_trace_log_message(), which is used to | ||
3088 | + implement TRACE(). If disabled, no TRACE() messages will be included | ||
3089 | + in the kernel, and no overheads due to debugging statements will be | ||
3090 | + incurred by the scheduler. Disable if the overhead is not acceptable | ||
3091 | + (e.g. benchmarking). | ||
3092 | + | ||
3093 | + Say Yes for debugging. | ||
3094 | + Say No for overhead tracing. | ||
3095 | + | ||
3096 | +config FEATHER_TRACE | ||
3097 | + bool "Feather-Trace Instrumentation Support" | ||
3098 | + default y | ||
3099 | + help | ||
3100 | + Include Feather-Trace trace points. Currently not supported on | ||
3101 | + sparc64. | ||
3102 | + | ||
3103 | + Say Yes for overhead tracing. | ||
3104 | + | ||
3105 | + | ||
3106 | +endmenu | ||
3107 | + | ||
3108 | +endmenu | ||
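For reference, a plausible configuration for overhead measurements, following the "Say No for overhead tracing" guidance in the help texts above (an illustrative .config fragment, not shipped defaults):

CONFIG_NP_SECTION=y
CONFIG_SRP=y
CONFIG_FMLP=y
# CONFIG_SCHED_TASK_TRACE is not set
# CONFIG_SCHED_DEBUG_TRACE is not set
CONFIG_FEATHER_TRACE=y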
3109 | diff --git a/litmus/Makefile b/litmus/Makefile | ||
3110 | new file mode 100644 | ||
3111 | index 0000000..fa39a2b | ||
3112 | --- /dev/null | ||
3113 | +++ b/litmus/Makefile | ||
3114 | @@ -0,0 +1,16 @@ | ||
3115 | +# | ||
3116 | +# Makefile for LITMUS^RT | ||
3117 | +# | ||
3118 | + | ||
3119 | +obj-y = sched_plugin.o litmus.o \ | ||
3120 | + edf_common.o jobs.o \ | ||
3121 | + rt_domain.o fdso.o sync.o \ | ||
3122 | + fmlp.o srp.o norqlock.o \ | ||
3123 | + sched_gsn_edf.o \ | ||
3124 | + sched_psn_edf.o \ | ||
3125 | + sched_cedf.o \ | ||
3126 | + sched_pfair.o | ||
3127 | + | ||
3128 | +obj-$(CONFIG_FEATHER_TRACE) += trace.o ft_event.o ftdev.o | ||
3129 | +obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o | ||
3130 | +obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o | ||
3131 | \ No newline at end of file | ||
3132 | diff --git a/litmus/edf_common.c b/litmus/edf_common.c | ||
3133 | new file mode 100644 | ||
3134 | index 0000000..84ece3e | ||
3135 | --- /dev/null | ||
3136 | +++ b/litmus/edf_common.c | ||
3137 | @@ -0,0 +1,95 @@ | ||
3138 | +/* | ||
3139 | + * kernel/edf_common.c | ||
3140 | + * | ||
3141 | + * Common functions for EDF-based schedulers. | ||
3142 | + */ | ||
3143 | + | ||
3144 | +#include <linux/percpu.h> | ||
3145 | +#include <linux/sched.h> | ||
3146 | +#include <linux/list.h> | ||
3147 | + | ||
3148 | +#include <litmus/litmus.h> | ||
3149 | +#include <litmus/sched_plugin.h> | ||
3150 | +#include <litmus/sched_trace.h> | ||
3151 | + | ||
3152 | + | ||
3153 | +#include <litmus/edf_common.h> | ||
3154 | + | ||
3155 | +/* edf_higher_prio - returns true if first has a higher EDF priority | ||
3156 | + * than second. Deadline ties are broken by PID. | ||
3157 | + * | ||
3158 | + * both first and second may be NULL | ||
3159 | + */ | ||
3160 | +int edf_higher_prio(struct task_struct* first, | ||
3161 | + struct task_struct* second) | ||
3162 | +{ | ||
3163 | + struct task_struct *first_task = first; | ||
3164 | + struct task_struct *second_task = second; | ||
3165 | + | ||
3166 | + /* Check for inherited priorities. Change task | ||
3167 | + * used for comparison in such a case. | ||
3168 | + */ | ||
3169 | + if (first && first->rt_param.inh_task) | ||
3170 | + first_task = first->rt_param.inh_task; | ||
3171 | + if (second && second->rt_param.inh_task) | ||
3172 | + second_task = second->rt_param.inh_task; | ||
3173 | + | ||
3174 | + return | ||
3175 | + /* it has to exist in order to have higher priority */ | ||
3176 | + first_task && ( | ||
3177 | + /* does the second task exist and is it a real-time task? If | ||
3178 | + * not, the first task (which is a RT task) has higher | ||
3179 | + * priority. | ||
3180 | + */ | ||
3181 | + !second_task || !is_realtime(second_task) || | ||
3182 | + | ||
3183 | + /* is the deadline of the first task earlier? | ||
3184 | + * Then it has higher priority. | ||
3185 | + */ | ||
3186 | + earlier_deadline(first_task, second_task) || | ||
3187 | + | ||
3188 | + /* Do we have a deadline tie? | ||
3189 | + * Then break by PID. | ||
3190 | + */ | ||
3191 | + (get_deadline(first_task) == get_deadline(second_task) && | ||
3192 | + (first_task->pid < second_task->pid || | ||
3193 | + | ||
3194 | + /* If the PIDs are the same then the task with the inherited | ||
3195 | + * priority wins. | ||
3196 | + */ | ||
3197 | + (first_task->pid == second_task->pid && | ||
3198 | + !second->rt_param.inh_task)))); | ||
3199 | +} | ||
3200 | + | ||
3201 | +int edf_ready_order(struct heap_node* a, struct heap_node* b) | ||
3202 | +{ | ||
3203 | + return edf_higher_prio(heap2task(a), heap2task(b)); | ||
3204 | +} | ||
3205 | + | ||
3206 | +void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched, | ||
3207 | + release_jobs_t release) | ||
3208 | +{ | ||
3209 | + rt_domain_init(rt, edf_ready_order, resched, release); | ||
3210 | +} | ||
3211 | + | ||
3212 | +/* need_to_preempt - check whether the task t needs to be preempted | ||
3213 | + * call only with irqs disabled and with ready_lock acquired | ||
3214 | + * THIS DOES NOT TAKE NON-PREEMPTIVE SECTIONS INTO ACCOUNT! | ||
3215 | + */ | ||
3216 | +int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t) | ||
3217 | +{ | ||
3218 | + /* we need the read lock for edf_ready_queue */ | ||
3219 | + /* no need to preempt if there is nothing pending */ | ||
3220 | + if (!__jobs_pending(rt)) | ||
3221 | + return 0; | ||
3222 | + /* we need to reschedule if t doesn't exist */ | ||
3223 | + if (!t) | ||
3224 | + return 1; | ||
3225 | + | ||
3226 | + /* NOTE: We cannot check for non-preemptibility since we | ||
3227 | + * don't know what address space we're currently in. | ||
3228 | + */ | ||
3229 | + | ||
3230 | + /* make sure to get non-rt stuff out of the way */ | ||
3231 | + return !is_realtime(t) || edf_higher_prio(__next_ready(rt), t); | ||
3232 | +} | ||
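The comparison above reduces, absent inheritance, to: existence, then real-time status, then earliest deadline, then lowest PID. A simplified user-space restatement that drops priority inheritance and uses plain fields, demonstrating the PID tie-break:

#include <assert.h>
#include <stdio.h>

struct task { int pid; unsigned long long deadline; int is_rt; };

/* Simplified edf_higher_prio(): no inh_task handling. */
static int edf_higher_prio(const struct task *a, const struct task *b)
{
	if (!a)
		return 0;                          /* must exist to have priority */
	if (!b || !b->is_rt)
		return 1;                          /* RT beats best-effort */
	if (a->deadline != b->deadline)
		return a->deadline < b->deadline;  /* earlier deadline wins */
	return a->pid < b->pid;                    /* tie broken by lower PID */
}

int main(void)
{
	struct task t1 = { .pid = 100, .deadline = 5000, .is_rt = 1 };
	struct task t2 = { .pid = 200, .deadline = 5000, .is_rt = 1 };
	assert(edf_higher_prio(&t1, &t2));   /* same deadline: PID 100 wins */
	assert(!edf_higher_prio(&t2, &t1));
	printf("tie-break ok\n");
	return 0;
}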
3233 | diff --git a/litmus/fdso.c b/litmus/fdso.c | ||
3234 | new file mode 100644 | ||
3235 | index 0000000..81ab0af | ||
3236 | --- /dev/null | ||
3237 | +++ b/litmus/fdso.c | ||
3238 | @@ -0,0 +1,282 @@ | ||
3239 | +/* fdso.c - file descriptor attached shared objects | ||
3240 | + * | ||
3241 | + * (c) 2007 B. Brandenburg, LITMUS^RT project | ||
3242 | + * | ||
3243 | + * Notes: | ||
3244 | + * - objects descriptor (OD) tables are not cloned during a fork. | ||
3245 | + * - objects are created on-demand, and freed after the last reference | ||
3246 | + * is dropped. | ||
3247 | + * - for now, object types are hard coded. | ||
3248 | + * - As long as we have live objects, we keep a reference to the inode. | ||
3249 | + */ | ||
3250 | + | ||
3251 | +#include <linux/errno.h> | ||
3252 | +#include <linux/sched.h> | ||
3253 | +#include <linux/mutex.h> | ||
3254 | +#include <linux/file.h> | ||
3255 | +#include <asm/uaccess.h> | ||
3256 | + | ||
3257 | +#include <litmus/fdso.h> | ||
3258 | + | ||
3259 | +extern struct fdso_ops fmlp_sem_ops; | ||
3260 | +extern struct fdso_ops srp_sem_ops; | ||
3261 | + | ||
3262 | +static const struct fdso_ops* fdso_ops[] = { | ||
3263 | + &fmlp_sem_ops, | ||
3264 | + &srp_sem_ops, | ||
3265 | +}; | ||
3266 | + | ||
3267 | +static void* fdso_create(obj_type_t type) | ||
3268 | +{ | ||
3269 | + if (fdso_ops[type]->create) | ||
3270 | + return fdso_ops[type]->create(); | ||
3271 | + else | ||
3272 | + return NULL; | ||
3273 | +} | ||
3274 | + | ||
3275 | +static void fdso_destroy(obj_type_t type, void* obj) | ||
3276 | +{ | ||
3277 | + fdso_ops[type]->destroy(obj); | ||
3278 | +} | ||
3279 | + | ||
3280 | +static int fdso_open(struct od_table_entry* entry, void* __user config) | ||
3281 | +{ | ||
3282 | + if (fdso_ops[entry->obj->type]->open) | ||
3283 | + return fdso_ops[entry->obj->type]->open(entry, config); | ||
3284 | + else | ||
3285 | + return 0; | ||
3286 | +} | ||
3287 | + | ||
3288 | +static int fdso_close(struct od_table_entry* entry) | ||
3289 | +{ | ||
3290 | + if (fdso_ops[entry->obj->type]->close) | ||
3291 | + return fdso_ops[entry->obj->type]->close(entry); | ||
3292 | + else | ||
3293 | + return 0; | ||
3294 | +} | ||
3295 | + | ||
3296 | +/* inode must be locked already */ | ||
3297 | +static struct inode_obj_id* alloc_inode_obj(struct inode* inode, | ||
3298 | + obj_type_t type, | ||
3299 | + unsigned int id) | ||
3300 | +{ | ||
3301 | + struct inode_obj_id* obj; | ||
3302 | + void* raw_obj; | ||
3303 | + | ||
3304 | + raw_obj = fdso_create(type); | ||
3305 | + if (!raw_obj) | ||
3306 | + return NULL; | ||
3307 | + | ||
3308 | + obj = kmalloc(sizeof(struct inode_obj_id), GFP_KERNEL); | ||
3309 | + if (!obj) { /* avoid leaking raw_obj on failure */ | ||
3310 | + fdso_destroy(type, raw_obj); return NULL; } | ||
3311 | + INIT_LIST_HEAD(&obj->list); | ||
3312 | + atomic_set(&obj->count, 1); | ||
3313 | + obj->type = type; | ||
3314 | + obj->id = id; | ||
3315 | + obj->obj = raw_obj; | ||
3316 | + obj->inode = inode; | ||
3317 | + | ||
3318 | + list_add(&obj->list, &inode->i_obj_list); | ||
3319 | + atomic_inc(&inode->i_count); | ||
3320 | + | ||
3321 | + printk(KERN_DEBUG "alloc_inode_obj(%p, %d, %d): object created\n", inode, type, id); | ||
3322 | + return obj; | ||
3323 | +} | ||
3324 | + | ||
3325 | +/* inode must be locked already */ | ||
3326 | +static struct inode_obj_id* get_inode_obj(struct inode* inode, | ||
3327 | + obj_type_t type, | ||
3328 | + unsigned int id) | ||
3329 | +{ | ||
3330 | + struct list_head* pos; | ||
3331 | + struct inode_obj_id* obj = NULL; | ||
3332 | + | ||
3333 | + list_for_each(pos, &inode->i_obj_list) { | ||
3334 | + obj = list_entry(pos, struct inode_obj_id, list); | ||
3335 | + if (obj->id == id && obj->type == type) { | ||
3336 | + atomic_inc(&obj->count); | ||
3337 | + return obj; | ||
3338 | + } | ||
3339 | + } | ||
3340 | + printk(KERN_DEBUG "get_inode_obj(%p, %d, %d): couldn't find object\n", inode, type, id); | ||
3341 | + return NULL; | ||
3342 | +} | ||
3343 | + | ||
3344 | + | ||
3345 | +static void put_inode_obj(struct inode_obj_id* obj) | ||
3346 | +{ | ||
3347 | + struct inode* inode; | ||
3348 | + int let_go = 0; | ||
3349 | + | ||
3350 | + inode = obj->inode; | ||
3351 | + if (atomic_dec_and_test(&obj->count)) { | ||
3352 | + | ||
3353 | + mutex_lock(&inode->i_obj_mutex); | ||
3354 | + /* no new references can be obtained */ | ||
3355 | + if (!atomic_read(&obj->count)) { | ||
3356 | + list_del(&obj->list); | ||
3357 | + fdso_destroy(obj->type, obj->obj); | ||
3358 | + kfree(obj); | ||
3359 | + let_go = 1; | ||
3360 | + } | ||
3361 | + mutex_unlock(&inode->i_obj_mutex); | ||
3362 | + if (let_go) | ||
3363 | + iput(inode); | ||
3364 | + } | ||
3365 | +} | ||
3366 | + | ||
3367 | +static struct od_table_entry* get_od_entry(struct task_struct* t) | ||
3368 | +{ | ||
3369 | + struct od_table_entry* table; | ||
3370 | + int i; | ||
3371 | + | ||
3372 | + | ||
3373 | + table = t->od_table; | ||
3374 | + if (!table) { | ||
3375 | + table = (struct od_table_entry*) | ||
3376 | + kzalloc(sizeof(struct od_table_entry) * | ||
3377 | + MAX_OBJECT_DESCRIPTORS, GFP_KERNEL); | ||
3378 | + t->od_table = table; | ||
3379 | + } | ||
3380 | + | ||
3381 | + for (i = 0; table && i < MAX_OBJECT_DESCRIPTORS; i++) | ||
3382 | + if (!table[i].used) { | ||
3383 | + table[i].used = 1; | ||
3384 | + return table + i; | ||
3385 | + } | ||
3386 | + return NULL; | ||
3387 | +} | ||
3388 | + | ||
3389 | +static int put_od_entry(struct od_table_entry* od) | ||
3390 | +{ | ||
3391 | + put_inode_obj(od->obj); | ||
3392 | + od->used = 0; | ||
3393 | + return 0; | ||
3394 | +} | ||
3395 | + | ||
3396 | +void exit_od_table(struct task_struct* t) | ||
3397 | +{ | ||
3398 | + int i; | ||
3399 | + | ||
3400 | + if (t->od_table) { | ||
3401 | + for (i = 0; i < MAX_OBJECT_DESCRIPTORS; i++) | ||
3402 | + if (t->od_table[i].used) | ||
3403 | + put_od_entry(t->od_table + i); | ||
3404 | + kfree(t->od_table); | ||
3405 | + t->od_table = NULL; | ||
3406 | + } | ||
3407 | +} | ||
3408 | + | ||
3409 | +static int do_sys_od_open(struct file* file, obj_type_t type, int id, | ||
3410 | + void* __user config) | ||
3411 | +{ | ||
3412 | + int idx = 0, err; | ||
3413 | + struct inode* inode; | ||
3414 | + struct inode_obj_id* obj = NULL; | ||
3415 | + struct od_table_entry* entry; | ||
3416 | + | ||
3417 | + inode = file->f_dentry->d_inode; | ||
3418 | + | ||
3419 | + entry = get_od_entry(current); | ||
3420 | + if (!entry) | ||
3421 | + return -ENOMEM; | ||
3422 | + | ||
3423 | + mutex_lock(&inode->i_obj_mutex); | ||
3424 | + obj = get_inode_obj(inode, type, id); | ||
3425 | + if (!obj) | ||
3426 | + obj = alloc_inode_obj(inode, type, id); | ||
3427 | + if (!obj) { | ||
3428 | + idx = -ENOMEM; | ||
3429 | + entry->used = 0; | ||
3430 | + } else { | ||
3431 | + entry->obj = obj; | ||
3432 | + entry->extra = NULL; | ||
3433 | + idx = entry - current->od_table; | ||
3434 | + } | ||
3435 | + | ||
3436 | + mutex_unlock(&inode->i_obj_mutex); | ||
3437 | + | ||
3438 | + err = idx < 0 ? 0 : fdso_open(entry, config); /* nothing to open if allocation failed */ | ||
3439 | + if (err < 0) { | ||
3440 | + /* The class rejected the open call. | ||
3441 | + * We need to clean up and tell user space. | ||
3442 | + */ | ||
3443 | + put_od_entry(entry); | ||
3444 | + idx = err; | ||
3445 | + } | ||
3446 | + | ||
3447 | + return idx; | ||
3448 | +} | ||
3449 | + | ||
3450 | + | ||
3451 | +struct od_table_entry* __od_lookup(int od) | ||
3452 | +{ | ||
3453 | + struct task_struct *t = current; | ||
3454 | + | ||
3455 | + if (!t->od_table) | ||
3456 | + return NULL; | ||
3457 | + if (od < 0 || od >= MAX_OBJECT_DESCRIPTORS) | ||
3458 | + return NULL; | ||
3459 | + if (!t->od_table[od].used) | ||
3460 | + return NULL; | ||
3461 | + return t->od_table + od; | ||
3462 | +} | ||
3463 | + | ||
3464 | + | ||
3465 | +asmlinkage int sys_od_open(int fd, int type, int obj_id, void* __user config) | ||
3466 | +{ | ||
3467 | + int ret = 0; | ||
3468 | + struct file* file; | ||
3469 | + | ||
3470 | + /* | ||
3471 | + 1) get file from fd, get inode from file | ||
3472 | + 2) lock inode | ||
3473 | + 3) try to lookup object | ||
3474 | + 4) if not present create and enqueue object, inc inode refcnt | ||
3475 | + 5) increment refcnt of object | ||
3476 | + 6) alloc od_table_entry, setup ptrs | ||
3477 | + 7) unlock inode | ||
3478 | + 8) return offset in od_table as OD | ||
3479 | + */ | ||
3480 | + | ||
3481 | + if (type < MIN_OBJ_TYPE || type > MAX_OBJ_TYPE) { | ||
3482 | + ret = -EINVAL; | ||
3483 | + goto out; | ||
3484 | + } | ||
3485 | + | ||
3486 | + file = fget(fd); | ||
3487 | + if (!file) { | ||
3488 | + ret = -EBADF; | ||
3489 | + goto out; | ||
3490 | + } | ||
3491 | + | ||
3492 | + ret = do_sys_od_open(file, type, obj_id, config); | ||
3493 | + | ||
3494 | + fput(file); | ||
3495 | + | ||
3496 | +out: | ||
3497 | + return ret; | ||
3498 | +} | ||
3499 | + | ||
3500 | + | ||
3501 | +asmlinkage int sys_od_close(int od) | ||
3502 | +{ | ||
3503 | + int ret = -EINVAL; | ||
3504 | + struct task_struct *t = current; | ||
3505 | + | ||
3506 | + if (od < 0 || od >= MAX_OBJECT_DESCRIPTORS) | ||
3507 | + return ret; | ||
3508 | + | ||
3509 | + if (!t->od_table || !t->od_table[od].used) | ||
3510 | + return ret; | ||
3511 | + | ||
3512 | + | ||
3513 | + /* give the class a chance to reject the close | ||
3514 | + */ | ||
3515 | + ret = fdso_close(t->od_table + od); | ||
3516 | + if (ret == 0) | ||
3517 | + ret = put_od_entry(t->od_table + od); | ||
3518 | + | ||
3519 | + return ret; | ||
3520 | +} | ||
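A hypothetical user-space caller of the two syscalls above (the syscall numbers are assumptions, not taken from this patch; real programs would use the liblitmus wrappers). Type 0 is the FMLP semaphore, since fmlp_sem_ops is the first entry in fdso_ops[]:

#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define __NR_od_open  400   /* assumed syscall numbers */
#define __NR_od_close 401
#define FMLP_SEM      0     /* first entry in fdso_ops[] above */

int main(void)
{
	int fd = open("mylocks", O_RDWR | O_CREAT, 0666);
	if (fd < 0) { perror("open"); return 1; }

	/* Attach FMLP semaphore #17 to this inode; tasks opening the same
	 * (inode, type, id) triple share a single object. */
	int od = syscall(__NR_od_open, fd, FMLP_SEM, 17, NULL);
	if (od < 0) { perror("od_open"); return 1; }
	printf("object descriptor: %d\n", od);

	syscall(__NR_od_close, od);
	close(fd);
	return 0;
}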
3521 | diff --git a/litmus/fmlp.c b/litmus/fmlp.c | ||
3522 | new file mode 100644 | ||
3523 | index 0000000..f34eeea | ||
3524 | --- /dev/null | ||
3525 | +++ b/litmus/fmlp.c | ||
3526 | @@ -0,0 +1,262 @@ | ||
3527 | +/* | ||
3528 | + * FMLP implementation. | ||
3529 | + * Much of the code here is borrowed from include/asm-i386/semaphore.h. | ||
3530 | + */ | ||
3531 | + | ||
3532 | +#include <asm/atomic.h> | ||
3533 | +#include <asm/semaphore.h> | ||
3534 | +#include <linux/sched.h> | ||
3535 | +#include <linux/wait.h> | ||
3536 | +#include <linux/spinlock.h> | ||
3537 | +#include <litmus/litmus.h> | ||
3538 | +#include <litmus/sched_plugin.h> | ||
3539 | +#include <litmus/edf_common.h> | ||
3540 | + | ||
3541 | +#include <litmus/fdso.h> | ||
3542 | + | ||
3543 | +#include <litmus/trace.h> | ||
3544 | + | ||
3545 | +#ifdef CONFIG_FMLP | ||
3546 | + | ||
3547 | +static void* create_fmlp_semaphore(void) | ||
3548 | +{ | ||
3549 | + struct pi_semaphore* sem; | ||
3550 | + int i; | ||
3551 | + | ||
3552 | + sem = kmalloc(sizeof(struct pi_semaphore), GFP_KERNEL); | ||
3553 | + if (!sem) | ||
3554 | + return NULL; | ||
3555 | + atomic_set(&sem->count, 1); | ||
3556 | + sem->sleepers = 0; | ||
3557 | + init_waitqueue_head(&sem->wait); | ||
3558 | + sem->hp.task = NULL; | ||
3559 | + sem->holder = NULL; | ||
3560 | + for (i = 0; i < NR_CPUS; i++) | ||
3561 | + sem->hp.cpu_task[i] = NULL; | ||
3562 | + return sem; | ||
3563 | +} | ||
3564 | + | ||
3565 | +static int open_fmlp_semaphore(struct od_table_entry* entry, void* __user arg) | ||
3566 | +{ | ||
3567 | + if (!fmlp_active()) | ||
3568 | + return -EBUSY; | ||
3569 | + return 0; | ||
3570 | +} | ||
3571 | + | ||
3572 | +static void destroy_fmlp_semaphore(void* sem) | ||
3573 | +{ | ||
3574 | + /* XXX assert invariants */ | ||
3575 | + kfree(sem); | ||
3576 | +} | ||
3577 | + | ||
3578 | +struct fdso_ops fmlp_sem_ops = { | ||
3579 | + .create = create_fmlp_semaphore, | ||
3580 | + .open = open_fmlp_semaphore, | ||
3581 | + .destroy = destroy_fmlp_semaphore | ||
3582 | +}; | ||
3583 | + | ||
3584 | +struct wq_pair { | ||
3585 | + struct task_struct* tsk; | ||
3586 | + struct pi_semaphore* sem; | ||
3587 | +}; | ||
3588 | + | ||
3589 | +static int rt_pi_wake_up(wait_queue_t *wait, unsigned mode, int sync, | ||
3590 | + void *key) | ||
3591 | +{ | ||
3592 | + struct wq_pair* wqp = (struct wq_pair*) wait->private; | ||
3593 | + set_rt_flags(wqp->tsk, RT_F_EXIT_SEM); | ||
3594 | + litmus->inherit_priority(wqp->sem, wqp->tsk); | ||
3595 | + TRACE_TASK(wqp->tsk, | ||
3596 | + "woken up by rt_pi_wake_up() (RT_F_SEM_EXIT, PI)\n"); | ||
3597 | + /* point to task for default_wake_function() */ | ||
3598 | + wait->private = wqp->tsk; | ||
3599 | + default_wake_function(wait, mode, sync, key); | ||
3600 | + | ||
3601 | + /* Always return true since we know that if we encountered a task | ||
3602 | + * that was already running the wake_up raced with the schedule in | ||
3603 | + * rt_pi_down(). In that case the task in rt_pi_down() will be scheduled | ||
3604 | + * immediately and own the lock. We must not wake up another task in | ||
3605 | + * any case. | ||
3606 | + */ | ||
3607 | + return 1; | ||
3608 | +} | ||
3609 | + | ||
3610 | +/* caller is responsible for locking */ | ||
3611 | +int edf_set_hp_task(struct pi_semaphore *sem) | ||
3612 | +{ | ||
3613 | + struct list_head *tmp, *next; | ||
3614 | + struct task_struct *queued; | ||
3615 | + int ret = 0; | ||
3616 | + | ||
3617 | + sem->hp.task = NULL; | ||
3618 | + list_for_each_safe(tmp, next, &sem->wait.task_list) { | ||
3619 | + queued = ((struct wq_pair*) | ||
3620 | + list_entry(tmp, wait_queue_t, | ||
3621 | + task_list)->private)->tsk; | ||
3622 | + | ||
3623 | + /* Compare task prios, find high prio task. */ | ||
3624 | + if (edf_higher_prio(queued, sem->hp.task)) { | ||
3625 | + sem->hp.task = queued; | ||
3626 | + ret = 1; | ||
3627 | + } | ||
3628 | + } | ||
3629 | + return ret; | ||
3630 | +} | ||
3631 | + | ||
3632 | +/* caller is responsible for locking */ | ||
3633 | +int edf_set_hp_cpu_task(struct pi_semaphore *sem, int cpu) | ||
3634 | +{ | ||
3635 | + struct list_head *tmp, *next; | ||
3636 | + struct task_struct *queued; | ||
3637 | + int ret = 0; | ||
3638 | + | ||
3639 | + sem->hp.cpu_task[cpu] = NULL; | ||
3640 | + list_for_each_safe(tmp, next, &sem->wait.task_list) { | ||
3641 | + queued = ((struct wq_pair*) | ||
3642 | + list_entry(tmp, wait_queue_t, | ||
3643 | + task_list)->private)->tsk; | ||
3644 | + | ||
3645 | + /* Compare task prios, find high prio task. */ | ||
3646 | + if (get_partition(queued) == cpu && | ||
3647 | + edf_higher_prio(queued, sem->hp.cpu_task[cpu])) { | ||
3648 | + sem->hp.cpu_task[cpu] = queued; | ||
3649 | + ret = 1; | ||
3650 | + } | ||
3651 | + } | ||
3652 | + return ret; | ||
3653 | +} | ||
3654 | + | ||
3655 | +static int do_fmlp_down(struct pi_semaphore* sem) | ||
3656 | +{ | ||
3657 | + unsigned long flags; | ||
3658 | + struct task_struct *tsk = current; | ||
3659 | + struct wq_pair pair; | ||
3660 | + int suspended = 1; | ||
3661 | + wait_queue_t wait = { | ||
3662 | + .private = &pair, | ||
3663 | + .func = rt_pi_wake_up, | ||
3664 | + .task_list = {NULL, NULL} | ||
3665 | + }; | ||
3666 | + | ||
3667 | + pair.tsk = tsk; | ||
3668 | + pair.sem = sem; | ||
3669 | + spin_lock_irqsave(&sem->wait.lock, flags); | ||
3670 | + | ||
3671 | + if (atomic_dec_return(&sem->count) < 0 || | ||
3672 | + waitqueue_active(&sem->wait)) { | ||
3673 | + /* we need to suspend */ | ||
3674 | + tsk->state = TASK_UNINTERRUPTIBLE; | ||
3675 | + add_wait_queue_exclusive_locked(&sem->wait, &wait); | ||
3676 | + | ||
3677 | + TRACE_CUR("suspends on PI lock %p\n", sem); | ||
3678 | + litmus->pi_block(sem, tsk); | ||
3679 | + | ||
3680 | + /* release lock before sleeping */ | ||
3681 | + spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
3682 | + | ||
3683 | + TS_PI_DOWN_END; | ||
3684 | + preempt_enable_no_resched(); | ||
3685 | + | ||
3686 | + | ||
3687 | + /* We depend on the FIFO order of the wait queue. | ||
3688 | + * Thus, we don't need to recheck when we wake up; we | ||
3689 | + * are guaranteed to have the lock since there is only one | ||
3690 | + * wake up per release | ||
3691 | + */ | ||
3692 | + schedule(); | ||
3693 | + | ||
3694 | + TRACE_CUR("woke up, now owns PI lock %p\n", sem); | ||
3695 | + | ||
3696 | + /* try_to_wake_up() set our state to TASK_RUNNING, | ||
3697 | + * all we need to do is to remove our wait queue entry | ||
3698 | + */ | ||
3699 | + remove_wait_queue(&sem->wait, &wait); | ||
3700 | + } else { | ||
3701 | + /* no priority inheritance necessary, since there are no queued | ||
3702 | + * tasks. | ||
3703 | + */ | ||
3704 | + suspended = 0; | ||
3705 | + TRACE_CUR("acquired PI lock %p, no contention\n", sem); | ||
3706 | + sem->holder = tsk; | ||
3707 | + sem->hp.task = tsk; | ||
3708 | + litmus->inherit_priority(sem, tsk); | ||
3709 | + spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
3710 | + } | ||
3711 | + return suspended; | ||
3712 | +} | ||
3713 | + | ||
3714 | +static void do_fmlp_up(struct pi_semaphore* sem) | ||
3715 | +{ | ||
3716 | + unsigned long flags; | ||
3717 | + | ||
3718 | + spin_lock_irqsave(&sem->wait.lock, flags); | ||
3719 | + | ||
3720 | + TRACE_CUR("releases PI lock %p\n", sem); | ||
3721 | + litmus->return_priority(sem); | ||
3722 | + sem->holder = NULL; | ||
3723 | + if (atomic_inc_return(&sem->count) < 1) | ||
3724 | + /* there is a task queued */ | ||
3725 | + wake_up_locked(&sem->wait); | ||
3726 | + | ||
3727 | + spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
3728 | +} | ||
3729 | + | ||
3730 | +asmlinkage long sys_fmlp_down(int sem_od) | ||
3731 | +{ | ||
3732 | + long ret = 0; | ||
3733 | + struct pi_semaphore * sem; | ||
3734 | + int suspended = 0; | ||
3735 | + | ||
3736 | + preempt_disable(); | ||
3737 | + TS_PI_DOWN_START; | ||
3738 | + | ||
3739 | + sem = lookup_fmlp_sem(sem_od); | ||
3740 | + if (sem) | ||
3741 | + suspended = do_fmlp_down(sem); | ||
3742 | + else | ||
3743 | + ret = -EINVAL; | ||
3744 | + | ||
3745 | + if (!suspended) { | ||
3746 | + TS_PI_DOWN_END; | ||
3747 | + preempt_enable(); | ||
3748 | + } | ||
3749 | + | ||
3750 | + return ret; | ||
3751 | +} | ||
3752 | + | ||
3753 | +asmlinkage long sys_fmlp_up(int sem_od) | ||
3754 | +{ | ||
3755 | + long ret = 0; | ||
3756 | + struct pi_semaphore * sem; | ||
3757 | + | ||
3758 | + preempt_disable(); | ||
3759 | + TS_PI_UP_START; | ||
3760 | + | ||
3761 | + sem = lookup_fmlp_sem(sem_od); | ||
3762 | + if (sem) | ||
3763 | + do_fmlp_up(sem); | ||
3764 | + else | ||
3765 | + ret = -EINVAL; | ||
3766 | + | ||
3767 | + | ||
3768 | + TS_PI_UP_END; | ||
3769 | + preempt_enable(); | ||
3770 | + | ||
3771 | + return ret; | ||
3772 | +} | ||
3773 | + | ||
3774 | +#else | ||
3775 | + | ||
3776 | +struct fdso_ops fmlp_sem_ops = {}; | ||
3777 | + | ||
3778 | +asmlinkage long sys_fmlp_down(int sem_od) | ||
3779 | +{ | ||
3780 | + return -ENOSYS; | ||
3781 | +} | ||
3782 | + | ||
3783 | +asmlinkage long sys_fmlp_up(int sem_od) | ||
3784 | +{ | ||
3785 | + return -ENOSYS; | ||
3786 | +} | ||
3787 | + | ||
3788 | +#endif | ||
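Continuing the od_open sketch above, a hypothetical critical section under the FMLP (syscall numbers are again assumptions; real code would use liblitmus). The protocol combines FIFO queueing with priority inheritance: fmlp_down may suspend, and the matching fmlp_up wakes at most one waiter, which then owns the lock:

#include <sys/syscall.h>
#include <unistd.h>

#define __NR_fmlp_down 402  /* assumed */
#define __NR_fmlp_up   403  /* assumed */

static void with_resource(int sem_od)
{
	syscall(__NR_fmlp_down, sem_od);  /* may suspend; FIFO + PI */
	/* ... access the shared resource ... */
	syscall(__NR_fmlp_up, sem_od);    /* wakes at most one waiter */
}

int main(void)
{
	with_resource(0);  /* dummy OD; returns -ENOSYS off a LITMUS kernel */
	return 0;
}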
3789 | diff --git a/litmus/ft_event.c b/litmus/ft_event.c | ||
3790 | new file mode 100644 | ||
3791 | index 0000000..6084b6d | ||
3792 | --- /dev/null | ||
3793 | +++ b/litmus/ft_event.c | ||
3794 | @@ -0,0 +1,43 @@ | ||
3795 | +#include <linux/types.h> | ||
3796 | + | ||
3797 | +#include <litmus/feather_trace.h> | ||
3798 | + | ||
3799 | +#ifndef __ARCH_HAS_FEATHER_TRACE | ||
3800 | +/* provide dummy implementation */ | ||
3801 | + | ||
3802 | +int ft_events[MAX_EVENTS]; | ||
3803 | + | ||
3804 | +int ft_enable_event(unsigned long id) | ||
3805 | +{ | ||
3806 | + if (id < MAX_EVENTS) { | ||
3807 | + ft_events[id]++; | ||
3808 | + return 1; | ||
3809 | + } else | ||
3810 | + return 0; | ||
3811 | +} | ||
3812 | + | ||
3813 | +int ft_disable_event(unsigned long id) | ||
3814 | +{ | ||
3815 | + if (id < MAX_EVENTS && ft_events[id]) { | ||
3816 | + ft_events[id]--; | ||
3817 | + return 1; | ||
3818 | + } else | ||
3819 | + return 0; | ||
3820 | +} | ||
3821 | + | ||
3822 | +int ft_disable_all_events(void) | ||
3823 | +{ | ||
3824 | + int i; | ||
3825 | + | ||
3826 | + for (i = 0; i < MAX_EVENTS; i++) | ||
3827 | + ft_events[i] = 0; | ||
3828 | + | ||
3829 | + return MAX_EVENTS; | ||
3830 | +} | ||
3831 | + | ||
3832 | +int ft_is_event_enabled(unsigned long id) | ||
3833 | +{ | ||
3834 | + return id < MAX_EVENTS && ft_events[id]; | ||
3835 | +} | ||
3836 | + | ||
3837 | +#endif | ||
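Note that this fallback makes enable/disable a per-event nesting counter rather than a boolean: two enables require two disables before the event is off. A compilable restatement of that behavior:

#include <assert.h>

#define MAX_EVENTS 128
static int ft_events[MAX_EVENTS];

static int ft_enable_event(unsigned long id)
{
	return id < MAX_EVENTS ? (ft_events[id]++, 1) : 0;
}

static int ft_disable_event(unsigned long id)
{
	return (id < MAX_EVENTS && ft_events[id]) ? (ft_events[id]--, 1) : 0;
}

int main(void)
{
	ft_enable_event(7);
	ft_enable_event(7);
	ft_disable_event(7);
	assert(ft_events[7] == 1);  /* still enabled: counts nest */
	return 0;
}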
3838 | diff --git a/litmus/ftdev.c b/litmus/ftdev.c | ||
3839 | new file mode 100644 | ||
3840 | index 0000000..7c933ff | ||
3841 | --- /dev/null | ||
3842 | +++ b/litmus/ftdev.c | ||
3843 | @@ -0,0 +1,352 @@ | ||
3844 | +#include <linux/sched.h> | ||
3845 | +#include <linux/fs.h> | ||
3846 | +#include <linux/cdev.h> | ||
3847 | +#include <asm/uaccess.h> | ||
3848 | +#include <linux/module.h> | ||
3849 | + | ||
3850 | +#include <litmus/litmus.h> | ||
3851 | +#include <litmus/feather_trace.h> | ||
3852 | +#include <litmus/ftdev.h> | ||
3853 | + | ||
3854 | +struct ft_buffer* alloc_ft_buffer(unsigned int count, size_t size) | ||
3855 | +{ | ||
3856 | + struct ft_buffer* buf; | ||
3857 | + size_t total = (size + 1) * count; | ||
3858 | + char* mem; | ||
3859 | + int order = 0, pages = 1; | ||
3860 | + | ||
3861 | + buf = kmalloc(sizeof(struct ft_buffer), GFP_KERNEL); | ||
3862 | + if (!buf) | ||
3863 | + return NULL; | ||
3864 | + | ||
3865 | + total = (total / PAGE_SIZE) + (total % PAGE_SIZE != 0); | ||
3866 | + while (pages < total) { | ||
3867 | + order++; | ||
3868 | + pages *= 2; | ||
3869 | + } | ||
3870 | + | ||
3871 | + mem = (char*) __get_free_pages(GFP_KERNEL, order); | ||
3872 | + if (!mem) { | ||
3873 | + kfree(buf); | ||
3874 | + return NULL; | ||
3875 | + } | ||
3876 | + | ||
3877 | + if (!init_ft_buffer(buf, count, size, | ||
3878 | + mem + (count * size), /* markers at the end */ | ||
3879 | + mem)) { /* buffer objects */ | ||
3880 | + free_pages((unsigned long) mem, order); | ||
3881 | + kfree(buf); | ||
3882 | + return NULL; | ||
3883 | + } | ||
3884 | + return buf; | ||
3885 | +} | ||
3886 | + | ||
3887 | +void free_ft_buffer(struct ft_buffer* buf) | ||
3888 | +{ | ||
3889 | + int order = 0, pages = 1; | ||
3890 | + size_t total; | ||
3891 | + | ||
3892 | + if (buf) { | ||
3893 | + total = (buf->slot_size + 1) * buf->slot_count; | ||
3894 | + total = (total / PAGE_SIZE) + (total % PAGE_SIZE != 0); | ||
3895 | + while (pages < total) { | ||
3896 | + order++; | ||
3897 | + pages *= 2; | ||
3898 | + } | ||
3899 | + free_pages((unsigned long) buf->buffer_mem, order); | ||
3900 | + kfree(buf); | ||
3901 | + } | ||
3902 | +} | ||
3903 | + | ||
3904 | +struct ftdev_event { | ||
3905 | + int id; | ||
3906 | + struct ftdev_event* next; | ||
3907 | +}; | ||
3908 | + | ||
3909 | +static int activate(struct ftdev_event** chain, int id) | ||
3910 | +{ | ||
3911 | + struct ftdev_event* ev = kmalloc(sizeof(struct ftdev_event), GFP_KERNEL); | ||
3912 | + if (ev) { | ||
3913 | + printk(KERN_INFO | ||
3914 | + "Enabling feather-trace event %d.\n", (int) id); | ||
3915 | + ft_enable_event(id); | ||
3916 | + ev->id = id; | ||
3917 | + ev->next = *chain; | ||
3918 | + *chain = ev; | ||
3919 | + } | ||
3920 | + return ev ? 0 : -ENOMEM; | ||
3921 | +} | ||
3922 | + | ||
3923 | +static void deactivate(struct ftdev_event** chain, int id) | ||
3924 | +{ | ||
3925 | + struct ftdev_event **cur = chain; | ||
3926 | + struct ftdev_event *nxt; | ||
3927 | + while (*cur) { | ||
3928 | + if ((*cur)->id == id) { | ||
3929 | + nxt = (*cur)->next; | ||
3930 | + kfree(*cur); | ||
3931 | + *cur = nxt; | ||
3932 | + printk(KERN_INFO | ||
3933 | + "Disabling feather-trace event %d.\n", (int) id); | ||
3934 | + ft_disable_event(id); | ||
3935 | + break; | ||
3936 | + } | ||
3937 | + cur = &(*cur)->next; | ||
3938 | + } | ||
3939 | +} | ||
3940 | + | ||
3941 | +static int ftdev_open(struct inode *in, struct file *filp) | ||
3942 | +{ | ||
3943 | + struct ftdev* ftdev; | ||
3944 | + struct ftdev_minor* ftdm; | ||
3945 | + unsigned int buf_idx = iminor(in); | ||
3946 | + int err = 0; | ||
3947 | + | ||
3948 | + ftdev = container_of(in->i_cdev, struct ftdev, cdev); | ||
3949 | + | ||
3950 | + if (buf_idx >= ftdev->minor_cnt) { | ||
3951 | + err = -ENODEV; | ||
3952 | + goto out; | ||
3953 | + } | ||
3954 | + if (ftdev->can_open && (err = ftdev->can_open(ftdev, buf_idx))) | ||
3955 | + goto out; | ||
3956 | + | ||
3957 | + ftdm = ftdev->minor + buf_idx; | ||
3958 | + filp->private_data = ftdm; | ||
3959 | + | ||
3960 | + if (mutex_lock_interruptible(&ftdm->lock)) { | ||
3961 | + err = -ERESTARTSYS; | ||
3962 | + goto out; | ||
3963 | + } | ||
3964 | + | ||
3965 | + if (!ftdm->readers && ftdev->alloc) | ||
3966 | + err = ftdev->alloc(ftdev, buf_idx); | ||
3967 | + if (0 == err) | ||
3968 | + ftdm->readers++; | ||
3969 | + | ||
3970 | + mutex_unlock(&ftdm->lock); | ||
3971 | +out: | ||
3972 | + return err; | ||
3973 | +} | ||
3974 | + | ||
3975 | +static int ftdev_release(struct inode *in, struct file *filp) | ||
3976 | +{ | ||
3977 | + struct ftdev* ftdev; | ||
3978 | + struct ftdev_minor* ftdm; | ||
3979 | + unsigned int buf_idx = iminor(in); | ||
3980 | + int err = 0; | ||
3981 | + | ||
3982 | + ftdev = container_of(in->i_cdev, struct ftdev, cdev); | ||
3983 | + | ||
3984 | + if (buf_idx >= ftdev->minor_cnt) { | ||
3985 | + err = -ENODEV; | ||
3986 | + goto out; | ||
3987 | + } | ||
3988 | + ftdm = ftdev->minor + buf_idx; | ||
3989 | + | ||
3990 | + if (mutex_lock_interruptible(&ftdm->lock)) { | ||
3991 | + err = -ERESTARTSYS; | ||
3992 | + goto out; | ||
3993 | + } | ||
3994 | + | ||
3995 | + if (ftdm->readers == 1) { | ||
3996 | + while (ftdm->events) | ||
3997 | + deactivate(&ftdm->events, ftdm->events->id); | ||
3998 | + | ||
3999 | + /* wait for any pending events to complete */ | ||
4000 | + set_current_state(TASK_UNINTERRUPTIBLE); | ||
4001 | + schedule_timeout(HZ); | ||
4002 | + | ||
4003 | + printk(KERN_ALERT "Failed trace writes: %u\n", | ||
4004 | + ftdm->buf->failed_writes); | ||
4005 | + | ||
4006 | + if (ftdev->free) | ||
4007 | + ftdev->free(ftdev, buf_idx); | ||
4008 | + } | ||
4009 | + | ||
4010 | + ftdm->readers--; | ||
4011 | + mutex_unlock(&ftdm->lock); | ||
4012 | +out: | ||
4013 | + return err; | ||
4014 | +} | ||
4015 | + | ||
4016 | +/* based on ft_buffer_read | ||
4017 | + * @returns < 0 : page fault | ||
4018 | + * = 0 : no data available | ||
4019 | + * = 1 : one slot copied | ||
4020 | + */ | ||
4021 | +static int ft_buffer_copy_to_user(struct ft_buffer* buf, char __user *dest) | ||
4022 | +{ | ||
4023 | + unsigned int idx; | ||
4024 | + int err = 0; | ||
4025 | + if (buf->free_count != buf->slot_count) { | ||
4026 | + /* data available */ | ||
4027 | + idx = buf->read_idx % buf->slot_count; | ||
4028 | + if (buf->slots[idx] == SLOT_READY) { | ||
4029 | + err = copy_to_user(dest, ((char*) buf->buffer_mem) + | ||
4030 | + idx * buf->slot_size, | ||
4031 | + buf->slot_size); | ||
4032 | + if (err == 0) { | ||
4033 | + /* copy ok */ | ||
4034 | + buf->slots[idx] = SLOT_FREE; | ||
4035 | + buf->read_idx++; | ||
4036 | + fetch_and_inc(&buf->free_count); | ||
4037 | + err = 1; | ||
4038 | + } | ||
4039 | + } | ||
4040 | + } | ||
4041 | + return err; | ||
4042 | +} | ||
4043 | + | ||
4044 | +static ssize_t ftdev_read(struct file *filp, | ||
4045 | + char __user *to, size_t len, loff_t *f_pos) | ||
4046 | +{ | ||
4047 | + /* we ignore f_pos, this is strictly sequential */ | ||
4048 | + | ||
4049 | + ssize_t err = 0; | ||
4050 | + size_t chunk; | ||
4051 | + int copied; | ||
4052 | + struct ftdev_minor* ftdm = filp->private_data; | ||
4053 | + | ||
4054 | + if (mutex_lock_interruptible(&ftdm->lock)) { | ||
4055 | + err = -ERESTARTSYS; | ||
4056 | + goto out; | ||
4057 | + } | ||
4058 | + | ||
4059 | + | ||
4060 | + chunk = ftdm->buf->slot_size; | ||
4061 | + while (len >= chunk) { | ||
4062 | + copied = ft_buffer_copy_to_user(ftdm->buf, to); | ||
4063 | + if (copied == 1) { | ||
4064 | + len -= chunk; | ||
4065 | + to += chunk; | ||
4066 | + err += chunk; | ||
4067 | + } else if (err == 0 && copied == 0 && ftdm->events) { | ||
4068 | + /* Only wait if there are any events enabled and only | ||
4069 | + * if we haven't copied some data yet. We cannot wait | ||
4070 | + * here with copied data because that data would get | ||
4071 | + * lost if the task is interrupted (e.g., killed). | ||
4072 | + */ | ||
4073 | + set_current_state(TASK_INTERRUPTIBLE); | ||
4074 | + schedule_timeout(50); | ||
4075 | + if (signal_pending(current)) { | ||
4076 | + if (err == 0) | ||
4077 | + /* nothing read yet, signal problem */ | ||
4078 | + err = -ERESTARTSYS; | ||
4079 | + break; | ||
4080 | + } | ||
4081 | + } else if (copied < 0) { | ||
4082 | + /* page fault */ | ||
4083 | + err = copied; | ||
4084 | + break; | ||
4085 | + } else | ||
4086 | + /* nothing left to get, return to user space */ | ||
4087 | + break; | ||
4088 | + } | ||
4089 | + mutex_unlock(&ftdm->lock); | ||
4090 | +out: | ||
4091 | + return err; | ||
4092 | +} | ||
4093 | + | ||
4094 | +typedef uint32_t cmd_t; | ||
4095 | + | ||
4096 | +static ssize_t ftdev_write(struct file *filp, const char __user *from, | ||
4097 | + size_t len, loff_t *f_pos) | ||
4098 | +{ | ||
4099 | + struct ftdev_minor* ftdm = filp->private_data; | ||
4100 | + ssize_t err = -EINVAL; | ||
4101 | + cmd_t cmd; | ||
4102 | + cmd_t id; | ||
4103 | + | ||
4104 | + if (len % sizeof(cmd_t) || len < 2 * sizeof(cmd_t)) | ||
4105 | + goto out; | ||
4106 | + | ||
4107 | + if (copy_from_user(&cmd, from, sizeof(cmd_t))) { | ||
4108 | + err = -EFAULT; | ||
4109 | + goto out; | ||
4110 | + } | ||
4111 | + len -= sizeof(cmd_t); | ||
4112 | + from += sizeof(cmd_t); | ||
4113 | + | ||
4114 | + if (cmd != FTDEV_ENABLE_CMD && cmd != FTDEV_DISABLE_CMD) | ||
4115 | + goto out; | ||
4116 | + | ||
4117 | + if (mutex_lock_interruptible(&ftdm->lock)) { | ||
4118 | + err = -ERESTARTSYS; | ||
4119 | + goto out; | ||
4120 | + } | ||
4121 | + | ||
4122 | + err = sizeof(cmd_t); | ||
4123 | + while (len) { | ||
4124 | + if (copy_from_user(&id, from, sizeof(cmd_t))) { | ||
4125 | + err = -EFAULT; | ||
4126 | + goto out_unlock; | ||
4127 | + } | ||
4128 | + /* FIXME: check id against list of acceptable events */ | ||
4129 | + len -= sizeof(cmd_t); | ||
4130 | + from += sizeof(cmd_t); | ||
4131 | + if (cmd == FTDEV_DISABLE_CMD) | ||
4132 | + deactivate(&ftdm->events, id); | ||
4133 | + else if (activate(&ftdm->events, id) != 0) { | ||
4134 | + err = -ENOMEM; | ||
4135 | + goto out_unlock; | ||
4136 | + } | ||
4137 | + err += sizeof(cmd_t); | ||
4138 | + } | ||
4139 | + | ||
4140 | +out_unlock: | ||
4141 | + mutex_unlock(&ftdm->lock); | ||
4142 | +out: | ||
4143 | + return err; | ||
4144 | +} | ||
4145 | + | ||
4146 | +struct file_operations ftdev_fops = { | ||
4147 | + .owner = THIS_MODULE, | ||
4148 | + .open = ftdev_open, | ||
4149 | + .release = ftdev_release, | ||
4150 | + .write = ftdev_write, | ||
4151 | + .read = ftdev_read, | ||
4152 | +}; | ||
4153 | + | ||
4154 | + | ||
4155 | +void ftdev_init(struct ftdev* ftdev, struct module* owner) | ||
4156 | +{ | ||
4157 | + int i; | ||
4158 | + cdev_init(&ftdev->cdev, &ftdev_fops); | ||
4159 | + ftdev->cdev.owner = owner; | ||
4160 | + ftdev->cdev.ops = &ftdev_fops; | ||
4161 | + ftdev->minor_cnt = 0; | ||
4162 | + for (i = 0; i < MAX_FTDEV_MINORS; i++) { | ||
4163 | + mutex_init(&ftdev->minor[i].lock); | ||
4164 | + ftdev->minor[i].readers = 0; | ||
4165 | + ftdev->minor[i].buf = NULL; | ||
4166 | + ftdev->minor[i].events = NULL; | ||
4167 | + } | ||
4168 | + ftdev->alloc = NULL; | ||
4169 | + ftdev->free = NULL; | ||
4170 | + ftdev->can_open = NULL; | ||
4171 | +} | ||
4172 | + | ||
4173 | +int register_ftdev(struct ftdev* ftdev, const char* name, int major) | ||
4174 | +{ | ||
4175 | + dev_t trace_dev; | ||
4176 | + int error = 0; | ||
4177 | + | ||
4178 | + trace_dev = MKDEV(major, 0); | ||
4179 | + error = register_chrdev_region(trace_dev, ftdev->minor_cnt, name); | ||
4180 | + if (error) | ||
4181 | + { | ||
4182 | + printk(KERN_WARNING "ftdev(%s): " | ||
4183 | + "Could not register major/minor number %d/%u\n", | ||
4184 | + name, major, ftdev->minor_cnt); | ||
4185 | + return error; | ||
4186 | + } | ||
4187 | + error = cdev_add(&ftdev->cdev, trace_dev, ftdev->minor_cnt); | ||
4188 | + if (error) { | ||
4189 | + printk(KERN_WARNING "ftdev(%s): " | ||
4190 | + "Could not add cdev for major/minor = %d/%u.\n", | ||
4191 | + name, major, ftdev->minor_cnt); | ||
4192 | + return error; | ||
4193 | + } | ||
4194 | + return error; | ||
4195 | +} | ||
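The write() protocol above expects a stream of 32-bit words, a command followed by one or more event IDs, while read() returns whole slot_size records. A hypothetical user-space reader (device path, FTDEV_ENABLE_CMD value, and record size are assumptions; the real values live in litmus/ftdev.h and the trace setup):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define FTDEV_ENABLE_CMD 1  /* assumed value */

int main(void)
{
	int fd = open("/dev/ft_trace0", O_RDWR);  /* assumed device node */
	if (fd < 0) { perror("open"); return 1; }

	/* One command word followed by two event IDs; on success, write()
	 * returns the number of bytes consumed (12 here). */
	uint32_t req[3] = { FTDEV_ENABLE_CMD, 100, 101 };
	if (write(fd, req, sizeof(req)) != sizeof(req)) { perror("write"); return 1; }

	char slot[16];  /* must match the minor's slot_size */
	ssize_t n;
	while ((n = read(fd, slot, sizeof(slot))) > 0)
		fwrite(slot, 1, (size_t) n, stdout);  /* dump raw records */

	close(fd);
	return 0;
}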
4196 | diff --git a/litmus/jobs.c b/litmus/jobs.c | ||
4197 | new file mode 100644 | ||
4198 | index 0000000..e294bc5 | ||
4199 | --- /dev/null | ||
4200 | +++ b/litmus/jobs.c | ||
4201 | @@ -0,0 +1,43 @@ | ||
4202 | +/* litmus/jobs.c - common job control code | ||
4203 | + */ | ||
4204 | + | ||
4205 | +#include <linux/sched.h> | ||
4206 | + | ||
4207 | +#include <litmus/litmus.h> | ||
4208 | +#include <litmus/jobs.h> | ||
4209 | + | ||
4210 | +void prepare_for_next_period(struct task_struct *t) | ||
4211 | +{ | ||
4212 | + BUG_ON(!t); | ||
4213 | + /* prepare next release */ | ||
4214 | + t->rt_param.job_params.release = t->rt_param.job_params.deadline; | ||
4215 | + t->rt_param.job_params.deadline += get_rt_period(t); | ||
4216 | + t->rt_param.job_params.exec_time = 0; | ||
4217 | + /* update job sequence number */ | ||
4218 | + t->rt_param.job_params.job_no++; | ||
4219 | + | ||
4220 | + /* don't confuse Linux */ | ||
4221 | + t->time_slice = 1; | ||
4222 | +} | ||
4223 | + | ||
4224 | +void release_at(struct task_struct *t, lt_t start) | ||
4225 | +{ | ||
4226 | + t->rt_param.job_params.deadline = start; | ||
4227 | + prepare_for_next_period(t); | ||
4228 | + set_rt_flags(t, RT_F_RUNNING); | ||
4229 | +} | ||
4230 | + | ||
4231 | + | ||
4232 | +/* | ||
4233 | + * Deactivate current task until the beginning of the next period. | ||
4234 | + */ | ||
4235 | +long complete_job(void) | ||
4236 | +{ | ||
4237 | + /* Mark that we do not execute anymore */ | ||
4238 | + set_rt_flags(current, RT_F_SLEEP); | ||
4239 | + /* call schedule, this will return when a new job arrives | ||
4240 | + * it also takes care of preparing for the next release | ||
4241 | + */ | ||
4242 | + schedule(); | ||
4243 | + return 0; | ||
4244 | +} | ||
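The bookkeeping above implements implicit deadlines: release_at() seeds the deadline with the start time, and each prepare_for_next_period() turns the old deadline into the new release. A worked example (period 10, released at time 100):

#include <stdio.h>

int main(void)
{
	unsigned long long release, deadline, period = 10;
	unsigned int job_no = 0;

	deadline = 100;  /* release_at(t, 100): deadline := start */
	for (int i = 0; i < 3; i++) {
		/* prepare_for_next_period(): release := old deadline */
		release = deadline;
		deadline += period;
		job_no++;
		printf("job %u: release=%llu deadline=%llu\n",
		       job_no, release, deadline);
	}
	/* prints: job 1: 100/110, job 2: 110/120, job 3: 120/130 */
	return 0;
}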
4245 | diff --git a/litmus/litmus.c b/litmus/litmus.c | ||
4246 | new file mode 100644 | ||
4247 | index 0000000..314bdda | ||
4248 | --- /dev/null | ||
4249 | +++ b/litmus/litmus.c | ||
4250 | @@ -0,0 +1,851 @@ | ||
4251 | +/* litmus.c -- Implementation of the LITMUS syscalls, the LITMUS initialization code, | ||
4252 | + * and the procfs interface. | ||
4253 | + */ | ||
4254 | +#include <asm/uaccess.h> | ||
4255 | +#include <linux/uaccess.h> | ||
4256 | +#include <linux/sysrq.h> | ||
4257 | + | ||
4258 | +#include <linux/module.h> | ||
4259 | +#include <linux/proc_fs.h> | ||
4260 | +#include <linux/slab.h> | ||
4261 | + | ||
4262 | +#include <litmus/litmus.h> | ||
4263 | +#include <linux/sched.h> | ||
4264 | +#include <litmus/sched_plugin.h> | ||
4265 | + | ||
4266 | +#include <litmus/heap.h> | ||
4267 | + | ||
4268 | +#include <litmus/trace.h> | ||
4269 | + | ||
4270 | +/* Number of RT tasks that exist in the system */ | ||
4271 | +atomic_t rt_task_count = ATOMIC_INIT(0); | ||
4272 | +static DEFINE_SPINLOCK(task_transition_lock); | ||
4273 | + | ||
4274 | +/* Give log messages sequential IDs. */ | ||
4275 | +atomic_t __log_seq_no = ATOMIC_INIT(0); | ||
4276 | + | ||
4277 | +/* To send signals from the scheduler | ||
4278 | + * Must drop locks first. | ||
4279 | + */ | ||
4280 | +static LIST_HEAD(sched_sig_list); | ||
4281 | +static DEFINE_SPINLOCK(sched_sig_list_lock); | ||
4282 | + | ||
4283 | +static struct kmem_cache * heap_node_cache; | ||
4284 | + | ||
4285 | +struct heap_node* heap_node_alloc(int gfp_flags) | ||
4286 | +{ | ||
4287 | + return kmem_cache_alloc(heap_node_cache, gfp_flags); | ||
4288 | +} | ||
4289 | + | ||
4290 | +void heap_node_free(struct heap_node* hn) | ||
4291 | +{ | ||
4292 | + kmem_cache_free(heap_node_cache, hn); | ||
4293 | +} | ||
4294 | + | ||
4295 | +/* | ||
4296 | + * sys_set_rt_task_param | ||
4297 | + * @pid: Pid of the task whose scheduling parameters must be changed | ||
4298 | + * @param: New real-time extension parameters such as the execution cost and | ||
4299 | + * period | ||
4300 | + * Syscall for manipulating a task's RT extension params | ||
4301 | + * Returns EFAULT if param is NULL. | ||
4302 | + * ESRCH if pid does not correspond | ||
4303 | + * to a valid task. | ||
4304 | + * EINVAL if either period or execution cost is <=0 | ||
4305 | + * EBUSY if pid refers to a task that is already real-time | ||
4306 | + * 0 if success | ||
4307 | + * | ||
4308 | + * Only non-real-time tasks may be configured with this system call | ||
4309 | + * to avoid races with the scheduler. In practice, this means that a | ||
4310 | + * task's parameters must be set _before_ calling sys_prepare_rt_task() | ||
4311 | + */ | ||
4312 | +asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param) | ||
4313 | +{ | ||
4314 | + struct rt_task tp; | ||
4315 | + struct task_struct *target; | ||
4316 | + int retval = -EINVAL; | ||
4317 | + | ||
4318 | + printk(KERN_INFO "Setting up rt task parameters for process %d.\n", pid); | ||
4319 | + | ||
4320 | + if (pid < 0 || param == 0) { | ||
4321 | + goto out; | ||
4322 | + } | ||
4323 | + if (copy_from_user(&tp, param, sizeof(tp))) { | ||
4324 | + retval = -EFAULT; | ||
4325 | + goto out; | ||
4326 | + } | ||
4327 | + | ||
4328 | + /* Task search and manipulation must be protected */ | ||
4329 | + read_lock_irq(&tasklist_lock); | ||
4330 | + if (!(target = find_task_by_pid(pid))) { | ||
4331 | + retval = -ESRCH; | ||
4332 | + goto out_unlock; | ||
4333 | + } | ||
4334 | + | ||
4335 | + if (is_realtime(target)) { | ||
4336 | + /* The task is already a real-time task. | ||
4337 | + * We cannot allow parameter changes at this point. | ||
4338 | + */ | ||
4339 | + retval = -EBUSY; | ||
4340 | + goto out_unlock; | ||
4341 | + } | ||
4342 | + | ||
4343 | + if (tp.exec_cost <= 0) | ||
4344 | + goto out_unlock; | ||
4345 | + if (tp.period <= 0) | ||
4346 | + goto out_unlock; | ||
4347 | + if (!cpu_online(tp.cpu)) | ||
4348 | + goto out_unlock; | ||
4349 | + if (tp.period < tp.exec_cost) | ||
4350 | + { | ||
4351 | + printk(KERN_INFO "litmus: real-time task %d rejected " | ||
4352 | + "because wcet > period\n", pid); | ||
4353 | + goto out_unlock; | ||
4354 | + } | ||
4355 | + | ||
4356 | + target->rt_param.task_params = tp; | ||
4357 | + | ||
4358 | + retval = 0; | ||
4359 | + out_unlock: | ||
4360 | + read_unlock_irq(&tasklist_lock); | ||
4361 | + out: | ||
4362 | + return retval; | ||
4363 | +} | ||
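A hypothetical user-space caller of this syscall (the syscall number is an assumption, and the struct is reduced to the three fields validated above; the real struct rt_task and a proper wrapper come from liblitmus):

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define __NR_set_rt_task_param 404  /* assumed */

struct rt_task {                       /* illustrative, reduced layout */
	unsigned long long exec_cost;  /* wcet, must be > 0 */
	unsigned long long period;     /* must be >= exec_cost */
	unsigned int cpu;              /* must be online */
};

int main(void)
{
	struct rt_task tp = { .exec_cost = 10, .period = 100, .cpu = 0 };
	/* Must run before the task becomes real-time, else -EBUSY. */
	long ret = syscall(__NR_set_rt_task_param, getpid(), &tp);
	printf("set_rt_task_param: %ld\n", ret);
	return 0;
}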
4364 | + | ||
4365 | +/* Getter of task's RT params | ||
4366 | + * returns EINVAL if param is NULL or pid is negative | ||
4367 | + * returns ESRCH if pid does not correspond to a valid task | ||
4368 | + * returns EFAULT if copying of parameters has failed. | ||
4369 | + */ | ||
4370 | +asmlinkage long sys_get_rt_task_param(pid_t pid, struct rt_task __user * param) | ||
4371 | +{ | ||
4372 | + int retval = -EINVAL; | ||
4373 | + struct task_struct *source; | ||
4374 | + struct rt_task lp; | ||
4375 | + if (param == 0 || pid < 0) | ||
4376 | + goto out; | ||
4377 | + read_lock(&tasklist_lock); | ||
4378 | + if (!(source = find_task_by_pid(pid))) { | ||
4379 | + retval = -ESRCH; | ||
4380 | + goto out_unlock; | ||
4381 | + } | ||
4382 | + lp = source->rt_param.task_params; | ||
4383 | + read_unlock(&tasklist_lock); | ||
4384 | + /* Do copying outside the lock */ | ||
4385 | + retval = | ||
4386 | + copy_to_user(param, &lp, sizeof(lp)) ? -EFAULT : 0; | ||
4387 | + return retval; | ||
4388 | + out_unlock: | ||
4389 | + read_unlock(&tasklist_lock); | ||
4390 | + out: | ||
4391 | + return retval; | ||
4392 | + | ||
4393 | +} | ||
4394 | + | ||
4395 | +/* | ||
4396 | + * This is the crucial function for periodic task implementation, | ||
4397 | + * It checks whether the task is periodic, whether this kind of sleep | ||
4398 | + * is permitted, and calls the plugin-specific sleep, which puts the | ||
4399 | + * task into a wait array. | ||
4400 | + * returns 0 on successful wakeup | ||
4401 | + * returns EPERM if current conditions do not permit such sleep | ||
4402 | + * returns EINVAL if current task is not able to go to sleep | ||
4403 | + */ | ||
4404 | +asmlinkage long sys_complete_job(void) | ||
4405 | +{ | ||
4406 | + int retval = -EPERM; | ||
4407 | + if (!is_realtime(current)) { | ||
4408 | + retval = -EINVAL; | ||
4409 | + goto out; | ||
4410 | + } | ||
4411 | + /* Task with negative or zero period cannot sleep */ | ||
4412 | + if (get_rt_period(current) <= 0) { | ||
4413 | + retval = -EINVAL; | ||
4414 | + goto out; | ||
4415 | + } | ||
4416 | + /* The plugin has to put the task into an | ||
4417 | + * appropriate queue and call schedule | ||
4418 | + */ | ||
4419 | + retval = litmus->complete_job(); | ||
4420 | + out: | ||
4421 | + return retval; | ||
4422 | +} | ||
4423 | + | ||
4424 | +/* This is an "improved" version of sys_complete_job that | ||
4425 | + * addresses the problem of unintentionally missing a job after | ||
4426 | + * an overrun. | ||
4427 | + * | ||
4428 | + * returns 0 on successful wakeup | ||
4429 | + * returns EPERM if current conditions do not permit such sleep | ||
4430 | + * returns EINVAL if current task is not able to go to sleep | ||
4431 | + */ | ||
4432 | +asmlinkage long sys_wait_for_job_release(unsigned int job) | ||
4433 | +{ | ||
4434 | + int retval = -EPERM; | ||
4435 | + if (!is_realtime(current)) { | ||
4436 | + retval = -EINVAL; | ||
4437 | + goto out; | ||
4438 | + } | ||
4439 | + | ||
4440 | + /* Task with negative or zero period cannot sleep */ | ||
4441 | + if (get_rt_period(current) <= 0) { | ||
4442 | + retval = -EINVAL; | ||
4443 | + goto out; | ||
4444 | + } | ||
4445 | + | ||
4446 | + retval = 0; | ||
4447 | + | ||
4448 | + /* first wait until we have "reached" the desired job | ||
4449 | + * | ||
4450 | + * This implementation has at least two problems: | ||
4451 | + * | ||
4452 | + * 1) It doesn't gracefully handle the wrap around of | ||
4453 | + * job_no. Since LITMUS is a prototype, this is not much | ||
4454 | + * of a problem right now. | ||
4455 | + * | ||
4456 | + * 2) It is theoretically racy if a job release occurs | ||
4457 | + * between checking job_no and calling sleep_next_period(). | ||
4458 | + * A proper solution would require adding another callback | ||
4459 | + * in the plugin structure and testing the condition with | ||
4460 | + * interrupts disabled. | ||
4461 | + * | ||
4462 | + * FIXME: At least problem 2 should be taken care of eventually. | ||
4463 | + */ | ||
4464 | + while (!retval && job > current->rt_param.job_params.job_no) | ||
4465 | + /* If the last job overran then job <= job_no and we | ||
4466 | + * don't send the task to sleep. | ||
4467 | + */ | ||
4468 | + retval = litmus->complete_job(); | ||
4469 | + out: | ||
4470 | + return retval; | ||
4471 | +} | ||
4472 | + | ||
4473 | +/* This is a helper syscall to query the current job sequence number. | ||
4474 | + * | ||
4475 | + * returns 0 on successful query | ||
4476 | + * returns EPERM if task is not a real-time task. | ||
4477 | + * returns EFAULT if &job is not a valid pointer. | ||
4478 | + */ | ||
4479 | +asmlinkage long sys_query_job_no(unsigned int __user *job) | ||
4480 | +{ | ||
4481 | + int retval = -EPERM; | ||
4482 | + if (is_realtime(current)) | ||
4483 | + retval = put_user(current->rt_param.job_params.job_no, job); | ||
4484 | + | ||
4485 | + return retval; | ||
4486 | +} | ||
4487 | + | ||
4488 | +struct sched_sig { | ||
4489 | + struct list_head list; | ||
4490 | + struct task_struct* task; | ||
4491 | + unsigned int signal:31; | ||
4492 | + int force:1; | ||
4493 | +}; | ||
4494 | + | ||
4495 | +static void __scheduler_signal(struct task_struct *t, unsigned int signo, | ||
4496 | + int force) | ||
4497 | +{ | ||
4498 | + struct sched_sig* sig; | ||
4499 | + | ||
4500 | + sig = kmalloc(sizeof(struct sched_sig), GFP_ATOMIC); | ||
4501 | + if (!sig) { | ||
4502 | + TRACE_TASK(t, "dropping signal: %u\n", t); | ||
4503 | + return; | ||
4504 | + } | ||
4505 | + | ||
4506 | + spin_lock(&sched_sig_list_lock); | ||
4507 | + | ||
4508 | + sig->signal = signo; | ||
4509 | + sig->force = force; | ||
4510 | + sig->task = t; | ||
4511 | + get_task_struct(t); | ||
4512 | + list_add(&sig->list, &sched_sig_list); | ||
4513 | + | ||
4514 | + spin_unlock(&sched_sig_list_lock); | ||
4515 | +} | ||
4516 | + | ||
4517 | +void scheduler_signal(struct task_struct *t, unsigned int signo) | ||
4518 | +{ | ||
4519 | + __scheduler_signal(t, signo, 0); | ||
4520 | +} | ||
4521 | + | ||
4522 | +void force_scheduler_signal(struct task_struct *t, unsigned int signo) | ||
4523 | +{ | ||
4524 | + __scheduler_signal(t, signo, 1); | ||
4525 | +} | ||
4526 | + | ||
4527 | +/* FIXME: get rid of the locking and do this on a per-processor basis */ | ||
4528 | +void send_scheduler_signals(void) | ||
4529 | +{ | ||
4530 | + unsigned long flags; | ||
4531 | + struct list_head *p, *extra; | ||
4532 | + struct siginfo info; | ||
4533 | + struct sched_sig* sig; | ||
4534 | + struct task_struct* t; | ||
4535 | + struct list_head claimed; | ||
4536 | + | ||
4537 | + if (spin_trylock_irqsave(&sched_sig_list_lock, flags)) { | ||
4538 | + if (list_empty(&sched_sig_list)) | ||
4539 | + p = NULL; | ||
4540 | + else { | ||
4541 | + p = sched_sig_list.next; | ||
4542 | + list_del(&sched_sig_list); | ||
4543 | + INIT_LIST_HEAD(&sched_sig_list); | ||
4544 | + } | ||
4545 | + spin_unlock_irqrestore(&sched_sig_list_lock, flags); | ||
4546 | + | ||
4547 | + /* abort if there are no signals */ | ||
4548 | + if (!p) | ||
4549 | + return; | ||
4550 | + | ||
4551 | + /* take signal list we just obtained */ | ||
4552 | + list_add(&claimed, p); | ||
4553 | + | ||
4554 | + list_for_each_safe(p, extra, &claimed) { | ||
4555 | + list_del(p); | ||
4556 | + sig = list_entry(p, struct sched_sig, list); | ||
4557 | + t = sig->task; | ||
4558 | + info.si_signo = sig->signal; | ||
4559 | + info.si_errno = 0; | ||
4560 | + info.si_code = SI_KERNEL; | ||
4561 | + info.si_pid = 1; | ||
4562 | + info.si_uid = 0; | ||
4563 | + TRACE("sending signal %d to %d\n", info.si_signo, | ||
4564 | + t->pid); | ||
4565 | + if (sig->force) | ||
4566 | + force_sig_info(sig->signal, &info, t); | ||
4567 | + else | ||
4568 | + send_sig_info(sig->signal, &info, t); | ||
4569 | + put_task_struct(t); | ||
4570 | + kfree(sig); | ||
4571 | + } | ||
4572 | + } | ||
4573 | + | ||
4574 | +} | ||
4575 | + | ||
4576 | +#ifdef CONFIG_NP_SECTION | ||
4577 | + | ||
4578 | +static inline void np_mem_error(struct task_struct* t, const char* reason) | ||
4579 | +{ | ||
4580 | + if (t->state != TASK_DEAD && !(t->flags & PF_EXITING)) { | ||
4581 | + TRACE("np section: %s => %s/%d killed\n", | ||
4582 | + reason, t->comm, t->pid); | ||
4583 | + force_scheduler_signal(t, SIGKILL); | ||
4584 | + } | ||
4585 | +} | ||
4586 | + | ||
4587 | +/* sys_register_np_flag() allows real-time tasks to register an | ||
4588 | + * np section indicator. | ||
4589 | + * returns 0 if the flag was successfully registered | ||
4590 | + * returns EINVAL if current task is not a real-time task | ||
4591 | + * returns EFAULT if *flag couldn't be written | ||
4592 | + */ | ||
4593 | +asmlinkage long sys_register_np_flag(short __user *flag) | ||
4594 | +{ | ||
4595 | + int retval = -EINVAL; | ||
4596 | + short test_val = RT_PREEMPTIVE; | ||
4597 | + | ||
4598 | + /* avoid races with the scheduler */ | ||
4599 | + preempt_disable(); | ||
4600 | + TRACE("reg_np_flag(%p) for %s/%d\n", flag, | ||
4601 | + current->comm, current->pid); | ||
4602 | + | ||
4603 | + /* Let's first try to write to the address. | ||
4604 | + * That way it is initialized and any bugs | ||
4605 | + * involving dangling pointers will be caught | ||
4606 | + * early. | ||
4607 | + * NULL indicates disabling np section support | ||
4608 | + * and should not be tested. | ||
4609 | + */ | ||
4610 | + if (flag) | ||
4611 | + retval = poke_kernel_address(test_val, flag); | ||
4612 | + else | ||
4613 | + retval = 0; | ||
4614 | + TRACE("reg_np_flag: retval=%d\n", retval); | ||
4615 | + if (unlikely(0 != retval)) | ||
4616 | + np_mem_error(current, "np flag: not writable"); | ||
4617 | + else | ||
4618 | + /* the pointer is ok */ | ||
4619 | + current->rt_param.np_flag = flag; | ||
4620 | + | ||
4621 | + preempt_enable(); | ||
4622 | + return retval; | ||
4623 | +} | ||
4624 | + | ||
4625 | + | ||
4626 | +void request_exit_np(struct task_struct *t) | ||
4627 | +{ | ||
4628 | + int ret; | ||
4629 | + short flag; | ||
4630 | + | ||
4631 | + /* We can only do this if t is actually currently scheduled on this CPU | ||
4632 | + * because otherwise we are in the wrong address space. Thus make sure | ||
4633 | + * to check. | ||
4634 | + */ | ||
4635 | + BUG_ON(t != current); | ||
4636 | + | ||
4637 | + if (unlikely(!is_realtime(t) || !t->rt_param.np_flag)) { | ||
4638 | + TRACE_TASK(t, "request_exit_np(): BAD TASK!\n"); | ||
4639 | + return; | ||
4640 | + } | ||
4641 | + | ||
4642 | + flag = RT_EXIT_NP_REQUESTED; | ||
4643 | + ret = poke_kernel_address(flag, t->rt_param.np_flag + 1); | ||
4644 | + TRACE("request_exit_np(%s/%d)\n", t->comm, t->pid); | ||
4645 | + if (unlikely(0 != ret)) | ||
4646 | + np_mem_error(current, "request_exit_np(): flag not writable"); | ||
4647 | + | ||
4648 | +} | ||
4649 | + | ||
4650 | + | ||
4651 | +int is_np(struct task_struct* t) | ||
4652 | +{ | ||
4653 | + int ret; | ||
4654 | +	unsigned short flag = 0x5858; /* = XX, looks nicer in debug */ | ||
4655 | + | ||
4656 | + BUG_ON(t != current); | ||
4657 | + | ||
4658 | + if (unlikely(t->rt_param.kernel_np)) | ||
4659 | + return 1; | ||
4660 | + else if (unlikely(t->rt_param.np_flag == NULL) || | ||
4661 | + t->flags & PF_EXITING || | ||
4662 | + t->state == TASK_DEAD) | ||
4663 | + return 0; | ||
4664 | + else { | ||
4665 | + /* This is the tricky part. The process has registered a | ||
4666 | + * non-preemptive section marker. We now need to check whether | ||
4667 | +	 * it is set to RT_NON_PREEMPTIVE. Along the way we could | ||
4668 | + * discover that the pointer points to an unmapped region (=> | ||
4669 | + * kill the task) or that the location contains some garbage | ||
4670 | + * value (=> also kill the task). Killing the task in any case | ||
4671 | + * forces userspace to play nicely. Any bugs will be discovered | ||
4672 | + * immediately. | ||
4673 | + */ | ||
4674 | + ret = probe_kernel_address(t->rt_param.np_flag, flag); | ||
4675 | + if (0 == ret && (flag == RT_NON_PREEMPTIVE || | ||
4676 | + flag == RT_PREEMPTIVE)) | ||
4677 | + return flag != RT_PREEMPTIVE; | ||
4678 | + else { | ||
4679 | + /* either we could not read from the address or | ||
4680 | + * it contained garbage => kill the process | ||
4681 | + * FIXME: Should we cause a SEGFAULT instead? | ||
4682 | + */ | ||
4683 | + TRACE("is_np: ret=%d flag=%c%c (%x)\n", ret, | ||
4684 | + flag & 0xff, (flag >> 8) & 0xff, flag); | ||
4685 | + np_mem_error(t, "is_np() could not read"); | ||
4686 | + return 0; | ||
4687 | + } | ||
4688 | + } | ||
4689 | +} | ||
4690 | + | ||
4691 | +/* | ||
4692 | + * sys_exit_np() allows a real-time task to signal that it left a | ||
4693 | + * non-preemptable section. It will be called after the kernel requested a | ||
4694 | + * callback in the preemption indicator flag. | ||
4695 | + * returns 0 if the signal was valid and processed. | ||
4696 | + * returns -EINVAL if the current task is not a real-time task | ||
4697 | + */ | ||
4698 | +asmlinkage long sys_exit_np(void) | ||
4699 | +{ | ||
4700 | + int retval = -EINVAL; | ||
4701 | + | ||
4702 | + TS_EXIT_NP_START; | ||
4703 | + | ||
4704 | + if (!is_realtime(current)) | ||
4705 | + goto out; | ||
4706 | + | ||
4707 | + TRACE("sys_exit_np(%s/%d)\n", current->comm, current->pid); | ||
4708 | + /* force rescheduling so that we can be preempted */ | ||
4709 | + set_tsk_need_resched(current); | ||
4710 | + retval = 0; | ||
4711 | + out: | ||
4712 | + | ||
4713 | + TS_EXIT_NP_END; | ||
4714 | + return retval; | ||
4715 | +} | ||
4716 | + | ||
4717 | +#else /* !CONFIG_NP_SECTION */ | ||
4718 | + | ||
4719 | +asmlinkage long sys_register_np_flag(short __user *flag) | ||
4720 | +{ | ||
4721 | + return -ENOSYS; | ||
4722 | +} | ||
4723 | + | ||
4724 | +asmlinkage long sys_exit_np(void) | ||
4725 | +{ | ||
4726 | + return -ENOSYS; | ||
4727 | +} | ||
4728 | + | ||
4729 | +#endif /* CONFIG_NP_SECTION */ | ||
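| + | ||
| +/* For reference, a minimal userspace-side sketch of the np-flag protocol | ||
| + * above (illustrative only -- the syscall wrappers and the layout as two | ||
| + * adjacent shorts, implied by the write to np_flag + 1 in | ||
| + * request_exit_np(), are assumptions, not part of this patch): | ||
| + * | ||
| + *	static volatile short np_flag[2];  // [0] = state, [1] = exit request | ||
| + * | ||
| + *	syscall(__NR_register_np_flag, np_flag); | ||
| + *	np_flag[0] = RT_NON_PREEMPTIVE;    // enter np section | ||
| + *	// ... critical section ... | ||
| + *	np_flag[0] = RT_PREEMPTIVE;        // leave np section | ||
| + *	if (np_flag[1] == RT_EXIT_NP_REQUESTED) | ||
| + *		syscall(__NR_exit_np);     // kernel requested a callback | ||
| + */ | ||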
4730 | + | ||
4731 | + | ||
4732 | +/* p is a real-time task. Re-init its state as a best-effort task. */ | ||
4733 | +static void reinit_litmus_state(struct task_struct* p, int restore) | ||
4734 | +{ | ||
4735 | + struct rt_task user_config = {}; | ||
4736 | +	short __user *np_flag = NULL; | ||
4737 | + | ||
4738 | + if (restore) { | ||
4739 | +		/* Save the user-space provided configuration data. */ | ||
4740 | + user_config = p->rt_param.task_params; | ||
4741 | + np_flag = p->rt_param.np_flag; | ||
4742 | + } | ||
4743 | + | ||
4744 | + /* We probably should not be inheriting any task's priority | ||
4745 | + * at this point in time. | ||
4746 | + */ | ||
4747 | + WARN_ON(p->rt_param.inh_task); | ||
4748 | + | ||
4749 | + /* We need to restore the priority of the task. */ | ||
4750 | +// __setscheduler(p, p->rt_param.old_policy, p->rt_param.old_prio); | ||
4751 | + | ||
4752 | + /* Cleanup everything else. */ | ||
4753 | +	memset(&p->rt_param, 0, sizeof(p->rt_param)); | ||
4754 | + | ||
4755 | + /* Restore preserved fields. */ | ||
4756 | + if (restore) { | ||
4757 | + p->rt_param.task_params = user_config; | ||
4758 | + p->rt_param.np_flag = np_flag; | ||
4759 | + } | ||
4760 | +} | ||
4761 | + | ||
4762 | +long litmus_admit_task(struct task_struct* tsk) | ||
4763 | +{ | ||
4764 | + long retval = 0; | ||
4765 | +	unsigned long flags; | ||
4766 | + | ||
4767 | + BUG_ON(is_realtime(tsk)); | ||
4768 | + | ||
4769 | + if (get_rt_period(tsk) == 0 || | ||
4770 | + get_exec_cost(tsk) > get_rt_period(tsk)) { | ||
4771 | + TRACE_TASK(tsk, "litmus admit: invalid task parameters " | ||
4772 | + "(%lu, %lu)\n", | ||
4773 | + get_exec_cost(tsk), get_rt_period(tsk)); | ||
4774 | + return -EINVAL; | ||
4775 | + } | ||
4776 | + | ||
4777 | + if (!cpu_online(get_partition(tsk))) | ||
4778 | + { | ||
4779 | + TRACE_TASK(tsk, "litmus admit: cpu %d is not online\n", | ||
4780 | + get_partition(tsk)); | ||
4781 | + return -EINVAL; | ||
4782 | + } | ||
4783 | + | ||
4784 | + INIT_LIST_HEAD(&tsk_rt(tsk)->list); | ||
4785 | + | ||
4786 | + /* avoid scheduler plugin changing underneath us */ | ||
4787 | + spin_lock_irqsave(&task_transition_lock, flags); | ||
4788 | + | ||
4789 | + /* allocate heap node for this task */ | ||
4790 | + tsk_rt(tsk)->heap_node = kmem_cache_alloc(heap_node_cache, GFP_ATOMIC); | ||
4791 | + if (!tsk_rt(tsk)->heap_node) { | ||
4792 | + printk(KERN_WARNING "litmus: no more heap node memory!?\n"); | ||
4793 | + retval = -ENOMEM; | ||
4794 | + } else | ||
4795 | + heap_node_init(&tsk_rt(tsk)->heap_node, tsk); | ||
4796 | + | ||
4797 | + if (!retval) | ||
4798 | + retval = litmus->admit_task(tsk); | ||
4799 | + | ||
4800 | + if (!retval) { | ||
4801 | + sched_trace_task_name(tsk); | ||
4802 | + sched_trace_task_param(tsk); | ||
4803 | + atomic_inc(&rt_task_count); | ||
4804 | + } | ||
4805 | + | ||
4806 | + spin_unlock_irqrestore(&task_transition_lock, flags); | ||
4807 | + | ||
4808 | + return retval; | ||
4809 | +} | ||
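| + | ||
| +/* For example, exec_cost = 10ms with period = 100ms passes the checks | ||
| + * above (0 < cost <= period, partition CPU online); period == 0, | ||
| + * cost > period, or an offline partition CPU yields -EINVAL. | ||
| + */ | ||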
4810 | + | ||
4811 | +void litmus_exit_task(struct task_struct* tsk) | ||
4812 | +{ | ||
4813 | + if (is_realtime(tsk)) { | ||
4814 | + sched_trace_task_completion(tsk, 1); | ||
4815 | + litmus->task_exit(tsk); | ||
4816 | + BUG_ON(heap_node_in_heap(tsk_rt(tsk)->heap_node)); | ||
4817 | + kmem_cache_free(heap_node_cache, tsk_rt(tsk)->heap_node); | ||
4818 | + atomic_dec(&rt_task_count); | ||
4819 | + reinit_litmus_state(tsk, 1); | ||
4820 | + } | ||
4821 | +} | ||
4822 | + | ||
4823 | +/* Switching a plugin in use is tricky. | ||
4824 | + * We must watch out that no real-time tasks exist | ||
4825 | + * (and that none are created in parallel) and that the plugin is not | ||
4826 | + * currently in use on any processor (in theory). | ||
4827 | + * | ||
4828 | + * For now, we don't enforce the second part since it is unlikely to cause | ||
4829 | + * any trouble by itself as long as we don't unload modules. | ||
4830 | + */ | ||
4831 | +int switch_sched_plugin(struct sched_plugin* plugin) | ||
4832 | +{ | ||
4833 | +	unsigned long flags; | ||
4834 | + int ret = 0; | ||
4835 | + | ||
4836 | + BUG_ON(!plugin); | ||
4837 | + | ||
4838 | + /* stop task transitions */ | ||
4839 | + spin_lock_irqsave(&task_transition_lock, flags); | ||
4840 | + | ||
4841 | + /* don't switch if there are active real-time tasks */ | ||
4842 | + if (atomic_read(&rt_task_count) == 0) { | ||
4843 | + ret = litmus->deactivate_plugin(); | ||
4844 | + if (0 != ret) | ||
4845 | + goto out; | ||
4846 | + ret = plugin->activate_plugin(); | ||
4847 | + if (0 != ret) { | ||
4848 | + printk(KERN_INFO "Can't activate %s (%d).\n", | ||
4849 | + plugin->plugin_name, ret); | ||
4850 | + plugin = &linux_sched_plugin; | ||
4851 | + } | ||
4852 | + printk(KERN_INFO "Switching to LITMUS^RT plugin %s.\n", plugin->plugin_name); | ||
4853 | + litmus = plugin; | ||
4854 | + } else | ||
4855 | + ret = -EBUSY; | ||
4856 | +out: | ||
4857 | + spin_unlock_irqrestore(&task_transition_lock, flags); | ||
4858 | + return ret; | ||
4859 | +} | ||
4860 | + | ||
4861 | +/* Called upon fork. | ||
4862 | + * p is the newly forked task. | ||
4863 | + */ | ||
4864 | +void litmus_fork(struct task_struct* p) | ||
4865 | +{ | ||
4866 | + if (is_realtime(p)) | ||
4867 | +		/* clean out any litmus related state, don't preserve anything */ | ||
4868 | + reinit_litmus_state(p, 0); | ||
4869 | +} | ||
4870 | + | ||
4871 | +/* Called upon execve(). | ||
4872 | + * current is doing the exec. | ||
4873 | + * Don't let address space specific stuff leak. | ||
4874 | + */ | ||
4875 | +void litmus_exec(void) | ||
4876 | +{ | ||
4877 | + struct task_struct* p = current; | ||
4878 | + | ||
4879 | + if (is_realtime(p)) { | ||
4880 | + WARN_ON(p->rt_param.inh_task); | ||
4881 | + p->rt_param.np_flag = NULL; | ||
4882 | + } | ||
4883 | +} | ||
4884 | + | ||
4885 | +void exit_litmus(struct task_struct *dead_tsk) | ||
4886 | +{ | ||
4887 | + if (is_realtime(dead_tsk)) | ||
4888 | + litmus_exit_task(dead_tsk); | ||
4889 | +} | ||
4890 | + | ||
4891 | + | ||
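| +/* list_qsort(): in-place recursive quicksort of a list_head list, using | ||
| + * the first element as pivot. A comparator sketch (struct foo and its | ||
| + * key are illustrative, not from this patch): | ||
| + * | ||
| + *	static int less(struct list_head* a, struct list_head* b) | ||
| + *	{ | ||
| + *		return list_entry(a, struct foo, list)->key < | ||
| + *		       list_entry(b, struct foo, list)->key; | ||
| + *	} | ||
| + * | ||
| + * list_qsort(&my_list, less) then sorts my_list in ascending key order. | ||
| + */ | ||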
4892 | +void list_qsort(struct list_head* list, list_cmp_t less_than) | ||
4893 | +{ | ||
4894 | + struct list_head lt; | ||
4895 | + struct list_head geq; | ||
4896 | + struct list_head *pos, *extra, *pivot; | ||
4897 | + int n_lt = 0, n_geq = 0; | ||
4898 | + BUG_ON(!list); | ||
4899 | + | ||
4900 | + if (list->next == list) | ||
4901 | + return; | ||
4902 | + | ||
4903 | + INIT_LIST_HEAD(<); | ||
4904 | + INIT_LIST_HEAD(&geq); | ||
4905 | + | ||
4906 | + pivot = list->next; | ||
4907 | + list_del(pivot); | ||
4908 | + list_for_each_safe(pos, extra, list) { | ||
4909 | + list_del(pos); | ||
4910 | + if (less_than(pos, pivot)) { | ||
4911 | + list_add(pos, <); | ||
4912 | + n_lt++; | ||
4913 | + } else { | ||
4914 | + list_add(pos, &geq); | ||
4915 | + n_geq++; | ||
4916 | + } | ||
4917 | + } | ||
4918 | + if (n_lt < n_geq) { | ||
4919 | + list_qsort(<, less_than); | ||
4920 | + list_qsort(&geq, less_than); | ||
4921 | + } else { | ||
4922 | + list_qsort(&geq, less_than); | ||
4923 | + list_qsort(<, less_than); | ||
4924 | + } | ||
4925 | + list_splice(&geq, list); | ||
4926 | + list_add(pivot, list); | ||
4927 | + list_splice(<, list); | ||
4928 | +} | ||
4929 | + | ||
4930 | +#ifdef CONFIG_MAGIC_SYSRQ | ||
4931 | +int sys_kill(int pid, int sig); | ||
4932 | + | ||
4933 | +static void sysrq_handle_kill_rt_tasks(int key, struct tty_struct *tty) | ||
4934 | +{ | ||
4935 | + struct task_struct *t; | ||
4936 | + read_lock(&tasklist_lock); | ||
4937 | + for_each_process(t) { | ||
4938 | + if (is_realtime(t)) { | ||
4939 | + sys_kill(t->pid, SIGKILL); | ||
4940 | + } | ||
4941 | + } | ||
4942 | + read_unlock(&tasklist_lock); | ||
4943 | +} | ||
4944 | + | ||
4945 | +static struct sysrq_key_op sysrq_kill_rt_tasks_op = { | ||
4946 | + .handler = sysrq_handle_kill_rt_tasks, | ||
4947 | + .help_msg = "Quit-rt-tasks", | ||
4948 | + .action_msg = "sent SIGKILL to all real-time tasks", | ||
4949 | +}; | ||
4950 | +#endif | ||
4951 | + | ||
4952 | +static int proc_read_stats(char *page, char **start, | ||
4953 | + off_t off, int count, | ||
4954 | + int *eof, void *data) | ||
4955 | +{ | ||
4956 | + int len; | ||
4957 | + | ||
4958 | + len = snprintf(page, PAGE_SIZE, | ||
4959 | + "real-time task count = %d\n", | ||
4960 | + atomic_read(&rt_task_count)); | ||
4961 | + return len; | ||
4962 | +} | ||
4963 | + | ||
4964 | +static int proc_read_plugins(char *page, char **start, | ||
4965 | + off_t off, int count, | ||
4966 | + int *eof, void *data) | ||
4967 | +{ | ||
4968 | + int len; | ||
4969 | + | ||
4970 | + len = print_sched_plugins(page, PAGE_SIZE); | ||
4971 | + return len; | ||
4972 | +} | ||
4973 | + | ||
4974 | +static int proc_read_curr(char *page, char **start, | ||
4975 | + off_t off, int count, | ||
4976 | + int *eof, void *data) | ||
4977 | +{ | ||
4978 | + int len; | ||
4979 | + | ||
4980 | + len = snprintf(page, PAGE_SIZE, "%s\n", litmus->plugin_name); | ||
4981 | + return len; | ||
4982 | +} | ||
4983 | + | ||
4984 | +static int proc_write_curr(struct file *file, | ||
4985 | + const char *buffer, | ||
4986 | + unsigned long count, | ||
4987 | + void *data) | ||
4988 | +{ | ||
4989 | + int len, ret; | ||
4990 | + char name[65]; | ||
4991 | + struct sched_plugin* found; | ||
4992 | + | ||
4993 | +	if (count > 64) | ||
4994 | + len = 64; | ||
4995 | + else | ||
4996 | + len = count; | ||
4997 | + | ||
4998 | +	if (copy_from_user(name, buffer, len)) | ||
4999 | + return -EFAULT; | ||
5000 | + | ||
5001 | + name[len] = '\0'; | ||
5002 | + /* chomp name */ | ||
5003 | + if (len > 1 && name[len - 1] == '\n') | ||
5004 | + name[len - 1] = '\0'; | ||
5005 | + | ||
5006 | + found = find_sched_plugin(name); | ||
5007 | + | ||
5008 | + if (found) { | ||
5009 | + ret = switch_sched_plugin(found); | ||
5010 | + if (ret != 0) | ||
5011 | + printk(KERN_INFO "Could not switch plugin: %d\n", ret); | ||
5012 | + } else | ||
5013 | + printk(KERN_INFO "Plugin '%s' is unknown.\n", name); | ||
5014 | + | ||
5015 | + return len; | ||
5016 | +} | ||
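| + | ||
| +/* Example shell usage of the proc entries created below (illustrative): | ||
| + * | ||
| + *	echo C-EDF > /proc/litmus/active_plugin | ||
| + *	cat /proc/litmus/active_plugin | ||
| + * | ||
| + * The switch succeeds only while no real-time tasks are active | ||
| + * (switch_sched_plugin() returns -EBUSY otherwise). | ||
| + */ | ||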
5017 | + | ||
5018 | + | ||
5019 | +static struct proc_dir_entry *litmus_dir = NULL, | ||
5020 | + *curr_file = NULL, | ||
5021 | + *stat_file = NULL, | ||
5022 | + *plugs_file = NULL; | ||
5023 | + | ||
5024 | +static int __init init_litmus_proc(void) | ||
5025 | +{ | ||
5026 | + litmus_dir = proc_mkdir("litmus", NULL); | ||
5027 | + if (!litmus_dir) { | ||
5028 | + printk(KERN_ERR "Could not allocate LITMUS^RT procfs entry.\n"); | ||
5029 | + return -ENOMEM; | ||
5030 | + } | ||
5031 | + litmus_dir->owner = THIS_MODULE; | ||
5032 | + | ||
5033 | + curr_file = create_proc_entry("active_plugin", | ||
5034 | + 0644, litmus_dir); | ||
5035 | + if (!curr_file) { | ||
5036 | + printk(KERN_ERR "Could not allocate active_plugin " | ||
5037 | + "procfs entry.\n"); | ||
5038 | + return -ENOMEM; | ||
5039 | + } | ||
5040 | + curr_file->owner = THIS_MODULE; | ||
5041 | + curr_file->read_proc = proc_read_curr; | ||
5042 | + curr_file->write_proc = proc_write_curr; | ||
5043 | + | ||
5044 | + stat_file = create_proc_read_entry("stats", 0444, litmus_dir, | ||
5045 | + proc_read_stats, NULL); | ||
5046 | + | ||
5047 | + plugs_file = create_proc_read_entry("plugins", 0444, litmus_dir, | ||
5048 | + proc_read_plugins, NULL); | ||
5049 | + | ||
5050 | + return 0; | ||
5051 | +} | ||
5052 | + | ||
5053 | +static void exit_litmus_proc(void) | ||
5054 | +{ | ||
5055 | + if (plugs_file) | ||
5056 | + remove_proc_entry("plugins", litmus_dir); | ||
5057 | + if (stat_file) | ||
5058 | + remove_proc_entry("stats", litmus_dir); | ||
5059 | + if (curr_file) | ||
5060 | + remove_proc_entry("active_plugin", litmus_dir); | ||
5061 | + if (litmus_dir) | ||
5062 | + remove_proc_entry("litmus", NULL); | ||
5063 | +} | ||
5064 | + | ||
5065 | +extern struct sched_plugin linux_sched_plugin; | ||
5066 | + | ||
5067 | +static int __init _init_litmus(void) | ||
5068 | +{ | ||
5069 | +	/* Common initialization. Plugin switches and task transitions | ||
5070 | +	 * are serialized via task_transition_lock to enforce a single | ||
5071 | +	 * mode change operation at a time. | ||
5072 | + */ | ||
5073 | +	printk(KERN_INFO "Starting LITMUS^RT kernel\n"); | ||
5074 | + | ||
5075 | + register_sched_plugin(&linux_sched_plugin); | ||
5076 | + | ||
5077 | + heap_node_cache = KMEM_CACHE(heap_node, 0); | ||
5078 | + if (!heap_node_cache) | ||
5079 | + return -ENOMEM; | ||
5080 | + | ||
5081 | +#ifdef CONFIG_MAGIC_SYSRQ | ||
5082 | + /* offer some debugging help */ | ||
5083 | + if (!register_sysrq_key('q', &sysrq_kill_rt_tasks_op)) | ||
5084 | + printk("Registered kill rt tasks magic sysrq.\n"); | ||
5085 | + else | ||
5086 | + printk("Could not register kill rt tasks magic sysrq.\n"); | ||
5087 | +#endif | ||
5088 | + | ||
5089 | + init_litmus_proc(); | ||
5090 | + | ||
5091 | + return 0; | ||
5092 | +} | ||
5093 | + | ||
5094 | +static void _exit_litmus(void) | ||
5095 | +{ | ||
5096 | + exit_litmus_proc(); | ||
5097 | + kmem_cache_destroy(heap_node_cache); | ||
5098 | +} | ||
5099 | + | ||
5100 | +module_init(_init_litmus); | ||
5101 | +module_exit(_exit_litmus); | ||
5102 | diff --git a/litmus/norqlock.c b/litmus/norqlock.c | ||
5103 | new file mode 100644 | ||
5104 | index 0000000..11f85d3 | ||
5105 | --- /dev/null | ||
5106 | +++ b/litmus/norqlock.c | ||
5107 | @@ -0,0 +1,56 @@ | ||
5108 | +#include <linux/list.h> | ||
5109 | +#include <linux/bitops.h> | ||
5110 | +#include <linux/percpu.h> | ||
5111 | +#include <linux/module.h> | ||
5112 | +#include <linux/smp.h> | ||
5113 | + | ||
5114 | +#include <litmus/norqlock.h> | ||
5115 | + | ||
5116 | +struct worklist { | ||
5117 | + struct no_rqlock_work* next; | ||
5118 | +}; | ||
5119 | + | ||
5120 | +static DEFINE_PER_CPU(struct worklist, norq_worklist) = {NULL}; | ||
5121 | + | ||
5122 | +void init_no_rqlock_work(struct no_rqlock_work* w, work_t work, | ||
5123 | + unsigned long arg) | ||
5124 | +{ | ||
5125 | + w->active = 0; | ||
5126 | + w->work = work; | ||
5127 | + w->arg = arg; | ||
5128 | + w->next = NULL; | ||
5129 | +} | ||
5130 | + | ||
5131 | +void __do_without_rqlock(struct no_rqlock_work *work) | ||
5132 | +{ | ||
5133 | +	unsigned long flags; | ||
5134 | + struct worklist* wl; | ||
5135 | + | ||
5136 | + local_irq_save(flags); | ||
5137 | + wl = &__get_cpu_var(norq_worklist); | ||
5138 | + work->next = wl->next; | ||
5139 | + wl->next = work; | ||
5140 | + local_irq_restore(flags); | ||
5141 | +} | ||
5142 | + | ||
5143 | +void tick_no_rqlock(void) | ||
5144 | +{ | ||
5145 | +	unsigned long flags; | ||
5146 | + struct no_rqlock_work *todo, *next; | ||
5147 | + | ||
5148 | + local_irq_save(flags); | ||
5149 | + | ||
5150 | + next = __get_cpu_var(norq_worklist).next; | ||
5151 | + __get_cpu_var(norq_worklist).next = NULL; | ||
5152 | + | ||
5153 | + while (next) { | ||
5154 | + todo = next; | ||
5155 | + next = next->next; | ||
5156 | + todo->next = NULL; | ||
5157 | + smp_mb__before_clear_bit(); | ||
5158 | + clear_bit(0, (void*) &todo->active); | ||
5159 | + todo->work(todo->arg); | ||
5160 | + } | ||
5161 | + | ||
5162 | + local_irq_restore(flags); | ||
5163 | +} | ||
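| + | ||
| +/* The enqueue-side entry point do_without_rqlock() is declared in | ||
| + * litmus/norqlock.h; a sketch of its presumed shape, based on the | ||
| + * 'active' bit that tick_no_rqlock() clears above (an assumption -- the | ||
| + * header is not part of this hunk): | ||
| + * | ||
| + *	static inline void do_without_rqlock(struct no_rqlock_work* work) | ||
| + *	{ | ||
| + *		if (!test_and_set_bit(0, (void*) &work->active)) | ||
| + *			__do_without_rqlock(work); | ||
| + *	} | ||
| + */ | ||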
5164 | diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c | ||
5165 | new file mode 100644 | ||
5166 | index 0000000..be4ef5e | ||
5167 | --- /dev/null | ||
5168 | +++ b/litmus/rt_domain.c | ||
5169 | @@ -0,0 +1,289 @@ | ||
5170 | +/* | ||
5171 | + * litmus/rt_domain.c | ||
5172 | + * | ||
5173 | + * LITMUS real-time infrastructure. This file contains the | ||
5174 | + * functions that manipulate RT domains. RT domains are an abstraction | ||
5175 | + * of a ready queue and a release queue. | ||
5176 | + */ | ||
5177 | + | ||
5178 | +#include <linux/percpu.h> | ||
5179 | +#include <linux/sched.h> | ||
5180 | +#include <linux/list.h> | ||
5181 | + | ||
5182 | +#include <litmus/litmus.h> | ||
5183 | +#include <litmus/sched_plugin.h> | ||
5184 | +#include <litmus/sched_trace.h> | ||
5185 | + | ||
5186 | +#include <litmus/rt_domain.h> | ||
5187 | + | ||
5188 | +#include <litmus/trace.h> | ||
5189 | + | ||
5190 | +#include <litmus/heap.h> | ||
5191 | + | ||
5192 | +static int dummy_resched(rt_domain_t *rt) | ||
5193 | +{ | ||
5194 | + return 0; | ||
5195 | +} | ||
5196 | + | ||
5197 | +static int dummy_order(struct heap_node* a, struct heap_node* b) | ||
5198 | +{ | ||
5199 | + return 0; | ||
5200 | +} | ||
5201 | + | ||
5202 | +/* default implementation: use default lock */ | ||
5203 | +static void default_release_jobs(rt_domain_t* rt, struct heap* tasks) | ||
5204 | +{ | ||
5205 | + merge_ready(rt, tasks); | ||
5206 | +} | ||
5207 | + | ||
5208 | +static unsigned int time2slot(lt_t time) | ||
5209 | +{ | ||
5210 | + return (unsigned int) time2quanta(time, FLOOR) % RELEASE_QUEUE_SLOTS; | ||
5211 | +} | ||
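| + | ||
| +/* Note: distinct release times may hash to the same slot; each slot holds | ||
| + * a list of release_heap nodes, kept sorted by release time, which | ||
| + * get_release_heap() below searches so that equal times share one heap. | ||
| + */ | ||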
5212 | + | ||
5213 | +int heap_earlier_release(struct heap_node *_a, struct heap_node *_b) | ||
5214 | +{ | ||
5215 | + struct release_heap *a = _a->value; | ||
5216 | + struct release_heap *b = _b->value; | ||
5217 | + return lt_before(a->release_time, b->release_time); | ||
5218 | +} | ||
5219 | + | ||
5220 | +/* Caller must hold the release lock. | ||
5221 | + * Returns the heap for the given release time. If no such heap exists prior | ||
5222 | + * to the invocation, it will be created. | ||
5223 | + */ | ||
5224 | +static struct release_heap* get_release_heap(rt_domain_t *rt, lt_t release_time) | ||
5225 | +{ | ||
5226 | + struct list_head* pos; | ||
5227 | + struct release_heap* heap = NULL; | ||
5228 | + struct release_heap* rh; | ||
5229 | + unsigned int slot = time2slot(release_time); | ||
5230 | + int inserted; | ||
5231 | + | ||
5232 | + /* initialize pos for the case that the list is empty */ | ||
5233 | + pos = rt->release_queue.slot[slot].next; | ||
5234 | + list_for_each(pos, &rt->release_queue.slot[slot]) { | ||
5235 | + rh = list_entry(pos, struct release_heap, list); | ||
5236 | + if (release_time == rh->release_time) { | ||
5237 | + /* perfect match -- this happens on hyperperiod | ||
5238 | + * boundaries | ||
5239 | + */ | ||
5240 | + heap = rh; | ||
5241 | + break; | ||
5242 | + } else if (lt_before(release_time, rh->release_time)) { | ||
5243 | + /* we need to insert a new node since rh is | ||
5244 | + * already in the future | ||
5245 | + */ | ||
5246 | + break; | ||
5247 | + } | ||
5248 | + } | ||
5249 | + if (!heap) { | ||
5250 | + /* must create new node */ | ||
5251 | + /* FIXME: use a kmemcache_t */ | ||
5252 | + rh = kmalloc(sizeof(struct release_heap), GFP_ATOMIC); | ||
5253 | + if (unlikely(!rh)) | ||
5254 | + /* Should be handled somehow. | ||
5255 | + * For now, let's just hope there is | ||
5256 | + * sufficient memory. | ||
5257 | + */ | ||
5258 | + panic("rt_domain: no more memory?"); | ||
5259 | + rh->release_time = release_time; | ||
5260 | + heap_init(&rh->heap); | ||
5261 | + list_add(&rh->list, pos->prev); | ||
5262 | + inserted = heap_add(heap_earlier_release, | ||
5263 | + &rt->release_queue.rel_heap, rh, | ||
5264 | + GFP_ATOMIC); | ||
5265 | + if (unlikely(!inserted)) | ||
5266 | + panic("rt_domain: no more heap memory?"); | ||
5267 | + heap = rh; | ||
5268 | + } | ||
5269 | + return heap; | ||
5270 | +} | ||
5271 | + | ||
5272 | +static enum hrtimer_restart on_release_timer(struct hrtimer *timer) | ||
5273 | +{ | ||
5274 | +	unsigned long flags; | ||
5275 | + rt_domain_t *rt; | ||
5276 | + struct release_heap* rh; | ||
5277 | + struct heap tasks; | ||
5278 | + struct list_head list, *pos, *safe; | ||
5279 | + lt_t release = 0; | ||
5280 | + int pending; | ||
5281 | + int repeat; | ||
5282 | +	enum hrtimer_restart ret = HRTIMER_NORESTART; | ||
5283 | + | ||
5284 | + TS_RELEASE_START; | ||
5285 | + | ||
5286 | + INIT_LIST_HEAD(&list); | ||
5287 | + heap_init(&tasks); | ||
5288 | + | ||
5289 | + rt = container_of(timer, rt_domain_t, | ||
5290 | + release_queue.timer); | ||
5291 | + | ||
5292 | + do { | ||
5293 | + list_for_each_safe(pos, safe, &list) { | ||
5294 | + rh = list_entry(pos, struct release_heap, list); | ||
5295 | + heap_union(rt->order, &tasks, &rh->heap); | ||
5296 | + list_del(pos); | ||
5297 | + kfree(rh); | ||
5298 | + } | ||
5299 | + | ||
5300 | + /* call release callback */ | ||
5301 | + rt->release_jobs(rt, &tasks); | ||
5302 | + | ||
5303 | + | ||
5304 | + spin_lock_irqsave(&rt->release_lock, flags); | ||
5305 | + while ((pending = next_release(rt, &release))) { | ||
5306 | + if (lt_before(release, litmus_clock())) { | ||
5307 | + /* pick for release */ | ||
5308 | + rh = heap_take_del(heap_earlier_release, | ||
5309 | + &rt->release_queue.rel_heap); | ||
5310 | + list_move(&rh->list, &list); | ||
5311 | + } else | ||
5312 | + break; | ||
5313 | + } | ||
5314 | + repeat = !list_empty(&list); | ||
5315 | + if (!repeat) { | ||
5316 | + /* last iteration, setup timers, etc. */ | ||
5317 | + if (!pending) { | ||
5318 | + rt->release_queue.timer_armed = 0; | ||
5319 | + ret = HRTIMER_NORESTART; | ||
5320 | + } else { | ||
5321 | + rt->release_queue.timer_time = release; | ||
5322 | + timer->expires = ns_to_ktime(release); | ||
5323 | + ret = HRTIMER_RESTART; | ||
5324 | + } | ||
5325 | + } | ||
5326 | + spin_unlock_irqrestore(&rt->release_lock, flags); | ||
5327 | + } while (repeat); | ||
5328 | + | ||
5329 | + TS_RELEASE_END; | ||
5330 | + | ||
5331 | + return ret; | ||
5332 | +} | ||
5333 | + | ||
5334 | +static void arm_release_timer(unsigned long _rt) | ||
5335 | +{ | ||
5336 | + rt_domain_t *rt = (rt_domain_t*) _rt; | ||
5337 | + unsigned long flags; | ||
5338 | + struct list_head list; | ||
5339 | + struct list_head *pos, *safe; | ||
5340 | + struct task_struct* t; | ||
5341 | + struct release_heap* rh; | ||
5342 | + int earlier, armed; | ||
5343 | + lt_t release = 0; | ||
5344 | + | ||
5345 | + local_irq_save(flags); | ||
5346 | + spin_lock(&rt->tobe_lock); | ||
5347 | + list_replace_init(&rt->tobe_released, &list); | ||
5348 | + spin_unlock(&rt->tobe_lock); | ||
5349 | + | ||
5350 | + /* We only have to defend against the ISR since norq callbacks | ||
5351 | + * are serialized. | ||
5352 | + */ | ||
5353 | + spin_lock(&rt->release_lock); | ||
5354 | + | ||
5355 | + list_for_each_safe(pos, safe, &list) { | ||
5356 | + t = list_entry(pos, struct task_struct, rt_param.list); | ||
5357 | + sched_trace_task_release(t); | ||
5358 | + list_del(pos); | ||
5359 | + rh = get_release_heap(rt, get_release(t)); | ||
5360 | + heap_add(rt->order, &rh->heap, t, GFP_ATOMIC); | ||
5361 | + } | ||
5362 | + | ||
5363 | + next_release(rt, &release); | ||
5364 | + armed = rt->release_queue.timer_armed; | ||
5365 | + earlier = lt_before(release, rt->release_queue.timer_time); | ||
5366 | +	/* We'll do the actual arming in a moment. The ISR doesn't care what | ||
5367 | +	 * these flags say, and they'll be up to date before another instance | ||
5368 | +	 * of this function can observe them, due to the sequential nature of | ||
5369 | +	 * norq work. | ||
5370 | + */ | ||
5371 | + rt->release_queue.timer_armed = 1; | ||
5372 | + rt->release_queue.timer_time = release; | ||
5373 | + spin_unlock(&rt->release_lock); | ||
5374 | + if (!armed || earlier) { | ||
5375 | + if (armed) { | ||
5376 | + /* need to cancel first */ | ||
5377 | + hrtimer_cancel(&rt->release_queue.timer); | ||
5378 | + } | ||
5379 | + hrtimer_start(&rt->release_queue.timer, | ||
5380 | + ns_to_ktime(release), | ||
5381 | + HRTIMER_MODE_ABS); | ||
5382 | + } | ||
5383 | + local_irq_restore(flags); | ||
5384 | +} | ||
5385 | + | ||
5386 | +void rt_domain_init(rt_domain_t *rt, | ||
5387 | + heap_prio_t order, | ||
5388 | + check_resched_needed_t check, | ||
5389 | + release_jobs_t release | ||
5390 | + ) | ||
5391 | +{ | ||
5392 | + int i; | ||
5393 | + | ||
5394 | + BUG_ON(!rt); | ||
5395 | + if (!check) | ||
5396 | + check = dummy_resched; | ||
5397 | + if (!release) | ||
5398 | + release = default_release_jobs; | ||
5399 | + if (!order) | ||
5400 | + order = dummy_order; | ||
5401 | + | ||
5402 | + heap_init(&rt->ready_queue); | ||
5403 | + INIT_LIST_HEAD(&rt->tobe_released); | ||
5404 | + rt->release_queue.timer_armed = 0; | ||
5405 | + for (i = 0; i < RELEASE_QUEUE_SLOTS; i++) | ||
5406 | + INIT_LIST_HEAD(&rt->release_queue.slot[i]); | ||
5407 | + | ||
5408 | + hrtimer_init(&rt->release_queue.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
5409 | + rt->release_queue.timer.function = on_release_timer; | ||
5410 | +#ifdef CONFIG_HIGH_RES_TIMERS | ||
5411 | + rt->release_queue.timer.cb_mode = HRTIMER_CB_IRQSAFE; | ||
5412 | +#endif | ||
5413 | + | ||
5414 | + spin_lock_init(&rt->ready_lock); | ||
5415 | + spin_lock_init(&rt->release_lock); | ||
5416 | + spin_lock_init(&rt->tobe_lock); | ||
5417 | + | ||
5418 | + rt->check_resched = check; | ||
5419 | + rt->release_jobs = release; | ||
5420 | + rt->order = order; | ||
5421 | + init_no_rqlock_work(&rt->arm_timer, arm_release_timer, (unsigned long) rt); | ||
5422 | +} | ||
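| + | ||
| +/* Passing NULL for check, release, or order selects the defaults above | ||
| + * (dummy_resched, default_release_jobs, dummy_order). Compare the | ||
| + * edf_domain_init() call in sched_cedf.c below, which presumably forwards | ||
| + * here with an EDF priority order. | ||
| + */ | ||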
5423 | + | ||
5424 | +/* add_ready - add a real-time task to the rt ready queue. It must be runnable. | ||
5425 | + * @new: the newly released task | ||
5426 | + */ | ||
5427 | +void __add_ready(rt_domain_t* rt, struct task_struct *new) | ||
5428 | +{ | ||
5429 | + TRACE("rt: adding %s/%d (%llu, %llu) rel=%llu to ready queue at %llu\n", | ||
5430 | + new->comm, new->pid, get_exec_cost(new), get_rt_period(new), | ||
5431 | + get_release(new), litmus_clock()); | ||
5432 | + | ||
5433 | + BUG_ON(heap_node_in_heap(tsk_rt(new)->heap_node)); | ||
5434 | + | ||
5435 | + heap_insert(rt->order, &rt->ready_queue, tsk_rt(new)->heap_node); | ||
5436 | + rt->check_resched(rt); | ||
5437 | +} | ||
5438 | + | ||
5439 | +/* merge_ready - Add a sorted set of tasks to the rt ready queue. They must be runnable. | ||
5440 | + * @tasks - the newly released tasks | ||
5441 | + */ | ||
5442 | +void __merge_ready(rt_domain_t* rt, struct heap* tasks) | ||
5443 | +{ | ||
5444 | + heap_union(rt->order, &rt->ready_queue, tasks); | ||
5445 | + rt->check_resched(rt); | ||
5446 | +} | ||
5447 | + | ||
5448 | +/* add_release - add a real-time task to the rt release queue. | ||
5449 | + * @task: the sleeping task | ||
5450 | + */ | ||
5451 | +void __add_release(rt_domain_t* rt, struct task_struct *task) | ||
5452 | +{ | ||
5453 | + TRACE_TASK(task, "add_release(), rel=%llu\n", get_release(task)); | ||
5454 | + list_add(&tsk_rt(task)->list, &rt->tobe_released); | ||
5455 | + task->rt_param.domain = rt; | ||
5456 | + do_without_rqlock(&rt->arm_timer); | ||
5457 | +} | ||
5458 | + | ||
5459 | diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c | ||
5460 | new file mode 100755 | ||
5461 | index 0000000..6c32e1c | ||
5462 | --- /dev/null | ||
5463 | +++ b/litmus/sched_cedf.c | ||
5464 | @@ -0,0 +1,705 @@ | ||
5465 | +/* | ||
5466 | + * litmus/sched_cedf.c | ||
5467 | + * | ||
5468 | + * Implementation of the Clustered EDF (C-EDF) scheduling algorithm. | ||
5469 | + * Linking is included so that support for synchronization (e.g., through | ||
5470 | + * the implementation of a "CSN-EDF" algorithm) can be added later if desired. | ||
5471 | + * | ||
5472 | + * This version uses the simple approach and serializes all scheduling | ||
5473 | + * decisions by the use of a queue lock. This is probably not the | ||
5474 | + * best way to do it, but it should suffice for now. | ||
5475 | + */ | ||
5476 | + | ||
5477 | +#include <linux/spinlock.h> | ||
5478 | +#include <linux/percpu.h> | ||
5479 | +#include <linux/sched.h> | ||
5480 | +#include <linux/list.h> | ||
5481 | + | ||
5482 | +#include <litmus/litmus.h> | ||
5483 | +#include <litmus/jobs.h> | ||
5484 | +#include <litmus/sched_plugin.h> | ||
5485 | +#include <litmus/edf_common.h> | ||
5486 | +#include <litmus/sched_trace.h> | ||
5487 | + | ||
5488 | +#include <linux/module.h> | ||
5489 | + | ||
5490 | +/* Overview of C-EDF operations. | ||
5491 | + * | ||
5492 | + * link_task_to_cpu(T, cpu) - Low-level operation to update the linkage | ||
5493 | + * structure (NOT the actually scheduled | ||
5494 | + * task). If there is another linked task To | ||
5495 | + * already it will set To->linked_on = NO_CPU | ||
5496 | + * (thereby removing its association with this | ||
5497 | + * CPU). However, it will not requeue the | ||
5498 | + * previously linked task (if any). It will set | ||
5499 | + * T's state to RT_F_RUNNING and check whether | ||
5500 | + * it is already running somewhere else. If T | ||
5501 | + * is scheduled somewhere else it will link | ||
5502 | + * it to that CPU instead (and pull the linked | ||
5503 | + * task to cpu). T may be NULL. | ||
5504 | + * | ||
5505 | + * unlink(T) - Unlink removes T from all scheduler data | ||
5506 | + * structures. If it is linked to some CPU it | ||
5507 | + * will link NULL to that CPU. If it is | ||
5508 | + * currently queued in the cedf queue for | ||
5509 | + * a partition, it will be removed from | ||
5510 | + * the rt_domain. It is safe to call | ||
5511 | + * unlink(T) if T is not linked. T may not | ||
5512 | + * be NULL. | ||
5513 | + * | ||
5514 | + * requeue(T)		- Requeue will insert T into the appropriate | ||
5515 | + *				  queue. If T has already been released, | ||
5516 | + *				  it will go into the ready queue. If | ||
5517 | + *				  T's release time is in the future, it | ||
5518 | + *				  will go into the release queue | ||
5519 | + *				  instead. That means that T's release | ||
5520 | + *				  time/job no/etc. has to be updated | ||
5521 | + *				  before requeue(T) is called. It is | ||
5522 | + *				  not safe to call requeue(T) when T | ||
5523 | + *				  is already queued. T may not be | ||
5524 | + *				  NULL. | ||
5525 | + * | ||
5526 | + * cedf_job_arrival(T) - This is the catch-all function when T enters | ||
5527 | + * the system after either a suspension or at a | ||
5528 | + * job release. It will queue T (which means it | ||
5529 | + * is not safe to call cedf_job_arrival(T) if | ||
5530 | + * T is already queued) and then check whether a | ||
5531 | + * preemption is necessary. If a preemption is | ||
5532 | + * necessary it will update the linkage | ||
5533 | + * accordingly and cause scheduled to be called | ||
5534 | + * (either with an IPI or need_resched). It is | ||
5535 | + * safe to call cedf_job_arrival(T) if T's | ||
5536 | + * next job has not been actually released yet | ||
5537 | + * (release time in the future). T will be put | ||
5538 | + * on the release queue in that case. | ||
5539 | + * | ||
5540 | + * job_completion(T) - Take care of everything that needs to be done | ||
5541 | + * to prepare T for its next release and place | ||
5542 | + * it in the right queue with | ||
5543 | + * cedf_job_arrival(). | ||
5544 | + * | ||
5545 | + * | ||
5546 | + * When we know that T is linked to a CPU, then link_task_to_cpu(NULL, CPU) is | ||
5547 | + * equivalent to unlink(T). Note that if you unlink a task from a CPU, none of | ||
5548 | + * the functions will automatically propagate a pending task from the ready | ||
5549 | + * queue into the now-empty link. This is the job of the calling function (by | ||
5550 | + * means of __take_ready). | ||
5551 | + */ | ||
5552 | + | ||
5553 | +/* cpu_entry_t - maintain the linked and scheduled state | ||
5554 | + */ | ||
5555 | +typedef struct { | ||
5556 | + int cpu; | ||
5557 | + struct task_struct* linked; /* only RT tasks */ | ||
5558 | + struct task_struct* scheduled; /* only RT tasks */ | ||
5559 | + struct list_head list; | ||
5560 | + atomic_t will_schedule; /* prevent unneeded IPIs */ | ||
5561 | +} cpu_entry_t; | ||
5562 | +DEFINE_PER_CPU(cpu_entry_t, cedf_cpu_entries); | ||
5563 | + | ||
5564 | +cpu_entry_t* cedf_cpu_entries_array[NR_CPUS]; | ||
5565 | + | ||
5566 | +#define set_will_schedule() \ | ||
5567 | + (atomic_set(&__get_cpu_var(cedf_cpu_entries).will_schedule, 1)) | ||
5568 | +#define clear_will_schedule() \ | ||
5569 | + (atomic_set(&__get_cpu_var(cedf_cpu_entries).will_schedule, 0)) | ||
5570 | +#define test_will_schedule(cpu) \ | ||
5571 | + (atomic_read(&per_cpu(cedf_cpu_entries, cpu).will_schedule)) | ||
5572 | + | ||
5573 | +#define NO_CPU 0xffffffff | ||
5574 | + | ||
5575 | +/* Cluster size -- currently four. This is a variable to allow for | ||
5576 | + * the possibility of changing the cluster size online in the future. | ||
5577 | + */ | ||
5578 | +int cluster_size = 4; | ||
5579 | + | ||
5580 | +typedef struct { | ||
5581 | + rt_domain_t domain; | ||
5582 | + int first_cpu; | ||
5583 | + int last_cpu; | ||
5584 | + | ||
5585 | + /* the cpus queue themselves according to priority in here */ | ||
5586 | + struct list_head cedf_cpu_queue; | ||
5587 | + | ||
5588 | + /* per-partition spinlock: protects the domain and | ||
5589 | + * serializes scheduling decisions | ||
5590 | + */ | ||
5591 | +#define slock domain.ready_lock | ||
5592 | +} cedf_domain_t; | ||
5593 | + | ||
5594 | +DEFINE_PER_CPU(cedf_domain_t*, cedf_domains) = NULL; | ||
5595 | + | ||
5596 | +cedf_domain_t* cedf_domains_array[NR_CPUS]; | ||
5597 | + | ||
5598 | + | ||
5599 | +/* These are defined similarly to partitioning, except that a | ||
5600 | + * task's partition is any cpu of the cluster to which it | ||
5601 | + * is assigned, typically the lowest-numbered cpu. | ||
5602 | + */ | ||
5603 | +#define local_edf (&__get_cpu_var(cedf_domains)->domain) | ||
5604 | +#define local_cedf __get_cpu_var(cedf_domains) | ||
5605 | +#define remote_edf(cpu) (&per_cpu(cedf_domains, cpu)->domain) | ||
5606 | +#define remote_cedf(cpu) per_cpu(cedf_domains, cpu) | ||
5607 | +#define task_edf(task) remote_edf(get_partition(task)) | ||
5608 | +#define task_cedf(task) remote_cedf(get_partition(task)) | ||
5609 | + | ||
5610 | +/* update_cpu_position - Move the cpu entry to the correct place to maintain | ||
5611 | + * order in the cpu queue. Caller must hold cedf lock. | ||
5612 | + * | ||
5613 | + * This really should be a heap. | ||
5614 | + */ | ||
5615 | +static void update_cpu_position(cpu_entry_t *entry) | ||
5616 | +{ | ||
5617 | + cpu_entry_t *other; | ||
5618 | + struct list_head *cedf_cpu_queue = | ||
5619 | + &(remote_cedf(entry->cpu))->cedf_cpu_queue; | ||
5620 | + struct list_head *pos; | ||
5621 | + | ||
5622 | + BUG_ON(!cedf_cpu_queue); | ||
5623 | + | ||
5624 | + if (likely(in_list(&entry->list))) | ||
5625 | + list_del(&entry->list); | ||
5626 | + /* if we do not execute real-time jobs we just move | ||
5627 | + * to the end of the queue | ||
5628 | + */ | ||
5629 | + if (entry->linked) { | ||
5630 | + list_for_each(pos, cedf_cpu_queue) { | ||
5631 | + other = list_entry(pos, cpu_entry_t, list); | ||
5632 | + if (edf_higher_prio(entry->linked, other->linked)) { | ||
5633 | + __list_add(&entry->list, pos->prev, pos); | ||
5634 | + return; | ||
5635 | + } | ||
5636 | + } | ||
5637 | + } | ||
5638 | + /* if we get this far we have the lowest priority job */ | ||
5639 | + list_add_tail(&entry->list, cedf_cpu_queue); | ||
5640 | +} | ||
5641 | + | ||
5642 | +/* link_task_to_cpu - Update the link of a CPU. | ||
5643 | + * Handles the case where the to-be-linked task is already | ||
5644 | + * scheduled on a different CPU. | ||
5645 | + */ | ||
5646 | +static noinline void link_task_to_cpu(struct task_struct* linked, | ||
5647 | + cpu_entry_t *entry) | ||
5648 | +{ | ||
5649 | + cpu_entry_t *sched; | ||
5650 | + struct task_struct* tmp; | ||
5651 | + int on_cpu; | ||
5652 | + | ||
5653 | + BUG_ON(linked && !is_realtime(linked)); | ||
5654 | + | ||
5655 | + /* Cannot link task to a CPU that doesn't belong to its partition... */ | ||
5656 | + BUG_ON(linked && remote_cedf(entry->cpu) != task_cedf(linked)); | ||
5657 | + | ||
5658 | + /* Currently linked task is set to be unlinked. */ | ||
5659 | + if (entry->linked) { | ||
5660 | + entry->linked->rt_param.linked_on = NO_CPU; | ||
5661 | + } | ||
5662 | + | ||
5663 | + /* Link new task to CPU. */ | ||
5664 | + if (linked) { | ||
5665 | + set_rt_flags(linked, RT_F_RUNNING); | ||
5666 | +		/* handle the case where the task is already scheduled elsewhere */ | ||
5667 | + on_cpu = linked->rt_param.scheduled_on; | ||
5668 | + if (on_cpu != NO_CPU) { | ||
5669 | + sched = &per_cpu(cedf_cpu_entries, on_cpu); | ||
5670 | + /* this should only happen if not linked already */ | ||
5671 | + BUG_ON(sched->linked == linked); | ||
5672 | + | ||
5673 | + /* If we are already scheduled on the CPU to which we | ||
5674 | + * wanted to link, we don't need to do the swap -- | ||
5675 | + * we just link ourselves to the CPU and depend on | ||
5676 | + * the caller to get things right. | ||
5677 | + */ | ||
5678 | + if (entry != sched) { | ||
5679 | + tmp = sched->linked; | ||
5680 | + linked->rt_param.linked_on = sched->cpu; | ||
5681 | + sched->linked = linked; | ||
5682 | + update_cpu_position(sched); | ||
5683 | + linked = tmp; | ||
5684 | + } | ||
5685 | + } | ||
5686 | + if (linked) /* might be NULL due to swap */ | ||
5687 | + linked->rt_param.linked_on = entry->cpu; | ||
5688 | + } | ||
5689 | + entry->linked = linked; | ||
5690 | + | ||
5691 | + if (entry->linked) | ||
5692 | + TRACE_TASK(entry->linked, "linked to CPU %d, state:%d\n", | ||
5693 | + entry->cpu, entry->linked->state); | ||
5694 | + else | ||
5695 | + TRACE("NULL linked to CPU %d\n", entry->cpu); | ||
5696 | + | ||
5697 | + update_cpu_position(entry); | ||
5698 | +} | ||
5699 | + | ||
5700 | +/* unlink - Make sure a task is not linked any longer to an entry | ||
5701 | + * where it was linked before. Must hold cedf_lock. | ||
5702 | + */ | ||
5703 | +static noinline void unlink(struct task_struct* t) | ||
5704 | +{ | ||
5705 | + cpu_entry_t *entry; | ||
5706 | + | ||
5707 | + if (unlikely(!t)) { | ||
5708 | + TRACE_BUG_ON(!t); | ||
5709 | + return; | ||
5710 | + } | ||
5711 | + | ||
5712 | + if (t->rt_param.linked_on != NO_CPU) { | ||
5713 | + /* unlink */ | ||
5714 | + entry = &per_cpu(cedf_cpu_entries, t->rt_param.linked_on); | ||
5715 | + t->rt_param.linked_on = NO_CPU; | ||
5716 | + link_task_to_cpu(NULL, entry); | ||
5717 | + } else if (is_queued(t)) { | ||
5718 | + /* This is an interesting situation: t is scheduled, | ||
5719 | + * but was just recently unlinked. It cannot be | ||
5720 | + * linked anywhere else (because then it would have | ||
5721 | + * been relinked to this CPU), thus it must be in some | ||
5722 | + * queue. We must remove it from the list in this | ||
5723 | + * case. | ||
5724 | + */ | ||
5725 | + remove(task_edf(t), t); | ||
5726 | + } | ||
5727 | +} | ||
5728 | + | ||
5729 | + | ||
5730 | +/* preempt - force a CPU to reschedule | ||
5731 | + */ | ||
5732 | +static noinline void preempt(cpu_entry_t *entry) | ||
5733 | +{ | ||
5734 | + /* We cannot make the is_np() decision here if it is a remote CPU | ||
5735 | + * because requesting exit_np() requires that we currently use the | ||
5736 | + * address space of the task. Thus, in the remote case we just send | ||
5737 | + * the IPI and let schedule() handle the problem. | ||
5738 | + */ | ||
5739 | + | ||
5740 | + if (smp_processor_id() == entry->cpu) { | ||
5741 | + if (entry->scheduled && is_np(entry->scheduled)) | ||
5742 | + request_exit_np(entry->scheduled); | ||
5743 | + else | ||
5744 | + set_tsk_need_resched(current); | ||
5745 | + } else | ||
5746 | +		/* in case that it is a remote CPU we have to defer | ||
5747 | +		 * the decision to the remote CPU | ||
5748 | + * FIXME: We could save a few IPI's here if we leave the flag | ||
5749 | + * set when we are waiting for a np_exit(). | ||
5750 | + */ | ||
5751 | + if (!test_will_schedule(entry->cpu)) | ||
5752 | + smp_send_reschedule(entry->cpu); | ||
5753 | +} | ||
5754 | + | ||
5755 | +/* requeue - Put an unlinked task into c-edf domain. | ||
5756 | + * Caller must hold cedf_lock. | ||
5757 | + */ | ||
5758 | +static noinline void requeue(struct task_struct* task) | ||
5759 | +{ | ||
5760 | + cedf_domain_t* cedf; | ||
5761 | + rt_domain_t* edf; | ||
5762 | + | ||
5763 | + BUG_ON(!task); | ||
5764 | + /* sanity check rt_list before insertion */ | ||
5765 | + BUG_ON(is_queued(task)); | ||
5766 | + | ||
5767 | + /* Get correct real-time domain. */ | ||
5768 | + cedf = task_cedf(task); | ||
5769 | + edf = &cedf->domain; | ||
5770 | + | ||
5771 | + if (is_released(task, litmus_clock())) | ||
5772 | + __add_ready(edf, task); | ||
5773 | + else { | ||
5774 | + /* it has got to wait */ | ||
5775 | + add_release(edf, task); | ||
5776 | + } | ||
5777 | +} | ||
5778 | + | ||
5779 | +static void check_for_preemptions(cedf_domain_t* cedf) | ||
5780 | +{ | ||
5781 | + cpu_entry_t *last; | ||
5782 | + struct task_struct *task; | ||
5783 | + struct list_head *cedf_cpu_queue; | ||
5784 | + cedf_cpu_queue = &cedf->cedf_cpu_queue; | ||
5785 | + | ||
5786 | + for(last = list_entry(cedf_cpu_queue->prev, cpu_entry_t, list); | ||
5787 | + edf_preemption_needed(&cedf->domain, last->linked); | ||
5788 | + last = list_entry(cedf_cpu_queue->prev, cpu_entry_t, list)) { | ||
5789 | + /* preemption necessary */ | ||
5790 | + task = __take_ready(&cedf->domain); | ||
5791 | + TRACE("check_for_preemptions: task %d linked to %d, state:%d\n", | ||
5792 | + task->pid, last->cpu, task->state); | ||
5793 | + if (last->linked) | ||
5794 | + requeue(last->linked); | ||
5795 | + link_task_to_cpu(task, last); | ||
5796 | + preempt(last); | ||
5797 | + } | ||
5798 | + | ||
5799 | +} | ||
5800 | + | ||
5801 | +/* cedf_job_arrival: task is either resumed or released */ | ||
5802 | +static noinline void cedf_job_arrival(struct task_struct* task) | ||
5803 | +{ | ||
5804 | + cedf_domain_t* cedf; | ||
5805 | + rt_domain_t* edf; | ||
5806 | + | ||
5807 | + BUG_ON(!task); | ||
5808 | + | ||
5809 | + /* Get correct real-time domain. */ | ||
5810 | + cedf = task_cedf(task); | ||
5811 | + edf = &cedf->domain; | ||
5812 | + | ||
5813 | + /* first queue arriving job */ | ||
5814 | + requeue(task); | ||
5815 | + | ||
5816 | + /* then check for any necessary preemptions */ | ||
5817 | + check_for_preemptions(cedf); | ||
5818 | +} | ||
5819 | + | ||
5820 | +/* check for current job releases */ | ||
5821 | +static void cedf_release_jobs(rt_domain_t* rt, struct heap* tasks) | ||
5822 | +{ | ||
5823 | + cedf_domain_t* cedf = container_of(rt, cedf_domain_t, domain); | ||
5824 | + unsigned long flags; | ||
5825 | + | ||
5826 | + spin_lock_irqsave(&cedf->slock, flags); | ||
5827 | + | ||
5828 | + __merge_ready(&cedf->domain, tasks); | ||
5829 | + check_for_preemptions(cedf); | ||
5830 | + spin_unlock_irqrestore(&cedf->slock, flags); | ||
5831 | +} | ||
5832 | + | ||
5833 | +/* cedf_tick - this function is called for every local timer | ||
5834 | + * interrupt. | ||
5835 | + * | ||
5836 | + * checks whether the budget of the current task has been exhausted | ||
5837 | + * and, if so, initiates a (possibly delayed) preemption | ||
5838 | + */ | ||
5839 | +static void cedf_tick(struct task_struct* t) | ||
5840 | +{ | ||
5841 | + BUG_ON(!t); | ||
5842 | + | ||
5843 | + if (is_realtime(t) && budget_exhausted(t)) { | ||
5844 | + if (!is_np(t)) { | ||
5845 | + /* np tasks will be preempted when they become | ||
5846 | + * preemptable again | ||
5847 | + */ | ||
5848 | + set_tsk_need_resched(t); | ||
5849 | + set_will_schedule(); | ||
5850 | + TRACE("cedf_scheduler_tick: " | ||
5851 | + "%d is preemptable (state:%d) " | ||
5852 | + " => FORCE_RESCHED\n", t->pid, t->state); | ||
5853 | + } else { | ||
5854 | + TRACE("cedf_scheduler_tick: " | ||
5855 | + "%d is non-preemptable (state:%d), " | ||
5856 | + "preemption delayed.\n", t->pid, t->state); | ||
5857 | + request_exit_np(t); | ||
5858 | + } | ||
5859 | + } | ||
5860 | +} | ||
5861 | + | ||
5862 | +/* caller holds cedf_lock */ | ||
5863 | +static noinline void job_completion(struct task_struct *t, int forced) | ||
5864 | +{ | ||
5865 | + BUG_ON(!t); | ||
5866 | + | ||
5867 | + sched_trace_task_completion(t, forced); | ||
5868 | + | ||
5869 | + TRACE_TASK(t, "job_completion(). [state:%d]\n", t->state); | ||
5870 | + | ||
5871 | + /* set flags */ | ||
5872 | + set_rt_flags(t, RT_F_SLEEP); | ||
5873 | + /* prepare for next period */ | ||
5874 | + prepare_for_next_period(t); | ||
5875 | + /* unlink */ | ||
5876 | + unlink(t); | ||
5877 | + /* requeue | ||
5878 | + * But don't requeue a blocking task. */ | ||
5879 | + if (is_running(t)) | ||
5880 | + cedf_job_arrival(t); | ||
5881 | +} | ||
5882 | + | ||
5883 | +/* Getting schedule() right is a bit tricky. schedule() may not make any | ||
5884 | + * assumptions on the state of the current task since it may be called for a | ||
5885 | + * number of reasons. The reasons include a scheduler_tick() determined that it | ||
5886 | + * was necessary, because sys_exit_np() was called, because some Linux | ||
5887 | + * subsystem determined so, or even (in the worst case) because there is a bug | ||
5888 | + * hidden somewhere. Thus, we must take extreme care to determine what the | ||
5889 | + * current state is. | ||
5890 | + * | ||
5891 | + * The CPU could currently be scheduling a task (or not), be linked (or not). | ||
5892 | + * | ||
5893 | + * The following assertions for the scheduled task could hold: | ||
5894 | + * | ||
5895 | + * - !is_running(scheduled) // the job blocks | ||
5896 | + * - scheduled->timeslice == 0 // the job completed (forcefully) | ||
5897 | + * - get_rt_flag() == RT_F_SLEEP // the job completed (by syscall) | ||
5898 | + * - linked != scheduled // we need to reschedule (for any reason) | ||
5899 | + * - is_np(scheduled) // rescheduling must be delayed, | ||
5900 | + * sys_exit_np must be requested | ||
5901 | + * | ||
5902 | + * Any of these can occur together. | ||
5903 | + */ | ||
5904 | +static struct task_struct* cedf_schedule(struct task_struct * prev) | ||
5905 | +{ | ||
5906 | + cedf_domain_t* cedf = local_cedf; | ||
5907 | + rt_domain_t* edf = &cedf->domain; | ||
5908 | + cpu_entry_t* entry = &__get_cpu_var(cedf_cpu_entries); | ||
5909 | + int out_of_time, sleep, preempt, np, | ||
5910 | + exists, blocks; | ||
5911 | + struct task_struct* next = NULL; | ||
5912 | + | ||
5913 | + BUG_ON(!prev); | ||
5914 | + BUG_ON(!cedf); | ||
5915 | + BUG_ON(!edf); | ||
5916 | + BUG_ON(!entry); | ||
5917 | + BUG_ON(cedf != remote_cedf(entry->cpu)); | ||
5918 | + BUG_ON(is_realtime(prev) && cedf != task_cedf(prev)); | ||
5919 | + | ||
5920 | + /* Will be released in finish_switch. */ | ||
5921 | + spin_lock(&cedf->slock); | ||
5922 | + clear_will_schedule(); | ||
5923 | + | ||
5924 | + /* sanity checking */ | ||
5925 | + BUG_ON(entry->scheduled && entry->scheduled != prev); | ||
5926 | + BUG_ON(entry->scheduled && !is_realtime(prev)); | ||
5927 | + BUG_ON(is_realtime(prev) && !entry->scheduled); | ||
5928 | + | ||
5929 | + /* (0) Determine state */ | ||
5930 | + exists = entry->scheduled != NULL; | ||
5931 | + blocks = exists && !is_running(entry->scheduled); | ||
5932 | + out_of_time = exists && budget_exhausted(entry->scheduled); | ||
5933 | + np = exists && is_np(entry->scheduled); | ||
5934 | + sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; | ||
5935 | + preempt = entry->scheduled != entry->linked; | ||
5936 | + | ||
5937 | + /* If a task blocks we have no choice but to reschedule. | ||
5938 | + */ | ||
5939 | + if (blocks) | ||
5940 | + unlink(entry->scheduled); | ||
5941 | + | ||
5942 | + /* Request a sys_exit_np() call if we would like to preempt but cannot. | ||
5943 | + * We need to make sure to update the link structure anyway in case | ||
5944 | + * that we are still linked. Multiple calls to request_exit_np() don't | ||
5945 | + * hurt. | ||
5946 | + */ | ||
5947 | + if (np && (out_of_time || preempt || sleep)) { | ||
5948 | + unlink(entry->scheduled); | ||
5949 | + request_exit_np(entry->scheduled); | ||
5950 | + } | ||
5951 | + | ||
5952 | + /* Any task that is preemptable and either exhausts its execution | ||
5953 | + * budget or wants to sleep completes. We may have to reschedule after | ||
5954 | +	 * this. Don't do a job completion if it blocks (can't have timers | ||
5955 | +	 * running for blocked jobs). Preemptions go first for the same reason. | ||
5956 | + */ | ||
5957 | + if (!np && (out_of_time || sleep) && !blocks && !preempt) | ||
5958 | + job_completion(entry->scheduled, !sleep); | ||
5959 | + | ||
5960 | + /* Link pending task if we became unlinked. | ||
5961 | + */ | ||
5962 | + if (!entry->linked) | ||
5963 | + link_task_to_cpu(__take_ready(edf), entry); | ||
5964 | + | ||
5965 | + /* The final scheduling decision. Do we need to switch for some reason? | ||
5966 | + * If linked different from scheduled select linked as next. | ||
5967 | + */ | ||
5968 | + if ((!np || blocks) && | ||
5969 | + entry->linked != entry->scheduled) { | ||
5970 | + /* Schedule a linked job? */ | ||
5971 | + if (entry->linked) { | ||
5972 | + entry->linked->rt_param.scheduled_on = entry->cpu; | ||
5973 | + next = entry->linked; | ||
5974 | + } | ||
5975 | + if (entry->scheduled) { | ||
5976 | + /* not gonna be scheduled soon */ | ||
5977 | + entry->scheduled->rt_param.scheduled_on = NO_CPU; | ||
5978 | + TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); | ||
5979 | + } | ||
5980 | + } else | ||
5981 | +		/* Only override the Linux scheduler if we have a real-time task | ||
5982 | +		 * scheduled that needs to continue running. | ||
5983 | + */ | ||
5984 | + if (exists) | ||
5985 | + next = prev; | ||
5986 | + | ||
5987 | + spin_unlock(&cedf->slock); | ||
5988 | + | ||
5989 | + return next; | ||
5990 | +} | ||
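| + | ||
| +/* Worked example of the state logic above: if a job exhausts its budget | ||
| + * while non-preemptable (np && out_of_time), it is unlinked and a | ||
| + * sys_exit_np() callback is requested; the job_completion() itself is | ||
| + * deferred to a later schedule() invocation in which np no longer holds. | ||
| + */ | ||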
5991 | + | ||
5992 | +/* _finish_switch - we just finished the switch away from prev | ||
5993 | + */ | ||
5994 | +static void cedf_finish_switch(struct task_struct *prev) | ||
5995 | +{ | ||
5996 | + cpu_entry_t* entry = &__get_cpu_var(cedf_cpu_entries); | ||
5997 | + | ||
5998 | + BUG_ON(!prev); | ||
5999 | + BUG_ON(!entry); | ||
6000 | + | ||
6001 | + entry->scheduled = is_realtime(current) ? current : NULL; | ||
6002 | +} | ||
6003 | + | ||
6004 | +/* Prepare a task for running in RT mode | ||
6005 | + */ | ||
6006 | +static void cedf_task_new(struct task_struct *t, int on_rq, int running) | ||
6007 | +{ | ||
6008 | + unsigned long flags; | ||
6009 | + cedf_domain_t* cedf = task_cedf(t); | ||
6010 | + cpu_entry_t* entry; | ||
6011 | + | ||
6012 | + BUG_ON(!cedf); | ||
6013 | + | ||
6014 | + spin_lock_irqsave(&cedf->slock, flags); | ||
6015 | + if (running) { | ||
6016 | + entry = &per_cpu(cedf_cpu_entries, task_cpu(t)); | ||
6017 | + BUG_ON(!entry); | ||
6018 | + BUG_ON(entry->scheduled); | ||
6019 | + entry->scheduled = t; | ||
6020 | + t->rt_param.scheduled_on = task_cpu(t); | ||
6021 | + } else | ||
6022 | + t->rt_param.scheduled_on = NO_CPU; | ||
6023 | + t->rt_param.linked_on = NO_CPU; | ||
6024 | + | ||
6025 | + /* setup job params */ | ||
6026 | + release_at(t, litmus_clock()); | ||
6027 | + | ||
6028 | + cedf_job_arrival(t); | ||
6029 | + spin_unlock_irqrestore(&cedf->slock, flags); | ||
6030 | +} | ||
6031 | + | ||
6032 | + | ||
6033 | +static void cedf_task_wake_up(struct task_struct *task) | ||
6034 | +{ | ||
6035 | + unsigned long flags; | ||
6036 | + cedf_domain_t* cedf; | ||
6037 | + lt_t now; | ||
6038 | + | ||
6039 | + BUG_ON(!task); | ||
6040 | + | ||
6041 | + cedf = task_cedf(task); | ||
6042 | + BUG_ON(!cedf); | ||
6043 | + | ||
6044 | + spin_lock_irqsave(&cedf->slock, flags); | ||
6045 | + /* We need to take suspensions because of semaphores into | ||
6046 | + * account! If a job resumes after being suspended due to acquiring | ||
6047 | + * a semaphore, it should never be treated as a new job release. | ||
6048 | + */ | ||
6049 | + if (get_rt_flags(task) == RT_F_EXIT_SEM) { | ||
6050 | + set_rt_flags(task, RT_F_RUNNING); | ||
6051 | + } else { | ||
6052 | + now = litmus_clock(); | ||
6053 | + if (is_tardy(task, now)) { | ||
6054 | + /* new sporadic release */ | ||
6055 | + release_at(task, now); | ||
6056 | + sched_trace_task_release(task); | ||
6057 | + } | ||
6058 | + else if (task->time_slice) | ||
6059 | + /* came back in time before deadline | ||
6060 | + */ | ||
6061 | + set_rt_flags(task, RT_F_RUNNING); | ||
6062 | + } | ||
6063 | + cedf_job_arrival(task); | ||
6064 | + spin_unlock_irqrestore(&cedf->slock, flags); | ||
6065 | +} | ||
6066 | + | ||
6067 | + | ||
6068 | +static void cedf_task_block(struct task_struct *t) | ||
6069 | +{ | ||
6070 | + unsigned long flags; | ||
6071 | + | ||
6072 | + BUG_ON(!t); | ||
6073 | + | ||
6074 | + /* unlink if necessary */ | ||
6075 | + spin_lock_irqsave(&task_cedf(t)->slock, flags); | ||
6076 | + unlink(t); | ||
6077 | + spin_unlock_irqrestore(&task_cedf(t)->slock, flags); | ||
6078 | + | ||
6079 | + BUG_ON(!is_realtime(t)); | ||
6080 | +} | ||
6081 | + | ||
6082 | +static void cedf_task_exit(struct task_struct * t) | ||
6083 | +{ | ||
6084 | + unsigned long flags; | ||
6085 | + | ||
6086 | + BUG_ON(!t); | ||
6087 | + | ||
6088 | + /* unlink if necessary */ | ||
6089 | + spin_lock_irqsave(&task_cedf(t)->slock, flags); | ||
6090 | + unlink(t); | ||
6091 | + if (tsk_rt(t)->scheduled_on != NO_CPU) { | ||
6092 | + cedf_cpu_entries_array[tsk_rt(t)->scheduled_on]-> | ||
6093 | + scheduled = NULL; | ||
6094 | + tsk_rt(t)->scheduled_on = NO_CPU; | ||
6095 | + } | ||
6096 | + spin_unlock_irqrestore(&task_cedf(t)->slock, flags); | ||
6097 | + | ||
6098 | + BUG_ON(!is_realtime(t)); | ||
6099 | + TRACE_TASK(t, "RIP\n"); | ||
6100 | +} | ||
6101 | + | ||
6102 | +static long cedf_admit_task(struct task_struct* tsk) | ||
6103 | +{ | ||
6104 | + return (task_cpu(tsk) >= task_cedf(tsk)->first_cpu && | ||
6105 | + task_cpu(tsk) <= task_cedf(tsk)->last_cpu) ? 0 : -EINVAL; | ||
6106 | +} | ||
6107 | + | ||
6108 | + | ||
6109 | +/* Plugin object */ | ||
6110 | +static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = { | ||
6111 | + .plugin_name = "C-EDF", | ||
6112 | + .finish_switch = cedf_finish_switch, | ||
6113 | + .tick = cedf_tick, | ||
6114 | + .task_new = cedf_task_new, | ||
6115 | + .complete_job = complete_job, | ||
6116 | + .task_exit = cedf_task_exit, | ||
6117 | + .schedule = cedf_schedule, | ||
6118 | + .task_wake_up = cedf_task_wake_up, | ||
6119 | + .task_block = cedf_task_block, | ||
6120 | + .admit_task = cedf_admit_task | ||
6121 | +}; | ||
6122 | + | ||
6123 | +static void cedf_domain_init(int first_cpu, int last_cpu) | ||
6124 | +{ | ||
6125 | + int cpu; | ||
6126 | + | ||
6127 | + /* Create new domain for this cluster. */ | ||
6128 | + cedf_domain_t *new_cedf_domain = kmalloc(sizeof(cedf_domain_t), | ||
6129 | + GFP_KERNEL); | ||
6130 | + | ||
6131 | + /* Initialize cluster domain. */ | ||
6132 | + edf_domain_init(&new_cedf_domain->domain, NULL, | ||
6133 | + cedf_release_jobs); | ||
6134 | + new_cedf_domain->first_cpu = first_cpu; | ||
6135 | + new_cedf_domain->last_cpu = last_cpu; | ||
6136 | + INIT_LIST_HEAD(&new_cedf_domain->cedf_cpu_queue); | ||
6137 | + | ||
6138 | + /* Assign all cpus in cluster to point to this domain. */ | ||
6139 | + for (cpu = first_cpu; cpu <= last_cpu; cpu++) { | ||
6140 | + remote_cedf(cpu) = new_cedf_domain; | ||
6141 | + cedf_domains_array[cpu] = new_cedf_domain; | ||
6142 | + } | ||
6143 | +} | ||
6144 | + | ||
6145 | +static int __init init_cedf(void) | ||
6146 | +{ | ||
6147 | + int cpu; | ||
6148 | + cpu_entry_t *entry; | ||
6149 | + | ||
6150 | + /* initialize CPU state */ | ||
6151 | + for (cpu = 0; cpu < NR_CPUS; cpu++) { | ||
6152 | + entry = &per_cpu(cedf_cpu_entries, cpu); | ||
6153 | + cedf_cpu_entries_array[cpu] = entry; | ||
6154 | + atomic_set(&entry->will_schedule, 0); | ||
6155 | + entry->linked = NULL; | ||
6156 | + entry->scheduled = NULL; | ||
6157 | + entry->cpu = cpu; | ||
6158 | + INIT_LIST_HEAD(&entry->list); | ||
6159 | + } | ||
6160 | + | ||
6161 | + /* initialize all cluster domains */ | ||
6162 | + for (cpu = 0; cpu < NR_CPUS; cpu += cluster_size) | ||
6163 | + cedf_domain_init(cpu, cpu+cluster_size-1); | ||
6164 | + | ||
6165 | + return register_sched_plugin(&cedf_plugin); | ||
6166 | +} | ||
6167 | + | ||
6168 | +module_init(init_cedf); | ||
6169 | + | ||
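To illustrate the clustering performed by init_cedf() above: CPUs are partitioned into consecutive blocks of cluster_size, and every CPU in a block shares one C-EDF domain. A minimal user-space sketch of the same partitioning (NR_CPUS = 8 and cluster_size = 4 are made-up values for the example, not taken from the patch):

#include <stdio.h>

#define NR_CPUS 8		/* stand-in for the kernel constant */
static int cluster_size = 4;	/* stand-in for the plugin's cluster size */

int main(void)
{
	int cpu, c, domain[NR_CPUS];

	/* mirror init_cedf(): one domain per block of cluster_size CPUs */
	for (cpu = 0; cpu < NR_CPUS; cpu += cluster_size)
		for (c = cpu; c <= cpu + cluster_size - 1 && c < NR_CPUS; c++)
			domain[c] = cpu / cluster_size;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d -> cluster [%d, %d]\n", cpu,
		       domain[cpu] * cluster_size,
		       domain[cpu] * cluster_size + cluster_size - 1);
	return 0;
}

With these values, CPUs 0-3 map to one domain and CPUs 4-7 to another, which is exactly what the cedf_domain_init() loop sets up.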
6170 | diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c | ||
6171 | new file mode 100644 | ||
6172 | index 0000000..dada868 | ||
6173 | --- /dev/null | ||
6174 | +++ b/litmus/sched_gsn_edf.c | ||
6175 | @@ -0,0 +1,728 @@ | ||
6176 | +/* | ||
6177 | + * kernel/sched_gsn_edf.c | ||
6178 | + * | ||
6179 | + * Implementation of the GSN-EDF scheduling algorithm. | ||
6180 | + * | ||
6181 | + * This version uses the simple approach and serializes all scheduling | ||
6182 | + * decisions by the use of a queue lock. This is probably not the | ||
6183 | + * best way to do it, but it should suffice for now. | ||
6184 | + */ | ||
6185 | + | ||
6186 | +#include <linux/spinlock.h> | ||
6187 | +#include <linux/percpu.h> | ||
6188 | +#include <linux/sched.h> | ||
6189 | + | ||
6190 | +#include <litmus/litmus.h> | ||
6191 | +#include <litmus/jobs.h> | ||
6192 | +#include <litmus/sched_plugin.h> | ||
6193 | +#include <litmus/edf_common.h> | ||
6194 | +#include <litmus/sched_trace.h> | ||
6195 | + | ||
6196 | +#include <litmus/heap.h> | ||
6197 | + | ||
6198 | +#include <linux/module.h> | ||
6199 | + | ||
6200 | +/* Overview of GSN-EDF operations. | ||
6201 | + * | ||
6202 | + * For a detailed explanation of GSN-EDF have a look at the FMLP paper. This | ||
6203 | + * description only covers how the individual operations are implemented in | ||
6204 | + * LITMUS. | ||
6205 | + * | ||
6206 | + * link_task_to_cpu(T, cpu) - Low-level operation to update the linkage | ||
6207 | + * structure (NOT the actually scheduled | ||
6208 | + * task). If there is another linked task To | ||
6209 | + * already it will set To->linked_on = NO_CPU | ||
6210 | + * (thereby removing its association with this | ||
6211 | + * CPU). However, it will not requeue the | ||
6212 | + * previously linked task (if any). It will set | ||
6213 | + * T's state to RT_F_RUNNING and check whether | ||
6214 | + * it is already running somewhere else. If T | ||
6215 | + * is scheduled somewhere else, it will be linked | ||
6216 | + * to that CPU instead (and the task linked there | ||
6217 | + * will be pulled to this CPU). T may be NULL. | ||
6218 | + * | ||
6219 | + * unlink(T) - Unlink removes T from all scheduler data | ||
6220 | + * structures. If it is linked to some CPU it | ||
6221 | + * will link NULL to that CPU. If it is | ||
6222 | + * currently queued in the gsnedf queue it will | ||
6223 | + * be removed from the rt_domain. It is safe to | ||
6224 | + * call unlink(T) if T is not linked. T may not | ||
6225 | + * be NULL. | ||
6226 | + * | ||
6227 | + * requeue(T) - Requeue will insert T into the appropriate | ||
6228 | + * queue. If the system is in real-time mode and | ||
6229 | + * the T is released already, it will go into the | ||
6230 | + * ready queue. If the system is not in | ||
6231 | + * real-time mode is T, then T will go into the | ||
6232 | + * release queue. If T's release time is in the | ||
6233 | + * future, it will go into the release | ||
6234 | + * queue. That means that T's release time/job | ||
6235 | + * no/etc. has to be updated before requeu(T) is | ||
6236 | + * called. It is not safe to call requeue(T) | ||
6237 | + * when T is already queued. T may not be NULL. | ||
6238 | + * | ||
6239 | + * gsnedf_job_arrival(T) - This is the catch all function when T enters | ||
6240 | + * the system after either a suspension or at a | ||
6241 | + * job release. It will queue T (which means it | ||
6242 | + * is not safe to call gsnedf_job_arrival(T) if | ||
6243 | + * T is already queued) and then check whether a | ||
6244 | + * preemption is necessary. If a preemption is | ||
6245 | + * necessary it will update the linkage | ||
6246 | + * accordingly and cause scheduled to be called | ||
6247 | + * (either with an IPI or need_resched). It is | ||
6248 | + * safe to call gsnedf_job_arrival(T) if T's | ||
6249 | + * next job has not been actually released yet | ||
6250 | + * (release time in the future). T will be put | ||
6251 | + * on the release queue in that case. | ||
6252 | + * | ||
6253 | + * job_completion(T) - Take care of everything that needs to be done | ||
6254 | + * to prepare T for its next release and place | ||
6255 | + * it in the right queue with | ||
6256 | + * gsnedf_job_arrival(). | ||
6257 | + * | ||
6258 | + * | ||
6259 | + * When we know that T is linked to a CPU, then link_task_to_cpu(NULL, CPU) is | ||
6260 | + * equivalent to unlink(T). Note that if you unlink a task from a CPU none of | ||
6261 | + * the functions will automatically propagate pending tasks from the ready queue | ||
6262 | + * to a linked task. This is the job of the calling function (by means of | ||
6263 | + * __take_ready). | ||
6264 | + */ | ||
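The rules above are easiest to check against a toy model. Below is a user-space sketch of just the linkage bookkeeping, deliberately omitting the scheduled-elsewhere swap and the requeueing that real callers must perform; it shows that displacing a linked task does not requeue it, and that unlink is safe to call twice:

#include <stdio.h>

#define NCPUS  2
#define NO_CPU (-1)

struct task { const char *name; int linked_on; };
struct cpu  { struct task *linked; };

static struct cpu cpus[NCPUS];

/* simplified link_task_to_cpu(): no swap with a remote CPU */
static void link_task(struct task *t, int cpu)
{
	if (cpus[cpu].linked)
		cpus[cpu].linked->linked_on = NO_CPU;
	cpus[cpu].linked = t;
	if (t)
		t->linked_on = cpu;
}

/* simplified unlink(): safe to call when t is not linked */
static void unlink_task(struct task *t)
{
	if (t->linked_on != NO_CPU)
		link_task(NULL, t->linked_on);
}

int main(void)
{
	struct task a = { "A", NO_CPU }, b = { "B", NO_CPU };

	link_task(&a, 0);
	link_task(&b, 0);	/* displaces A; A is *not* requeued */
	printf("A linked_on=%d\n", a.linked_on);	/* -1 */
	unlink_task(&b);
	printf("cpu0 linked=%p\n", (void *)cpus[0].linked); /* NULL */
	unlink_task(&b);	/* idempotent: safe if not linked */
	return 0;
}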
6265 | + | ||
6266 | + | ||
6267 | +/* cpu_entry_t - maintain the linked and scheduled state | ||
6268 | + */ | ||
6269 | +typedef struct { | ||
6270 | + int cpu; | ||
6271 | + struct task_struct* linked; /* only RT tasks */ | ||
6272 | + struct task_struct* scheduled; /* only RT tasks */ | ||
6273 | + atomic_t will_schedule; /* prevent unneeded IPIs */ | ||
6274 | + struct heap_node* hn; | ||
6275 | +} cpu_entry_t; | ||
6276 | +DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries); | ||
6277 | + | ||
6278 | +cpu_entry_t* gsnedf_cpus[NR_CPUS]; | ||
6279 | + | ||
6280 | +#define set_will_schedule() \ | ||
6281 | + (atomic_set(&__get_cpu_var(gsnedf_cpu_entries).will_schedule, 1)) | ||
6282 | +#define clear_will_schedule() \ | ||
6283 | + (atomic_set(&__get_cpu_var(gsnedf_cpu_entries).will_schedule, 0)) | ||
6284 | +#define test_will_schedule(cpu) \ | ||
6285 | + (atomic_read(&per_cpu(gsnedf_cpu_entries, cpu).will_schedule)) | ||
6286 | + | ||
6287 | + | ||
6288 | +#define NO_CPU 0xffffffff | ||
6289 | + | ||
6290 | +/* the cpus queue themselves according to priority in here */ | ||
6291 | +static struct heap_node gsnedf_heap_node[NR_CPUS]; | ||
6292 | +static struct heap gsnedf_cpu_heap; | ||
6293 | + | ||
6294 | +static rt_domain_t gsnedf; | ||
6295 | +#define gsnedf_lock (gsnedf.ready_lock) | ||
6296 | + | ||
6297 | + | ||
6298 | +static int cpu_lower_prio(struct heap_node *_a, struct heap_node *_b) | ||
6299 | +{ | ||
6300 | + cpu_entry_t *a, *b; | ||
6301 | + a = _a->value; | ||
6302 | + b = _b->value; | ||
6303 | + /* Note that a and b are inverted: we want the lowest-priority CPU at | ||
6304 | + * the top of the heap. | ||
6305 | + */ | ||
6306 | + return edf_higher_prio(b->linked, a->linked); | ||
6307 | +} | ||
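The argument swap is the whole trick: heap_peek() surfaces whatever the comparator calls "higher", so feeding it edf_higher_prio(b, a) makes the heap surface the CPU whose linked task has the *lowest* priority, i.e. the preferred preemption victim. The same inversion shown with qsort(3) in user space (deadlines as plain ints, smaller = higher EDF priority):

#include <stdio.h>
#include <stdlib.h>

/* "higher priority" = earlier deadline */
static int higher_prio(int a, int b) { return a < b; }

/* inverted, as in cpu_lower_prio(): lowest-priority element first */
static int lower_prio_first(const void *_a, const void *_b)
{
	int a = *(const int *)_a, b = *(const int *)_b;
	return higher_prio(b, a) ? -1 : higher_prio(a, b) ? 1 : 0;
}

int main(void)
{
	int deadlines[] = { 10, 30, 20 };
	qsort(deadlines, 3, sizeof(int), lower_prio_first);
	/* prints 30 20 10: the latest deadline comes out on top */
	for (int i = 0; i < 3; i++)
		printf("%d ", deadlines[i]);
	printf("\n");
	return 0;
}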
6308 | + | ||
6309 | +/* update_cpu_position - Move the cpu entry to the correct place to maintain | ||
6310 | + * order in the cpu queue. Caller must hold gsnedf lock. | ||
6311 | + */ | ||
6312 | +static void update_cpu_position(cpu_entry_t *entry) | ||
6313 | +{ | ||
6314 | + if (likely(heap_node_in_heap(entry->hn))) | ||
6315 | + heap_delete(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn); | ||
6316 | + heap_insert(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn); | ||
6317 | +} | ||
6318 | + | ||
6319 | +/* caller must hold gsnedf lock */ | ||
6320 | +static cpu_entry_t* lowest_prio_cpu(void) | ||
6321 | +{ | ||
6322 | + struct heap_node* hn; | ||
6323 | + hn = heap_peek(cpu_lower_prio, &gsnedf_cpu_heap); | ||
6324 | + return hn->value; | ||
6325 | +} | ||
6326 | + | ||
6327 | + | ||
6328 | +/* link_task_to_cpu - Update the link of a CPU. | ||
6329 | + * Handles the case where the to-be-linked task is already | ||
6330 | + * scheduled on a different CPU. | ||
6331 | + */ | ||
6332 | +static noinline void link_task_to_cpu(struct task_struct* linked, | ||
6333 | + cpu_entry_t *entry) | ||
6334 | +{ | ||
6335 | + cpu_entry_t *sched; | ||
6336 | + struct task_struct* tmp; | ||
6337 | + int on_cpu; | ||
6338 | + | ||
6339 | + BUG_ON(linked && !is_realtime(linked)); | ||
6340 | + | ||
6341 | + /* Currently linked task is set to be unlinked. */ | ||
6342 | + if (entry->linked) { | ||
6343 | + entry->linked->rt_param.linked_on = NO_CPU; | ||
6344 | + } | ||
6345 | + | ||
6346 | + /* Link new task to CPU. */ | ||
6347 | + if (linked) { | ||
6348 | + set_rt_flags(linked, RT_F_RUNNING); | ||
6349 | + /* handle task is already scheduled somewhere! */ | ||
6350 | + on_cpu = linked->rt_param.scheduled_on; | ||
6351 | + if (on_cpu != NO_CPU) { | ||
6352 | + sched = &per_cpu(gsnedf_cpu_entries, on_cpu); | ||
6353 | + /* this should only happen if not linked already */ | ||
6354 | + BUG_ON(sched->linked == linked); | ||
6355 | + | ||
6356 | + /* If we are already scheduled on the CPU to which we | ||
6357 | + * wanted to link, we don't need to do the swap -- | ||
6358 | + * we just link ourselves to the CPU and depend on | ||
6359 | + * the caller to get things right. | ||
6360 | + */ | ||
6361 | + if (entry != sched) { | ||
6362 | + TRACE_TASK(linked, | ||
6363 | + "already scheduled on %d, updating link.\n", | ||
6364 | + sched->cpu); | ||
6365 | + tmp = sched->linked; | ||
6366 | + linked->rt_param.linked_on = sched->cpu; | ||
6367 | + sched->linked = linked; | ||
6368 | + update_cpu_position(sched); | ||
6369 | + linked = tmp; | ||
6370 | + } | ||
6371 | + } | ||
6372 | + if (linked) /* might be NULL due to swap */ | ||
6373 | + linked->rt_param.linked_on = entry->cpu; | ||
6374 | + } | ||
6375 | + entry->linked = linked; | ||
6376 | + if (linked) | ||
6377 | + TRACE_TASK(linked, "linked to %d.\n", entry->cpu); | ||
6378 | + else | ||
6379 | + TRACE("NULL linked to %d.\n", entry->cpu); | ||
6380 | + update_cpu_position(entry); | ||
6381 | +} | ||
6382 | + | ||
6383 | +/* unlink - Make sure a task is not linked any longer to an entry | ||
6384 | + * where it was linked before. Must hold gsnedf_lock. | ||
6385 | + */ | ||
6386 | +static noinline void unlink(struct task_struct* t) | ||
6387 | +{ | ||
6388 | + cpu_entry_t *entry; | ||
6389 | + | ||
6390 | + if (unlikely(!t)) { | ||
6391 | + TRACE_BUG_ON(!t); | ||
6392 | + return; | ||
6393 | + } | ||
6394 | + | ||
6395 | + if (t->rt_param.linked_on != NO_CPU) { | ||
6396 | + /* unlink */ | ||
6397 | + entry = &per_cpu(gsnedf_cpu_entries, t->rt_param.linked_on); | ||
6398 | + t->rt_param.linked_on = NO_CPU; | ||
6399 | + link_task_to_cpu(NULL, entry); | ||
6400 | + } else if (is_queued(t)) { | ||
6401 | + /* This is an interesting situation: t is scheduled, | ||
6402 | + * but was just recently unlinked. It cannot be | ||
6403 | + * linked anywhere else (because then it would have | ||
6404 | + * been relinked to this CPU), thus it must be in some | ||
6405 | + * queue. We must remove it from the list in this | ||
6406 | + * case. | ||
6407 | + */ | ||
6408 | + remove(&gsnedf, t); | ||
6409 | + } | ||
6410 | +} | ||
6411 | + | ||
6412 | + | ||
6413 | +/* preempt - force a CPU to reschedule | ||
6414 | + */ | ||
6415 | +static noinline void preempt(cpu_entry_t *entry) | ||
6416 | +{ | ||
6417 | + /* We cannot make the is_np() decision here if it is a remote CPU | ||
6418 | + * because requesting exit_np() requires that we currently use the | ||
6419 | + * address space of the task. Thus, in the remote case we just send | ||
6420 | + * the IPI and let schedule() handle the problem. | ||
6421 | + */ | ||
6422 | + | ||
6423 | + if (smp_processor_id() == entry->cpu) { | ||
6424 | + if (entry->scheduled && is_np(entry->scheduled)) | ||
6425 | + request_exit_np(entry->scheduled); | ||
6426 | + else | ||
6427 | + set_tsk_need_resched(current); | ||
6428 | + } else | ||
6429 | + /* in case it is a remote CPU, we have to defer | ||
6430 | + * the decision to the remote CPU | ||
6431 | + * FIXME: We could save a few IPI's here if we leave the flag | ||
6432 | + * set when we are waiting for a np_exit(). | ||
6433 | + */ | ||
6434 | + if (!test_will_schedule(entry->cpu)) | ||
6435 | + smp_send_reschedule(entry->cpu); | ||
6436 | +} | ||
6437 | + | ||
6438 | +/* requeue - Put an unlinked task into gsn-edf domain. | ||
6439 | + * Caller must hold gsnedf_lock. | ||
6440 | + */ | ||
6441 | +static noinline void requeue(struct task_struct* task) | ||
6442 | +{ | ||
6443 | + BUG_ON(!task); | ||
6444 | + /* sanity check before insertion */ | ||
6445 | + BUG_ON(is_queued(task)); | ||
6446 | + | ||
6447 | + if (is_released(task, litmus_clock())) | ||
6448 | + __add_ready(&gsnedf, task); | ||
6449 | + else { | ||
6450 | + /* it has got to wait */ | ||
6451 | + add_release(&gsnedf, task); | ||
6452 | + } | ||
6453 | +} | ||
6454 | + | ||
6455 | +/* check for any necessary preemptions */ | ||
6456 | +static void check_for_preemptions(void) | ||
6457 | +{ | ||
6458 | + struct task_struct *task; | ||
6459 | + cpu_entry_t* last; | ||
6460 | + | ||
6461 | + for(last = lowest_prio_cpu(); | ||
6462 | + edf_preemption_needed(&gsnedf, last->linked); | ||
6463 | + last = lowest_prio_cpu()) { | ||
6464 | + /* preemption necessary */ | ||
6465 | + task = __take_ready(&gsnedf); | ||
6466 | + TRACE("check_for_preemptions: attempting to link task %d to %d\n", | ||
6467 | + task->pid, last->cpu); | ||
6468 | + if (last->linked) | ||
6469 | + requeue(last->linked); | ||
6470 | + link_task_to_cpu(task, last); | ||
6471 | + preempt(last); | ||
6472 | + } | ||
6473 | +} | ||
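As a concrete (hypothetical) trace of this loop: with two CPUs linked to deadlines 50 and 90 and a ready queue holding 40 and 70, the first iteration preempts the deadline-90 CPU with the deadline-40 job and requeues 90; the loop then stops because the new queue head (70) beats neither linked job. A self-contained user-space rendering (plain ints for deadlines, a sorted array for the ready queue; neither is taken from the patch):

#include <stdio.h>
#include <limits.h>

#define M 2
#define NREADY 4

int main(void)
{
	int linked[M] = { 50, 90 };	/* deadline linked per CPU */
	int ready[NREADY] = { 40, 70, INT_MAX, INT_MAX }; /* sorted */

	for (;;) {
		/* lowest_prio_cpu(): latest linked deadline */
		int last = (linked[0] >= linked[1]) ? 0 : 1;
		/* edf_preemption_needed(): does the head beat the victim? */
		if (ready[0] >= linked[last])
			break;
		/* __take_ready() + requeue(victim) + link_task_to_cpu() */
		int task = ready[0], victim = linked[last], i;
		linked[last] = task;
		for (i = 0; i < NREADY - 1 && ready[i + 1] < victim; i++)
			ready[i] = ready[i + 1];
		ready[i] = victim;
		printf("linked %d to cpu%d, requeued %d\n", task, last, victim);
	}
	printf("final: cpu0=%d cpu1=%d head=%d\n",
	       linked[0], linked[1], ready[0]);
	return 0;
}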
6474 | + | ||
6475 | +/* gsnedf_job_arrival: task is either resumed or released */ | ||
6476 | +static noinline void gsnedf_job_arrival(struct task_struct* task) | ||
6477 | +{ | ||
6478 | + BUG_ON(!task); | ||
6479 | + | ||
6480 | + requeue(task); | ||
6481 | + check_for_preemptions(); | ||
6482 | +} | ||
6483 | + | ||
6484 | +static void gsnedf_release_jobs(rt_domain_t* rt, struct heap* tasks) | ||
6485 | +{ | ||
6486 | + unsigned long flags; | ||
6487 | + | ||
6488 | + spin_lock_irqsave(&gsnedf_lock, flags); | ||
6489 | + | ||
6490 | + __merge_ready(rt, tasks); | ||
6491 | + check_for_preemptions(); | ||
6492 | + | ||
6493 | + spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
6494 | +} | ||
6495 | + | ||
6496 | +/* caller holds gsnedf_lock */ | ||
6497 | +static noinline void job_completion(struct task_struct *t, int forced) | ||
6498 | +{ | ||
6499 | + BUG_ON(!t); | ||
6500 | + | ||
6501 | + sched_trace_task_completion(t, forced); | ||
6502 | + | ||
6503 | + TRACE_TASK(t, "job_completion().\n"); | ||
6504 | + | ||
6505 | + /* set flags */ | ||
6506 | + set_rt_flags(t, RT_F_SLEEP); | ||
6507 | + /* prepare for next period */ | ||
6508 | + prepare_for_next_period(t); | ||
6509 | + if (is_released(t, litmus_clock())) | ||
6510 | + sched_trace_task_release(t); | ||
6511 | + /* unlink */ | ||
6512 | + unlink(t); | ||
6513 | + /* requeue | ||
6514 | + * But don't requeue a blocking task. */ | ||
6515 | + if (is_running(t)) | ||
6516 | + gsnedf_job_arrival(t); | ||
6517 | +} | ||
6518 | + | ||
6519 | +/* gsnedf_tick - this function is called for every local timer | ||
6520 | + * interrupt. | ||
6521 | + * | ||
6522 | + * checks whether the current task has expired and checks | ||
6523 | + * whether we need to preempt it if it has not expired | ||
6524 | + */ | ||
6525 | +static void gsnedf_tick(struct task_struct* t) | ||
6526 | +{ | ||
6527 | + if (is_realtime(t) && budget_exhausted(t)) { | ||
6528 | + if (!is_np(t)) { | ||
6529 | + /* np tasks will be preempted when they become | ||
6530 | + * preemptable again | ||
6531 | + */ | ||
6532 | + set_tsk_need_resched(t); | ||
6533 | + set_will_schedule(); | ||
6534 | + TRACE("gsnedf_scheduler_tick: " | ||
6535 | + "%d is preemptable " | ||
6536 | + " => FORCE_RESCHED\n", t->pid); | ||
6537 | + } else { | ||
6538 | + TRACE("gsnedf_scheduler_tick: " | ||
6539 | + "%d is non-preemptable, " | ||
6540 | + "preemption delayed.\n", t->pid); | ||
6541 | + request_exit_np(t); | ||
6542 | + } | ||
6543 | + } | ||
6544 | +} | ||
6545 | + | ||
6546 | +/* Getting schedule() right is a bit tricky. schedule() may not make any | ||
6547 | + * assumptions on the state of the current task since it may be called for a | ||
6548 | + * number of reasons. The reasons include a scheduler_tick() determined that it | ||
6549 | + * was necessary, because sys_exit_np() was called, because some Linux | ||
6550 | + * subsystem determined so, or even (in the worst case) because there is a bug | ||
6551 | + * hidden somewhere. Thus, we must take extreme care to determine what the | ||
6552 | + * current state is. | ||
6553 | + * | ||
6554 | + * The CPU could currently be scheduling a task (or not), be linked (or not). | ||
6555 | + * | ||
6556 | + * The following assertions for the scheduled task could hold: | ||
6557 | + * | ||
6558 | + * - !is_running(scheduled) // the job blocks | ||
6559 | + * - scheduled->timeslice == 0 // the job completed (forcefully) | ||
6560 | + * - get_rt_flag() == RT_F_SLEEP // the job completed (by syscall) | ||
6561 | + * - linked != scheduled // we need to reschedule (for any reason) | ||
6562 | + * - is_np(scheduled) // rescheduling must be delayed, | ||
6563 | + * sys_exit_np must be requested | ||
6564 | + * | ||
6565 | + * Any of these can occur together. | ||
6566 | + */ | ||
6567 | +static struct task_struct* gsnedf_schedule(struct task_struct * prev) | ||
6568 | +{ | ||
6569 | + cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); | ||
6570 | + int out_of_time, sleep, preempt, np, exists, blocks; | ||
6571 | + struct task_struct* next = NULL; | ||
6572 | + | ||
6573 | + /* Will be released in finish_switch. */ | ||
6574 | + spin_lock(&gsnedf_lock); | ||
6575 | + clear_will_schedule(); | ||
6576 | + | ||
6577 | + /* sanity checking */ | ||
6578 | + BUG_ON(entry->scheduled && entry->scheduled != prev); | ||
6579 | + BUG_ON(entry->scheduled && !is_realtime(prev)); | ||
6580 | + BUG_ON(is_realtime(prev) && !entry->scheduled); | ||
6581 | + | ||
6582 | + /* (0) Determine state */ | ||
6583 | + exists = entry->scheduled != NULL; | ||
6584 | + blocks = exists && !is_running(entry->scheduled); | ||
6585 | + out_of_time = exists && budget_exhausted(entry->scheduled); | ||
6586 | + np = exists && is_np(entry->scheduled); | ||
6587 | + sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; | ||
6588 | + preempt = entry->scheduled != entry->linked; | ||
6589 | + | ||
6590 | + TRACE_TASK(prev, "invoked gsnedf_schedule.\n"); | ||
6591 | + | ||
6592 | + if (exists) | ||
6593 | + TRACE_TASK(prev, | ||
6594 | + "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " | ||
6595 | + "state:%d sig:%d\n", | ||
6596 | + blocks, out_of_time, np, sleep, preempt, | ||
6597 | + prev->state, signal_pending(prev)); | ||
6598 | + if (entry->linked && preempt) | ||
6599 | + TRACE_TASK(prev, "will be preempted by %s/%d\n", | ||
6600 | + entry->linked->comm, entry->linked->pid); | ||
6601 | + | ||
6602 | + | ||
6603 | + /* If a task blocks we have no choice but to reschedule. | ||
6604 | + */ | ||
6605 | + if (blocks) | ||
6606 | + unlink(entry->scheduled); | ||
6607 | + | ||
6608 | + /* Request a sys_exit_np() call if we would like to preempt but cannot. | ||
6609 | + * We need to make sure to update the link structure anyway in case | ||
6610 | + * that we are still linked. Multiple calls to request_exit_np() don't | ||
6611 | + * hurt. | ||
6612 | + */ | ||
6613 | + if (np && (out_of_time || preempt || sleep)) { | ||
6614 | + unlink(entry->scheduled); | ||
6615 | + request_exit_np(entry->scheduled); | ||
6616 | + } | ||
6617 | + | ||
6618 | + /* Any task that is preemptable and either exhausts its execution | ||
6619 | + * budget or wants to sleep completes. We may have to reschedule after | ||
6620 | + * this. Don't do a job completion if we block (can't have timers running | ||
6621 | + * for blocked jobs). Preemptions go first for the same reason. | ||
6622 | + */ | ||
6623 | + if (!np && (out_of_time || sleep) && !blocks && !preempt) | ||
6624 | + job_completion(entry->scheduled, !sleep); | ||
6625 | + | ||
6626 | + /* Link pending task if we became unlinked. | ||
6627 | + */ | ||
6628 | + if (!entry->linked) | ||
6629 | + link_task_to_cpu(__take_ready(&gsnedf), entry); | ||
6630 | + | ||
6631 | + /* The final scheduling decision. Do we need to switch for some reason? | ||
6632 | + * If linked is different from scheduled, then select linked as next. | ||
6633 | + */ | ||
6634 | + if ((!np || blocks) && | ||
6635 | + entry->linked != entry->scheduled) { | ||
6636 | + /* Schedule a linked job? */ | ||
6637 | + if (entry->linked) { | ||
6638 | + entry->linked->rt_param.scheduled_on = entry->cpu; | ||
6639 | + next = entry->linked; | ||
6640 | + } | ||
6641 | + if (entry->scheduled) { | ||
6642 | + /* not gonna be scheduled soon */ | ||
6643 | + entry->scheduled->rt_param.scheduled_on = NO_CPU; | ||
6644 | + TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); | ||
6645 | + } | ||
6646 | + } else | ||
6647 | + /* Only override Linux scheduler if we have a real-time task | ||
6648 | + * scheduled that needs to continue. | ||
6649 | + */ | ||
6650 | + if (exists) | ||
6651 | + next = prev; | ||
6652 | + | ||
6653 | + spin_unlock(&gsnedf_lock); | ||
6654 | + | ||
6655 | + TRACE("gsnedf_lock released, next=0x%p\n", next); | ||
6656 | + | ||
6657 | + | ||
6658 | + if (next) | ||
6659 | + TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); | ||
6660 | + else if (exists && !next) | ||
6661 | + TRACE("becomes idle at %llu.\n", litmus_clock()); | ||
6662 | + | ||
6663 | + | ||
6664 | + return next; | ||
6665 | +} | ||
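Condensed, the decision above is a pure function of the six state bits. The following user-space restatement is an editorial sketch of the precedence only, not kernel code: blocking wins, then non-preemptive sections, then job completion, then preemption.

#include <stdio.h>

struct st { int exists, blocks, out_of_time, np, sleep, preempt; };

/* condensed decision order of gsnedf_schedule() */
static const char *decide(struct st s)
{
	if (!s.exists)
		return "pick linked task or idle";
	if (s.blocks)
		return "unlink; pick linked task or idle";
	if (s.np && (s.out_of_time || s.preempt || s.sleep))
		return "unlink; request_exit_np(); keep prev";
	if ((s.out_of_time || s.sleep) && !s.preempt)
		return "job_completion(); then reschedule";
	if (s.preempt)
		return "switch to linked task";
	return "keep prev";
}

int main(void)
{
	struct st budget    = { 1, 0, 1, 0, 0, 0 };
	struct st budget_np = { 1, 0, 1, 1, 0, 0 };
	printf("budget exhausted:     %s\n", decide(budget));
	printf("budget exhausted, np: %s\n", decide(budget_np));
	return 0;
}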
6666 | + | ||
6667 | + | ||
6668 | +/* _finish_switch - we just finished the switch away from prev | ||
6669 | + */ | ||
6670 | +static void gsnedf_finish_switch(struct task_struct *prev) | ||
6671 | +{ | ||
6672 | + cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); | ||
6673 | + | ||
6674 | + entry->scheduled = is_realtime(current) ? current : NULL; | ||
6675 | + TRACE_TASK(prev, "switched away from\n"); | ||
6676 | +} | ||
6677 | + | ||
6678 | + | ||
6679 | +/* Prepare a task for running in RT mode | ||
6680 | + */ | ||
6681 | +static void gsnedf_task_new(struct task_struct * t, int on_rq, int running) | ||
6682 | +{ | ||
6683 | + unsigned long flags; | ||
6684 | + cpu_entry_t* entry; | ||
6685 | + | ||
6686 | + TRACE("gsn edf: task new %d\n", t->pid); | ||
6687 | + | ||
6688 | + spin_lock_irqsave(&gsnedf_lock, flags); | ||
6689 | + if (running) { | ||
6690 | + entry = &per_cpu(gsnedf_cpu_entries, task_cpu(t)); | ||
6691 | + BUG_ON(entry->scheduled); | ||
6692 | + entry->scheduled = t; | ||
6693 | + t->rt_param.scheduled_on = task_cpu(t); | ||
6694 | + } else | ||
6695 | + t->rt_param.scheduled_on = NO_CPU; | ||
6696 | + t->rt_param.linked_on = NO_CPU; | ||
6697 | + | ||
6698 | + /* setup job params */ | ||
6699 | + release_at(t, litmus_clock()); | ||
6700 | + | ||
6701 | + gsnedf_job_arrival(t); | ||
6702 | + spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
6703 | +} | ||
6704 | + | ||
6705 | +static void gsnedf_task_wake_up(struct task_struct *task) | ||
6706 | +{ | ||
6707 | + unsigned long flags; | ||
6708 | + lt_t now; | ||
6709 | + | ||
6710 | + TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); | ||
6711 | + | ||
6712 | + spin_lock_irqsave(&gsnedf_lock, flags); | ||
6713 | + /* We need to take suspensions due to semaphores into | ||
6714 | + * account! If a job resumes after being suspended due to acquiring | ||
6715 | + * a semaphore, it should never be treated as a new job release. | ||
6716 | + */ | ||
6717 | + if (get_rt_flags(task) == RT_F_EXIT_SEM) { | ||
6718 | + set_rt_flags(task, RT_F_RUNNING); | ||
6719 | + } else { | ||
6720 | + now = litmus_clock(); | ||
6721 | + if (is_tardy(task, now)) { | ||
6722 | + /* new sporadic release */ | ||
6723 | + release_at(task, now); | ||
6724 | + sched_trace_task_release(task); | ||
6725 | + } | ||
6726 | + else if (task->time_slice) | ||
6727 | + /* came back in time before deadline | ||
6728 | + */ | ||
6729 | + set_rt_flags(task, RT_F_RUNNING); | ||
6730 | + } | ||
6731 | + gsnedf_job_arrival(task); | ||
6732 | + spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
6733 | +} | ||
6734 | + | ||
6735 | +static void gsnedf_task_block(struct task_struct *t) | ||
6736 | +{ | ||
6737 | + unsigned long flags; | ||
6738 | + | ||
6739 | + TRACE_TASK(t, "block at %llu\n", litmus_clock()); | ||
6740 | + | ||
6741 | + /* unlink if necessary */ | ||
6742 | + spin_lock_irqsave(&gsnedf_lock, flags); | ||
6743 | + unlink(t); | ||
6744 | + spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
6745 | + | ||
6746 | + BUG_ON(!is_realtime(t)); | ||
6747 | +} | ||
6748 | + | ||
6749 | + | ||
6750 | +static void gsnedf_task_exit(struct task_struct * t) | ||
6751 | +{ | ||
6752 | + unsigned long flags; | ||
6753 | + | ||
6754 | + /* unlink if necessary */ | ||
6755 | + spin_lock_irqsave(&gsnedf_lock, flags); | ||
6756 | + unlink(t); | ||
6757 | + if (tsk_rt(t)->scheduled_on != NO_CPU) { | ||
6758 | + gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; | ||
6759 | + tsk_rt(t)->scheduled_on = NO_CPU; | ||
6760 | + } | ||
6761 | + spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
6762 | + | ||
6763 | + BUG_ON(!is_realtime(t)); | ||
6764 | + TRACE_TASK(t, "RIP\n"); | ||
6765 | +} | ||
6766 | + | ||
6767 | +#ifdef CONFIG_FMLP | ||
6768 | +static long gsnedf_pi_block(struct pi_semaphore *sem, | ||
6769 | + struct task_struct *new_waiter) | ||
6770 | +{ | ||
6771 | + /* This callback has to handle the situation where a new waiter is | ||
6772 | + * added to the wait queue of the semaphore. | ||
6773 | + * | ||
6774 | + * We must check if it has a higher priority than the currently | ||
6775 | + * highest-priority task, and then potentially reschedule. | ||
6776 | + */ | ||
6777 | + | ||
6778 | + BUG_ON(!new_waiter); | ||
6779 | + | ||
6780 | + if (edf_higher_prio(new_waiter, sem->hp.task)) { | ||
6781 | + TRACE_TASK(new_waiter, " boosts priority\n"); | ||
6782 | + /* called with IRQs disabled */ | ||
6783 | + spin_lock(&gsnedf_lock); | ||
6784 | + /* store new highest-priority task */ | ||
6785 | + sem->hp.task = new_waiter; | ||
6786 | + if (sem->holder) { | ||
6787 | + /* let holder inherit */ | ||
6788 | + sem->holder->rt_param.inh_task = new_waiter; | ||
6789 | + unlink(sem->holder); | ||
6790 | + gsnedf_job_arrival(sem->holder); | ||
6791 | + } | ||
6792 | + spin_unlock(&gsnedf_lock); | ||
6793 | + } | ||
6794 | + | ||
6795 | + return 0; | ||
6796 | +} | ||
6797 | + | ||
6798 | +static long gsnedf_inherit_priority(struct pi_semaphore *sem, | ||
6799 | + struct task_struct *new_owner) | ||
6800 | +{ | ||
6801 | + /* We don't need to acquire the gsnedf_lock since at the time of this | ||
6802 | + * call new_owner isn't actually scheduled yet (it's still sleeping) | ||
6803 | + * and since the calling function already holds sem->wait.lock, which | ||
6804 | + * prevents concurrent sem->hp.task changes. | ||
6805 | + */ | ||
6806 | + | ||
6807 | + if (sem->hp.task && sem->hp.task != new_owner) { | ||
6808 | + new_owner->rt_param.inh_task = sem->hp.task; | ||
6809 | + TRACE_TASK(new_owner, "inherited priority from %s/%d\n", | ||
6810 | + sem->hp.task->comm, sem->hp.task->pid); | ||
6811 | + } else | ||
6812 | + TRACE_TASK(new_owner, | ||
6813 | + "cannot inherit priority, " | ||
6814 | + "no higher priority job waits.\n"); | ||
6815 | + return 0; | ||
6816 | +} | ||
6817 | + | ||
6818 | +/* This function is called on a semaphore release, and assumes that | ||
6819 | + * the current task is also the semaphore holder. | ||
6820 | + */ | ||
6821 | +static long gsnedf_return_priority(struct pi_semaphore *sem) | ||
6822 | +{ | ||
6823 | + struct task_struct* t = current; | ||
6824 | + int ret = 0; | ||
6825 | + | ||
6826 | + /* Find new highest-priority semaphore task | ||
6827 | + * if holder task is the current hp.task. | ||
6828 | + * | ||
6829 | + * Calling function holds sem->wait.lock. | ||
6830 | + */ | ||
6831 | + if (t == sem->hp.task) | ||
6832 | + edf_set_hp_task(sem); | ||
6833 | + | ||
6834 | + TRACE_CUR("gsnedf_return_priority for lock %p\n", sem); | ||
6835 | + | ||
6836 | + if (t->rt_param.inh_task) { | ||
6837 | + /* interrupts already disabled by PI code */ | ||
6838 | + spin_lock(&gsnedf_lock); | ||
6839 | + | ||
6840 | + /* Reset inh_task to NULL. */ | ||
6841 | + t->rt_param.inh_task = NULL; | ||
6842 | + | ||
6843 | + /* Check if rescheduling is necessary */ | ||
6844 | + unlink(t); | ||
6845 | + gsnedf_job_arrival(t); | ||
6846 | + spin_unlock(&gsnedf_lock); | ||
6847 | + } | ||
6848 | + | ||
6849 | + return ret; | ||
6850 | +} | ||
6851 | + | ||
6852 | +#endif | ||
6853 | + | ||
6854 | +static long gsnedf_admit_task(struct task_struct* tsk) | ||
6855 | +{ | ||
6856 | + return 0; | ||
6857 | +} | ||
6858 | + | ||
6859 | + | ||
6860 | +/* Plugin object */ | ||
6861 | +static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { | ||
6862 | + .plugin_name = "GSN-EDF", | ||
6863 | + .finish_switch = gsnedf_finish_switch, | ||
6864 | + .tick = gsnedf_tick, | ||
6865 | + .task_new = gsnedf_task_new, | ||
6866 | + .complete_job = complete_job, | ||
6867 | + .task_exit = gsnedf_task_exit, | ||
6868 | + .schedule = gsnedf_schedule, | ||
6869 | + .task_wake_up = gsnedf_task_wake_up, | ||
6870 | + .task_block = gsnedf_task_block, | ||
6871 | +#ifdef CONFIG_FMLP | ||
6872 | + .fmlp_active = 1, | ||
6873 | + .pi_block = gsnedf_pi_block, | ||
6874 | + .inherit_priority = gsnedf_inherit_priority, | ||
6875 | + .return_priority = gsnedf_return_priority, | ||
6876 | +#endif | ||
6877 | + .admit_task = gsnedf_admit_task | ||
6878 | +}; | ||
6879 | + | ||
6880 | + | ||
6881 | +static int __init init_gsn_edf(void) | ||
6882 | +{ | ||
6883 | + int cpu; | ||
6884 | + cpu_entry_t *entry; | ||
6885 | + | ||
6886 | + heap_init(&gsnedf_cpu_heap); | ||
6887 | + /* initialize CPU state */ | ||
6888 | + for (cpu = 0; cpu < NR_CPUS; cpu++) { | ||
6889 | + entry = &per_cpu(gsnedf_cpu_entries, cpu); | ||
6890 | + gsnedf_cpus[cpu] = entry; | ||
6891 | + atomic_set(&entry->will_schedule, 0); | ||
6892 | + entry->linked = NULL; | ||
6893 | + entry->scheduled = NULL; | ||
6894 | + entry->cpu = cpu; | ||
6895 | + entry->hn = &gsnedf_heap_node[cpu]; | ||
6896 | + heap_node_init(&entry->hn, entry); | ||
6897 | + } | ||
6898 | + edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); | ||
6899 | + return register_sched_plugin(&gsn_edf_plugin); | ||
6900 | +} | ||
6901 | + | ||
6902 | + | ||
6903 | +module_init(init_gsn_edf); | ||
6904 | diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c | ||
6905 | new file mode 100644 | ||
6906 | index 0000000..b4858f8 | ||
6907 | --- /dev/null | ||
6908 | +++ b/litmus/sched_litmus.c | ||
6909 | @@ -0,0 +1,230 @@ | ||
6910 | +/* This file is included from kernel/sched.c */ | ||
6911 | + | ||
6912 | +#include <litmus/litmus.h> | ||
6913 | +#include <litmus/sched_plugin.h> | ||
6914 | + | ||
6915 | +static void update_time_litmus(struct rq *rq, struct task_struct *p) | ||
6916 | +{ | ||
6917 | + lt_t now = litmus_clock(); | ||
6918 | + p->rt_param.job_params.exec_time += | ||
6919 | + now - p->rt_param.job_params.exec_start; | ||
6920 | + p->rt_param.job_params.exec_start = now; | ||
6921 | +} | ||
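This is plain stopwatch accounting: charge the elapsed litmus_clock() interval to the current job, then restart the interval. In isolation:

#include <stdio.h>

struct job { unsigned long long exec_time, exec_start; };

/* charge elapsed time to the job and restart the stopwatch */
static void update_time(struct job *j, unsigned long long now)
{
	j->exec_time += now - j->exec_start;
	j->exec_start = now;
}

int main(void)
{
	struct job j = { 0, 100 };
	update_time(&j, 150);	/* ran from 100 to 150 */
	update_time(&j, 175);	/* ran from 150 to 175 */
	printf("exec_time=%llu\n", j.exec_time);	/* 75 */
	return 0;
}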
6922 | + | ||
6923 | +static void double_rq_lock(struct rq *rq1, struct rq *rq2); | ||
6924 | +static void double_rq_unlock(struct rq *rq1, struct rq *rq2); | ||
6925 | + | ||
6926 | +static void litmus_tick(struct rq *rq, struct task_struct *p) | ||
6927 | +{ | ||
6928 | + if (is_realtime(p)) | ||
6929 | + update_time_litmus(rq, p); | ||
6930 | + litmus->tick(p); | ||
6931 | +} | ||
6932 | + | ||
6933 | +#define NO_CPU -1 | ||
6934 | + | ||
6935 | +static void litmus_schedule(struct rq *rq, struct task_struct *prev) | ||
6936 | +{ | ||
6937 | + struct rq* other_rq; | ||
6938 | + long prev_state; | ||
6939 | + lt_t _maybe_deadlock = 0; | ||
6940 | + /* WARNING: rq is _not_ locked! */ | ||
6941 | + if (is_realtime(prev)) | ||
6942 | + update_time_litmus(rq, prev); | ||
6943 | + | ||
6944 | + /* let the plugin schedule */ | ||
6945 | + rq->litmus_next = litmus->schedule(prev); | ||
6946 | + | ||
6947 | + /* check if a global plugin pulled a task from a different RQ */ | ||
6948 | + if (rq->litmus_next && task_rq(rq->litmus_next) != rq) { | ||
6949 | + /* we need to migrate the task */ | ||
6950 | + other_rq = task_rq(rq->litmus_next); | ||
6951 | + TRACE_TASK(rq->litmus_next, "migrate from %d\n", other_rq->cpu); | ||
6952 | + | ||
6953 | + /* while we drop the lock, the prev task could change its | ||
6954 | + * state | ||
6955 | + */ | ||
6956 | + prev_state = prev->state; | ||
6957 | + mb(); | ||
6958 | + spin_unlock(&rq->lock); | ||
6959 | + | ||
6960 | + /* Don't race with a concurrent switch. | ||
6961 | + * This could deadlock in the case of cross or circular migrations. | ||
6962 | + * It's the job of the plugin to make sure that doesn't happen. | ||
6963 | + */ | ||
6964 | + TRACE_TASK(rq->litmus_next, "stack_in_use=%d\n", | ||
6965 | + rq->litmus_next->rt_param.stack_in_use); | ||
6966 | + if (rq->litmus_next->rt_param.stack_in_use != NO_CPU) { | ||
6967 | + TRACE_TASK(rq->litmus_next, "waiting to deschedule\n"); | ||
6968 | + _maybe_deadlock = litmus_clock(); | ||
6969 | + } | ||
6970 | + while (rq->litmus_next->rt_param.stack_in_use != NO_CPU) { | ||
6971 | + cpu_relax(); | ||
6972 | + mb(); | ||
6973 | + if (rq->litmus_next->rt_param.stack_in_use == NO_CPU) | ||
6974 | + TRACE_TASK(rq->litmus_next, | ||
6975 | + "descheduled. Proceeding.\n"); | ||
6976 | + if (lt_before(_maybe_deadlock + 10000000, litmus_clock())) { | ||
6977 | + /* We've been spinning for 10ms. | ||
6978 | + * Something can't be right! | ||
6979 | + * Let's abandon the task and bail out; at least | ||
6980 | + * we will have debug info instead of a hard | ||
6981 | + * deadlock. | ||
6982 | + */ | ||
6983 | + TRACE_TASK(rq->litmus_next, | ||
6984 | + "stack too long in use. Deadlock?\n"); | ||
6985 | + rq->litmus_next = NULL; | ||
6986 | + | ||
6987 | + /* bail out */ | ||
6988 | + spin_lock(&rq->lock); | ||
6989 | + return; | ||
6990 | + } | ||
6991 | + } | ||
6992 | +#ifdef __ARCH_WANT_UNLOCKED_CTXSW | ||
6993 | + if (rq->litmus_next->oncpu) | ||
6994 | + TRACE_TASK(rq->litmus_next, "waiting for !oncpu"); | ||
6995 | + while (rq->litmus_next->oncpu) { | ||
6996 | + cpu_relax(); | ||
6997 | + mb(); | ||
6998 | + } | ||
6999 | +#endif | ||
7000 | + double_rq_lock(rq, other_rq); | ||
7001 | + mb(); | ||
7002 | + if (prev->state != prev_state && is_realtime(prev)) { | ||
7003 | + TRACE_TASK(prev, | ||
7004 | + "state changed while we dropped" | ||
7005 | + " the lock: now=%d, old=%d\n", | ||
7006 | + prev->state, prev_state); | ||
7007 | + if (prev_state && !prev->state) { | ||
7008 | + /* prev task became unblocked | ||
7009 | + * we need to simulate the normal sequence of events | ||
7010 | + * for the scheduler plugins. | ||
7011 | + */ | ||
7012 | + litmus->task_block(prev); | ||
7013 | + litmus->task_wake_up(prev); | ||
7014 | + } | ||
7015 | + } | ||
7016 | + | ||
7017 | + set_task_cpu(rq->litmus_next, smp_processor_id()); | ||
7018 | + | ||
7019 | + /* DEBUG: now that we have the lock we need to make sure a | ||
7020 | + * couple of things still hold: | ||
7021 | + * - it is still a real-time task | ||
7022 | + * - it is still runnable (could have been stopped) | ||
7023 | + */ | ||
7024 | + if (!is_realtime(rq->litmus_next) || | ||
7025 | + !is_running(rq->litmus_next)) { | ||
7026 | + /* BAD BAD BAD */ | ||
7027 | + TRACE_TASK(rq->litmus_next, | ||
7028 | + "migration invariant FAILED: rt=%d running=%d\n", | ||
7029 | + is_realtime(rq->litmus_next), | ||
7030 | + is_running(rq->litmus_next)); | ||
7031 | + /* drop the task */ | ||
7032 | + rq->litmus_next = NULL; | ||
7033 | + } | ||
7034 | + /* release the other CPU's runqueue, but keep ours */ | ||
7035 | + spin_unlock(&other_rq->lock); | ||
7036 | + } | ||
7037 | + if (rq->litmus_next) | ||
7038 | + rq->litmus_next->rt_param.stack_in_use = rq->cpu; | ||
7039 | +} | ||
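The stack_in_use spin above is a bounded busy-wait: wait for the previous owner of the task's stack to finish its context switch, but give up after 10 ms rather than risk a hard deadlock on cross or circular migrations. The shape of the pattern in portable C11 (the names and the single-variable setting are invented for the sketch):

#include <stdio.h>
#include <stdatomic.h>
#include <time.h>

#define NO_CPU (-1)

static atomic_int stack_in_use = 0;	/* owning CPU id, NO_CPU if free */

static long long now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Returns 1 if the stack became free, 0 if we bailed out (possible
 * deadlock) -- mirroring how litmus_schedule() drops the task. */
static int wait_for_stack(void)
{
	long long start = now_ns();
	while (atomic_load(&stack_in_use) != NO_CPU) {
		if (now_ns() - start > 10000000LL)	/* 10 ms bound */
			return 0;
	}
	return 1;
}

int main(void)
{
	atomic_store(&stack_in_use, 2);	/* pretend CPU 2 still owns it */
	printf("%s\n", wait_for_stack() ? "proceed" : "bail out: deadlock?");
	return 0;
}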
7040 | + | ||
7041 | +static void enqueue_task_litmus(struct rq *rq, struct task_struct *p, int wakeup) | ||
7042 | +{ | ||
7043 | + if (wakeup) { | ||
7044 | + sched_trace_task_resume(p); | ||
7045 | + litmus->task_wake_up(p); | ||
7046 | + } else | ||
7047 | + TRACE_TASK(p, "ignoring an enqueue, not a wake up.\n"); | ||
7048 | +} | ||
7049 | + | ||
7050 | +static void dequeue_task_litmus(struct rq *rq, struct task_struct *p, int sleep) | ||
7051 | +{ | ||
7052 | + if (sleep) { | ||
7053 | + litmus->task_block(p); | ||
7054 | + sched_trace_task_block(p); | ||
7055 | + } else | ||
7056 | + TRACE_TASK(p, "ignoring a dequeue, not going to sleep.\n"); | ||
7057 | +} | ||
7058 | + | ||
7059 | +static void yield_task_litmus(struct rq *rq) | ||
7060 | +{ | ||
7061 | + BUG_ON(rq->curr != current); | ||
7062 | + litmus->complete_job(); | ||
7063 | +} | ||
7064 | + | ||
7065 | +/* Plugins are responsible for this. | ||
7066 | + */ | ||
7067 | +static void check_preempt_curr_litmus(struct rq *rq, struct task_struct *p) | ||
7068 | +{ | ||
7069 | +} | ||
7070 | + | ||
7071 | +/* has already been taken care of */ | ||
7072 | +static void put_prev_task_litmus(struct rq *rq, struct task_struct *p) | ||
7073 | +{ | ||
7074 | +} | ||
7075 | + | ||
7076 | +static struct task_struct *pick_next_task_litmus(struct rq *rq) | ||
7077 | +{ | ||
7078 | + struct task_struct* picked = rq->litmus_next; | ||
7079 | + rq->litmus_next = NULL; | ||
7080 | + if (picked) | ||
7081 | + picked->rt_param.job_params.exec_start = litmus_clock(); | ||
7082 | + return picked; | ||
7083 | +} | ||
7084 | + | ||
7085 | +static void task_tick_litmus(struct rq *rq, struct task_struct *p) | ||
7086 | +{ | ||
7087 | +} | ||
7088 | + | ||
7089 | +/* This is called when a task became a real-time task, either due | ||
7090 | + * to a SCHED_* class transition or due to PI mutex inheritance. | ||
7091 | + * We don't handle Linux PI mutex inheritance yet. Use LITMUS provided | ||
7092 | + * synchronization primitives instead. | ||
7093 | + */ | ||
7094 | +static void set_curr_task_litmus(struct rq *rq) | ||
7095 | +{ | ||
7096 | + rq->curr->rt_param.job_params.exec_start = litmus_clock(); | ||
7097 | +} | ||
7098 | + | ||
7099 | + | ||
7100 | +#ifdef CONFIG_SMP | ||
7101 | + | ||
7102 | +/* we don't repartition at runtime */ | ||
7103 | + | ||
7104 | +static unsigned long | ||
7105 | +load_balance_litmus(struct rq *this_rq, int this_cpu, struct rq *busiest, | ||
7106 | + unsigned long max_load_move, | ||
7107 | + struct sched_domain *sd, enum cpu_idle_type idle, | ||
7108 | + int *all_pinned, int *this_best_prio) | ||
7109 | +{ | ||
7110 | + return 0; | ||
7111 | +} | ||
7112 | + | ||
7113 | +static int | ||
7114 | +move_one_task_litmus(struct rq *this_rq, int this_cpu, struct rq *busiest, | ||
7115 | + struct sched_domain *sd, enum cpu_idle_type idle) | ||
7116 | +{ | ||
7117 | + return 0; | ||
7118 | +} | ||
7119 | +#endif | ||
7120 | + | ||
7121 | +const struct sched_class litmus_sched_class = { | ||
7122 | + .next = &rt_sched_class, | ||
7123 | + .enqueue_task = enqueue_task_litmus, | ||
7124 | + .dequeue_task = dequeue_task_litmus, | ||
7125 | + .yield_task = yield_task_litmus, | ||
7126 | + | ||
7127 | + .check_preempt_curr = check_preempt_curr_litmus, | ||
7128 | + | ||
7129 | + .pick_next_task = pick_next_task_litmus, | ||
7130 | + .put_prev_task = put_prev_task_litmus, | ||
7131 | + | ||
7132 | +#ifdef CONFIG_SMP | ||
7133 | + .load_balance = load_balance_litmus, | ||
7134 | + .move_one_task = move_one_task_litmus, | ||
7135 | +#endif | ||
7136 | + | ||
7137 | + .set_curr_task = set_curr_task_litmus, | ||
7138 | + .task_tick = task_tick_litmus, | ||
7139 | +}; | ||
7140 | diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c | ||
7141 | new file mode 100755 | ||
7142 | index 0000000..a733c95 | ||
7143 | --- /dev/null | ||
7144 | +++ b/litmus/sched_pfair.c | ||
7145 | @@ -0,0 +1,895 @@ | ||
7146 | +/* | ||
7147 | + * kernel/sched_pfair.c | ||
7148 | + * | ||
7149 | + * Implementation of the (global) Pfair scheduling algorithm. | ||
7150 | + * | ||
7151 | + */ | ||
7152 | + | ||
7153 | +#include <asm/div64.h> | ||
7154 | +#include <linux/delay.h> | ||
7155 | +#include <linux/module.h> | ||
7156 | +#include <linux/spinlock.h> | ||
7157 | +#include <linux/percpu.h> | ||
7158 | +#include <linux/sched.h> | ||
7159 | +#include <linux/list.h> | ||
7160 | + | ||
7161 | +#include <litmus/litmus.h> | ||
7162 | +#include <litmus/jobs.h> | ||
7163 | +#include <litmus/rt_domain.h> | ||
7164 | +#include <litmus/sched_plugin.h> | ||
7165 | +#include <litmus/sched_trace.h> | ||
7166 | + | ||
7167 | +#include <litmus/heap.h> | ||
7168 | + | ||
7169 | +struct subtask { | ||
7170 | + /* measured in quanta relative to job release */ | ||
7171 | + quanta_t release; | ||
7172 | + quanta_t deadline; | ||
7173 | + quanta_t overlap; /* called "b bit" by PD^2 */ | ||
7174 | + quanta_t group_deadline; | ||
7175 | +}; | ||
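These fields follow the standard PD^2 definitions. For a task needing e quanta every p quanta (weight wt = e/p) and 0-based subtask index i, release_i = floor(i/wt) and deadline_i = ceil((i+1)/wt), and the b-bit is set iff the subtask's window overlaps the next one, i.e. iff (i+1)/wt is not an integer. A sketch in integer arithmetic (group deadline omitted, since its heavy-task derivation is more involved):

#include <stdio.h>

/* ceil(a/b) for positive integers */
static unsigned long divceil(unsigned long a, unsigned long b)
{
	return (a + b - 1) / b;
}

/* PD^2 window of 0-based subtask i of a task with e quanta per p */
static void subtask_window(unsigned long i, unsigned long e, unsigned long p,
			   unsigned long *rel, unsigned long *dl, int *bbit)
{
	*rel  = (i * p) / e;			/* floor(i / wt)      */
	*dl   = divceil((i + 1) * p, e);	/* ceil((i + 1) / wt) */
	/* b-bit: window overlaps the next iff (i+1)/wt is not integral */
	*bbit = (((i + 1) * p) % e) != 0;
}

int main(void)
{
	unsigned long rel, dl;
	int b;
	/* e.g. weight 3/10: three subtasks spread over ten quanta */
	for (unsigned long i = 0; i < 3; i++) {
		subtask_window(i, 3, 10, &rel, &dl, &b);
		printf("subtask %lu: release %lu deadline %lu b=%d\n",
		       i, rel, dl, b);
	}
	return 0;
}

For weight 3/10 this prints windows [0,4), [3,7), [6,10) with b-bits 1, 1, 0, matching the per-subtask values cached in struct subtask.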
7176 | + | ||
7177 | +struct pfair_param { | ||
7178 | + quanta_t quanta; /* number of subtasks */ | ||
7179 | + quanta_t cur; /* index of current subtask */ | ||
7180 | + | ||
7181 | + quanta_t release; /* in quanta */ | ||
7182 | + quanta_t period; /* in quanta */ | ||
7183 | + | ||
7184 | + quanta_t last_quantum; /* when scheduled last */ | ||
7185 | + int last_cpu; /* where scheduled last */ | ||
7186 | + | ||
7187 | + unsigned int present; /* Can the task be scheduled? */ | ||
7188 | + unsigned int sporadic_release; /* On wakeup, new sporadic release? */ | ||
7189 | + | ||
7190 | + struct subtask subtasks[0]; /* allocate together with pfair_param */ | ||
7191 | +}; | ||
7192 | + | ||
7193 | +#define tsk_pfair(tsk) ((tsk)->rt_param.pfair) | ||
7194 | + | ||
7195 | +struct pfair_state { | ||
7196 | + int cpu; | ||
7197 | + volatile quanta_t cur_tick; /* updated by the CPU that is advancing | ||
7198 | + * the time */ | ||
7199 | + volatile quanta_t local_tick; /* What tick is the local CPU currently | ||
7200 | + * executing? Updated only by the local | ||
7201 | + * CPU. In QEMU, this may lag behind the | ||
7202 | + * current tick. In a real system, with | ||
7203 | + * proper timers and aligned quanta, | ||
7204 | + * that should only be the | ||
7205 | + * case for a very short time after the | ||
7206 | + * time advanced. With staggered quanta, | ||
7207 | + * it will lag for the duration of the | ||
7208 | + * offset. | ||
7209 | + */ | ||
7210 | + | ||
7211 | + struct task_struct* linked; /* the task that should be executing */ | ||
7212 | + struct task_struct* local; /* the local copy of linked */ | ||
7213 | + struct task_struct* scheduled; /* what is actually scheduled */ | ||
7214 | + | ||
7215 | + unsigned long missed_quanta; | ||
7216 | + lt_t offset; /* stagger offset */ | ||
7217 | +}; | ||
7218 | + | ||
7219 | +/* Currently, we limit the maximum period of any task to 2000 quanta. | ||
7220 | + * The reason is that it makes the implementation easier since we do not | ||
7221 | + * need to reallocate the release wheel on task arrivals. | ||
7222 | + * In the future, this limit could be lifted by resizing the wheel on demand. | ||
7223 | + */ | ||
7224 | +#define PFAIR_MAX_PERIOD 2000 | ||
7225 | + | ||
7226 | +/* This is the release queue wheel. It is indexed by pfair_time % | ||
7227 | + * PFAIR_MAX_PERIOD. Each heap is ordered by PFAIR priority, so that it can be | ||
7228 | + * merged with the ready queue. | ||
7229 | + */ | ||
7230 | +static struct heap release_queue[PFAIR_MAX_PERIOD]; | ||
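This is a classic timing wheel: an insert for absolute quantum t lands in slot t % PFAIR_MAX_PERIOD, and the per-quantum poll drains exactly one slot, which is why no task period may exceed the wheel size; a release more than PFAIR_MAX_PERIOD quanta in the future would collide with an earlier slot. A toy version that makes the collision visible (sizes invented for the sketch):

#include <stdio.h>

#define WHEEL 8			/* stands in for PFAIR_MAX_PERIOD */
#define SLOTCAP 4

static int slot[WHEEL][SLOTCAP];
static int slot_len[WHEEL];

/* insert a task id to be released at absolute quantum 'when' */
static void wheel_add(unsigned long when, int id)
{
	slot[when % WHEEL][slot_len[when % WHEEL]++] = id;
}

/* drain the slot for quantum 'now' -- cf. poll_releases() */
static void wheel_poll(unsigned long now)
{
	unsigned long idx = now % WHEEL;
	for (int i = 0; i < slot_len[idx]; i++)
		printf("q %lu: release task %d\n", now, slot[idx][i]);
	slot_len[idx] = 0;
}

int main(void)
{
	wheel_add(3, 1);
	wheel_add(11, 2);	/* 11 % 8 == 3: collides with quantum 3! */
	wheel_poll(3);		/* drains task 1 AND task 2 -- hence the
				 * PFAIR_MAX_PERIOD bound on task periods */
	return 0;
}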
7231 | + | ||
7232 | +DEFINE_PER_CPU(struct pfair_state, pfair_state); | ||
7233 | +struct pfair_state* pstate[NR_CPUS]; /* short cut */ | ||
7234 | + | ||
7235 | +#define NO_CPU 0xffffffff | ||
7236 | + | ||
7237 | +static quanta_t pfair_time = 0; /* the "official" PFAIR clock */ | ||
7238 | +static quanta_t merge_time = 0; /* Updated after the release queue has been | ||
7239 | + * merged. Used by drop_all_references(). | ||
7240 | + */ | ||
7241 | + | ||
7242 | +static rt_domain_t pfair; | ||
7243 | + | ||
7244 | +/* The pfair_lock is used to serialize all scheduling events. | ||
7245 | + */ | ||
7246 | +#define pfair_lock pfair.ready_lock | ||
7247 | + | ||
7248 | +/* Enable for lots of trace info. | ||
7249 | + * #define PFAIR_DEBUG | ||
7250 | + */ | ||
7251 | + | ||
7252 | +#ifdef PFAIR_DEBUG | ||
7253 | +#define PTRACE_TASK(t, f, args...) TRACE_TASK(t, f, ## args) | ||
7254 | +#define PTRACE(f, args...) TRACE(f, ## args) | ||
7255 | +#else | ||
7256 | +#define PTRACE_TASK(t, f, args...) | ||
7257 | +#define PTRACE(f, args...) | ||
7258 | +#endif | ||
7259 | + | ||
7260 | +/* gcc will inline all of these accessor functions... */ | ||
7261 | +static struct subtask* cur_subtask(struct task_struct* t) | ||
7262 | +{ | ||
7263 | + return tsk_pfair(t)->subtasks + tsk_pfair(t)->cur; | ||
7264 | +} | ||
7265 | + | ||
7266 | +static quanta_t cur_deadline(struct task_struct* t) | ||
7267 | +{ | ||
7268 | + return cur_subtask(t)->deadline + tsk_pfair(t)->release; | ||
7269 | +} | ||
7270 | + | ||
7271 | + | ||
7272 | +static quanta_t cur_sub_release(struct task_struct* t) | ||
7273 | +{ | ||
7274 | + return cur_subtask(t)->release + tsk_pfair(t)->release; | ||
7275 | +} | ||
7276 | + | ||
7277 | +static quanta_t cur_release(struct task_struct* t) | ||
7278 | +{ | ||
7279 | +#ifdef EARLY_RELEASE | ||
7280 | + /* only the release of the first subtask counts when we early | ||
7281 | + * release */ | ||
7282 | + return tsk_pfair(t)->release; | ||
7283 | +#else | ||
7284 | + return cur_sub_release(t); | ||
7285 | +#endif | ||
7286 | +} | ||
7287 | + | ||
7288 | +static quanta_t cur_overlap(struct task_struct* t) | ||
7289 | +{ | ||
7290 | + return cur_subtask(t)->overlap; | ||
7291 | +} | ||
7292 | + | ||
7293 | +static quanta_t cur_group_deadline(struct task_struct* t) | ||
7294 | +{ | ||
7295 | + quanta_t gdl = cur_subtask(t)->group_deadline; | ||
7296 | + if (gdl) | ||
7297 | + return gdl + tsk_pfair(t)->release; | ||
7298 | + else | ||
7299 | + return gdl; | ||
7300 | +} | ||
7301 | + | ||
7302 | +static int is_present(struct task_struct* t) | ||
7303 | +{ | ||
7304 | + return t && tsk_pfair(t)->present; | ||
7305 | +} | ||
7306 | + | ||
7307 | +static int pfair_higher_prio(struct task_struct* first, | ||
7308 | + struct task_struct* second) | ||
7309 | +{ | ||
7310 | + return /* first task must exist */ | ||
7311 | + first && ( | ||
7312 | + /* Does the second task exist and is it a real-time task? If | ||
7313 | + * not, the first task (which is a RT task) has higher | ||
7314 | + * priority. | ||
7315 | + */ | ||
7316 | + !second || !is_realtime(second) || | ||
7317 | + | ||
7318 | + /* Is the (subtask) deadline of the first task earlier? | ||
7319 | + * Then it has higher priority. | ||
7320 | + */ | ||
7321 | + time_before(cur_deadline(first), cur_deadline(second)) || | ||
7322 | + | ||
7323 | + /* Do we have a deadline tie? | ||
7324 | + * Then break by B-bit. | ||
7325 | + */ | ||
7326 | + (cur_deadline(first) == cur_deadline(second) && | ||
7327 | + (cur_overlap(first) > cur_overlap(second) || | ||
7328 | + | ||
7329 | + /* Do we have a B-bit tie? | ||
7330 | + * Then break by group deadline. | ||
7331 | + */ | ||
7332 | + (cur_overlap(first) == cur_overlap(second) && | ||
7333 | + (time_after(cur_group_deadline(first), | ||
7334 | + cur_group_deadline(second)) || | ||
7335 | + | ||
7336 | + /* Do we have a group deadline tie? | ||
7337 | + * Then break by PID, which are unique. | ||
7338 | + */ | ||
7339 | + (cur_group_deadline(first) == | ||
7340 | + cur_group_deadline(second) && | ||
7341 | + first->pid < second->pid)))))); | ||
7342 | +} | ||
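The nested expression encodes a four-level lexicographic order: earlier subtask deadline, then larger b-bit, then later group deadline, then smaller PID. A flat restatement in user space (plain ints standing in for the quanta_t accessors, ignoring the NULL/non-real-time guards):

#include <stdio.h>

struct sub { unsigned long dl; int b; unsigned long gdl; int pid; };

/* 1 iff a has higher PD^2 priority than b */
static int higher_prio(struct sub a, struct sub b)
{
	if (a.dl != b.dl)	return a.dl < b.dl;	/* earlier deadline */
	if (a.b != b.b)		return a.b > b.b;	/* larger b-bit     */
	if (a.gdl != b.gdl)	return a.gdl > b.gdl;	/* later group dl   */
	return a.pid < b.pid;				/* smaller PID      */
}

int main(void)
{
	struct sub x = { 10, 1, 12, 100 }, y = { 10, 0, 0, 99 };
	printf("x beats y: %d\n", higher_prio(x, y));	/* 1: b-bit tie-break */
	return 0;
}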
7343 | + | ||
7344 | +int pfair_ready_order(struct heap_node* a, struct heap_node* b) | ||
7345 | +{ | ||
7346 | + return pfair_higher_prio(heap2task(a), heap2task(b)); | ||
7347 | +} | ||
7348 | + | ||
7349 | +/* return the proper release queue for time t */ | ||
7350 | +static struct heap* relq(quanta_t t) | ||
7351 | +{ | ||
7352 | + struct heap* rq = &release_queue[t % PFAIR_MAX_PERIOD]; | ||
7353 | + return rq; | ||
7354 | +} | ||
7355 | + | ||
7356 | +static void prepare_release(struct task_struct* t, quanta_t at) | ||
7357 | +{ | ||
7358 | + tsk_pfair(t)->release = at; | ||
7359 | + tsk_pfair(t)->cur = 0; | ||
7360 | +} | ||
7361 | + | ||
7362 | +static void __pfair_add_release(struct task_struct* t, struct heap* queue) | ||
7363 | +{ | ||
7364 | + heap_insert(pfair_ready_order, queue, | ||
7365 | + tsk_rt(t)->heap_node); | ||
7366 | +} | ||
7367 | + | ||
7368 | +static void pfair_add_release(struct task_struct* t) | ||
7369 | +{ | ||
7370 | + BUG_ON(heap_node_in_heap(tsk_rt(t)->heap_node)); | ||
7371 | + __pfair_add_release(t, relq(cur_release(t))); | ||
7372 | +} | ||
7373 | + | ||
7374 | +/* pull released tasks from the release queue */ | ||
7375 | +static void poll_releases(quanta_t time) | ||
7376 | +{ | ||
7377 | + __merge_ready(&pfair, relq(time)); | ||
7378 | + merge_time = time; | ||
7379 | +} | ||
7380 | + | ||
7381 | +static void check_preempt(struct task_struct* t) | ||
7382 | +{ | ||
7383 | + int cpu = NO_CPU; | ||
7384 | + if (tsk_rt(t)->linked_on != tsk_rt(t)->scheduled_on && | ||
7385 | + tsk_pfair(t)->present) { | ||
7386 | + /* the task can be scheduled and | ||
7387 | + * is not scheduled where it ought to be scheduled | ||
7388 | + */ | ||
7389 | + cpu = tsk_rt(t)->linked_on != NO_CPU ? | ||
7390 | + tsk_rt(t)->linked_on : | ||
7391 | + tsk_rt(t)->scheduled_on; | ||
7392 | + PTRACE_TASK(t, "linked_on:%d, scheduled_on:%d\n", | ||
7393 | + tsk_rt(t)->linked_on, tsk_rt(t)->scheduled_on); | ||
7394 | + /* preempt */ | ||
7395 | + if (cpu == smp_processor_id()) | ||
7396 | + set_tsk_need_resched(current); | ||
7397 | + else { | ||
7398 | + smp_send_reschedule(cpu); | ||
7399 | + } | ||
7400 | + } | ||
7401 | +} | ||
7402 | + | ||
7403 | +/* caller must hold pfair_lock */ | ||
7404 | +static void drop_all_references(struct task_struct *t) | ||
7405 | +{ | ||
7406 | + int cpu; | ||
7407 | + struct pfair_state* s; | ||
7408 | + struct heap* q; | ||
7409 | + if (heap_node_in_heap(tsk_rt(t)->heap_node)) { | ||
7410 | + /* figure out what queue the node is in */ | ||
7411 | + if (time_before_eq(cur_release(t), merge_time)) | ||
7412 | + q = &pfair.ready_queue; | ||
7413 | + else | ||
7414 | + q = relq(cur_release(t)); | ||
7415 | + heap_delete(pfair_ready_order, q, | ||
7416 | + tsk_rt(t)->heap_node); | ||
7417 | + } | ||
7418 | + for (cpu = 0; cpu < NR_CPUS; cpu++) { | ||
7419 | + s = &per_cpu(pfair_state, cpu); | ||
7420 | + if (s->linked == t) | ||
7421 | + s->linked = NULL; | ||
7422 | + if (s->local == t) | ||
7423 | + s->local = NULL; | ||
7424 | + if (s->scheduled == t) | ||
7425 | + s->scheduled = NULL; | ||
7426 | + } | ||
7427 | +} | ||
7428 | + | ||
7429 | +/* returns 1 if the task needs to go the release queue */ | ||
7430 | +static int advance_subtask(quanta_t time, struct task_struct* t, int cpu) | ||
7431 | +{ | ||
7432 | + struct pfair_param* p = tsk_pfair(t); | ||
7433 | + int to_relq; | ||
7434 | + p->cur = (p->cur + 1) % p->quanta; | ||
7435 | + if (!p->cur) { | ||
7436 | + sched_trace_task_completion(t, 1); | ||
7437 | + if (tsk_pfair(t)->present) { | ||
7438 | + /* we start a new job */ | ||
7439 | + prepare_for_next_period(t); | ||
7440 | + sched_trace_task_release(t); | ||
7441 | + get_rt_flags(t) = RT_F_RUNNING; | ||
7442 | + p->release += p->period; | ||
7443 | + } else { | ||
7444 | + /* remove task from system until it wakes */ | ||
7445 | + drop_all_references(t); | ||
7446 | + tsk_pfair(t)->sporadic_release = 1; | ||
7447 | + TRACE_TASK(t, "on %d advanced to subtask %lu (not present)\n", | ||
7448 | + cpu, p->cur); | ||
7449 | + return 0; | ||
7450 | + } | ||
7451 | + } | ||
7452 | + to_relq = time_after(cur_release(t), time); | ||
7453 | + TRACE_TASK(t, "on %d advanced to subtask %lu -> to_relq=%d\n", | ||
7454 | + cpu, p->cur, to_relq); | ||
7455 | + return to_relq; | ||
7456 | +} | ||
7457 | + | ||
7458 | +static void advance_subtasks(quanta_t time) | ||
7459 | +{ | ||
7460 | + int cpu, missed; | ||
7461 | + struct task_struct* l; | ||
7462 | + struct pfair_param* p; | ||
7463 | + | ||
7464 | + for_each_online_cpu(cpu) { | ||
7465 | + l = pstate[cpu]->linked; | ||
7466 | + missed = pstate[cpu]->linked != pstate[cpu]->local; | ||
7467 | + if (l) { | ||
7468 | + p = tsk_pfair(l); | ||
7469 | + p->last_quantum = time; | ||
7470 | + p->last_cpu = cpu; | ||
7471 | + if (advance_subtask(time, l, cpu)) { | ||
7472 | + pstate[cpu]->linked = NULL; | ||
7473 | + pfair_add_release(l); | ||
7474 | + } | ||
7475 | + } | ||
7476 | + } | ||
7477 | +} | ||
7478 | + | ||
7479 | +static int target_cpu(quanta_t time, struct task_struct* t, int default_cpu) | ||
7480 | +{ | ||
7481 | + int cpu; | ||
7482 | + if (tsk_rt(t)->scheduled_on != NO_CPU) { | ||
7483 | + /* always observe scheduled_on linkage */ | ||
7484 | + default_cpu = tsk_rt(t)->scheduled_on; | ||
7485 | + } else if (tsk_pfair(t)->last_quantum == time - 1) { | ||
7486 | + /* back2back quanta */ | ||
7487 | + /* Only observe last_quantum if no scheduled_on is in the way. | ||
7488 | + * This should only kick in if a CPU missed quanta, and that | ||
7489 | + * *should* only happen in QEMU. | ||
7490 | + */ | ||
7491 | + cpu = tsk_pfair(t)->last_cpu; | ||
7492 | + if (!pstate[cpu]->linked || | ||
7493 | + tsk_rt(pstate[cpu]->linked)->scheduled_on != cpu) { | ||
7494 | + default_cpu = cpu; | ||
7495 | + } | ||
7496 | + } | ||
7497 | + return default_cpu; | ||
7498 | +} | ||
7499 | + | ||
7500 | +/* returns one if linking was redirected */ | ||
7501 | +static int pfair_link(quanta_t time, int cpu, | ||
7502 | + struct task_struct* t) | ||
7503 | +{ | ||
7504 | + int target = target_cpu(time, t, cpu); | ||
7505 | + struct task_struct* prev = pstate[cpu]->linked; | ||
7506 | + struct task_struct* other; | ||
7507 | + | ||
7508 | + if (target != cpu) { | ||
7509 | + other = pstate[target]->linked; | ||
7510 | + pstate[target]->linked = t; | ||
7511 | + tsk_rt(t)->linked_on = target; | ||
7512 | + if (!other) | ||
7513 | + /* linked ok, but reschedule this CPU */ | ||
7514 | + return 1; | ||
7515 | + if (target < cpu) { | ||
7516 | + /* link other to cpu instead */ | ||
7517 | + tsk_rt(other)->linked_on = cpu; | ||
7518 | + pstate[cpu]->linked = other; | ||
7519 | + if (prev) { | ||
7520 | + /* prev got pushed back into the ready queue */ | ||
7521 | + tsk_rt(prev)->linked_on = NO_CPU; | ||
7522 | + __add_ready(&pfair, prev); | ||
7523 | + } | ||
7524 | + /* we are done with this cpu */ | ||
7525 | + return 0; | ||
7526 | + } else { | ||
7527 | + /* re-add other; its original CPU was not considered yet */ | ||
7528 | + tsk_rt(other)->linked_on = NO_CPU; | ||
7529 | + __add_ready(&pfair, other); | ||
7530 | + /* reschedule this CPU */ | ||
7531 | + return 1; | ||
7532 | + } | ||
7533 | + } else { | ||
7534 | + pstate[cpu]->linked = t; | ||
7535 | + tsk_rt(t)->linked_on = cpu; | ||
7536 | + if (prev) { | ||
7537 | + /* prev got pushed back into the ready queue */ | ||
7538 | + tsk_rt(prev)->linked_on = NO_CPU; | ||
7539 | + __add_ready(&pfair, prev); | ||
7540 | + } | ||
7541 | + /* we are done with this CPU */ | ||
7542 | + return 0; | ||
7543 | + } | ||
7544 | +} | ||
7545 | + | ||
7546 | +static void schedule_subtasks(quanta_t time) | ||
7547 | +{ | ||
7548 | + int cpu, retry; | ||
7549 | + | ||
7550 | + for_each_online_cpu(cpu) { | ||
7551 | + retry = 1; | ||
7552 | + while (retry) { | ||
7553 | + if (pfair_higher_prio(__peek_ready(&pfair), | ||
7554 | + pstate[cpu]->linked)) | ||
7555 | + retry = pfair_link(time, cpu, | ||
7556 | + __take_ready(&pfair)); | ||
7557 | + else | ||
7558 | + retry = 0; | ||
7559 | + } | ||
7560 | + } | ||
7561 | +} | ||
7562 | + | ||
7563 | +static void schedule_next_quantum(quanta_t time) | ||
7564 | +{ | ||
7565 | + int cpu; | ||
7566 | + | ||
7567 | + /* called with interrupts disabled */ | ||
7568 | + PTRACE("--- Q %lu at %llu PRE-SPIN\n", | ||
7569 | + time, litmus_clock()); | ||
7570 | + spin_lock(&pfair_lock); | ||
7571 | + PTRACE("<<< Q %lu at %llu\n", | ||
7572 | + time, litmus_clock()); | ||
7573 | + | ||
7574 | + sched_trace_quantum_boundary(); | ||
7575 | + | ||
7576 | + advance_subtasks(time); | ||
7577 | + poll_releases(time); | ||
7578 | + schedule_subtasks(time); | ||
7579 | + | ||
7580 | + for (cpu = 0; cpu < NR_CPUS; cpu++) | ||
7581 | + if (pstate[cpu]->linked) | ||
7582 | + PTRACE_TASK(pstate[cpu]->linked, | ||
7583 | + " linked on %d.\n", cpu); | ||
7584 | + else | ||
7585 | + PTRACE("(null) linked on %d.\n", cpu); | ||
7586 | + | ||
7587 | + /* We are done. Advance time. */ | ||
7588 | + mb(); | ||
7589 | + for (cpu = 0; cpu < NR_CPUS; cpu++) { | ||
7590 | + if (pstate[cpu]->local_tick != pstate[cpu]->cur_tick) { | ||
7591 | + TRACE("BAD Quantum not acked on %d " | ||
7592 | + "(l:%lu c:%lu p:%lu)\n", | ||
7593 | + cpu, | ||
7594 | + pstate[cpu]->local_tick, | ||
7595 | + pstate[cpu]->cur_tick, | ||
7596 | + pfair_time); | ||
7597 | + pstate[cpu]->missed_quanta++; | ||
7598 | + } | ||
7599 | + pstate[cpu]->cur_tick = time; | ||
7600 | + } | ||
7601 | + PTRACE(">>> Q %lu at %llu\n", | ||
7602 | + time, litmus_clock()); | ||
7603 | + spin_unlock(&pfair_lock); | ||
7604 | +} | ||
7605 | + | ||
7606 | +static noinline void wait_for_quantum(quanta_t q, struct pfair_state* state) | ||
7607 | +{ | ||
7608 | + quanta_t loc; | ||
7609 | + | ||
7610 | + goto first; /* skip mb() on first iteration */ | ||
7611 | + do { | ||
7612 | + cpu_relax(); | ||
7613 | + mb(); | ||
7614 | + first: loc = state->cur_tick; | ||
7615 | + /* FIXME: what if loc > cur? */ | ||
7616 | + } while (time_before(loc, q)); | ||
7617 | + PTRACE("observed cur_tick:%lu >= q:%lu\n", | ||
7618 | + loc, q); | ||
7619 | +} | ||
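wait_for_quantum() is the consumer half of a publish/spin handshake: schedule_next_quantum() updates all per-CPU state, issues mb(), and only then bumps each cur_tick, while the other CPUs spin here until they observe the new quantum number. A minimal userspace analogue of that handshake, using C11 release/acquire atomics in place of the kernel's full barriers (a sketch; all names below are illustrative, not from the patch):

    /* publisher = the CPU running schedule_next_quantum(),
     * waiter = a CPU spinning in wait_for_quantum() */
    #include <stdatomic.h>
    #include <pthread.h>
    #include <stdio.h>

    static atomic_ulong tick;

    static void *publisher(void *arg)
    {
            /* ... update shared scheduling state here ... */
            atomic_store_explicit(&tick, 1, memory_order_release);
            return NULL;
    }

    static void wait_for(unsigned long q)
    {
            /* re-read until the published tick reaches q */
            while (atomic_load_explicit(&tick, memory_order_acquire) < q)
                    ;       /* cpu_relax() would go here in kernel code */
    }

    int main(void)
    {
            pthread_t tid;
            pthread_create(&tid, NULL, publisher, NULL);
            wait_for(1);
            printf("observed tick >= 1\n");
            pthread_join(tid, NULL);
            return 0;
    }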
7620 | + | ||
7621 | +static quanta_t current_quantum(struct pfair_state* state) | ||
7622 | +{ | ||
7623 | + lt_t t = litmus_clock() - state->offset; | ||
7624 | + return time2quanta(t, FLOOR); | ||
7625 | +} | ||
7626 | + | ||
7627 | +static void catchup_quanta(quanta_t from, quanta_t target, | ||
7628 | + struct pfair_state* state) | ||
7629 | +{ | ||
7630 | + quanta_t cur = from, time; | ||
7631 | + TRACE("+++< BAD catching up quanta from %lu to %lu\n", | ||
7632 | + from, target); | ||
7633 | + while (time_before(cur, target)) { | ||
7634 | + wait_for_quantum(cur, state); | ||
7635 | + cur++; | ||
7636 | + time = cmpxchg(&pfair_time, | ||
7637 | + cur - 1, /* expected */ | ||
7638 | + cur /* next */ | ||
7639 | + ); | ||
7640 | + if (time == cur - 1) | ||
7641 | + schedule_next_quantum(cur); | ||
7642 | + } | ||
7643 | + TRACE("+++> catching up done\n"); | ||
7644 | +} | ||
7645 | + | ||
7646 | +/* pfair_tick - this function is called for every local timer | ||
7647 | + * interrupt. | ||
7648 | + */ | ||
7649 | +static void pfair_tick(struct task_struct* t) | ||
7650 | +{ | ||
7651 | + struct pfair_state* state = &__get_cpu_var(pfair_state); | ||
7652 | + quanta_t time, cur; | ||
7653 | + int retry = 10; | ||
7654 | + | ||
7655 | + do { | ||
7656 | + cur = current_quantum(state); | ||
7657 | + PTRACE("q %lu at %llu\n", cur, litmus_clock()); | ||
7658 | + | ||
7659 | + /* Attempt to advance time. First CPU to get here | ||
7660 | + * will prepare the next quantum. | ||
7661 | + */ | ||
7662 | + time = cmpxchg(&pfair_time, | ||
7663 | + cur - 1, /* expected */ | ||
7664 | + cur /* next */ | ||
7665 | + ); | ||
7666 | + if (time == cur - 1) { | ||
7667 | + /* exchange succeeded */ | ||
7668 | + wait_for_quantum(cur - 1, state); | ||
7669 | + schedule_next_quantum(cur); | ||
7670 | + retry = 0; | ||
7671 | + } else if (time_before(time, cur - 1)) { | ||
7672 | + /* the whole system missed a tick !? */ | ||
7673 | + catchup_quanta(time, cur, state); | ||
7674 | + retry--; | ||
7675 | + } else if (time_after(time, cur)) { | ||
7676 | +			/* our timer is lagging behind!? */ | ||
7677 | + TRACE("BAD pfair_time:%lu > cur:%lu\n", time, cur); | ||
7678 | + retry--; | ||
7679 | + } else { | ||
7680 | + /* Some other CPU already started scheduling | ||
7681 | + * this quantum. Let it do its job and then update. | ||
7682 | + */ | ||
7683 | + retry = 0; | ||
7684 | + } | ||
7685 | + } while (retry); | ||
7686 | + | ||
7687 | + /* Spin locally until time advances. */ | ||
7688 | + wait_for_quantum(cur, state); | ||
7689 | + | ||
7690 | + /* copy assignment */ | ||
7691 | + /* FIXME: what if we race with a future update? Corrupted state? */ | ||
7692 | + state->local = state->linked; | ||
7693 | + /* signal that we are done */ | ||
7694 | + mb(); | ||
7695 | + state->local_tick = state->cur_tick; | ||
7696 | + | ||
7697 | + if (state->local != current | ||
7698 | + && (is_realtime(current) || is_present(state->local))) | ||
7699 | + set_tsk_need_resched(current); | ||
7700 | +} | ||
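The heart of pfair_tick() is the cmpxchg() on pfair_time: among all CPUs that take a tick interrupt for quantum cur, exactly one succeeds in advancing the counter from cur - 1 to cur, and that CPU alone runs schedule_next_quantum(); the losers fall through to the spin in wait_for_quantum(). The same leader election in portable C11 (a sketch; the counter and helper are hypothetical):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_ulong global_time = 9;    /* quantum 9 already scheduled */

    static int try_advance(unsigned long cur)
    {
            unsigned long expected = cur - 1;
            /* succeeds only if no other CPU advanced the counter first */
            return atomic_compare_exchange_strong(&global_time,
                                                  &expected, cur);
    }

    int main(void)
    {
            unsigned long cur = 10;
            if (try_advance(cur))
                    printf("this CPU schedules quantum %lu\n", cur);
            else
                    printf("another CPU won the race for quantum %lu\n", cur);
            return 0;
    }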
7701 | + | ||
7702 | +static int safe_to_schedule(struct task_struct* t, int cpu) | ||
7703 | +{ | ||
7704 | + int where = tsk_rt(t)->scheduled_on; | ||
7705 | + if (where != NO_CPU && where != cpu) { | ||
7706 | + TRACE_TASK(t, "BAD: can't be scheduled on %d, " | ||
7707 | + "scheduled already on %d.\n", cpu, where); | ||
7708 | + return 0; | ||
7709 | + } else | ||
7710 | + return tsk_pfair(t)->present && get_rt_flags(t) == RT_F_RUNNING; | ||
7711 | +} | ||
7712 | + | ||
7713 | +static struct task_struct* pfair_schedule(struct task_struct * prev) | ||
7714 | +{ | ||
7715 | + struct pfair_state* state = &__get_cpu_var(pfair_state); | ||
7716 | + int blocks; | ||
7717 | + struct task_struct* next = NULL; | ||
7718 | + | ||
7719 | + spin_lock(&pfair_lock); | ||
7720 | + | ||
7721 | + blocks = is_realtime(prev) && !is_running(prev); | ||
7722 | + | ||
7723 | + if (blocks) | ||
7724 | + tsk_pfair(prev)->present = 0; | ||
7725 | + | ||
7726 | + if (state->local && safe_to_schedule(state->local, state->cpu)) | ||
7727 | + next = state->local; | ||
7728 | + | ||
7729 | + if (prev != next) { | ||
7730 | + tsk_rt(prev)->scheduled_on = NO_CPU; | ||
7731 | + if (next) | ||
7732 | + tsk_rt(next)->scheduled_on = state->cpu; | ||
7733 | + } | ||
7734 | + | ||
7735 | + spin_unlock(&pfair_lock); | ||
7736 | + | ||
7737 | + if (next) | ||
7738 | + TRACE_TASK(next, "scheduled rel=%lu at %lu (%llu)\n", | ||
7739 | + tsk_pfair(next)->release, pfair_time, litmus_clock()); | ||
7740 | + else if (is_realtime(prev)) | ||
7741 | + TRACE("Becomes idle at %lu (%llu)\n", pfair_time, litmus_clock()); | ||
7742 | + | ||
7743 | + return next; | ||
7744 | +} | ||
7745 | + | ||
7746 | +static void pfair_task_new(struct task_struct * t, int on_rq, int running) | ||
7747 | +{ | ||
7748 | + unsigned long flags; | ||
7749 | + | ||
7750 | + TRACE("pfair: task new %d state:%d\n", t->pid, t->state); | ||
7751 | + | ||
7752 | + spin_lock_irqsave(&pfair_lock, flags); | ||
7753 | + if (running) | ||
7754 | + t->rt_param.scheduled_on = task_cpu(t); | ||
7755 | + else | ||
7756 | + t->rt_param.scheduled_on = NO_CPU; | ||
7757 | + | ||
7758 | + prepare_release(t, pfair_time + 1); | ||
7759 | + tsk_pfair(t)->present = running; | ||
7760 | + tsk_pfair(t)->sporadic_release = 0; | ||
7761 | + pfair_add_release(t); | ||
7762 | + check_preempt(t); | ||
7763 | + | ||
7764 | + spin_unlock_irqrestore(&pfair_lock, flags); | ||
7765 | +} | ||
7766 | + | ||
7767 | +static void pfair_task_wake_up(struct task_struct *t) | ||
7768 | +{ | ||
7769 | + unsigned long flags; | ||
7770 | + lt_t now; | ||
7771 | + | ||
7772 | + TRACE_TASK(t, "wakes at %llu, release=%lu, pfair_time:%lu\n", | ||
7773 | + litmus_clock(), cur_release(t), pfair_time); | ||
7774 | + | ||
7775 | + spin_lock_irqsave(&pfair_lock, flags); | ||
7776 | + | ||
7777 | + tsk_pfair(t)->present = 1; | ||
7778 | + | ||
7779 | + /* It is a little unclear how to deal with Pfair | ||
7780 | + * tasks that block for a while and then wake. For now, | ||
7781 | + * if a task blocks and wakes before its next job release, | ||
7782 | + * then it may resume if it is currently linked somewhere | ||
7783 | + * (as if it never blocked at all). Otherwise, we have a | ||
7784 | + * new sporadic job release. | ||
7785 | + */ | ||
7786 | + if (tsk_pfair(t)->sporadic_release) { | ||
7787 | + now = litmus_clock(); | ||
7788 | + release_at(t, now); | ||
7789 | + prepare_release(t, time2quanta(now, CEIL)); | ||
7790 | + sched_trace_task_release(t); | ||
7791 | + /* FIXME: race with pfair_time advancing */ | ||
7792 | + pfair_add_release(t); | ||
7793 | + tsk_pfair(t)->sporadic_release = 0; | ||
7794 | + } | ||
7795 | + | ||
7796 | + check_preempt(t); | ||
7797 | + | ||
7798 | + spin_unlock_irqrestore(&pfair_lock, flags); | ||
7799 | + TRACE_TASK(t, "wake up done at %llu\n", litmus_clock()); | ||
7800 | +} | ||
7801 | + | ||
7802 | +static void pfair_task_block(struct task_struct *t) | ||
7803 | +{ | ||
7804 | + BUG_ON(!is_realtime(t)); | ||
7805 | + TRACE_TASK(t, "blocks at %llu, state:%d\n", | ||
7806 | + litmus_clock(), t->state); | ||
7807 | +} | ||
7808 | + | ||
7809 | +static void pfair_task_exit(struct task_struct * t) | ||
7810 | +{ | ||
7811 | + unsigned long flags; | ||
7812 | + | ||
7813 | + BUG_ON(!is_realtime(t)); | ||
7814 | + | ||
7815 | +	/* Remove task from release or ready queue, and ensure | ||
7816 | +	 * that it is not the scheduled task for ANY CPU. We | ||
7817 | +	 * do this blanket check because occasionally when | ||
7818 | + * tasks exit while blocked, the task_cpu of the task | ||
7819 | + * might not be the same as the CPU that the PFAIR scheduler | ||
7820 | + * has chosen for it. | ||
7821 | + */ | ||
7822 | + spin_lock_irqsave(&pfair_lock, flags); | ||
7823 | + | ||
7824 | + TRACE_TASK(t, "RIP, state:%d\n", t->state); | ||
7825 | + drop_all_references(t); | ||
7826 | + | ||
7827 | + spin_unlock_irqrestore(&pfair_lock, flags); | ||
7828 | + | ||
7829 | + kfree(t->rt_param.pfair); | ||
7830 | + t->rt_param.pfair = NULL; | ||
7831 | +} | ||
7832 | + | ||
7833 | + | ||
7834 | +static void pfair_release_at(struct task_struct* task, lt_t start) | ||
7835 | +{ | ||
7836 | + unsigned long flags; | ||
7837 | + quanta_t release; | ||
7838 | + | ||
7839 | + BUG_ON(!is_realtime(task)); | ||
7840 | + | ||
7841 | + spin_lock_irqsave(&pfair_lock, flags); | ||
7842 | + release_at(task, start); | ||
7843 | + release = time2quanta(start, CEIL); | ||
7844 | + | ||
7845 | + if (release - pfair_time >= PFAIR_MAX_PERIOD) | ||
7846 | + release = pfair_time + PFAIR_MAX_PERIOD; | ||
7847 | + | ||
7848 | + TRACE_TASK(task, "sys release at %lu\n", release); | ||
7849 | + | ||
7850 | + drop_all_references(task); | ||
7851 | + prepare_release(task, release); | ||
7852 | + pfair_add_release(task); | ||
7853 | + | ||
7854 | + /* Clear sporadic release flag, since this release subsumes any | ||
7855 | + * sporadic release on wake. | ||
7856 | + */ | ||
7857 | + tsk_pfair(task)->sporadic_release = 0; | ||
7858 | + | ||
7859 | + spin_unlock_irqrestore(&pfair_lock, flags); | ||
7860 | +} | ||
7861 | + | ||
7862 | +static void init_subtask(struct subtask* sub, unsigned long i, | ||
7863 | + lt_t quanta, lt_t period) | ||
7864 | +{ | ||
7865 | + /* since i is zero-based, the formulas are shifted by one */ | ||
7866 | + lt_t tmp; | ||
7867 | + | ||
7868 | + /* release */ | ||
7869 | + tmp = period * i; | ||
7870 | + do_div(tmp, quanta); /* floor */ | ||
7871 | + sub->release = (quanta_t) tmp; | ||
7872 | + | ||
7873 | + /* deadline */ | ||
7874 | + tmp = period * (i + 1); | ||
7875 | + if (do_div(tmp, quanta)) /* ceil */ | ||
7876 | + tmp++; | ||
7877 | + sub->deadline = (quanta_t) tmp; | ||
7878 | + | ||
7879 | + /* next release */ | ||
7880 | + tmp = period * (i + 1); | ||
7881 | + do_div(tmp, quanta); /* floor */ | ||
7882 | + sub->overlap = sub->deadline - (quanta_t) tmp; | ||
7883 | + | ||
7884 | + /* Group deadline. | ||
7885 | + * Based on the formula given in Uma's thesis. | ||
7886 | + */ | ||
7887 | + if (2 * quanta >= period) { | ||
7888 | + /* heavy */ | ||
7889 | + tmp = (sub->deadline - (i + 1)) * period; | ||
7890 | + if (period > quanta && | ||
7891 | + do_div(tmp, (period - quanta))) /* ceil */ | ||
7892 | + tmp++; | ||
7893 | + sub->group_deadline = (quanta_t) tmp; | ||
7894 | + } else | ||
7895 | + sub->group_deadline = 0; | ||
7896 | +} | ||
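With zero-based subtask index i, execution cost e = quanta, and period p (both measured in quanta), init_subtask() computes the standard Pfair windows; the group deadline follows the formula from Uma Devi's thesis cited above:

    r_i = \lfloor i\,p/e \rfloor, \qquad
    d_i = \lceil (i+1)\,p/e \rceil, \qquad
    b_i = d_i - \lfloor (i+1)\,p/e \rfloor, \qquad
    D_i = \lceil (d_i - (i+1))\,p/(p-e) \rceil
        \quad \text{(heavy tasks, } 2e \ge p\text{)}

For example, a task with e = 2 and p = 3 (weight 2/3, hence heavy) gets the subtask windows [0, 2) and [1, 3), b-bits 1 and 0, and group deadline 3 for both subtasks.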
7897 | + | ||
7898 | +static void dump_subtasks(struct task_struct* t) | ||
7899 | +{ | ||
7900 | + unsigned long i; | ||
7901 | + for (i = 0; i < t->rt_param.pfair->quanta; i++) | ||
7902 | + TRACE_TASK(t, "SUBTASK %lu: rel=%lu dl=%lu bbit:%lu gdl:%lu\n", | ||
7903 | + i + 1, | ||
7904 | + t->rt_param.pfair->subtasks[i].release, | ||
7905 | + t->rt_param.pfair->subtasks[i].deadline, | ||
7906 | + t->rt_param.pfair->subtasks[i].overlap, | ||
7907 | + t->rt_param.pfair->subtasks[i].group_deadline); | ||
7908 | +} | ||
7909 | + | ||
7910 | +static long pfair_admit_task(struct task_struct* t) | ||
7911 | +{ | ||
7912 | + lt_t quanta; | ||
7913 | + lt_t period; | ||
7914 | + s64 quantum_length = ktime_to_ns(tick_period); | ||
7915 | + struct pfair_param* param; | ||
7916 | + unsigned long i; | ||
7917 | + | ||
7918 | + /* Pfair is a tick-based method, so the time | ||
7919 | + * of interest is jiffies. Calculate tick-based | ||
7920 | + * times for everything. | ||
7921 | + * (Ceiling of exec cost, floor of period.) | ||
7922 | + */ | ||
7923 | + | ||
7924 | +	quanta = get_exec_cost(t); | ||
7925 | +	period = get_rt_period(t); | ||
7926 | + | ||
7927 | +	quanta = time2quanta(quanta, CEIL); | ||
7928 | + | ||
7929 | + if (do_div(period, quantum_length)) | ||
7930 | + printk(KERN_WARNING | ||
7931 | + "The period of %s/%d is not a multiple of %llu.\n", | ||
7932 | + t->comm, t->pid, (unsigned long long) quantum_length); | ||
7933 | + | ||
7934 | + if (period >= PFAIR_MAX_PERIOD) { | ||
7935 | + printk(KERN_WARNING | ||
7936 | + "PFAIR: Rejecting task %s/%d; its period is too long.\n", | ||
7937 | + t->comm, t->pid); | ||
7938 | + return -EINVAL; | ||
7939 | + } | ||
7940 | + | ||
7941 | + if (quanta == period) { | ||
7942 | + /* special case: task has weight 1.0 */ | ||
7943 | + printk(KERN_INFO | ||
7944 | + "Admitting weight 1.0 task. (%s/%d, %llu, %llu).\n", | ||
7945 | + t->comm, t->pid, quanta, period); | ||
7946 | + quanta = 1; | ||
7947 | + period = 1; | ||
7948 | + } | ||
7949 | + | ||
7950 | + param = kmalloc(sizeof(struct pfair_param) + | ||
7951 | + quanta * sizeof(struct subtask), GFP_ATOMIC); | ||
7952 | + | ||
7953 | + if (!param) | ||
7954 | + return -ENOMEM; | ||
7955 | + | ||
7956 | + param->quanta = quanta; | ||
7957 | + param->cur = 0; | ||
7958 | + param->release = 0; | ||
7959 | + param->period = period; | ||
7960 | + | ||
7961 | + for (i = 0; i < quanta; i++) | ||
7962 | + init_subtask(param->subtasks + i, i, quanta, period); | ||
7963 | + | ||
7964 | + if (t->rt_param.pfair) | ||
7965 | + /* get rid of stale allocation */ | ||
7966 | + kfree(t->rt_param.pfair); | ||
7967 | + | ||
7968 | + t->rt_param.pfair = param; | ||
7969 | + | ||
7970 | + /* spew out some debug info */ | ||
7971 | + dump_subtasks(t); | ||
7972 | + | ||
7973 | + return 0; | ||
7974 | +} | ||
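pfair_admit_task() leans on the kernel's do_div() contract: do_div(n, base) divides the 64-bit lvalue n in place and returns the remainder, which is what drives the "not a multiple" warning above, while the execution cost is rounded up via time2quanta(..., CEIL). A userspace sketch of the same rounding, with demo_do_div() standing in for the real macro (an illustration of the contract, not the kernel implementation):

    #include <stdio.h>

    static unsigned long long demo_do_div(unsigned long long *n,
                                          unsigned long long base)
    {
            unsigned long long rem = *n % base;     /* do_div() returns this */
            *n /= base;                             /* ... and divides in place */
            return rem;
    }

    int main(void)
    {
            unsigned long long quantum_ns = 1000000;        /* say, a 1 ms tick */
            unsigned long long cost = 2500000, period = 10500000;

            /* ceiling for the cost, as time2quanta(..., CEIL) would do */
            unsigned long long quanta = (cost + quantum_ns - 1) / quantum_ns;

            if (demo_do_div(&period, quantum_ns))
                    printf("warning: period is not a multiple of the quantum\n");
            printf("quanta=%llu period=%llu\n", quanta, period);    /* 3 and 10 */
            return 0;
    }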
7975 | + | ||
7976 | +static long pfair_activate_plugin(void) | ||
7977 | +{ | ||
7978 | + int cpu; | ||
7979 | + struct pfair_state* state; | ||
7980 | + | ||
7981 | + state = &__get_cpu_var(pfair_state); | ||
7982 | + pfair_time = current_quantum(state); | ||
7983 | + | ||
7984 | + TRACE("Activating PFAIR at q=%lu\n", pfair_time); | ||
7985 | + | ||
7986 | + for (cpu = 0; cpu < NR_CPUS; cpu++) { | ||
7987 | + state = &per_cpu(pfair_state, cpu); | ||
7988 | + state->cur_tick = pfair_time; | ||
7989 | + state->local_tick = pfair_time; | ||
7990 | + state->missed_quanta = 0; | ||
7991 | + state->offset = cpu_stagger_offset(cpu); | ||
7992 | + } | ||
7993 | + | ||
7994 | + return 0; | ||
7995 | +} | ||
7996 | + | ||
7997 | +/* Plugin object */ | ||
7998 | +static struct sched_plugin pfair_plugin __cacheline_aligned_in_smp = { | ||
7999 | + .plugin_name = "PFAIR", | ||
8000 | + .tick = pfair_tick, | ||
8001 | + .task_new = pfair_task_new, | ||
8002 | + .task_exit = pfair_task_exit, | ||
8003 | + .schedule = pfair_schedule, | ||
8004 | + .task_wake_up = pfair_task_wake_up, | ||
8005 | + .task_block = pfair_task_block, | ||
8006 | + .admit_task = pfair_admit_task, | ||
8007 | + .release_at = pfair_release_at, | ||
8008 | + .complete_job = complete_job, | ||
8009 | + .activate_plugin = pfair_activate_plugin, | ||
8010 | +}; | ||
8011 | + | ||
8012 | +static int __init init_pfair(void) | ||
8013 | +{ | ||
8014 | + int cpu, i; | ||
8015 | + struct pfair_state *state; | ||
8016 | + | ||
8017 | + /* initialize release queue */ | ||
8018 | + for (i = 0; i < PFAIR_MAX_PERIOD; i++) | ||
8019 | + heap_init(&release_queue[i]); | ||
8020 | + | ||
8021 | + /* initialize CPU state */ | ||
8022 | + for (cpu = 0; cpu < NR_CPUS; cpu++) { | ||
8023 | + state = &per_cpu(pfair_state, cpu); | ||
8024 | + state->cpu = cpu; | ||
8025 | + state->cur_tick = 0; | ||
8026 | + state->local_tick = 0; | ||
8027 | + state->linked = NULL; | ||
8028 | + state->local = NULL; | ||
8029 | + state->scheduled = NULL; | ||
8030 | + state->missed_quanta = 0; | ||
8031 | + state->offset = cpu_stagger_offset(cpu); | ||
8032 | + pstate[cpu] = state; | ||
8033 | + } | ||
8034 | + | ||
8035 | + rt_domain_init(&pfair, pfair_ready_order, NULL, NULL); | ||
8036 | + return register_sched_plugin(&pfair_plugin); | ||
8037 | +} | ||
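init_pfair() sets up PFAIR_MAX_PERIOD release heaps. pfair_add_release() itself is not part of this hunk, but the clamp in pfair_release_at() above (a release is pulled forward to at most pfair_time + PFAIR_MAX_PERIOD) is consistent with a circular table indexed by the release quantum modulo PFAIR_MAX_PERIOD, so that every pending release maps to a unique slot. A hypothetical sketch of that indexing (inferred, not taken from the patch):

    #include <stdio.h>

    #define DEMO_MAX_PERIOD 2000    /* stand-in for PFAIR_MAX_PERIOD */

    /* a release at most DEMO_MAX_PERIOD quanta in the future gets a
     * unique slot; without the clamp, two pending releases could collide */
    static unsigned long release_slot(unsigned long q)
    {
            return q % DEMO_MAX_PERIOD;
    }

    int main(void)
    {
            printf("q=2005 -> slot %lu\n", release_slot(2005));     /* 5 */
            return 0;
    }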
8038 | + | ||
8039 | +module_init(init_pfair); | ||
8040 | + | ||
8041 | diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c | ||
8042 | new file mode 100644 | ||
8043 | index 0000000..0be091e | ||
8044 | --- /dev/null | ||
8045 | +++ b/litmus/sched_plugin.c | ||
8046 | @@ -0,0 +1,199 @@ | ||
8047 | +/* sched_plugin.c -- core infrastructure for the scheduler plugin system | ||
8048 | + * | ||
8049 | + * This file includes the initialization of the plugin system, the no-op Linux | ||
8050 | + * scheduler plugin and some dummy functions. | ||
8051 | + */ | ||
8052 | + | ||
8053 | +#include <linux/list.h> | ||
8054 | +#include <linux/spinlock.h> | ||
8055 | + | ||
8056 | +#include <litmus/litmus.h> | ||
8057 | +#include <litmus/sched_plugin.h> | ||
8058 | + | ||
8059 | +#include <litmus/jobs.h> | ||
8060 | + | ||
8061 | +/************************************************************* | ||
8062 | + * Dummy plugin functions * | ||
8063 | + *************************************************************/ | ||
8064 | + | ||
8065 | +static void litmus_dummy_finish_switch(struct task_struct * prev) | ||
8066 | +{ | ||
8067 | +} | ||
8068 | + | ||
8069 | +static struct task_struct* litmus_dummy_schedule(struct task_struct * prev) | ||
8070 | +{ | ||
8071 | + return NULL; | ||
8072 | +} | ||
8073 | + | ||
8074 | +static void litmus_dummy_tick(struct task_struct* tsk) | ||
8075 | +{ | ||
8076 | +} | ||
8077 | + | ||
8078 | +static long litmus_dummy_admit_task(struct task_struct* tsk) | ||
8079 | +{ | ||
8080 | + printk(KERN_CRIT "LITMUS^RT: Linux plugin rejects %s/%d.\n", | ||
8081 | + tsk->comm, tsk->pid); | ||
8082 | + return -EINVAL; | ||
8083 | +} | ||
8084 | + | ||
8085 | +static void litmus_dummy_task_new(struct task_struct *t, int on_rq, int running) | ||
8086 | +{ | ||
8087 | +} | ||
8088 | + | ||
8089 | +static void litmus_dummy_task_wake_up(struct task_struct *task) | ||
8090 | +{ | ||
8091 | +} | ||
8092 | + | ||
8093 | +static void litmus_dummy_task_block(struct task_struct *task) | ||
8094 | +{ | ||
8095 | +} | ||
8096 | + | ||
8097 | +static void litmus_dummy_task_exit(struct task_struct *task) | ||
8098 | +{ | ||
8099 | +} | ||
8100 | + | ||
8101 | +static long litmus_dummy_complete_job(void) | ||
8102 | +{ | ||
8103 | + return -ENOSYS; | ||
8104 | +} | ||
8105 | + | ||
8106 | +static long litmus_dummy_activate_plugin(void) | ||
8107 | +{ | ||
8108 | + return 0; | ||
8109 | +} | ||
8110 | + | ||
8111 | +static long litmus_dummy_deactivate_plugin(void) | ||
8112 | +{ | ||
8113 | + return 0; | ||
8114 | +} | ||
8115 | + | ||
8116 | +#ifdef CONFIG_FMLP | ||
8117 | + | ||
8118 | +static long litmus_dummy_inherit_priority(struct pi_semaphore *sem, | ||
8119 | + struct task_struct *new_owner) | ||
8120 | +{ | ||
8121 | + return -ENOSYS; | ||
8122 | +} | ||
8123 | + | ||
8124 | +static long litmus_dummy_return_priority(struct pi_semaphore *sem) | ||
8125 | +{ | ||
8126 | + return -ENOSYS; | ||
8127 | +} | ||
8128 | + | ||
8129 | +static long litmus_dummy_pi_block(struct pi_semaphore *sem, | ||
8130 | + struct task_struct *new_waiter) | ||
8131 | +{ | ||
8132 | + return -ENOSYS; | ||
8133 | +} | ||
8134 | + | ||
8135 | +#endif | ||
8136 | + | ||
8137 | + | ||
8138 | +/* The default scheduler plugin. It doesn't do anything and lets Linux do its | ||
8139 | + * job. | ||
8140 | + */ | ||
8141 | +struct sched_plugin linux_sched_plugin = { | ||
8142 | + .plugin_name = "Linux", | ||
8143 | + .tick = litmus_dummy_tick, | ||
8144 | + .task_new = litmus_dummy_task_new, | ||
8145 | + .task_exit = litmus_dummy_task_exit, | ||
8146 | + .task_wake_up = litmus_dummy_task_wake_up, | ||
8147 | + .task_block = litmus_dummy_task_block, | ||
8148 | + .complete_job = litmus_dummy_complete_job, | ||
8149 | + .schedule = litmus_dummy_schedule, | ||
8150 | + .finish_switch = litmus_dummy_finish_switch, | ||
8151 | + .activate_plugin = litmus_dummy_activate_plugin, | ||
8152 | + .deactivate_plugin = litmus_dummy_deactivate_plugin, | ||
8153 | +#ifdef CONFIG_FMLP | ||
8154 | + .inherit_priority = litmus_dummy_inherit_priority, | ||
8155 | + .return_priority = litmus_dummy_return_priority, | ||
8156 | + .pi_block = litmus_dummy_pi_block, | ||
8157 | +#endif | ||
8158 | + .admit_task = litmus_dummy_admit_task | ||
8159 | +}; | ||
8160 | + | ||
8161 | +/* | ||
8162 | + * The reference to current plugin that is used to schedule tasks within | ||
8163 | + * the system. It stores references to actual function implementations. | ||
8164 | + * It should be initialized by calling "init_***_plugin()". | ||
8165 | + */ | ||
8166 | +struct sched_plugin *litmus = &linux_sched_plugin; | ||
8167 | + | ||
8168 | +/* the list of registered scheduling plugins */ | ||
8169 | +static LIST_HEAD(sched_plugins); | ||
8170 | +static DEFINE_SPINLOCK(sched_plugins_lock); | ||
8171 | + | ||
8172 | +#define CHECK(func) {\ | ||
8173 | + if (!plugin->func) \ | ||
8174 | + plugin->func = litmus_dummy_ ## func;} | ||
8175 | + | ||
8176 | +/* FIXME: get reference to module */ | ||
8177 | +int register_sched_plugin(struct sched_plugin* plugin) | ||
8178 | +{ | ||
8179 | + printk(KERN_INFO "Registering LITMUS^RT plugin %s.\n", | ||
8180 | + plugin->plugin_name); | ||
8181 | + | ||
8182 | + /* make sure we don't trip over null pointers later */ | ||
8183 | + CHECK(finish_switch); | ||
8184 | + CHECK(schedule); | ||
8185 | + CHECK(tick); | ||
8186 | + CHECK(task_wake_up); | ||
8187 | + CHECK(task_exit); | ||
8188 | + CHECK(task_block); | ||
8189 | + CHECK(task_new); | ||
8190 | + CHECK(complete_job); | ||
8191 | + CHECK(activate_plugin); | ||
8192 | + CHECK(deactivate_plugin); | ||
8193 | +#ifdef CONFIG_FMLP | ||
8194 | + CHECK(inherit_priority); | ||
8195 | + CHECK(return_priority); | ||
8196 | + CHECK(pi_block); | ||
8197 | +#endif | ||
8198 | + CHECK(admit_task); | ||
8199 | + | ||
8200 | + if (!plugin->release_at) | ||
8201 | + plugin->release_at = release_at; | ||
8202 | + | ||
8203 | + spin_lock(&sched_plugins_lock); | ||
8204 | + list_add(&plugin->list, &sched_plugins); | ||
8205 | + spin_unlock(&sched_plugins_lock); | ||
8206 | + | ||
8207 | + return 0; | ||
8208 | +} | ||
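The CHECK() macro above uses token pasting so that CHECK(tick) expands to filling a NULL plugin->tick with litmus_dummy_tick, and so on for each callback; after registration, the core can invoke any plugin operation without NULL-pointer tests on hot paths. A standalone illustration of this defaulting pattern (struct and names are illustrative):

    #include <stdio.h>

    struct ops { void (*tick)(void); };

    static void dummy_tick(void) { }

    #define FILL_DEFAULT(p, func) do {              \
            if (!(p)->func)                         \
                    (p)->func = dummy_ ## func;     \
    } while (0)

    int main(void)
    {
            struct ops o = { .tick = NULL };
            FILL_DEFAULT(&o, tick);
            o.tick();       /* safe: no NULL check needed at the call site */
            printf("tick defaulted: %s\n",
                   o.tick == dummy_tick ? "yes" : "no");
            return 0;
    }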
8209 | + | ||
8210 | + | ||
8211 | +/* FIXME: reference counting, etc. */ | ||
8212 | +struct sched_plugin* find_sched_plugin(const char* name) | ||
8213 | +{ | ||
8214 | + struct list_head *pos; | ||
8215 | + struct sched_plugin *plugin; | ||
8216 | + | ||
8217 | + spin_lock(&sched_plugins_lock); | ||
8218 | + list_for_each(pos, &sched_plugins) { | ||
8219 | + plugin = list_entry(pos, struct sched_plugin, list); | ||
8220 | + if (!strcmp(plugin->plugin_name, name)) | ||
8221 | + goto out_unlock; | ||
8222 | + } | ||
8223 | + plugin = NULL; | ||
8224 | + | ||
8225 | +out_unlock: | ||
8226 | + spin_unlock(&sched_plugins_lock); | ||
8227 | + return plugin; | ||
8228 | +} | ||
8229 | + | ||
8230 | +int print_sched_plugins(char* buf, int max) | ||
8231 | +{ | ||
8232 | + int count = 0; | ||
8233 | + struct list_head *pos; | ||
8234 | + struct sched_plugin *plugin; | ||
8235 | + | ||
8236 | + spin_lock(&sched_plugins_lock); | ||
8237 | + list_for_each(pos, &sched_plugins) { | ||
8238 | + plugin = list_entry(pos, struct sched_plugin, list); | ||
8239 | + count += snprintf(buf + count, max - count, "%s\n", plugin->plugin_name); | ||
8240 | + if (max - count <= 0) | ||
8241 | + break; | ||
8242 | + } | ||
8243 | + spin_unlock(&sched_plugins_lock); | ||
8244 | + return count; | ||
8245 | +} | ||
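print_sched_plugins() relies on a C99 snprintf() detail: on truncation, snprintf() returns the number of bytes it would have written, so count can exceed max after a call, and the max - count <= 0 test must fire before count is reused as a size argument. A small demonstration of that return-value convention:

    #include <stdio.h>

    int main(void)
    {
            char buf[8];
            int count = 0, max = (int) sizeof(buf);

            count += snprintf(buf + count, max - count, "%s\n", "PFAIR");
            if (max - count > 0)    /* same guard as the loop above */
                    count += snprintf(buf + count, max - count, "%s\n",
                                      "PSN-EDF");
            if (count >= max)
                    printf("truncated: count=%d, max=%d\n", count, max);
            printf("buffer holds \"%s\"\n", buf);
            return 0;
    }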
8246 | diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c | ||
8247 | new file mode 100644 | ||
8248 | index 0000000..9a2bdfc | ||
8249 | --- /dev/null | ||
8250 | +++ b/litmus/sched_psn_edf.c | ||
8251 | @@ -0,0 +1,454 @@ | ||
8252 | + | ||
8253 | +/* | ||
8254 | + * kernel/sched_psn_edf.c | ||
8255 | + * | ||
8256 | + * Implementation of the PSN-EDF scheduler plugin. | ||
8257 | + * Based on kern/sched_part_edf.c and kern/sched_gsn_edf.c. | ||
8258 | + * | ||
8259 | + * Suspensions and non-preemptable sections are supported. | ||
8260 | + * Priority inheritance is not supported. | ||
8261 | + */ | ||
8262 | + | ||
8263 | +#include <linux/percpu.h> | ||
8264 | +#include <linux/sched.h> | ||
8265 | +#include <linux/list.h> | ||
8266 | +#include <linux/spinlock.h> | ||
8267 | + | ||
8268 | +#include <linux/module.h> | ||
8269 | + | ||
8270 | +#include <litmus/litmus.h> | ||
8271 | +#include <litmus/jobs.h> | ||
8272 | +#include <litmus/sched_plugin.h> | ||
8273 | +#include <litmus/edf_common.h> | ||
8274 | + | ||
8275 | + | ||
8276 | +typedef struct { | ||
8277 | + rt_domain_t domain; | ||
8278 | + int cpu; | ||
8279 | + struct task_struct* scheduled; /* only RT tasks */ | ||
8280 | + | ||
8281 | +/* scheduling lock: | ||
8282 | + * protects the domain and | ||
8283 | + * serializes scheduling decisions | ||
8284 | + */ | ||
8285 | +#define slock domain.ready_lock | ||
8286 | + | ||
8287 | +} psnedf_domain_t; | ||
8288 | + | ||
8289 | +DEFINE_PER_CPU(psnedf_domain_t, psnedf_domains); | ||
8290 | + | ||
8291 | +#define local_edf (&__get_cpu_var(psnedf_domains).domain) | ||
8292 | +#define local_pedf (&__get_cpu_var(psnedf_domains)) | ||
8293 | +#define remote_edf(cpu) (&per_cpu(psnedf_domains, cpu).domain) | ||
8294 | +#define remote_pedf(cpu) (&per_cpu(psnedf_domains, cpu)) | ||
8295 | +#define task_edf(task) remote_edf(get_partition(task)) | ||
8296 | +#define task_pedf(task) remote_pedf(get_partition(task)) | ||
8297 | + | ||
8298 | + | ||
8299 | +static void psnedf_domain_init(psnedf_domain_t* pedf, | ||
8300 | + check_resched_needed_t check, | ||
8301 | + release_jobs_t release, | ||
8302 | + int cpu) | ||
8303 | +{ | ||
8304 | + edf_domain_init(&pedf->domain, check, release); | ||
8305 | + pedf->cpu = cpu; | ||
8306 | + pedf->scheduled = NULL; | ||
8307 | +} | ||
8308 | + | ||
8309 | +static void requeue(struct task_struct* t, rt_domain_t *edf) | ||
8310 | +{ | ||
8311 | + if (t->state != TASK_RUNNING) | ||
8312 | + TRACE_TASK(t, "requeue: !TASK_RUNNING\n"); | ||
8313 | + | ||
8314 | + set_rt_flags(t, RT_F_RUNNING); | ||
8315 | + if (is_released(t, litmus_clock())) | ||
8316 | + __add_ready(edf, t); | ||
8317 | + else | ||
8318 | + add_release(edf, t); /* it has got to wait */ | ||
8319 | +} | ||
8320 | + | ||
8321 | +/* we assume the lock is being held */ | ||
8322 | +static void preempt(psnedf_domain_t *pedf) | ||
8323 | +{ | ||
8324 | + if (smp_processor_id() == pedf->cpu) { | ||
8325 | + if (pedf->scheduled && is_np(pedf->scheduled)) | ||
8326 | + request_exit_np(pedf->scheduled); | ||
8327 | + else | ||
8328 | + set_tsk_need_resched(current); | ||
8329 | + } else | ||
8330 | +		/* in case it is a remote CPU, we have to defer the | ||
8331 | +		 * decision to the remote CPU | ||
8332 | + */ | ||
8333 | + smp_send_reschedule(pedf->cpu); | ||
8334 | +} | ||
8335 | + | ||
8336 | +/* This check is trivial in partitioned systems as we only have to consider | ||
8337 | + * the CPU of the partition. | ||
8338 | + */ | ||
8339 | +static int psnedf_check_resched(rt_domain_t *edf) | ||
8340 | +{ | ||
8341 | + psnedf_domain_t *pedf = container_of(edf, psnedf_domain_t, domain); | ||
8342 | + int ret = 0; | ||
8343 | + | ||
8344 | + /* because this is a callback from rt_domain_t we already hold | ||
8345 | + * the necessary lock for the ready queue | ||
8346 | + */ | ||
8347 | + if (edf_preemption_needed(edf, pedf->scheduled)) { | ||
8348 | + preempt(pedf); | ||
8349 | + ret = 1; | ||
8350 | + } | ||
8351 | + return ret; | ||
8352 | +} | ||
8353 | + | ||
8354 | +static void psnedf_tick(struct task_struct *t) | ||
8355 | +{ | ||
8356 | + psnedf_domain_t *pedf = local_pedf; | ||
8357 | + | ||
8358 | + /* Check for inconsistency. We don't need the lock for this since | ||
8359 | + * ->scheduled is only changed in schedule, which obviously is not | ||
8360 | + * executing in parallel on this CPU | ||
8361 | + */ | ||
8362 | + BUG_ON(is_realtime(t) && t != pedf->scheduled); | ||
8363 | + | ||
8364 | + if (is_realtime(t) && budget_exhausted(t)) { | ||
8365 | + if (!is_np(t)) | ||
8366 | + set_tsk_need_resched(t); | ||
8367 | + else { | ||
8368 | + TRACE("psnedf_scheduler_tick: " | ||
8369 | + "%d is non-preemptable, " | ||
8370 | + "preemption delayed.\n", t->pid); | ||
8371 | + request_exit_np(t); | ||
8372 | + } | ||
8373 | + } | ||
8374 | +} | ||
8375 | + | ||
8376 | +static void job_completion(struct task_struct* t) | ||
8377 | +{ | ||
8378 | + TRACE_TASK(t, "job_completion().\n"); | ||
8379 | + set_rt_flags(t, RT_F_SLEEP); | ||
8380 | + prepare_for_next_period(t); | ||
8381 | +} | ||
8382 | + | ||
8383 | +static struct task_struct* psnedf_schedule(struct task_struct * prev) | ||
8384 | +{ | ||
8385 | + psnedf_domain_t* pedf = local_pedf; | ||
8386 | + rt_domain_t* edf = &pedf->domain; | ||
8387 | + struct task_struct* next; | ||
8388 | + | ||
8389 | + int out_of_time, sleep, preempt, | ||
8390 | + np, exists, blocks, resched; | ||
8391 | + | ||
8392 | + spin_lock(&pedf->slock); | ||
8393 | + | ||
8394 | + /* sanity checking */ | ||
8395 | + BUG_ON(pedf->scheduled && pedf->scheduled != prev); | ||
8396 | + BUG_ON(pedf->scheduled && !is_realtime(prev)); | ||
8397 | + | ||
8398 | + /* (0) Determine state */ | ||
8399 | + exists = pedf->scheduled != NULL; | ||
8400 | + blocks = exists && !is_running(pedf->scheduled); | ||
8401 | + out_of_time = exists && budget_exhausted(pedf->scheduled); | ||
8402 | + np = exists && is_np(pedf->scheduled); | ||
8403 | + sleep = exists && get_rt_flags(pedf->scheduled) == RT_F_SLEEP; | ||
8404 | + preempt = edf_preemption_needed(edf, prev); | ||
8405 | + | ||
8406 | + /* If we need to preempt do so. | ||
8407 | + * The following checks set resched to 1 in case of special | ||
8408 | + * circumstances. | ||
8409 | + */ | ||
8410 | + resched = preempt; | ||
8411 | + | ||
8412 | + /* If a task blocks we have no choice but to reschedule. | ||
8413 | + */ | ||
8414 | + if (blocks) | ||
8415 | + resched = 1; | ||
8416 | + | ||
8417 | + /* Request a sys_exit_np() call if we would like to preempt but cannot. | ||
8418 | + * Multiple calls to request_exit_np() don't hurt. | ||
8419 | + */ | ||
8420 | + if (np && (out_of_time || preempt || sleep)) | ||
8421 | + request_exit_np(pedf->scheduled); | ||
8422 | + | ||
8423 | + /* Any task that is preemptable and either exhausts its execution | ||
8424 | + * budget or wants to sleep completes. We may have to reschedule after | ||
8425 | + * this. | ||
8426 | + */ | ||
8427 | + if (!np && (out_of_time || sleep) && !blocks) { | ||
8428 | + job_completion(pedf->scheduled); | ||
8429 | + resched = 1; | ||
8430 | + } | ||
8431 | + | ||
8432 | + /* The final scheduling decision. Do we need to switch for some reason? | ||
8433 | + * Switch if we are in RT mode and have no task or if we need to | ||
8434 | + * resched. | ||
8435 | + */ | ||
8436 | + next = NULL; | ||
8437 | + if ((!np || blocks) && (resched || !exists)) { | ||
8438 | + /* Take care of a previously scheduled | ||
8439 | + * job by taking it out of the Linux runqueue. | ||
8440 | + */ | ||
8441 | + if (pedf->scheduled && !blocks) | ||
8442 | + requeue(pedf->scheduled, edf); | ||
8443 | + next = __take_ready(edf); | ||
8444 | + } else | ||
8445 | + /* Only override Linux scheduler if we have a real-time task | ||
8446 | + * scheduled that needs to continue. | ||
8447 | + */ | ||
8448 | + if (exists) | ||
8449 | + next = prev; | ||
8450 | + | ||
8451 | + if (next) { | ||
8452 | + TRACE_TASK(next, " == next\n"); | ||
8453 | + set_rt_flags(next, RT_F_RUNNING); | ||
8454 | + } else { | ||
8455 | + TRACE("becoming idle.\n"); | ||
8456 | + } | ||
8457 | + | ||
8458 | + pedf->scheduled = next; | ||
8459 | + spin_unlock(&pedf->slock); | ||
8460 | + | ||
8461 | + return next; | ||
8462 | +} | ||
8463 | + | ||
8464 | + | ||
8465 | +/* Prepare a task for running in RT mode | ||
8466 | + */ | ||
8467 | +static void psnedf_task_new(struct task_struct * t, int on_rq, int running) | ||
8468 | +{ | ||
8469 | + rt_domain_t* edf = task_edf(t); | ||
8470 | + psnedf_domain_t* pedf = task_pedf(t); | ||
8471 | + unsigned long flags; | ||
8472 | + | ||
8473 | + TRACE_TASK(t, "new\n"); | ||
8474 | + | ||
8475 | + /* setup job parameters */ | ||
8476 | + release_at(t, litmus_clock()); | ||
8477 | + | ||
8478 | +	/* The task should be running in the queue; otherwise the signal | ||
8479 | +	 * code will try to wake it up, with fatal consequences. | ||
8480 | + */ | ||
8481 | + spin_lock_irqsave(&pedf->slock, flags); | ||
8482 | + if (running) { | ||
8483 | + /* there shouldn't be anything else running at the time */ | ||
8484 | + BUG_ON(pedf->scheduled); | ||
8485 | + pedf->scheduled = t; | ||
8486 | + } else { | ||
8487 | + requeue(t, edf); | ||
8488 | + /* maybe we have to reschedule */ | ||
8489 | + preempt(pedf); | ||
8490 | + } | ||
8491 | + spin_unlock_irqrestore(&pedf->slock, flags); | ||
8492 | +} | ||
8493 | + | ||
8494 | +static void psnedf_task_wake_up(struct task_struct *task) | ||
8495 | +{ | ||
8496 | + unsigned long flags; | ||
8497 | + psnedf_domain_t* pedf = task_pedf(task); | ||
8498 | + rt_domain_t* edf = task_edf(task); | ||
8499 | + lt_t now; | ||
8500 | + | ||
8501 | + TRACE_TASK(task, "wake up\n"); | ||
8502 | + spin_lock_irqsave(&pedf->slock, flags); | ||
8503 | + BUG_ON(is_queued(task)); | ||
8504 | +	/* We need to take suspensions due to semaphores into | ||
8505 | + * account! If a job resumes after being suspended due to acquiring | ||
8506 | + * a semaphore, it should never be treated as a new job release. | ||
8507 | + * | ||
8508 | + * FIXME: This should be done in some more predictable and userspace-controlled way. | ||
8509 | + */ | ||
8510 | + now = litmus_clock(); | ||
8511 | + if (is_tardy(task, now) && | ||
8512 | + get_rt_flags(task) != RT_F_EXIT_SEM) { | ||
8513 | + /* new sporadic release */ | ||
8514 | + release_at(task, now); | ||
8515 | + sched_trace_task_release(task); | ||
8516 | + } | ||
8517 | + requeue(task, edf); | ||
8518 | + spin_unlock_irqrestore(&pedf->slock, flags); | ||
8519 | + TRACE_TASK(task, "wake up done\n"); | ||
8520 | +} | ||
8521 | + | ||
8522 | +static void psnedf_task_block(struct task_struct *t) | ||
8523 | +{ | ||
8524 | + /* only running tasks can block, thus t is in no queue */ | ||
8525 | + TRACE_TASK(t, "block, state=%d\n", t->state); | ||
8526 | + BUG_ON(!is_realtime(t)); | ||
8527 | + BUG_ON(is_queued(t)); | ||
8528 | +} | ||
8529 | + | ||
8530 | +static void psnedf_task_exit(struct task_struct * t) | ||
8531 | +{ | ||
8532 | + unsigned long flags; | ||
8533 | + psnedf_domain_t* pedf = task_pedf(t); | ||
8534 | + rt_domain_t* edf; | ||
8535 | + | ||
8536 | + spin_lock_irqsave(&pedf->slock, flags); | ||
8537 | + if (is_queued(t)) { | ||
8538 | + /* dequeue */ | ||
8539 | + edf = task_edf(t); | ||
8540 | + remove(edf, t); | ||
8541 | + } | ||
8542 | + if (pedf->scheduled == t) | ||
8543 | + pedf->scheduled = NULL; | ||
8544 | + preempt(pedf); | ||
8545 | + spin_unlock_irqrestore(&pedf->slock, flags); | ||
8546 | +} | ||
8547 | + | ||
8548 | +#ifdef CONFIG_FMLP | ||
8549 | +static long psnedf_pi_block(struct pi_semaphore *sem, | ||
8550 | + struct task_struct *new_waiter) | ||
8551 | +{ | ||
8552 | + psnedf_domain_t* pedf; | ||
8553 | + rt_domain_t* edf; | ||
8554 | + struct task_struct* t; | ||
8555 | + int cpu = get_partition(new_waiter); | ||
8556 | + | ||
8557 | + BUG_ON(!new_waiter); | ||
8558 | + | ||
8559 | + if (edf_higher_prio(new_waiter, sem->hp.cpu_task[cpu])) { | ||
8560 | + TRACE_TASK(new_waiter, " boosts priority\n"); | ||
8561 | + pedf = task_pedf(new_waiter); | ||
8562 | + edf = task_edf(new_waiter); | ||
8563 | + | ||
8564 | + /* interrupts already disabled */ | ||
8565 | + spin_lock(&pedf->slock); | ||
8566 | + | ||
8567 | + /* store new highest-priority task */ | ||
8568 | + sem->hp.cpu_task[cpu] = new_waiter; | ||
8569 | + if (sem->holder && | ||
8570 | + get_partition(sem->holder) == get_partition(new_waiter)) { | ||
8571 | + /* let holder inherit */ | ||
8572 | + sem->holder->rt_param.inh_task = new_waiter; | ||
8573 | + t = sem->holder; | ||
8574 | + if (is_queued(t)) { | ||
8575 | + /* queued in domain*/ | ||
8576 | + remove(edf, t); | ||
8577 | +				/* re-add to make the priority change take effect */ | ||
8578 | + /* FIXME: this looks outdated */ | ||
8579 | + if (is_released(t, litmus_clock())) | ||
8580 | + __add_ready(edf, t); | ||
8581 | + else | ||
8582 | + add_release(edf, t); | ||
8583 | + } | ||
8584 | + } | ||
8585 | + | ||
8586 | + /* check if we need to reschedule */ | ||
8587 | + if (edf_preemption_needed(edf, current)) | ||
8588 | + preempt(pedf); | ||
8589 | + | ||
8590 | + spin_unlock(&pedf->slock); | ||
8591 | + } | ||
8592 | + | ||
8593 | + return 0; | ||
8594 | +} | ||
8595 | + | ||
8596 | +static long psnedf_inherit_priority(struct pi_semaphore *sem, | ||
8597 | + struct task_struct *new_owner) | ||
8598 | +{ | ||
8599 | + int cpu = get_partition(new_owner); | ||
8600 | + | ||
8601 | + new_owner->rt_param.inh_task = sem->hp.cpu_task[cpu]; | ||
8602 | + if (sem->hp.cpu_task[cpu] && new_owner != sem->hp.cpu_task[cpu]) { | ||
8603 | + TRACE_TASK(new_owner, | ||
8604 | + "inherited priority from %s/%d\n", | ||
8605 | + sem->hp.cpu_task[cpu]->comm, | ||
8606 | + sem->hp.cpu_task[cpu]->pid); | ||
8607 | + } else | ||
8608 | + TRACE_TASK(new_owner, | ||
8609 | + "cannot inherit priority: " | ||
8610 | + "no higher priority job waits on this CPU!\n"); | ||
8611 | + /* make new owner non-preemptable as required by FMLP under | ||
8612 | + * PSN-EDF. | ||
8613 | + */ | ||
8614 | + make_np(new_owner); | ||
8615 | + return 0; | ||
8616 | +} | ||
8617 | + | ||
8618 | + | ||
8619 | +/* This function is called on a semaphore release, and assumes that | ||
8620 | + * the current task is also the semaphore holder. | ||
8621 | + */ | ||
8622 | +static long psnedf_return_priority(struct pi_semaphore *sem) | ||
8623 | +{ | ||
8624 | + struct task_struct* t = current; | ||
8625 | + psnedf_domain_t* pedf = task_pedf(t); | ||
8626 | + rt_domain_t* edf = task_edf(t); | ||
8627 | + int ret = 0; | ||
8628 | + int cpu = get_partition(current); | ||
8629 | + | ||
8630 | + | ||
8631 | + /* Find new highest-priority semaphore task | ||
8632 | + * if holder task is the current hp.cpu_task[cpu]. | ||
8633 | + * | ||
8634 | + * Calling function holds sem->wait.lock. | ||
8635 | + */ | ||
8636 | + if (t == sem->hp.cpu_task[cpu]) | ||
8637 | + edf_set_hp_cpu_task(sem, cpu); | ||
8638 | + | ||
8639 | + take_np(t); | ||
8640 | + if (current->rt_param.inh_task) { | ||
8641 | + TRACE_CUR("return priority of %s/%d\n", | ||
8642 | + current->rt_param.inh_task->comm, | ||
8643 | + current->rt_param.inh_task->pid); | ||
8644 | + spin_lock(&pedf->slock); | ||
8645 | + | ||
8646 | + /* Reset inh_task to NULL. */ | ||
8647 | + current->rt_param.inh_task = NULL; | ||
8648 | + | ||
8649 | + /* check if we need to reschedule */ | ||
8650 | + if (edf_preemption_needed(edf, current)) | ||
8651 | + preempt(pedf); | ||
8652 | + | ||
8653 | + spin_unlock(&pedf->slock); | ||
8654 | + } else | ||
8655 | + TRACE_CUR(" no priority to return %p\n", sem); | ||
8656 | + | ||
8657 | + return ret; | ||
8658 | +} | ||
8659 | + | ||
8660 | +#endif | ||
8661 | + | ||
8662 | +static long psnedf_admit_task(struct task_struct* tsk) | ||
8663 | +{ | ||
8664 | + return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL; | ||
8665 | +} | ||
8666 | + | ||
8667 | +/* Plugin object */ | ||
8668 | +static struct sched_plugin psn_edf_plugin __cacheline_aligned_in_smp = { | ||
8669 | + .plugin_name = "PSN-EDF", | ||
8670 | +#ifdef CONFIG_SRP | ||
8671 | + .srp_active = 1, | ||
8672 | +#endif | ||
8673 | + .tick = psnedf_tick, | ||
8674 | + .task_new = psnedf_task_new, | ||
8675 | + .complete_job = complete_job, | ||
8676 | + .task_exit = psnedf_task_exit, | ||
8677 | + .schedule = psnedf_schedule, | ||
8678 | + .task_wake_up = psnedf_task_wake_up, | ||
8679 | + .task_block = psnedf_task_block, | ||
8680 | +#ifdef CONFIG_FMLP | ||
8681 | + .fmlp_active = 1, | ||
8682 | + .pi_block = psnedf_pi_block, | ||
8683 | + .inherit_priority = psnedf_inherit_priority, | ||
8684 | + .return_priority = psnedf_return_priority, | ||
8685 | +#endif | ||
8686 | + .admit_task = psnedf_admit_task | ||
8687 | +}; | ||
8688 | + | ||
8689 | + | ||
8690 | +static int __init init_psn_edf(void) | ||
8691 | +{ | ||
8692 | + int i; | ||
8693 | + | ||
8694 | + for (i = 0; i < NR_CPUS; i++) | ||
8695 | + { | ||
8696 | + psnedf_domain_init(remote_pedf(i), | ||
8697 | + psnedf_check_resched, | ||
8698 | + NULL, i); | ||
8699 | + } | ||
8700 | + return register_sched_plugin(&psn_edf_plugin); | ||
8701 | +} | ||
8702 | + | ||
8703 | + | ||
8704 | + | ||
8705 | +module_init(init_psn_edf); | ||
8706 | diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c | ||
8707 | new file mode 100644 | ||
8708 | index 0000000..913d999 | ||
8709 | --- /dev/null | ||
8710 | +++ b/litmus/sched_task_trace.c | ||
8711 | @@ -0,0 +1,192 @@ | ||
8712 | +/* sched_task_trace.c -- record scheduling events to a byte stream | ||
8713 | + * | ||
8714 | + */ | ||
8715 | + | ||
8716 | +#include <linux/module.h> | ||
8717 | +#include <linux/sched.h> | ||
8718 | +#include <linux/percpu.h> | ||
8719 | + | ||
8720 | +#include <litmus/ftdev.h> | ||
8721 | +#include <litmus/litmus.h> | ||
8722 | + | ||
8723 | +#include <litmus/sched_trace.h> | ||
8724 | +#include <litmus/feather_trace.h> | ||
8725 | +#include <litmus/ftdev.h> | ||
8726 | + | ||
8727 | +#define FT_TASK_TRACE_MAJOR 253 | ||
8728 | +#define NO_EVENTS 4096 /* this is a buffer of 12 4k pages per CPU */ | ||
8729 | + | ||
8730 | +#define now() litmus_clock() | ||
8731 | + | ||
8732 | +struct local_buffer { | ||
8733 | + struct st_event_record record[NO_EVENTS]; | ||
8734 | + char flag[NO_EVENTS]; | ||
8735 | + struct ft_buffer ftbuf; | ||
8736 | +}; | ||
8737 | + | ||
8738 | +DEFINE_PER_CPU(struct local_buffer, st_event_buffer); | ||
8739 | + | ||
8740 | +static struct ftdev st_dev; | ||
8741 | + | ||
8742 | +static int st_dev_can_open(struct ftdev *dev, unsigned int cpu) | ||
8743 | +{ | ||
8744 | + return cpu_online(cpu) ? 0 : -ENODEV; | ||
8745 | +} | ||
8746 | + | ||
8747 | +static int __init init_sched_task_trace(void) | ||
8748 | +{ | ||
8749 | + struct local_buffer* buf; | ||
8750 | + int i, ok = 0; | ||
8751 | + ftdev_init(&st_dev, THIS_MODULE); | ||
8752 | + for (i = 0; i < NR_CPUS; i++) { | ||
8753 | + buf = &per_cpu(st_event_buffer, i); | ||
8754 | + ok += init_ft_buffer(&buf->ftbuf, NO_EVENTS, | ||
8755 | + sizeof(struct st_event_record), | ||
8756 | + buf->flag, | ||
8757 | + buf->record); | ||
8758 | + st_dev.minor[i].buf = &buf->ftbuf; | ||
8759 | + } | ||
8760 | + if (ok == NR_CPUS) { | ||
8761 | + st_dev.minor_cnt = NR_CPUS; | ||
8762 | + st_dev.can_open = st_dev_can_open; | ||
8763 | + return register_ftdev(&st_dev, "sched_trace", FT_TASK_TRACE_MAJOR); | ||
8764 | + } else | ||
8765 | + return -EINVAL; | ||
8766 | +} | ||
8767 | + | ||
8768 | +module_init(init_sched_task_trace); | ||
8769 | + | ||
8770 | + | ||
8771 | +static inline struct st_event_record* get_record(u8 type, struct task_struct* t) | ||
8772 | +{ | ||
8773 | + struct st_event_record* rec; | ||
8774 | + struct local_buffer* buf; | ||
8775 | + | ||
8776 | + buf = &get_cpu_var(st_event_buffer); | ||
8777 | + if (ft_buffer_start_write(&buf->ftbuf, (void**) &rec)) { | ||
8778 | + rec->hdr.type = type; | ||
8779 | + rec->hdr.cpu = smp_processor_id(); | ||
8780 | + rec->hdr.pid = t ? t->pid : 0; | ||
8781 | + rec->hdr.job = t ? t->rt_param.job_params.job_no : 0; | ||
8782 | + } else | ||
8783 | + put_cpu_var(st_event_buffer); | ||
8784 | + /* rec will be NULL if it failed */ | ||
8785 | + return rec; | ||
8786 | +} | ||
8787 | + | ||
8788 | +static inline void put_record(struct st_event_record* rec) | ||
8789 | +{ | ||
8790 | + struct local_buffer* buf; | ||
8791 | + buf = &__get_cpu_var(st_event_buffer); | ||
8792 | + ft_buffer_finish_write(&buf->ftbuf, rec); | ||
8793 | + put_cpu_var(st_event_buffer); | ||
8794 | +} | ||
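Note the asymmetric pairing here: get_record() enters with get_cpu_var(), which disables preemption, and a successful call returns with preemption still disabled; only the failure path releases immediately, and the matching put_cpu_var() lives in put_record(). A rough userspace analogue of such an acquire-on-success API, with a mutex standing in for preemption disabling (all names hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
    static int slot, slot_busy;

    static int *get_slot(void)
    {
            pthread_mutex_lock(&slot_lock);
            if (!slot_busy) {
                    slot_busy = 1;
                    return &slot;   /* success: lock intentionally kept held */
            }
            pthread_mutex_unlock(&slot_lock);       /* failure: release now */
            return NULL;
    }

    static void put_slot(int *s)
    {
            (void) s;               /* nothing to finalize in this sketch */
            slot_busy = 0;
            pthread_mutex_unlock(&slot_lock);       /* the matching release */
    }

    int main(void)
    {
            int *s = get_slot();
            if (s) {
                    *s = 42;
                    printf("wrote %d\n", *s);
                    put_slot(s);
            }
            return 0;
    }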
8795 | + | ||
8796 | +feather_callback void do_sched_trace_task_name(unsigned long id, unsigned long _task) | ||
8797 | +{ | ||
8798 | + struct task_struct *t = (struct task_struct*) _task; | ||
8799 | + struct st_event_record* rec = get_record(ST_NAME, t); | ||
8800 | + int i; | ||
8801 | + if (rec) { | ||
8802 | + for (i = 0; i < min(TASK_COMM_LEN, ST_NAME_LEN); i++) | ||
8803 | + rec->data.name.cmd[i] = t->comm[i]; | ||
8804 | + put_record(rec); | ||
8805 | + } | ||
8806 | +} | ||
8807 | + | ||
8808 | +feather_callback void do_sched_trace_task_param(unsigned long id, unsigned long _task) | ||
8809 | +{ | ||
8810 | + struct task_struct *t = (struct task_struct*) _task; | ||
8811 | + struct st_event_record* rec = get_record(ST_PARAM, t); | ||
8812 | + if (rec) { | ||
8813 | + rec->data.param.wcet = get_exec_cost(t); | ||
8814 | + rec->data.param.period = get_rt_period(t); | ||
8815 | + rec->data.param.phase = get_rt_phase(t); | ||
8816 | + rec->data.param.partition = get_partition(t); | ||
8817 | + put_record(rec); | ||
8818 | + } | ||
8819 | +} | ||
8820 | + | ||
8821 | +feather_callback void do_sched_trace_task_release(unsigned long id, unsigned long _task) | ||
8822 | +{ | ||
8823 | + struct task_struct *t = (struct task_struct*) _task; | ||
8824 | + struct st_event_record* rec = get_record(ST_RELEASE, t); | ||
8825 | + if (rec) { | ||
8826 | + rec->data.release.release = get_release(t); | ||
8827 | + rec->data.release.deadline = get_deadline(t); | ||
8828 | + put_record(rec); | ||
8829 | + } | ||
8830 | +} | ||
8831 | + | ||
8832 | +/* skipped: st_assigned_data, we don't use it atm */ | ||
8833 | + | ||
8834 | +feather_callback void do_sched_trace_task_switch_to(unsigned long id, unsigned long _task) | ||
8835 | +{ | ||
8836 | + struct task_struct *t = (struct task_struct*) _task; | ||
8837 | + struct st_event_record* rec; | ||
8838 | + if (is_realtime(t)) { | ||
8839 | + rec = get_record(ST_SWITCH_TO, t); | ||
8840 | + if (rec) { | ||
8841 | + rec->data.switch_to.when = now(); | ||
8842 | + rec->data.switch_to.exec_time = get_exec_time(t); | ||
8843 | + put_record(rec); | ||
8844 | + } | ||
8845 | + } | ||
8846 | +} | ||
8847 | + | ||
8848 | +feather_callback void do_sched_trace_task_switch_away(unsigned long id, unsigned long _task) | ||
8849 | +{ | ||
8850 | + struct task_struct *t = (struct task_struct*) _task; | ||
8851 | + struct st_event_record* rec; | ||
8852 | + if (is_realtime(t)) { | ||
8853 | + rec = get_record(ST_SWITCH_AWAY, t); | ||
8854 | + if (rec) { | ||
8855 | + rec->data.switch_away.when = now(); | ||
8856 | + rec->data.switch_away.exec_time = get_exec_time(t); | ||
8857 | + put_record(rec); | ||
8858 | + } | ||
8859 | + } | ||
8860 | +} | ||
8861 | + | ||
8862 | +feather_callback void do_sched_trace_task_completion(unsigned long id, unsigned long _task, | ||
8863 | + unsigned long forced) | ||
8864 | +{ | ||
8865 | + struct task_struct *t = (struct task_struct*) _task; | ||
8866 | + struct st_event_record* rec = get_record(ST_COMPLETION, t); | ||
8867 | + if (rec) { | ||
8868 | + rec->data.completion.when = now(); | ||
8869 | + rec->data.completion.forced = forced; | ||
8870 | + put_record(rec); | ||
8871 | + } | ||
8872 | +} | ||
8873 | + | ||
8874 | +feather_callback void do_sched_trace_task_block(unsigned long id, unsigned long _task) | ||
8875 | +{ | ||
8876 | + struct task_struct *t = (struct task_struct*) _task; | ||
8877 | + struct st_event_record* rec = get_record(ST_BLOCK, t); | ||
8878 | + if (rec) { | ||
8879 | + rec->data.block.when = now(); | ||
8880 | + put_record(rec); | ||
8881 | + } | ||
8882 | +} | ||
8883 | + | ||
8884 | +feather_callback void do_sched_trace_task_resume(unsigned long id, unsigned long _task) | ||
8885 | +{ | ||
8886 | + struct task_struct *t = (struct task_struct*) _task; | ||
8887 | + struct st_event_record* rec = get_record(ST_RESUME, t); | ||
8888 | + if (rec) { | ||
8889 | + rec->data.resume.when = now(); | ||
8890 | + put_record(rec); | ||
8891 | + } | ||
8892 | +} | ||
8893 | + | ||
8894 | +feather_callback void do_sched_trace_sys_release(unsigned long id, unsigned long _start) | ||
8895 | +{ | ||
8896 | + lt_t *start = (lt_t*) _start; | ||
8897 | + struct st_event_record* rec = get_record(ST_SYS_RELEASE, NULL); | ||
8898 | + if (rec) { | ||
8899 | + rec->data.sys_release.when = now(); | ||
8900 | + rec->data.sys_release.release = *start; | ||
8901 | + put_record(rec); | ||
8902 | + } | ||
8903 | +} | ||
8904 | diff --git a/litmus/sched_trace.c b/litmus/sched_trace.c | ||
8905 | new file mode 100644 | ||
8906 | index 0000000..527a58b | ||
8907 | --- /dev/null | ||
8908 | +++ b/litmus/sched_trace.c | ||
8909 | @@ -0,0 +1,462 @@ | ||
8910 | +/* sched_trace.c -- record scheduling events to a byte stream. | ||
8911 | + * | ||
8912 | + * TODO: Move ring buffer to a lockfree implementation. | ||
8913 | + */ | ||
8914 | + | ||
8915 | +#include <linux/spinlock.h> | ||
8916 | +#include <linux/fs.h> | ||
8917 | +#include <linux/cdev.h> | ||
8918 | +#include <asm/semaphore.h> | ||
8919 | +#include <asm/uaccess.h> | ||
8920 | +#include <linux/module.h> | ||
8921 | + | ||
8922 | +#include <litmus/sched_trace.h> | ||
8923 | +#include <litmus/litmus.h> | ||
8924 | + | ||
8925 | +typedef struct { | ||
8926 | + /* guard read and write pointers */ | ||
8927 | + spinlock_t lock; | ||
8928 | + /* guard against concurrent freeing of buffer */ | ||
8929 | + rwlock_t del_lock; | ||
8930 | + | ||
8931 | + /* memory allocated for ring buffer */ | ||
8932 | + unsigned long order; | ||
8933 | + char* buf; | ||
8934 | + char* end; | ||
8935 | + | ||
8936 | + /* Read/write pointer. May not cross. | ||
8937 | + * They point to the position of next write and | ||
8938 | + * last read. | ||
8939 | + */ | ||
8940 | + char* writep; | ||
8941 | + char* readp; | ||
8942 | + | ||
8943 | +} ring_buffer_t; | ||
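Since readp points at the last byte read and writep at the next byte to write, the two pointers may never cross; one byte of the ring is therefore always sacrificed, so an N-byte buffer stores at most N - 1 bytes, and rb_alloc_buf() below accordingly starts out with writep = buf + 1 and readp = buf. A worked example of the resulting free-space arithmetic (illustrative, not from the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned size = 16;             /* hypothetical ring size */
            unsigned readp = 0, writep = 1; /* rb_alloc_buf()'s initial state */

            /* free bytes = (readp - writep) mod size: 15 here, never 16 */
            unsigned freeb = (readp + size - writep) % size;
            printf("capacity of a %u-byte ring: %u byte(s)\n", size, freeb);
            return 0;
    }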
8944 | + | ||
8945 | +#define EMPTY_RING_BUFFER { \ | ||
8946 | + .lock = SPIN_LOCK_UNLOCKED, \ | ||
8947 | + .del_lock = RW_LOCK_UNLOCKED, \ | ||
8948 | + .buf = NULL, \ | ||
8949 | + .end = NULL, \ | ||
8950 | + .writep = NULL, \ | ||
8951 | + .readp = NULL \ | ||
8952 | +} | ||
8953 | + | ||
8954 | +void rb_init(ring_buffer_t* buf) | ||
8955 | +{ | ||
8956 | + *buf = (ring_buffer_t) EMPTY_RING_BUFFER; | ||
8957 | +} | ||
8958 | + | ||
8959 | +int rb_alloc_buf(ring_buffer_t* buf, unsigned long order) | ||
8960 | +{ | ||
8961 | + unsigned long flags; | ||
8962 | + int error = 0; | ||
8963 | + char *mem; | ||
8964 | + | ||
8965 | + /* do memory allocation while not atomic */ | ||
8966 | + mem = (char *) __get_free_pages(GFP_KERNEL, order); | ||
8967 | + if (!mem) | ||
8968 | + return -ENOMEM; | ||
8969 | + write_lock_irqsave(&buf->del_lock, flags); | ||
8970 | + BUG_ON(buf->buf); | ||
8971 | + buf->buf = mem; | ||
8972 | + buf->end = buf->buf + PAGE_SIZE * (1 << order) - 1; | ||
8973 | + memset(buf->buf, 0xff, buf->end - buf->buf); | ||
8974 | + buf->order = order; | ||
8975 | + buf->writep = buf->buf + 1; | ||
8976 | + buf->readp = buf->buf; | ||
8977 | + write_unlock_irqrestore(&buf->del_lock, flags); | ||
8978 | + return error; | ||
8979 | +} | ||
8980 | + | ||
8981 | +int rb_free_buf(ring_buffer_t* buf) | ||
8982 | +{ | ||
8983 | + unsigned long flags; | ||
8984 | + int error = 0; | ||
8985 | + write_lock_irqsave(&buf->del_lock, flags); | ||
8986 | + BUG_ON(!buf->buf); | ||
8987 | + free_pages((unsigned long) buf->buf, buf->order); | ||
8988 | + buf->buf = NULL; | ||
8989 | + buf->end = NULL; | ||
8990 | + buf->writep = NULL; | ||
8991 | + buf->readp = NULL; | ||
8992 | + write_unlock_irqrestore(&buf->del_lock, flags); | ||
8993 | + return error; | ||
8994 | +} | ||
8995 | + | ||
8996 | +/* Assumption: concurrent writes are serialized externally | ||
8997 | + * | ||
8998 | + * Will only succeed if there is enough space for all len bytes. | ||
8999 | + */ | ||
9000 | +int rb_put(ring_buffer_t* buf, char* mem, size_t len) | ||
9001 | +{ | ||
9002 | + unsigned long flags; | ||
9003 | + char* r , *w; | ||
9004 | + int error = 0; | ||
9005 | + read_lock_irqsave(&buf->del_lock, flags); | ||
9006 | + if (!buf->buf) { | ||
9007 | + error = -ENODEV; | ||
9008 | + goto out; | ||
9009 | + } | ||
9010 | + spin_lock(&buf->lock); | ||
9011 | + r = buf->readp; | ||
9012 | + w = buf->writep; | ||
9013 | + spin_unlock(&buf->lock); | ||
9014 | + if (r < w && buf->end - w >= len - 1) { | ||
9015 | + /* easy case: there is enough space in the buffer | ||
9016 | +		 * to write it in one continuous chunk */ | ||
9017 | + memcpy(w, mem, len); | ||
9018 | + w += len; | ||
9019 | + if (w > buf->end) | ||
9020 | + /* special case: fit exactly into buffer | ||
9021 | + * w is now buf->end + 1 | ||
9022 | + */ | ||
9023 | + w = buf->buf; | ||
9024 | + } else if (w < r && r - w >= len) { /* >= len because may not cross */ | ||
9025 | +		/* we are constrained by the read pointer, but there | ||
9026 | + * is enough space | ||
9027 | + */ | ||
9028 | + memcpy(w, mem, len); | ||
9029 | + w += len; | ||
9030 | + } else if (r <= w && buf->end - w < len - 1) { | ||
9031 | + /* the wrap around case: there may or may not be space */ | ||
9032 | + if ((buf->end - w) + (r - buf->buf) >= len - 1) { | ||
9033 | + /* copy chunk that fits at the end */ | ||
9034 | + memcpy(w, mem, buf->end - w + 1); | ||
9035 | + mem += buf->end - w + 1; | ||
9036 | + len -= (buf->end - w + 1); | ||
9037 | + w = buf->buf; | ||
9038 | + /* copy the rest */ | ||
9039 | + memcpy(w, mem, len); | ||
9040 | + w += len; | ||
9041 | + } | ||
9042 | + else | ||
9043 | + error = -ENOMEM; | ||
9044 | + } else { | ||
9045 | + error = -ENOMEM; | ||
9046 | + } | ||
9047 | + if (!error) { | ||
9048 | + spin_lock(&buf->lock); | ||
9049 | + buf->writep = w; | ||
9050 | + spin_unlock(&buf->lock); | ||
9051 | + } | ||
9052 | + out: | ||
9053 | + read_unlock_irqrestore(&buf->del_lock, flags); | ||
9054 | + return error; | ||
9055 | +} | ||
9056 | + | ||
9057 | +/* Assumption: concurrent reads are serialized externally */ | ||
9058 | +int rb_get(ring_buffer_t* buf, char* mem, size_t len) | ||
9059 | +{ | ||
9060 | + unsigned long flags; | ||
9061 | + char* r , *w; | ||
9062 | + int error = 0; | ||
9063 | + read_lock_irqsave(&buf->del_lock, flags); | ||
9064 | + if (!buf->buf) { | ||
9065 | + error = -ENODEV; | ||
9066 | + goto out; | ||
9067 | + } | ||
9068 | + spin_lock(&buf->lock); | ||
9069 | + r = buf->readp; | ||
9070 | + w = buf->writep; | ||
9071 | + spin_unlock(&buf->lock); | ||
9072 | + | ||
9073 | + if (w <= r && buf->end - r >= len) { | ||
9074 | + /* easy case: there is enough data in the buffer | ||
9075 | +		 * to get it in one chunk */ | ||
9076 | + memcpy(mem, r + 1, len); | ||
9077 | + r += len; | ||
9078 | + error = len; | ||
9079 | + | ||
9080 | + } else if (r + 1 < w && w - r - 1 >= len) { | ||
9081 | + /* we are constrained by the write pointer but | ||
9082 | + * there is enough data | ||
9083 | + */ | ||
9084 | + memcpy(mem, r + 1, len); | ||
9085 | + r += len; | ||
9086 | + error = len; | ||
9087 | + | ||
9088 | + } else if (r + 1 < w && w - r - 1 < len) { | ||
9089 | +		/* we are constrained by the write pointer and | ||
9090 | +		 * there is not enough data | ||
9091 | + */ | ||
9092 | + memcpy(mem, r + 1, w - r - 1); | ||
9093 | + error = w - r - 1; | ||
9094 | + r += w - r - 1; | ||
9095 | + | ||
9096 | + } else if (w <= r && buf->end - r < len) { | ||
9097 | + /* the wrap around case: there may or may not be enough data | ||
9098 | + * first let's get what is available | ||
9099 | + */ | ||
9100 | + memcpy(mem, r + 1, buf->end - r); | ||
9101 | + error += (buf->end - r); | ||
9102 | + mem += (buf->end - r); | ||
9103 | + len -= (buf->end - r); | ||
9104 | + r += (buf->end - r); | ||
9105 | + | ||
9106 | + if (w > buf->buf) { | ||
9107 | + /* there is more to get */ | ||
9108 | + r = buf->buf - 1; | ||
9109 | + if (w - r >= len) { | ||
9110 | + /* plenty */ | ||
9111 | + memcpy(mem, r + 1, len); | ||
9112 | + error += len; | ||
9113 | + r += len; | ||
9114 | + } else { | ||
9115 | + memcpy(mem, r + 1, w - r - 1); | ||
9116 | + error += w - r - 1; | ||
9117 | + r += w - r - 1; | ||
9118 | + } | ||
9119 | + } | ||
9120 | + } /* nothing available */ | ||
9121 | + | ||
9122 | + if (error > 0) { | ||
9123 | + spin_lock(&buf->lock); | ||
9124 | + buf->readp = r; | ||
9125 | + spin_unlock(&buf->lock); | ||
9126 | + } | ||
9127 | + out: | ||
9128 | + read_unlock_irqrestore(&buf->del_lock, flags); | ||
9129 | + return error; | ||
9130 | +} | ||
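
The pointer conventions behind rb_put() and rb_get() are easy to lose in the case analysis: writep points at the next free byte, readp at the byte before the next unread one, writes are all-or-nothing, and reads return however many bytes happen to be available. Below is a minimal user-space model of the same semantics (a sketch, not the kernel code; it uses modular arithmetic and a one-slot gap to tell full from empty, where the kernel code uses explicit boundary comparisons):

    #include <stdio.h>

    #define SIZE 8

    static char buf[SIZE];
    static int r = SIZE - 1;  /* one before the next unread byte */
    static int w = 0;         /* next free byte */

    static int used(void)  { return (w - r - 1 + SIZE) % SIZE; }
    static int space(void) { return SIZE - 1 - used(); }

    static int put(const char *m, int len)
    {
        int i;
        if (len > space())
            return -1;                    /* all-or-nothing, like rb_put() */
        for (i = 0; i < len; i++)
            buf[(w + i) % SIZE] = m[i];
        w = (w + len) % SIZE;
        return 0;
    }

    static int get(char *m, int len)
    {
        int i;
        if (len > used())
            len = used();                 /* partial reads, like rb_get() */
        for (i = 0; i < len; i++)
            m[i] = buf[(r + 1 + i) % SIZE];
        r = (r + len) % SIZE;
        return len;
    }

    int main(void)
    {
        char out[SIZE];
        put("abcde", 5);
        printf("%d\n", get(out, 3));      /* 3 */
        put("fgh", 3);                    /* wraps around the end */
        printf("%d\n", get(out, 8));      /* 5: only what is available */
        return 0;
    }
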
9131 | + | ||
9132 | + | ||
9133 | + | ||
9134 | +/******************************************************************************/ | ||
9135 | +/* DEVICE FILE DRIVER */ | ||
9136 | +/******************************************************************************/ | ||
9137 | + | ||
9138 | + | ||
9139 | + | ||
9140 | +/* Allocate a buffer of about 1 MB per CPU | ||
9141 | + * (BUFFER_ORDER 8 = 2^8 pages = 1 MB with 4 KB pages). | ||
9142 | + */ | ||
9143 | +#define BUFFER_ORDER 8 | ||
9144 | + | ||
9145 | +typedef struct { | ||
9146 | + ring_buffer_t buf; | ||
9147 | + atomic_t reader_cnt; | ||
9148 | + struct semaphore reader_mutex; | ||
9149 | +} trace_buffer_t; | ||
9150 | + | ||
9151 | + | ||
9152 | +/* This does not initialize the semaphore!! */ | ||
9153 | + | ||
9154 | +#define EMPTY_TRACE_BUFFER \ | ||
9155 | + { .buf = EMPTY_RING_BUFFER, .reader_cnt = ATOMIC_INIT(0)} | ||
9156 | + | ||
9157 | +static spinlock_t log_buffer_lock = SPIN_LOCK_UNLOCKED; | ||
9158 | +static trace_buffer_t log_buffer = EMPTY_TRACE_BUFFER; | ||
9159 | + | ||
9160 | +static void init_log_buffer(void) | ||
9161 | +{ | ||
9162 | + /* only initialize the mutex, the rest was initialized as part | ||
9163 | + * of the static initialization macro | ||
9164 | + */ | ||
9165 | + init_MUTEX(&log_buffer.reader_mutex); | ||
9166 | +} | ||
9167 | + | ||
9168 | +static ssize_t log_read(struct file *filp, char __user *to, size_t len, | ||
9169 | + loff_t *f_pos) | ||
9170 | +{ | ||
9171 | + /* we ignore f_pos, this is strictly sequential */ | ||
9172 | + | ||
9173 | + ssize_t error = -EINVAL; | ||
9174 | + char* mem; | ||
9175 | + trace_buffer_t *buf = filp->private_data; | ||
9176 | + | ||
9177 | + if (down_interruptible(&buf->reader_mutex)) { | ||
9178 | + error = -ERESTARTSYS; | ||
9179 | + goto out; | ||
9180 | + } | ||
9181 | + | ||
9182 | + if (len > 64 * 1024) | ||
9183 | + len = 64 * 1024; | ||
9184 | + mem = kmalloc(len, GFP_KERNEL); | ||
9185 | + if (!mem) { | ||
9186 | + error = -ENOMEM; | ||
9187 | + goto out_unlock; | ||
9188 | + } | ||
9189 | + | ||
9190 | + error = rb_get(&buf->buf, mem, len); | ||
9191 | + while (!error) { | ||
9192 | + set_current_state(TASK_INTERRUPTIBLE); | ||
9193 | + schedule_timeout(110); | ||
9194 | + if (signal_pending(current)) | ||
9195 | + error = -ERESTARTSYS; | ||
9196 | + else | ||
9197 | + error = rb_get(&buf->buf, mem, len); | ||
9198 | + } | ||
9199 | + | ||
9200 | + if (error > 0 && copy_to_user(to, mem, error)) | ||
9201 | + error = -EFAULT; | ||
9202 | + | ||
9203 | + kfree(mem); | ||
9204 | + out_unlock: | ||
9205 | + up(&buf->reader_mutex); | ||
9206 | + out: | ||
9207 | + return error; | ||
9208 | +} | ||
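
The read() semantics above amount to: block, polling roughly every 110 jiffies, until at least one byte is available or a signal arrives. A hypothetical user-space consumer could look like the sketch below (the device node name is an assumption; the node would be created by hand with mknod against the LOG_MAJOR number registered further down):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char chunk[4096];
        ssize_t n;
        int fd = open("/dev/litmus_log", O_RDONLY);  /* hypothetical node */

        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* each read() blocks until the kernel ring buffer has data */
        while ((n = read(fd, chunk, sizeof(chunk))) > 0)
            fwrite(chunk, 1, n, stdout);
        close(fd);
        return 0;
    }
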
9209 | + | ||
9210 | + | ||
9211 | + | ||
9212 | +extern int trace_override; | ||
9213 | + | ||
9214 | +/* log_open - open the global log message ring buffer. | ||
9215 | + */ | ||
9216 | +static int log_open(struct inode *in, struct file *filp) | ||
9217 | +{ | ||
9218 | + int error = -EINVAL; | ||
9219 | + trace_buffer_t* buf; | ||
9220 | + | ||
9221 | + buf = &log_buffer; | ||
9222 | + | ||
9223 | + if (down_interruptible(&buf->reader_mutex)) { | ||
9224 | + error = -ERESTARTSYS; | ||
9225 | + goto out; | ||
9226 | + } | ||
9227 | + | ||
9228 | + /* first open must allocate buffers */ | ||
9229 | + if (atomic_inc_return(&buf->reader_cnt) == 1) { | ||
9230 | + if ((error = rb_alloc_buf(&buf->buf, BUFFER_ORDER))) | ||
9231 | + { | ||
9232 | + atomic_dec(&buf->reader_cnt); | ||
9233 | + goto out_unlock; | ||
9234 | + } | ||
9235 | + } | ||
9236 | + | ||
9237 | + error = 0; | ||
9238 | + filp->private_data = buf; | ||
9239 | + printk(KERN_DEBUG "sched_trace buf: from 0x%p to 0x%p length: %x\n", | ||
9240 | + buf->buf.buf, buf->buf.end, buf->buf.end - buf->buf.buf); | ||
9241 | + trace_override++; | ||
9242 | + out_unlock: | ||
9243 | + up(&buf->reader_mutex); | ||
9244 | + out: | ||
9245 | + return error; | ||
9246 | +} | ||
9247 | + | ||
9248 | +static int log_release(struct inode *in, struct file *filp) | ||
9249 | +{ | ||
9250 | + int error = -EINVAL; | ||
9251 | + trace_buffer_t* buf = filp->private_data; | ||
9252 | + | ||
9253 | + BUG_ON(!filp->private_data); | ||
9254 | + | ||
9255 | + if (down_interruptible(&buf->reader_mutex)) { | ||
9256 | + error = -ERESTARTSYS; | ||
9257 | + goto out; | ||
9258 | + } | ||
9259 | + | ||
9260 | + /* last release must deallocate buffers */ | ||
9261 | + if (atomic_dec_return(&buf->reader_cnt) == 0) { | ||
9262 | + error = rb_free_buf(&buf->buf); | ||
9263 | + } | ||
9264 | + | ||
9265 | + trace_override--; | ||
9266 | + up(&buf->reader_mutex); | ||
9267 | + out: | ||
9268 | + return error; | ||
9269 | +} | ||
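
log_open() and log_release() implement a small reference-counting discipline: the first opener allocates the shared ring buffer, the last one to leave frees it. The same pattern in isolation (a user-space sketch; the kernel version additionally holds reader_mutex around the counter updates):

    #include <assert.h>
    #include <stdlib.h>

    static int readers;
    static char *shared;

    static int open_buf(void)
    {
        if (++readers == 1) {        /* first open allocates */
            shared = malloc(1 << 20);
            if (!shared) {
                readers--;
                return -1;
            }
        }
        return 0;
    }

    static void release_buf(void)
    {
        if (--readers == 0) {        /* last release frees */
            free(shared);
            shared = NULL;
        }
    }

    int main(void)
    {
        open_buf();
        open_buf();
        release_buf();
        assert(shared != NULL);      /* one reader still active */
        release_buf();
        assert(shared == NULL);
        return 0;
    }
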
9270 | + | ||
9271 | +/******************************************************************************/ | ||
9272 | +/* Device Registration */ | ||
9273 | +/******************************************************************************/ | ||
9274 | + | ||
9275 | +/* the major numbers are from the unassigned/local use block | ||
9276 | + * | ||
9277 | + * This should be converted to dynamic allocation at some point... | ||
9278 | + */ | ||
9279 | +#define LOG_MAJOR 251 | ||
9280 | + | ||
9281 | +/* log_fops - The file operations for accessing the global LITMUS log message | ||
9282 | + * buffer. | ||
9283 | + * | ||
9284 | + * Except for opening the device file it uses the same operations as trace_fops. | ||
9285 | + */ | ||
9286 | +struct file_operations log_fops = { | ||
9287 | + .owner = THIS_MODULE, | ||
9288 | + .open = log_open, | ||
9289 | + .release = log_release, | ||
9290 | + .read = log_read, | ||
9291 | +}; | ||
9292 | + | ||
9293 | +static int __init register_buffer_dev(const char* name, | ||
9294 | + struct file_operations* fops, | ||
9295 | + int major, int count) | ||
9296 | +{ | ||
9297 | + dev_t trace_dev; | ||
9298 | + struct cdev *cdev; | ||
9299 | + int error = 0; | ||
9300 | + | ||
9301 | + trace_dev = MKDEV(major, 0); | ||
9302 | + error = register_chrdev_region(trace_dev, count, name); | ||
9303 | + if (error) | ||
9304 | + { | ||
9305 | + printk(KERN_WARNING "sched trace: " | ||
9306 | + "Could not register major/minor number %d\n", major); | ||
9307 | + return error; | ||
9308 | + } | ||
9309 | + cdev = cdev_alloc(); | ||
9310 | + if (!cdev) { | ||
9311 | + printk(KERN_WARNING "sched trace: " | ||
9312 | + "Could not get a cdev for %s.\n", name); | ||
9313 | + return -ENOMEM; | ||
9314 | + } | ||
9315 | + cdev->owner = THIS_MODULE; | ||
9316 | + cdev->ops = fops; | ||
9317 | + error = cdev_add(cdev, trace_dev, count); | ||
9318 | + if (error) { | ||
9319 | + printk(KERN_WARNING "sched trace: " | ||
9320 | + 		       "cdev_add failed for %s.\n", name); | ||
9321 | + return -ENOMEM; | ||
9322 | + } | ||
9323 | + return error; | ||
9324 | + | ||
9325 | +} | ||
9326 | + | ||
9327 | +static int __init init_sched_trace(void) | ||
9328 | +{ | ||
9329 | + printk("Initializing TRACE() device\n"); | ||
9330 | + init_log_buffer(); | ||
9331 | + | ||
9332 | + return register_buffer_dev("litmus_log", &log_fops, | ||
9333 | + LOG_MAJOR, 1); | ||
9334 | +} | ||
9335 | + | ||
9336 | +module_init(init_sched_trace); | ||
9337 | + | ||
9338 | +#define MSG_SIZE 255 | ||
9339 | +static DEFINE_PER_CPU(char[MSG_SIZE], fmt_buffer); | ||
9340 | + | ||
9341 | +/* sched_trace_log_message - This is the only function that accesses the | ||
9342 | + * log buffer inside the kernel for writing. | ||
9343 | + * Concurrent access to it is serialized via the | ||
9344 | + * log_buffer_lock. | ||
9345 | + * | ||
9346 | + * The maximum length of a formatted message is 255. | ||
9347 | + */ | ||
9348 | +void sched_trace_log_message(const char* fmt, ...) | ||
9349 | +{ | ||
9350 | + unsigned long flags; | ||
9351 | + va_list args; | ||
9352 | + size_t len; | ||
9353 | + char* buf; | ||
9354 | + | ||
9355 | + va_start(args, fmt); | ||
9356 | + local_irq_save(flags); | ||
9357 | + | ||
9358 | + /* format message */ | ||
9359 | + buf = __get_cpu_var(fmt_buffer); | ||
9360 | + len = vscnprintf(buf, MSG_SIZE, fmt, args); | ||
9361 | + | ||
9362 | + spin_lock(&log_buffer_lock); | ||
9363 | + /* Don't copy the trailing null byte, we don't want null bytes | ||
9364 | + * in a text file. | ||
9365 | + */ | ||
9366 | + rb_put(&log_buffer.buf, buf, len); | ||
9367 | + spin_unlock(&log_buffer_lock); | ||
9368 | + | ||
9369 | + local_irq_restore(flags); | ||
9370 | + va_end(args); | ||
9371 | +} | ||
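
The function formats into a fixed per-CPU buffer first and only then copies the finished bytes into the ring buffer, so the vararg formatting happens before log_buffer_lock is taken. A user-space model of that two-step pattern (a sketch; the kernel's vscnprintf() returns the truncated length directly, which plain vsnprintf() does not, hence the clamp):

    #include <stdarg.h>
    #include <stdio.h>

    #define MSG_SIZE 255

    static void log_message(const char *fmt, ...)
    {
        char buf[MSG_SIZE];
        va_list args;
        int len;

        va_start(args, fmt);
        len = vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        if (len < 0)
            return;
        if (len > (int) sizeof(buf) - 1)
            len = sizeof(buf) - 1;   /* vsnprintf reports the would-be length */

        /* the kernel now calls rb_put(&log_buffer.buf, buf, len); here: */
        fwrite(buf, 1, len, stderr);
    }

    int main(void)
    {
        log_message("job %d released at %lu\n", 3, 1000UL);
        return 0;
    }
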
9372 | diff --git a/litmus/srp.c b/litmus/srp.c | ||
9373 | new file mode 100644 | ||
9374 | index 0000000..71639b9 | ||
9375 | --- /dev/null | ||
9376 | +++ b/litmus/srp.c | ||
9377 | @@ -0,0 +1,318 @@ | ||
9378 | +/* ************************************************************************** */ | ||
9379 | +/* STACK RESOURCE POLICY */ | ||
9380 | +/* ************************************************************************** */ | ||
9381 | + | ||
9382 | +#include <asm/atomic.h> | ||
9383 | +#include <linux/wait.h> | ||
9384 | +#include <litmus/litmus.h> | ||
9385 | +#include <litmus/sched_plugin.h> | ||
9386 | + | ||
9387 | +#include <litmus/fdso.h> | ||
9388 | + | ||
9389 | +#include <litmus/trace.h> | ||
9390 | + | ||
9391 | + | ||
9392 | +#ifdef CONFIG_SRP | ||
9393 | + | ||
9394 | +struct srp_priority { | ||
9395 | + struct list_head list; | ||
9396 | + unsigned int period; | ||
9397 | + pid_t pid; | ||
9398 | +}; | ||
9399 | + | ||
9400 | +#define list2prio(l) list_entry(l, struct srp_priority, list) | ||
9401 | + | ||
9402 | +/* SRP task priority comparison function. Shorter periods have higher | ||
9403 | + * priority; ties are broken by PID. Special case: period == 0 <=> no priority | ||
9404 | + */ | ||
9405 | +static int srp_higher_prio(struct srp_priority* first, | ||
9406 | + struct srp_priority* second) | ||
9407 | +{ | ||
9408 | + if (!first->period) | ||
9409 | + return 0; | ||
9410 | + else | ||
9411 | + return !second->period || | ||
9412 | + first->period < second->period || ( | ||
9413 | + first->period == second->period && | ||
9414 | + first->pid < second->pid); | ||
9415 | +} | ||
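
The predicate encodes the ordering used throughout this file: shorter periods win, equal periods fall back to the lower PID, and a zero period loses to everything. A stand-alone check of that ordering (a sketch mirroring the function above):

    #include <assert.h>

    struct prio { unsigned int period; int pid; };

    static int higher(struct prio *a, struct prio *b)
    {
        if (!a->period)
            return 0;
        return !b->period ||
            a->period < b->period ||
            (a->period == b->period && a->pid < b->pid);
    }

    int main(void)
    {
        struct prio none = {0, 1}, fast = {10, 2}, slow = {20, 3}, tie = {10, 9};

        assert(!higher(&none, &fast));  /* period 0 <=> no priority */
        assert(higher(&fast, &slow));   /* 10 < 20 */
        assert(higher(&fast, &tie));    /* equal period, pid 2 < 9 */
        assert(higher(&fast, &none));   /* anything beats "no priority" */
        return 0;
    }
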
9416 | + | ||
9417 | +struct srp { | ||
9418 | + struct list_head ceiling; | ||
9419 | + wait_queue_head_t ceiling_blocked; | ||
9420 | +}; | ||
9421 | + | ||
9422 | + | ||
9423 | +atomic_t srp_objects_in_use = ATOMIC_INIT(0); | ||
9424 | + | ||
9425 | +DEFINE_PER_CPU(struct srp, srp); | ||
9426 | + | ||
9427 | + | ||
9428 | +/* Initialize SRP semaphores at boot time. */ | ||
9429 | +static int __init srp_init(void) | ||
9430 | +{ | ||
9431 | + int i; | ||
9432 | + | ||
9433 | + printk("Initializing SRP per-CPU ceilings..."); | ||
9434 | + for (i = 0; i < NR_CPUS; i++) { | ||
9435 | + init_waitqueue_head(&per_cpu(srp, i).ceiling_blocked); | ||
9436 | + INIT_LIST_HEAD(&per_cpu(srp, i).ceiling); | ||
9437 | + } | ||
9438 | + printk(" done!\n"); | ||
9439 | + | ||
9440 | + return 0; | ||
9441 | +} | ||
9442 | +module_init(srp_init); | ||
9443 | + | ||
9444 | + | ||
9445 | +#define system_ceiling(srp) list2prio(srp->ceiling.next) | ||
9446 | + | ||
9447 | + | ||
9448 | +#define UNDEF_SEM -2 | ||
9449 | + | ||
9450 | + | ||
9451 | +/* struct for uniprocessor SRP "semaphore" */ | ||
9452 | +struct srp_semaphore { | ||
9453 | + struct srp_priority ceiling; | ||
9454 | + struct task_struct* owner; | ||
9455 | + int cpu; /* cpu associated with this "semaphore" and resource */ | ||
9456 | +}; | ||
9457 | + | ||
9458 | +#define ceiling2sem(c) container_of(c, struct srp_semaphore, ceiling) | ||
9459 | + | ||
9460 | +static int srp_exceeds_ceiling(struct task_struct* first, | ||
9461 | + struct srp* srp) | ||
9462 | +{ | ||
9463 | + return list_empty(&srp->ceiling) || | ||
9464 | + get_rt_period(first) < system_ceiling(srp)->period || | ||
9465 | + (get_rt_period(first) == system_ceiling(srp)->period && | ||
9466 | + first->pid < system_ceiling(srp)->pid) || | ||
9467 | + ceiling2sem(system_ceiling(srp))->owner == first; | ||
9468 | +} | ||
9469 | + | ||
9470 | +static void srp_add_prio(struct srp* srp, struct srp_priority* prio) | ||
9471 | +{ | ||
9472 | + struct list_head *pos; | ||
9473 | + if (in_list(&prio->list)) { | ||
9474 | + printk(KERN_CRIT "WARNING: SRP violation detected, prio is already in " | ||
9475 | + "ceiling list! cpu=%d, srp=%p\n", smp_processor_id(), ceiling2sem(prio)); | ||
9476 | + return; | ||
9477 | + } | ||
9478 | + list_for_each(pos, &srp->ceiling) | ||
9479 | + if (unlikely(srp_higher_prio(prio, list2prio(pos)))) { | ||
9480 | + __list_add(&prio->list, pos->prev, pos); | ||
9481 | + return; | ||
9482 | + } | ||
9483 | + | ||
9484 | + list_add_tail(&prio->list, &srp->ceiling); | ||
9485 | +} | ||
9486 | + | ||
9487 | + | ||
9488 | +static void* create_srp_semaphore(void) | ||
9489 | +{ | ||
9490 | + struct srp_semaphore* sem; | ||
9491 | + | ||
9492 | + sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
9493 | + if (!sem) | ||
9494 | + return NULL; | ||
9495 | + | ||
9496 | + INIT_LIST_HEAD(&sem->ceiling.list); | ||
9497 | + sem->ceiling.period = 0; | ||
9498 | + sem->cpu = UNDEF_SEM; | ||
9499 | + sem->owner = NULL; | ||
9500 | + atomic_inc(&srp_objects_in_use); | ||
9501 | + return sem; | ||
9502 | +} | ||
9503 | + | ||
9504 | +static noinline int open_srp_semaphore(struct od_table_entry* entry, void* __user arg) | ||
9505 | +{ | ||
9506 | + struct srp_semaphore* sem = (struct srp_semaphore*) entry->obj->obj; | ||
9507 | + int ret = 0; | ||
9508 | + struct task_struct* t = current; | ||
9509 | + struct srp_priority t_prio; | ||
9510 | + | ||
9511 | + TRACE("opening SRP semaphore %p, cpu=%d\n", sem, sem->cpu); | ||
9512 | + if (!srp_active()) | ||
9513 | + return -EBUSY; | ||
9514 | + | ||
9515 | + if (sem->cpu == UNDEF_SEM) | ||
9516 | + sem->cpu = get_partition(t); | ||
9517 | + else if (sem->cpu != get_partition(t)) | ||
9518 | + ret = -EPERM; | ||
9519 | + | ||
9520 | + if (ret == 0) { | ||
9521 | + t_prio.period = get_rt_period(t); | ||
9522 | + t_prio.pid = t->pid; | ||
9523 | + if (srp_higher_prio(&t_prio, &sem->ceiling)) { | ||
9524 | + sem->ceiling.period = t_prio.period; | ||
9525 | + sem->ceiling.pid = t_prio.pid; | ||
9526 | + } | ||
9527 | + } | ||
9528 | + | ||
9529 | + return ret; | ||
9530 | +} | ||
9531 | + | ||
9532 | +static void destroy_srp_semaphore(void* sem) | ||
9533 | +{ | ||
9534 | + /* XXX invariants */ | ||
9535 | + atomic_dec(&srp_objects_in_use); | ||
9536 | + kfree(sem); | ||
9537 | +} | ||
9538 | + | ||
9539 | +struct fdso_ops srp_sem_ops = { | ||
9540 | + .create = create_srp_semaphore, | ||
9541 | + .open = open_srp_semaphore, | ||
9542 | + .destroy = destroy_srp_semaphore | ||
9543 | +}; | ||
9544 | + | ||
9545 | + | ||
9546 | +static void do_srp_down(struct srp_semaphore* sem) | ||
9547 | +{ | ||
9548 | + /* Update ceiling. */ | ||
9549 | + srp_add_prio(&__get_cpu_var(srp), &sem->ceiling); | ||
9550 | + WARN_ON(sem->owner != NULL); | ||
9551 | + sem->owner = current; | ||
9552 | + TRACE_CUR("acquired srp 0x%p\n", sem); | ||
9553 | +} | ||
9554 | + | ||
9555 | +static void do_srp_up(struct srp_semaphore* sem) | ||
9556 | +{ | ||
9557 | + /* Determine new system priority ceiling for this CPU. */ | ||
9558 | + WARN_ON(!in_list(&sem->ceiling.list)); | ||
9559 | + if (in_list(&sem->ceiling.list)) | ||
9560 | + list_del(&sem->ceiling.list); | ||
9561 | + | ||
9562 | + sem->owner = NULL; | ||
9563 | + | ||
9564 | + /* Wake tasks on this CPU, if they exceed current ceiling. */ | ||
9565 | + TRACE_CUR("released srp 0x%p\n", sem); | ||
9566 | + wake_up_all(&__get_cpu_var(srp).ceiling_blocked); | ||
9567 | +} | ||
9568 | + | ||
9569 | +/* Adjust the system-wide priority ceiling if resource is claimed. */ | ||
9570 | +asmlinkage long sys_srp_down(int sem_od) | ||
9571 | +{ | ||
9572 | + int cpu; | ||
9573 | + int ret = -EINVAL; | ||
9574 | + struct srp_semaphore* sem; | ||
9575 | + | ||
9576 | +	/* disabling preemption is sufficient protection since | ||
9577 | + * SRP is strictly per CPU and we don't interfere with any | ||
9578 | + * interrupt handlers | ||
9579 | + */ | ||
9580 | + preempt_disable(); | ||
9581 | + TS_SRP_DOWN_START; | ||
9582 | + | ||
9583 | + cpu = smp_processor_id(); | ||
9584 | + sem = lookup_srp_sem(sem_od); | ||
9585 | + if (sem && sem->cpu == cpu) { | ||
9586 | + do_srp_down(sem); | ||
9587 | + ret = 0; | ||
9588 | + } | ||
9589 | + | ||
9590 | + TS_SRP_DOWN_END; | ||
9591 | + preempt_enable(); | ||
9592 | + return ret; | ||
9593 | +} | ||
9594 | + | ||
9595 | +/* Adjust the system-wide priority ceiling if resource is freed. */ | ||
9596 | +asmlinkage long sys_srp_up(int sem_od) | ||
9597 | +{ | ||
9598 | + int cpu; | ||
9599 | + int ret = -EINVAL; | ||
9600 | + struct srp_semaphore* sem; | ||
9601 | + | ||
9602 | + preempt_disable(); | ||
9603 | + TS_SRP_UP_START; | ||
9604 | + | ||
9605 | + cpu = smp_processor_id(); | ||
9606 | + sem = lookup_srp_sem(sem_od); | ||
9607 | + | ||
9608 | + if (sem && sem->cpu == cpu) { | ||
9609 | + do_srp_up(sem); | ||
9610 | + ret = 0; | ||
9611 | + } | ||
9612 | + | ||
9613 | + TS_SRP_UP_END; | ||
9614 | + preempt_enable(); | ||
9615 | + return ret; | ||
9616 | +} | ||
9617 | + | ||
9618 | +static int srp_wake_up(wait_queue_t *wait, unsigned mode, int sync, | ||
9619 | + void *key) | ||
9620 | +{ | ||
9621 | + int cpu = smp_processor_id(); | ||
9622 | + struct task_struct *tsk = wait->private; | ||
9623 | + if (cpu != get_partition(tsk)) | ||
9624 | +		TRACE_TASK(tsk, "srp_wake_up on wrong cpu, partition is %d\n", | ||
9625 | + get_partition(tsk)); | ||
9626 | + else if (srp_exceeds_ceiling(tsk, &__get_cpu_var(srp))) | ||
9627 | + return default_wake_function(wait, mode, sync, key); | ||
9628 | + return 0; | ||
9629 | +} | ||
9630 | + | ||
9631 | + | ||
9632 | + | ||
9633 | +static void do_ceiling_block(struct task_struct *tsk) | ||
9634 | +{ | ||
9635 | + wait_queue_t wait = { | ||
9636 | + .private = tsk, | ||
9637 | + .func = srp_wake_up, | ||
9638 | + .task_list = {NULL, NULL} | ||
9639 | + }; | ||
9640 | + | ||
9641 | + tsk->state = TASK_UNINTERRUPTIBLE; | ||
9642 | + add_wait_queue(&__get_cpu_var(srp).ceiling_blocked, &wait); | ||
9643 | + tsk->rt_param.srp_non_recurse = 1; | ||
9644 | + preempt_enable_no_resched(); | ||
9645 | + schedule(); | ||
9646 | + preempt_disable(); | ||
9647 | + tsk->rt_param.srp_non_recurse = 0; | ||
9648 | + remove_wait_queue(&__get_cpu_var(srp).ceiling_blocked, &wait); | ||
9649 | +} | ||
9650 | + | ||
9651 | +/* Wait for current task priority to exceed system-wide priority ceiling. | ||
9652 | + */ | ||
9653 | +void srp_ceiling_block(void) | ||
9654 | +{ | ||
9655 | + struct task_struct *tsk = current; | ||
9656 | + | ||
9657 | +	/* Only applies to real-time tasks; unlikely() optimizes for the RT case. */ | ||
9658 | + if (unlikely(!is_realtime(tsk))) | ||
9659 | + return; | ||
9660 | + | ||
9661 | + /* Avoid recursive ceiling blocking. */ | ||
9662 | + if (unlikely(tsk->rt_param.srp_non_recurse)) | ||
9663 | + return; | ||
9664 | + | ||
9665 | + /* Bail out early if there aren't any SRP resources around. */ | ||
9666 | + if (likely(!atomic_read(&srp_objects_in_use))) | ||
9667 | + return; | ||
9668 | + | ||
9669 | + preempt_disable(); | ||
9670 | + if (!srp_exceeds_ceiling(tsk, &__get_cpu_var(srp))) { | ||
9671 | + TRACE_CUR("is priority ceiling blocked.\n"); | ||
9672 | + while (!srp_exceeds_ceiling(tsk, &__get_cpu_var(srp))) | ||
9673 | + do_ceiling_block(tsk); | ||
9674 | + TRACE_CUR("finally exceeds system ceiling.\n"); | ||
9675 | + } else | ||
9676 | + TRACE_CUR("is not priority ceiling blocked\n"); | ||
9677 | + preempt_enable(); | ||
9678 | +} | ||
9679 | + | ||
9680 | + | ||
9681 | +#else | ||
9682 | + | ||
9683 | +asmlinkage long sys_srp_down(int sem_od) | ||
9684 | +{ | ||
9685 | + return -ENOSYS; | ||
9686 | +} | ||
9687 | + | ||
9688 | +asmlinkage long sys_srp_up(int sem_od) | ||
9689 | +{ | ||
9690 | + return -ENOSYS; | ||
9691 | +} | ||
9692 | + | ||
9693 | +struct fdso_ops srp_sem_ops = {}; | ||
9694 | + | ||
9695 | +#endif | ||
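
Taken together, the protocol reduces to maintaining a sorted per-CPU list of the ceilings of all currently held resources: srp_down() inserts the semaphore's ceiling, srp_up() removes it, and a task may proceed only if it beats the list head. A user-space model of just that bookkeeping (a sketch; it ignores the PID tie-break and the owner exception of srp_exceeds_ceiling()):

    #include <assert.h>
    #include <string.h>

    #define MAX 8

    static unsigned int ceiling[MAX];  /* sorted, shortest period first */
    static int nceil;

    static void srp_down(unsigned int c)       /* push a semaphore ceiling */
    {
        int i = nceil++;
        while (i > 0 && ceiling[i - 1] > c) {  /* keep sorted ascending */
            ceiling[i] = ceiling[i - 1];
            i--;
        }
        ceiling[i] = c;
    }

    static void srp_up(unsigned int c)         /* pop one matching entry */
    {
        int i;
        for (i = 0; i < nceil; i++)
            if (ceiling[i] == c) {
                memmove(&ceiling[i], &ceiling[i + 1],
                        (--nceil - i) * sizeof(ceiling[0]));
                return;
            }
    }

    static int exceeds_ceiling(unsigned int period)
    {
        return nceil == 0 || period < ceiling[0];
    }

    int main(void)
    {
        srp_down(20);                 /* resource with ceiling period 20 */
        assert(exceeds_ceiling(10));  /* a period-10 task may preempt */
        assert(!exceeds_ceiling(30)); /* a period-30 task must block */
        srp_down(5);                  /* nested resource raises the ceiling */
        assert(!exceeds_ceiling(10)); /* now even the period-10 task blocks */
        srp_up(5);
        srp_up(20);
        assert(exceeds_ceiling(30));  /* ceiling cleared */
        return 0;
    }
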
9696 | diff --git a/litmus/sync.c b/litmus/sync.c | ||
9697 | new file mode 100644 | ||
9698 | index 0000000..d5069f9 | ||
9699 | --- /dev/null | ||
9700 | +++ b/litmus/sync.c | ||
9701 | @@ -0,0 +1,90 @@ | ||
9702 | +/* litmus/sync.c - Support for synchronous and asynchronous task system releases. | ||
9703 | + * | ||
9704 | + * | ||
9705 | + */ | ||
9706 | + | ||
9707 | +#include <asm/atomic.h> | ||
9708 | +#include <asm/uaccess.h> | ||
9709 | +#include <linux/spinlock.h> | ||
9710 | +#include <linux/list.h> | ||
9711 | +#include <linux/sched.h> | ||
9712 | +#include <linux/completion.h> | ||
9713 | + | ||
9714 | +#include <litmus/litmus.h> | ||
9715 | +#include <litmus/sched_plugin.h> | ||
9716 | +#include <litmus/jobs.h> | ||
9717 | + | ||
9718 | +#include <litmus/sched_trace.h> | ||
9719 | + | ||
9720 | +static DECLARE_COMPLETION(ts_release); | ||
9721 | + | ||
9722 | +static long do_wait_for_ts_release(void) | ||
9723 | +{ | ||
9724 | + long ret = 0; | ||
9725 | + | ||
9726 | + /* If the interruption races with a release, the completion object | ||
9727 | + * may have a non-zero counter. To avoid this problem, this should | ||
9728 | + * be replaced by wait_for_completion(). | ||
9729 | + * | ||
9730 | + * For debugging purposes, this is interruptible for now. | ||
9731 | + */ | ||
9732 | + ret = wait_for_completion_interruptible(&ts_release); | ||
9733 | + | ||
9734 | + return ret; | ||
9735 | +} | ||
9736 | + | ||
9737 | + | ||
9738 | +static long do_release_ts(lt_t start) | ||
9739 | +{ | ||
9740 | + int task_count = 0; | ||
9741 | +	unsigned long flags; | ||
9742 | + struct list_head *pos; | ||
9743 | + struct task_struct *t; | ||
9744 | + | ||
9745 | + | ||
9746 | + spin_lock_irqsave(&ts_release.wait.lock, flags); | ||
9747 | + TRACE("<<<<<< synchronous task system release >>>>>>\n"); | ||
9748 | + | ||
9749 | + sched_trace_sys_release(&start); | ||
9750 | + list_for_each(pos, &ts_release.wait.task_list) { | ||
9751 | + t = (struct task_struct*) list_entry(pos, | ||
9752 | + struct __wait_queue, | ||
9753 | + task_list)->private; | ||
9754 | + task_count++; | ||
9755 | + litmus->release_at(t, start + t->rt_param.task_params.phase); | ||
9756 | + sched_trace_task_release(t); | ||
9757 | + } | ||
9758 | + | ||
9759 | + spin_unlock_irqrestore(&ts_release.wait.lock, flags); | ||
9760 | + | ||
9761 | + complete_n(&ts_release, task_count); | ||
9762 | + | ||
9763 | + return task_count; | ||
9764 | +} | ||
9765 | + | ||
9766 | + | ||
9767 | +asmlinkage long sys_wait_for_ts_release(void) | ||
9768 | +{ | ||
9769 | + long ret = -EPERM; | ||
9770 | + struct task_struct *t = current; | ||
9771 | + | ||
9772 | + if (is_realtime(t)) | ||
9773 | + ret = do_wait_for_ts_release(); | ||
9774 | + | ||
9775 | + return ret; | ||
9776 | +} | ||
9777 | + | ||
9778 | + | ||
9779 | +asmlinkage long sys_release_ts(lt_t __user *__delay) | ||
9780 | +{ | ||
9781 | + long ret; | ||
9782 | + lt_t delay; | ||
9783 | + | ||
9784 | + /* FIXME: check capabilities... */ | ||
9785 | + | ||
9786 | +	ret = copy_from_user(&delay, __delay, sizeof(lt_t)) ? -EFAULT : 0; | ||
9787 | + if (ret == 0) | ||
9788 | + ret = do_release_ts(litmus_clock() + delay); | ||
9789 | + | ||
9790 | + return ret; | ||
9791 | +} | ||
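
A synchronous release is a barrier with a published start time: every waiting task blocks until the controller stamps one common release instant, which each task then offsets by its own phase, as do_release_ts() does above. A user-space model of that handshake (a sketch using pthreads in place of the kernel completion; compile with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t released = PTHREAD_COND_INITIALIZER;
    static unsigned long start;        /* 0 = not yet released */

    static void *task(void *arg)
    {
        unsigned long phase = (unsigned long) arg;

        pthread_mutex_lock(&lock);
        while (!start)                 /* wait_for_ts_release() */
            pthread_cond_wait(&released, &lock);
        printf("task released at %lu\n", start + phase);
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t[2];
        pthread_create(&t[0], NULL, task, (void *) 0UL);
        pthread_create(&t[1], NULL, task, (void *) 100UL);

        pthread_mutex_lock(&lock);     /* release_ts(): one common start */
        start = 1000;
        pthread_cond_broadcast(&released);
        pthread_mutex_unlock(&lock);

        pthread_join(t[0], NULL);
        pthread_join(t[1], NULL);
        return 0;
    }
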
9792 | diff --git a/litmus/trace.c b/litmus/trace.c | ||
9793 | new file mode 100644 | ||
9794 | index 0000000..8851198 | ||
9795 | --- /dev/null | ||
9796 | +++ b/litmus/trace.c | ||
9797 | @@ -0,0 +1,83 @@ | ||
9798 | +#include <linux/module.h> | ||
9799 | + | ||
9800 | +#include <litmus/ftdev.h> | ||
9801 | +#include <litmus/litmus.h> | ||
9802 | +#include <litmus/trace.h> | ||
9803 | + | ||
9804 | +/******************************************************************************/ | ||
9805 | +/* Allocation */ | ||
9806 | +/******************************************************************************/ | ||
9807 | + | ||
9808 | +static struct ftdev overhead_dev; | ||
9809 | + | ||
9810 | +#define trace_ts_buf overhead_dev.minor[0].buf | ||
9811 | + | ||
9812 | +static unsigned int ts_seq_no = 0; | ||
9813 | + | ||
9814 | +static inline void __save_timestamp(unsigned long event, uint8_t type) | ||
9815 | +{ | ||
9816 | + unsigned int seq_no; | ||
9817 | + struct timestamp *ts; | ||
9818 | + seq_no = fetch_and_inc((int *) &ts_seq_no); | ||
9819 | + if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { | ||
9820 | + ts->event = event; | ||
9821 | + ts->timestamp = ft_timestamp(); | ||
9822 | + ts->seq_no = seq_no; | ||
9823 | + ts->cpu = raw_smp_processor_id(); | ||
9824 | + ts->task_type = type; | ||
9825 | + ft_buffer_finish_write(trace_ts_buf, ts); | ||
9826 | + } | ||
9827 | +} | ||
9828 | + | ||
9829 | +feather_callback void save_timestamp(unsigned long event) | ||
9830 | +{ | ||
9831 | + __save_timestamp(event, TSK_UNKNOWN); | ||
9832 | +} | ||
9833 | + | ||
9834 | +feather_callback void save_timestamp_def(unsigned long event, unsigned long type) | ||
9835 | +{ | ||
9836 | + __save_timestamp(event, (uint8_t) type); | ||
9837 | +} | ||
9838 | + | ||
9839 | +feather_callback void save_timestamp_task(unsigned long event, unsigned long t_ptr) | ||
9840 | +{ | ||
9841 | + int rt = is_realtime((struct task_struct *) t_ptr); | ||
9842 | + __save_timestamp(event, rt ? TSK_RT : TSK_BE); | ||
9843 | +} | ||
9844 | + | ||
9845 | +/******************************************************************************/ | ||
9846 | +/* DEVICE FILE DRIVER */ | ||
9847 | +/******************************************************************************/ | ||
9848 | + | ||
9849 | +#define NO_TIMESTAMPS (2 << 19) /* about 8 MB of RAM; we may not get | ||
9850 | + * as much */ | ||
9851 | +#define FT_TRACE_MAJOR 252 | ||
9852 | + | ||
9853 | +static int alloc_timestamp_buffer(struct ftdev* ftdev, unsigned int idx) | ||
9854 | +{ | ||
9855 | + unsigned int count = NO_TIMESTAMPS; | ||
9856 | + while (count && !trace_ts_buf) { | ||
9857 | + printk("time stamp buffer: trying to allocate %u time stamps.\n", count); | ||
9858 | + ftdev->minor[idx].buf = alloc_ft_buffer(count, sizeof(struct timestamp)); | ||
9859 | + count /= 2; | ||
9860 | + } | ||
9861 | + return ftdev->minor[idx].buf ? 0 : -ENOMEM; | ||
9862 | +} | ||
9863 | + | ||
9864 | +static void free_timestamp_buffer(struct ftdev* ftdev, unsigned int idx) | ||
9865 | +{ | ||
9866 | + free_ft_buffer(ftdev->minor[idx].buf); | ||
9867 | + ftdev->minor[idx].buf = NULL; | ||
9868 | +} | ||
9869 | + | ||
9870 | +static int __init init_ft_overhead_trace(void) | ||
9871 | +{ | ||
9872 | + printk("Initializing Feather-Trace overhead tracing device.\n"); | ||
9873 | + ftdev_init(&overhead_dev, THIS_MODULE); | ||
9874 | + overhead_dev.minor_cnt = 1; /* only one buffer */ | ||
9875 | + overhead_dev.alloc = alloc_timestamp_buffer; | ||
9876 | + overhead_dev.free = free_timestamp_buffer; | ||
9877 | + return register_ftdev(&overhead_dev, "ft_trace", FT_TRACE_MAJOR); | ||
9878 | +} | ||
9879 | + | ||
9880 | +module_init(init_ft_overhead_trace); | ||
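
One consequence of __save_timestamp() worth noting: the sequence number is taken with fetch_and_inc() even when ft_buffer_start_write() fails, so a consumer can detect dropped samples as gaps in seq_no. A sketch of that offline check (the sequence values below are hypothetical):

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical sequence numbers parsed from the ft_trace stream */
        unsigned int seq[] = {0, 1, 2, 5, 6};
        unsigned int dropped = 0;
        size_t i;

        for (i = 1; i < sizeof(seq) / sizeof(seq[0]); i++)
            dropped += seq[i] - seq[i - 1] - 1;
        printf("%u timestamps dropped\n", dropped);   /* 2 (seq 3 and 4) */
        return 0;
    }
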
diff --git a/download/2008.2/qemu-config b/download/2008.2/qemu-config new file mode 100644 index 0000000..a2e2d5b --- /dev/null +++ b/download/2008.2/qemu-config | |||
@@ -0,0 +1,1419 @@ | |||
1 | # | ||
2 | # Automatically generated make config: don't edit | ||
3 | # Linux kernel version: 2.6.24 | ||
4 | # Mon Sep 22 14:34:13 2008 | ||
5 | # | ||
6 | # CONFIG_64BIT is not set | ||
7 | CONFIG_X86_32=y | ||
8 | # CONFIG_X86_64 is not set | ||
9 | CONFIG_X86=y | ||
10 | CONFIG_GENERIC_TIME=y | ||
11 | CONFIG_GENERIC_CMOS_UPDATE=y | ||
12 | CONFIG_CLOCKSOURCE_WATCHDOG=y | ||
13 | CONFIG_GENERIC_CLOCKEVENTS=y | ||
14 | CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y | ||
15 | CONFIG_LOCKDEP_SUPPORT=y | ||
16 | CONFIG_STACKTRACE_SUPPORT=y | ||
17 | CONFIG_SEMAPHORE_SLEEPERS=y | ||
18 | CONFIG_MMU=y | ||
19 | CONFIG_ZONE_DMA=y | ||
20 | CONFIG_QUICKLIST=y | ||
21 | CONFIG_GENERIC_ISA_DMA=y | ||
22 | CONFIG_GENERIC_IOMAP=y | ||
23 | CONFIG_GENERIC_BUG=y | ||
24 | CONFIG_GENERIC_HWEIGHT=y | ||
25 | CONFIG_ARCH_MAY_HAVE_PC_FDC=y | ||
26 | CONFIG_DMI=y | ||
27 | # CONFIG_RWSEM_GENERIC_SPINLOCK is not set | ||
28 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y | ||
29 | # CONFIG_ARCH_HAS_ILOG2_U32 is not set | ||
30 | # CONFIG_ARCH_HAS_ILOG2_U64 is not set | ||
31 | CONFIG_GENERIC_CALIBRATE_DELAY=y | ||
32 | # CONFIG_GENERIC_TIME_VSYSCALL is not set | ||
33 | CONFIG_ARCH_SUPPORTS_OPROFILE=y | ||
34 | # CONFIG_ZONE_DMA32 is not set | ||
35 | CONFIG_ARCH_POPULATES_NODE_MAP=y | ||
36 | # CONFIG_AUDIT_ARCH is not set | ||
37 | CONFIG_GENERIC_HARDIRQS=y | ||
38 | CONFIG_GENERIC_IRQ_PROBE=y | ||
39 | CONFIG_GENERIC_PENDING_IRQ=y | ||
40 | CONFIG_X86_SMP=y | ||
41 | CONFIG_X86_HT=y | ||
42 | CONFIG_X86_BIOS_REBOOT=y | ||
43 | CONFIG_X86_TRAMPOLINE=y | ||
44 | CONFIG_KTIME_SCALAR=y | ||
45 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | ||
46 | |||
47 | # | ||
48 | # General setup | ||
49 | # | ||
50 | CONFIG_EXPERIMENTAL=y | ||
51 | CONFIG_LOCK_KERNEL=y | ||
52 | CONFIG_INIT_ENV_ARG_LIMIT=32 | ||
53 | CONFIG_LOCALVERSION="" | ||
54 | # CONFIG_LOCALVERSION_AUTO is not set | ||
55 | CONFIG_SWAP=y | ||
56 | CONFIG_SYSVIPC=y | ||
57 | CONFIG_SYSVIPC_SYSCTL=y | ||
58 | CONFIG_POSIX_MQUEUE=y | ||
59 | # CONFIG_BSD_PROCESS_ACCT is not set | ||
60 | # CONFIG_TASKSTATS is not set | ||
61 | CONFIG_USER_NS=y | ||
62 | CONFIG_PID_NS=y | ||
63 | # CONFIG_AUDIT is not set | ||
64 | CONFIG_IKCONFIG=y | ||
65 | CONFIG_IKCONFIG_PROC=y | ||
66 | CONFIG_LOG_BUF_SHIFT=17 | ||
67 | CONFIG_CGROUPS=y | ||
68 | CONFIG_CGROUP_DEBUG=y | ||
69 | CONFIG_CGROUP_NS=y | ||
70 | # CONFIG_CPUSETS is not set | ||
71 | # CONFIG_FAIR_GROUP_SCHED is not set | ||
72 | # CONFIG_CGROUP_CPUACCT is not set | ||
73 | CONFIG_SYSFS_DEPRECATED=y | ||
74 | CONFIG_RELAY=y | ||
75 | CONFIG_BLK_DEV_INITRD=y | ||
76 | CONFIG_INITRAMFS_SOURCE="" | ||
77 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | ||
78 | CONFIG_SYSCTL=y | ||
79 | # CONFIG_EMBEDDED is not set | ||
80 | CONFIG_UID16=y | ||
81 | CONFIG_SYSCTL_SYSCALL=y | ||
82 | CONFIG_KALLSYMS=y | ||
83 | CONFIG_KALLSYMS_ALL=y | ||
84 | CONFIG_KALLSYMS_EXTRA_PASS=y | ||
85 | CONFIG_HOTPLUG=y | ||
86 | CONFIG_PRINTK=y | ||
87 | CONFIG_BUG=y | ||
88 | CONFIG_ELF_CORE=y | ||
89 | CONFIG_BASE_FULL=y | ||
90 | CONFIG_FUTEX=y | ||
91 | CONFIG_ANON_INODES=y | ||
92 | CONFIG_EPOLL=y | ||
93 | CONFIG_SIGNALFD=y | ||
94 | CONFIG_EVENTFD=y | ||
95 | CONFIG_SHMEM=y | ||
96 | CONFIG_VM_EVENT_COUNTERS=y | ||
97 | CONFIG_SLAB=y | ||
98 | # CONFIG_SLUB is not set | ||
99 | # CONFIG_SLOB is not set | ||
100 | CONFIG_SLABINFO=y | ||
101 | CONFIG_RT_MUTEXES=y | ||
102 | # CONFIG_TINY_SHMEM is not set | ||
103 | CONFIG_BASE_SMALL=0 | ||
104 | # CONFIG_MODULES is not set | ||
105 | CONFIG_BLOCK=y | ||
106 | CONFIG_LBD=y | ||
107 | CONFIG_BLK_DEV_IO_TRACE=y | ||
108 | CONFIG_LSF=y | ||
109 | # CONFIG_BLK_DEV_BSG is not set | ||
110 | |||
111 | # | ||
112 | # IO Schedulers | ||
113 | # | ||
114 | CONFIG_IOSCHED_NOOP=y | ||
115 | # CONFIG_IOSCHED_AS is not set | ||
116 | # CONFIG_IOSCHED_DEADLINE is not set | ||
117 | # CONFIG_IOSCHED_CFQ is not set | ||
118 | # CONFIG_DEFAULT_AS is not set | ||
119 | # CONFIG_DEFAULT_DEADLINE is not set | ||
120 | # CONFIG_DEFAULT_CFQ is not set | ||
121 | CONFIG_DEFAULT_NOOP=y | ||
122 | CONFIG_DEFAULT_IOSCHED="noop" | ||
123 | |||
124 | # | ||
125 | # Processor type and features | ||
126 | # | ||
127 | CONFIG_TICK_ONESHOT=y | ||
128 | # CONFIG_NO_HZ is not set | ||
129 | CONFIG_HIGH_RES_TIMERS=y | ||
130 | CONFIG_GENERIC_CLOCKEVENTS_BUILD=y | ||
131 | CONFIG_SMP=y | ||
132 | CONFIG_X86_PC=y | ||
133 | # CONFIG_X86_ELAN is not set | ||
134 | # CONFIG_X86_VOYAGER is not set | ||
135 | # CONFIG_X86_NUMAQ is not set | ||
136 | # CONFIG_X86_SUMMIT is not set | ||
137 | # CONFIG_X86_BIGSMP is not set | ||
138 | # CONFIG_X86_VISWS is not set | ||
139 | # CONFIG_X86_GENERICARCH is not set | ||
140 | # CONFIG_X86_ES7000 is not set | ||
141 | # CONFIG_X86_VSMP is not set | ||
142 | CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y | ||
143 | # CONFIG_PARAVIRT_GUEST is not set | ||
144 | # CONFIG_M386 is not set | ||
145 | # CONFIG_M486 is not set | ||
146 | # CONFIG_M586 is not set | ||
147 | # CONFIG_M586TSC is not set | ||
148 | # CONFIG_M586MMX is not set | ||
149 | # CONFIG_M686 is not set | ||
150 | # CONFIG_MPENTIUMII is not set | ||
151 | # CONFIG_MPENTIUMIII is not set | ||
152 | # CONFIG_MPENTIUMM is not set | ||
153 | CONFIG_MPENTIUM4=y | ||
154 | # CONFIG_MK6 is not set | ||
155 | # CONFIG_MK7 is not set | ||
156 | # CONFIG_MK8 is not set | ||
157 | # CONFIG_MCRUSOE is not set | ||
158 | # CONFIG_MEFFICEON is not set | ||
159 | # CONFIG_MWINCHIPC6 is not set | ||
160 | # CONFIG_MWINCHIP2 is not set | ||
161 | # CONFIG_MWINCHIP3D is not set | ||
162 | # CONFIG_MGEODEGX1 is not set | ||
163 | # CONFIG_MGEODE_LX is not set | ||
164 | # CONFIG_MCYRIXIII is not set | ||
165 | # CONFIG_MVIAC3_2 is not set | ||
166 | # CONFIG_MVIAC7 is not set | ||
167 | # CONFIG_MPSC is not set | ||
168 | # CONFIG_MCORE2 is not set | ||
169 | # CONFIG_GENERIC_CPU is not set | ||
170 | # CONFIG_X86_GENERIC is not set | ||
171 | CONFIG_X86_CMPXCHG=y | ||
172 | CONFIG_X86_L1_CACHE_SHIFT=7 | ||
173 | CONFIG_X86_XADD=y | ||
174 | CONFIG_X86_WP_WORKS_OK=y | ||
175 | CONFIG_X86_INVLPG=y | ||
176 | CONFIG_X86_BSWAP=y | ||
177 | CONFIG_X86_POPAD_OK=y | ||
178 | CONFIG_X86_GOOD_APIC=y | ||
179 | CONFIG_X86_INTEL_USERCOPY=y | ||
180 | CONFIG_X86_USE_PPRO_CHECKSUM=y | ||
181 | CONFIG_X86_TSC=y | ||
182 | CONFIG_X86_CMOV=y | ||
183 | CONFIG_X86_MINIMUM_CPU_FAMILY=4 | ||
184 | # CONFIG_HPET_TIMER is not set | ||
185 | CONFIG_NR_CPUS=4 | ||
186 | # CONFIG_SCHED_SMT is not set | ||
187 | # CONFIG_SCHED_MC is not set | ||
188 | # CONFIG_PREEMPT_NONE is not set | ||
189 | # CONFIG_PREEMPT_VOLUNTARY is not set | ||
190 | CONFIG_PREEMPT=y | ||
191 | # CONFIG_PREEMPT_BKL is not set | ||
192 | CONFIG_X86_LOCAL_APIC=y | ||
193 | CONFIG_X86_IO_APIC=y | ||
194 | CONFIG_X86_MCE=y | ||
195 | CONFIG_X86_MCE_NONFATAL=y | ||
196 | CONFIG_X86_MCE_P4THERMAL=y | ||
197 | CONFIG_VM86=y | ||
198 | # CONFIG_TOSHIBA is not set | ||
199 | # CONFIG_I8K is not set | ||
200 | # CONFIG_X86_REBOOTFIXUPS is not set | ||
201 | # CONFIG_MICROCODE is not set | ||
202 | CONFIG_X86_MSR=y | ||
203 | CONFIG_X86_CPUID=y | ||
204 | # CONFIG_NOHIGHMEM is not set | ||
205 | CONFIG_HIGHMEM4G=y | ||
206 | # CONFIG_HIGHMEM64G is not set | ||
207 | CONFIG_PAGE_OFFSET=0xC0000000 | ||
208 | CONFIG_HIGHMEM=y | ||
209 | CONFIG_ARCH_FLATMEM_ENABLE=y | ||
210 | CONFIG_ARCH_SPARSEMEM_ENABLE=y | ||
211 | CONFIG_ARCH_SELECT_MEMORY_MODEL=y | ||
212 | CONFIG_SELECT_MEMORY_MODEL=y | ||
213 | CONFIG_FLATMEM_MANUAL=y | ||
214 | # CONFIG_DISCONTIGMEM_MANUAL is not set | ||
215 | # CONFIG_SPARSEMEM_MANUAL is not set | ||
216 | CONFIG_FLATMEM=y | ||
217 | CONFIG_FLAT_NODE_MEM_MAP=y | ||
218 | CONFIG_SPARSEMEM_STATIC=y | ||
219 | # CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set | ||
220 | CONFIG_SPLIT_PTLOCK_CPUS=4 | ||
221 | # CONFIG_RESOURCES_64BIT is not set | ||
222 | CONFIG_ZONE_DMA_FLAG=1 | ||
223 | CONFIG_BOUNCE=y | ||
224 | CONFIG_NR_QUICK=1 | ||
225 | CONFIG_VIRT_TO_BUS=y | ||
226 | # CONFIG_HIGHPTE is not set | ||
227 | # CONFIG_MATH_EMULATION is not set | ||
228 | CONFIG_MTRR=y | ||
229 | CONFIG_IRQBALANCE=y | ||
230 | CONFIG_SECCOMP=y | ||
231 | # CONFIG_HZ_100 is not set | ||
232 | # CONFIG_HZ_250 is not set | ||
233 | # CONFIG_HZ_300 is not set | ||
234 | CONFIG_HZ_1000=y | ||
235 | CONFIG_HZ=1000 | ||
236 | # CONFIG_KEXEC is not set | ||
237 | # CONFIG_CRASH_DUMP is not set | ||
238 | CONFIG_PHYSICAL_START=0x100000 | ||
239 | # CONFIG_RELOCATABLE is not set | ||
240 | CONFIG_PHYSICAL_ALIGN=0x100000 | ||
241 | # CONFIG_HOTPLUG_CPU is not set | ||
242 | CONFIG_COMPAT_VDSO=y | ||
243 | CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y | ||
244 | |||
245 | # | ||
246 | # Power management options | ||
247 | # | ||
248 | # CONFIG_PM is not set | ||
249 | CONFIG_SUSPEND_SMP_POSSIBLE=y | ||
250 | CONFIG_HIBERNATION_SMP_POSSIBLE=y | ||
251 | |||
252 | # | ||
253 | # CPU Frequency scaling | ||
254 | # | ||
255 | # CONFIG_CPU_FREQ is not set | ||
256 | # CONFIG_CPU_IDLE is not set | ||
257 | |||
258 | # | ||
259 | # Bus options (PCI etc.) | ||
260 | # | ||
261 | CONFIG_PCI=y | ||
262 | # CONFIG_PCI_GOBIOS is not set | ||
263 | # CONFIG_PCI_GOMMCONFIG is not set | ||
264 | # CONFIG_PCI_GODIRECT is not set | ||
265 | CONFIG_PCI_GOANY=y | ||
266 | CONFIG_PCI_BIOS=y | ||
267 | CONFIG_PCI_DIRECT=y | ||
268 | CONFIG_PCI_DOMAINS=y | ||
269 | # CONFIG_PCIEPORTBUS is not set | ||
270 | CONFIG_ARCH_SUPPORTS_MSI=y | ||
271 | # CONFIG_PCI_MSI is not set | ||
272 | # CONFIG_PCI_LEGACY is not set | ||
273 | # CONFIG_PCI_DEBUG is not set | ||
274 | # CONFIG_HT_IRQ is not set | ||
275 | CONFIG_ISA_DMA_API=y | ||
276 | CONFIG_ISA=y | ||
277 | # CONFIG_EISA is not set | ||
278 | # CONFIG_MCA is not set | ||
279 | # CONFIG_SCx200 is not set | ||
280 | CONFIG_K8_NB=y | ||
281 | # CONFIG_PCCARD is not set | ||
282 | # CONFIG_HOTPLUG_PCI is not set | ||
283 | |||
284 | # | ||
285 | # Executable file formats / Emulations | ||
286 | # | ||
287 | CONFIG_BINFMT_ELF=y | ||
288 | CONFIG_BINFMT_AOUT=y | ||
289 | CONFIG_BINFMT_MISC=y | ||
290 | |||
291 | # | ||
292 | # Networking | ||
293 | # | ||
294 | CONFIG_NET=y | ||
295 | |||
296 | # | ||
297 | # Networking options | ||
298 | # | ||
299 | CONFIG_PACKET=y | ||
300 | CONFIG_PACKET_MMAP=y | ||
301 | CONFIG_UNIX=y | ||
302 | # CONFIG_NET_KEY is not set | ||
303 | CONFIG_INET=y | ||
304 | # CONFIG_IP_MULTICAST is not set | ||
305 | # CONFIG_IP_ADVANCED_ROUTER is not set | ||
306 | CONFIG_IP_FIB_HASH=y | ||
307 | # CONFIG_IP_PNP is not set | ||
308 | # CONFIG_NET_IPIP is not set | ||
309 | # CONFIG_NET_IPGRE is not set | ||
310 | # CONFIG_ARPD is not set | ||
311 | # CONFIG_SYN_COOKIES is not set | ||
312 | # CONFIG_INET_AH is not set | ||
313 | # CONFIG_INET_ESP is not set | ||
314 | # CONFIG_INET_IPCOMP is not set | ||
315 | # CONFIG_INET_XFRM_TUNNEL is not set | ||
316 | # CONFIG_INET_TUNNEL is not set | ||
317 | # CONFIG_INET_XFRM_MODE_TRANSPORT is not set | ||
318 | # CONFIG_INET_XFRM_MODE_TUNNEL is not set | ||
319 | # CONFIG_INET_XFRM_MODE_BEET is not set | ||
320 | # CONFIG_INET_LRO is not set | ||
321 | # CONFIG_INET_DIAG is not set | ||
322 | # CONFIG_TCP_CONG_ADVANCED is not set | ||
323 | CONFIG_TCP_CONG_CUBIC=y | ||
324 | CONFIG_DEFAULT_TCP_CONG="cubic" | ||
325 | # CONFIG_TCP_MD5SIG is not set | ||
326 | # CONFIG_IPV6 is not set | ||
327 | # CONFIG_INET6_XFRM_TUNNEL is not set | ||
328 | # CONFIG_INET6_TUNNEL is not set | ||
329 | # CONFIG_NETWORK_SECMARK is not set | ||
330 | # CONFIG_NETFILTER is not set | ||
331 | # CONFIG_IP_DCCP is not set | ||
332 | # CONFIG_IP_SCTP is not set | ||
333 | # CONFIG_TIPC is not set | ||
334 | # CONFIG_ATM is not set | ||
335 | # CONFIG_BRIDGE is not set | ||
336 | # CONFIG_VLAN_8021Q is not set | ||
337 | # CONFIG_DECNET is not set | ||
338 | # CONFIG_LLC2 is not set | ||
339 | # CONFIG_IPX is not set | ||
340 | # CONFIG_ATALK is not set | ||
341 | # CONFIG_X25 is not set | ||
342 | # CONFIG_LAPB is not set | ||
343 | # CONFIG_ECONET is not set | ||
344 | # CONFIG_WAN_ROUTER is not set | ||
345 | # CONFIG_NET_SCHED is not set | ||
346 | |||
347 | # | ||
348 | # Network testing | ||
349 | # | ||
350 | # CONFIG_NET_PKTGEN is not set | ||
351 | # CONFIG_HAMRADIO is not set | ||
352 | # CONFIG_IRDA is not set | ||
353 | # CONFIG_BT is not set | ||
354 | # CONFIG_AF_RXRPC is not set | ||
355 | |||
356 | # | ||
357 | # Wireless | ||
358 | # | ||
359 | # CONFIG_CFG80211 is not set | ||
360 | # CONFIG_WIRELESS_EXT is not set | ||
361 | # CONFIG_MAC80211 is not set | ||
362 | # CONFIG_IEEE80211 is not set | ||
363 | # CONFIG_RFKILL is not set | ||
364 | # CONFIG_NET_9P is not set | ||
365 | |||
366 | # | ||
367 | # Device Drivers | ||
368 | # | ||
369 | |||
370 | # | ||
371 | # Generic Driver Options | ||
372 | # | ||
373 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
374 | CONFIG_STANDALONE=y | ||
375 | CONFIG_PREVENT_FIRMWARE_BUILD=y | ||
376 | CONFIG_FW_LOADER=y | ||
377 | # CONFIG_DEBUG_DRIVER is not set | ||
378 | # CONFIG_DEBUG_DEVRES is not set | ||
379 | # CONFIG_SYS_HYPERVISOR is not set | ||
380 | CONFIG_CONNECTOR=y | ||
381 | CONFIG_PROC_EVENTS=y | ||
382 | # CONFIG_MTD is not set | ||
383 | # CONFIG_PARPORT is not set | ||
384 | CONFIG_PNP=y | ||
385 | # CONFIG_PNP_DEBUG is not set | ||
386 | |||
387 | # | ||
388 | # Protocols | ||
389 | # | ||
390 | CONFIG_ISAPNP=y | ||
391 | # CONFIG_PNPBIOS is not set | ||
392 | # CONFIG_PNPACPI is not set | ||
393 | # CONFIG_BLK_DEV is not set | ||
394 | # CONFIG_MISC_DEVICES is not set | ||
395 | CONFIG_IDE=y | ||
396 | CONFIG_BLK_DEV_IDE=y | ||
397 | |||
398 | # | ||
399 | # Please see Documentation/ide.txt for help/info on IDE drives | ||
400 | # | ||
401 | # CONFIG_BLK_DEV_IDE_SATA is not set | ||
402 | # CONFIG_BLK_DEV_HD_IDE is not set | ||
403 | CONFIG_BLK_DEV_IDEDISK=y | ||
404 | CONFIG_IDEDISK_MULTI_MODE=y | ||
405 | # CONFIG_BLK_DEV_IDECD is not set | ||
406 | # CONFIG_BLK_DEV_IDETAPE is not set | ||
407 | CONFIG_BLK_DEV_IDEFLOPPY=y | ||
408 | # CONFIG_BLK_DEV_IDESCSI is not set | ||
409 | # CONFIG_IDE_TASK_IOCTL is not set | ||
410 | CONFIG_IDE_PROC_FS=y | ||
411 | |||
412 | # | ||
413 | # IDE chipset support/bugfixes | ||
414 | # | ||
415 | CONFIG_IDE_GENERIC=y | ||
416 | # CONFIG_BLK_DEV_PLATFORM is not set | ||
417 | # CONFIG_BLK_DEV_CMD640 is not set | ||
418 | # CONFIG_BLK_DEV_IDEPNP is not set | ||
419 | |||
420 | # | ||
421 | # PCI IDE chipsets support | ||
422 | # | ||
423 | CONFIG_BLK_DEV_IDEPCI=y | ||
424 | CONFIG_IDEPCI_SHARE_IRQ=y | ||
425 | CONFIG_IDEPCI_PCIBUS_ORDER=y | ||
426 | # CONFIG_BLK_DEV_OFFBOARD is not set | ||
427 | CONFIG_BLK_DEV_GENERIC=y | ||
428 | # CONFIG_BLK_DEV_OPTI621 is not set | ||
429 | CONFIG_BLK_DEV_RZ1000=y | ||
430 | CONFIG_BLK_DEV_IDEDMA_PCI=y | ||
431 | CONFIG_BLK_DEV_AEC62XX=y | ||
432 | CONFIG_BLK_DEV_ALI15X3=y | ||
433 | # CONFIG_WDC_ALI15X3 is not set | ||
434 | CONFIG_BLK_DEV_AMD74XX=y | ||
435 | CONFIG_BLK_DEV_ATIIXP=y | ||
436 | CONFIG_BLK_DEV_CMD64X=y | ||
437 | CONFIG_BLK_DEV_TRIFLEX=y | ||
438 | CONFIG_BLK_DEV_CY82C693=y | ||
439 | # CONFIG_BLK_DEV_CS5520 is not set | ||
440 | CONFIG_BLK_DEV_CS5530=y | ||
441 | # CONFIG_BLK_DEV_CS5535 is not set | ||
442 | CONFIG_BLK_DEV_HPT34X=y | ||
443 | # CONFIG_HPT34X_AUTODMA is not set | ||
444 | CONFIG_BLK_DEV_HPT366=y | ||
445 | CONFIG_BLK_DEV_JMICRON=y | ||
446 | CONFIG_BLK_DEV_SC1200=y | ||
447 | CONFIG_BLK_DEV_PIIX=y | ||
448 | # CONFIG_BLK_DEV_IT8213 is not set | ||
449 | CONFIG_BLK_DEV_IT821X=y | ||
450 | CONFIG_BLK_DEV_NS87415=y | ||
451 | CONFIG_BLK_DEV_PDC202XX_OLD=y | ||
452 | CONFIG_PDC202XX_BURST=y | ||
453 | CONFIG_BLK_DEV_PDC202XX_NEW=y | ||
454 | CONFIG_BLK_DEV_SVWKS=y | ||
455 | CONFIG_BLK_DEV_SIIMAGE=y | ||
456 | CONFIG_BLK_DEV_SIS5513=y | ||
457 | CONFIG_BLK_DEV_SLC90E66=y | ||
458 | CONFIG_BLK_DEV_TRM290=y | ||
459 | CONFIG_BLK_DEV_VIA82CXXX=y | ||
460 | # CONFIG_BLK_DEV_TC86C001 is not set | ||
461 | # CONFIG_IDE_ARM is not set | ||
462 | |||
463 | # | ||
464 | # Other IDE chipsets support | ||
465 | # | ||
466 | |||
467 | # | ||
468 | # Note: most of these also require special kernel boot parameters | ||
469 | # | ||
470 | # CONFIG_BLK_DEV_4DRIVES is not set | ||
471 | # CONFIG_BLK_DEV_ALI14XX is not set | ||
472 | # CONFIG_BLK_DEV_DTC2278 is not set | ||
473 | # CONFIG_BLK_DEV_HT6560B is not set | ||
474 | # CONFIG_BLK_DEV_QD65XX is not set | ||
475 | # CONFIG_BLK_DEV_UMC8672 is not set | ||
476 | CONFIG_BLK_DEV_IDEDMA=y | ||
477 | CONFIG_IDE_ARCH_OBSOLETE_INIT=y | ||
478 | # CONFIG_BLK_DEV_HD is not set | ||
479 | |||
480 | # | ||
481 | # SCSI device support | ||
482 | # | ||
483 | # CONFIG_RAID_ATTRS is not set | ||
484 | CONFIG_SCSI=y | ||
485 | CONFIG_SCSI_DMA=y | ||
486 | # CONFIG_SCSI_TGT is not set | ||
487 | # CONFIG_SCSI_NETLINK is not set | ||
488 | # CONFIG_SCSI_PROC_FS is not set | ||
489 | |||
490 | # | ||
491 | # SCSI support type (disk, tape, CD-ROM) | ||
492 | # | ||
493 | # CONFIG_BLK_DEV_SD is not set | ||
494 | # CONFIG_CHR_DEV_ST is not set | ||
495 | # CONFIG_CHR_DEV_OSST is not set | ||
496 | # CONFIG_BLK_DEV_SR is not set | ||
497 | # CONFIG_CHR_DEV_SG is not set | ||
498 | # CONFIG_CHR_DEV_SCH is not set | ||
499 | |||
500 | # | ||
501 | # Some SCSI devices (e.g. CD jukebox) support multiple LUNs | ||
502 | # | ||
503 | # CONFIG_SCSI_MULTI_LUN is not set | ||
504 | # CONFIG_SCSI_CONSTANTS is not set | ||
505 | # CONFIG_SCSI_LOGGING is not set | ||
506 | # CONFIG_SCSI_SCAN_ASYNC is not set | ||
507 | |||
508 | # | ||
509 | # SCSI Transports | ||
510 | # | ||
511 | # CONFIG_SCSI_SPI_ATTRS is not set | ||
512 | # CONFIG_SCSI_FC_ATTRS is not set | ||
513 | # CONFIG_SCSI_ISCSI_ATTRS is not set | ||
514 | # CONFIG_SCSI_SAS_LIBSAS is not set | ||
515 | # CONFIG_SCSI_SRP_ATTRS is not set | ||
516 | # CONFIG_SCSI_LOWLEVEL is not set | ||
517 | CONFIG_ATA=y | ||
518 | # CONFIG_ATA_NONSTANDARD is not set | ||
519 | # CONFIG_SATA_AHCI is not set | ||
520 | # CONFIG_SATA_SVW is not set | ||
521 | # CONFIG_ATA_PIIX is not set | ||
522 | # CONFIG_SATA_MV is not set | ||
523 | # CONFIG_SATA_NV is not set | ||
524 | # CONFIG_PDC_ADMA is not set | ||
525 | # CONFIG_SATA_QSTOR is not set | ||
526 | # CONFIG_SATA_PROMISE is not set | ||
527 | # CONFIG_SATA_SX4 is not set | ||
528 | # CONFIG_SATA_SIL is not set | ||
529 | # CONFIG_SATA_SIL24 is not set | ||
530 | # CONFIG_SATA_SIS is not set | ||
531 | # CONFIG_SATA_ULI is not set | ||
532 | # CONFIG_SATA_VIA is not set | ||
533 | # CONFIG_SATA_VITESSE is not set | ||
534 | # CONFIG_SATA_INIC162X is not set | ||
535 | # CONFIG_PATA_ALI is not set | ||
536 | # CONFIG_PATA_AMD is not set | ||
537 | # CONFIG_PATA_ARTOP is not set | ||
538 | # CONFIG_PATA_ATIIXP is not set | ||
539 | # CONFIG_PATA_CMD640_PCI is not set | ||
540 | # CONFIG_PATA_CMD64X is not set | ||
541 | # CONFIG_PATA_CS5520 is not set | ||
542 | # CONFIG_PATA_CS5530 is not set | ||
543 | # CONFIG_PATA_CS5535 is not set | ||
544 | # CONFIG_PATA_CS5536 is not set | ||
545 | # CONFIG_PATA_CYPRESS is not set | ||
546 | # CONFIG_PATA_EFAR is not set | ||
547 | # CONFIG_ATA_GENERIC is not set | ||
548 | # CONFIG_PATA_HPT366 is not set | ||
549 | # CONFIG_PATA_HPT37X is not set | ||
550 | # CONFIG_PATA_HPT3X2N is not set | ||
551 | # CONFIG_PATA_HPT3X3 is not set | ||
552 | # CONFIG_PATA_ISAPNP is not set | ||
553 | # CONFIG_PATA_IT821X is not set | ||
554 | # CONFIG_PATA_IT8213 is not set | ||
555 | # CONFIG_PATA_JMICRON is not set | ||
556 | # CONFIG_PATA_LEGACY is not set | ||
557 | # CONFIG_PATA_TRIFLEX is not set | ||
558 | # CONFIG_PATA_MARVELL is not set | ||
559 | # CONFIG_PATA_MPIIX is not set | ||
560 | CONFIG_PATA_OLDPIIX=y | ||
561 | # CONFIG_PATA_NETCELL is not set | ||
562 | # CONFIG_PATA_NS87410 is not set | ||
563 | # CONFIG_PATA_NS87415 is not set | ||
564 | # CONFIG_PATA_OPTI is not set | ||
565 | # CONFIG_PATA_OPTIDMA is not set | ||
566 | # CONFIG_PATA_PDC_OLD is not set | ||
567 | # CONFIG_PATA_QDI is not set | ||
568 | # CONFIG_PATA_RADISYS is not set | ||
569 | # CONFIG_PATA_RZ1000 is not set | ||
570 | # CONFIG_PATA_SC1200 is not set | ||
571 | # CONFIG_PATA_SERVERWORKS is not set | ||
572 | # CONFIG_PATA_PDC2027X is not set | ||
573 | # CONFIG_PATA_SIL680 is not set | ||
574 | # CONFIG_PATA_SIS is not set | ||
575 | # CONFIG_PATA_VIA is not set | ||
576 | # CONFIG_PATA_WINBOND is not set | ||
577 | # CONFIG_PATA_WINBOND_VLB is not set | ||
578 | # CONFIG_MD is not set | ||
579 | # CONFIG_FUSION is not set | ||
580 | |||
581 | # | ||
582 | # IEEE 1394 (FireWire) support | ||
583 | # | ||
584 | # CONFIG_FIREWIRE is not set | ||
585 | # CONFIG_IEEE1394 is not set | ||
586 | # CONFIG_I2O is not set | ||
587 | # CONFIG_MACINTOSH_DRIVERS is not set | ||
588 | CONFIG_NETDEVICES=y | ||
589 | # CONFIG_NETDEVICES_MULTIQUEUE is not set | ||
590 | CONFIG_DUMMY=y | ||
591 | CONFIG_BONDING=y | ||
592 | # CONFIG_MACVLAN is not set | ||
593 | CONFIG_EQUALIZER=y | ||
594 | CONFIG_TUN=y | ||
595 | # CONFIG_VETH is not set | ||
596 | CONFIG_NET_SB1000=y | ||
597 | # CONFIG_ARCNET is not set | ||
598 | # CONFIG_PHYLIB is not set | ||
599 | CONFIG_NET_ETHERNET=y | ||
600 | CONFIG_MII=y | ||
601 | CONFIG_HAPPYMEAL=y | ||
602 | CONFIG_SUNGEM=y | ||
603 | CONFIG_CASSINI=y | ||
604 | CONFIG_NET_VENDOR_3COM=y | ||
605 | CONFIG_EL1=y | ||
606 | CONFIG_EL2=y | ||
607 | CONFIG_ELPLUS=y | ||
608 | CONFIG_EL16=y | ||
609 | CONFIG_EL3=y | ||
610 | CONFIG_3C515=y | ||
611 | CONFIG_VORTEX=y | ||
612 | CONFIG_TYPHOON=y | ||
613 | CONFIG_LANCE=y | ||
614 | CONFIG_NET_VENDOR_SMC=y | ||
615 | CONFIG_WD80x3=y | ||
616 | CONFIG_ULTRA=y | ||
617 | CONFIG_SMC9194=y | ||
618 | CONFIG_NET_VENDOR_RACAL=y | ||
619 | CONFIG_NI52=y | ||
620 | CONFIG_NI65=y | ||
621 | CONFIG_NET_TULIP=y | ||
622 | CONFIG_DE2104X=y | ||
623 | CONFIG_TULIP=y | ||
624 | # CONFIG_TULIP_MWI is not set | ||
625 | # CONFIG_TULIP_MMIO is not set | ||
626 | # CONFIG_TULIP_NAPI is not set | ||
627 | CONFIG_DE4X5=y | ||
628 | CONFIG_WINBOND_840=y | ||
629 | CONFIG_DM9102=y | ||
630 | CONFIG_ULI526X=y | ||
631 | CONFIG_AT1700=y | ||
632 | CONFIG_DEPCA=y | ||
633 | CONFIG_HP100=y | ||
634 | CONFIG_NET_ISA=y | ||
635 | CONFIG_E2100=y | ||
636 | CONFIG_EWRK3=y | ||
637 | CONFIG_EEXPRESS=y | ||
638 | CONFIG_EEXPRESS_PRO=y | ||
639 | CONFIG_HPLAN_PLUS=y | ||
640 | CONFIG_HPLAN=y | ||
641 | CONFIG_LP486E=y | ||
642 | CONFIG_ETH16I=y | ||
643 | CONFIG_NE2000=y | ||
644 | CONFIG_ZNET=y | ||
645 | CONFIG_SEEQ8005=y | ||
646 | # CONFIG_IBM_NEW_EMAC_ZMII is not set | ||
647 | # CONFIG_IBM_NEW_EMAC_RGMII is not set | ||
648 | # CONFIG_IBM_NEW_EMAC_TAH is not set | ||
649 | # CONFIG_IBM_NEW_EMAC_EMAC4 is not set | ||
650 | CONFIG_NET_PCI=y | ||
651 | CONFIG_PCNET32=y | ||
652 | # CONFIG_PCNET32_NAPI is not set | ||
653 | CONFIG_AMD8111_ETH=y | ||
654 | # CONFIG_AMD8111E_NAPI is not set | ||
655 | CONFIG_ADAPTEC_STARFIRE=y | ||
656 | # CONFIG_ADAPTEC_STARFIRE_NAPI is not set | ||
657 | CONFIG_AC3200=y | ||
658 | CONFIG_APRICOT=y | ||
659 | CONFIG_B44=y | ||
660 | CONFIG_B44_PCI_AUTOSELECT=y | ||
661 | CONFIG_B44_PCICORE_AUTOSELECT=y | ||
662 | CONFIG_B44_PCI=y | ||
663 | CONFIG_FORCEDETH=y | ||
664 | # CONFIG_FORCEDETH_NAPI is not set | ||
665 | CONFIG_CS89x0=y | ||
666 | CONFIG_EEPRO100=y | ||
667 | CONFIG_E100=y | ||
668 | CONFIG_FEALNX=y | ||
669 | CONFIG_NATSEMI=y | ||
670 | CONFIG_NE2K_PCI=y | ||
671 | CONFIG_8139CP=y | ||
672 | CONFIG_8139TOO=y | ||
673 | # CONFIG_8139TOO_PIO is not set | ||
674 | CONFIG_8139TOO_TUNE_TWISTER=y | ||
675 | CONFIG_8139TOO_8129=y | ||
676 | # CONFIG_8139_OLD_RX_RESET is not set | ||
677 | CONFIG_SIS900=y | ||
678 | CONFIG_EPIC100=y | ||
679 | CONFIG_SUNDANCE=y | ||
680 | # CONFIG_SUNDANCE_MMIO is not set | ||
681 | CONFIG_TLAN=y | ||
682 | CONFIG_VIA_RHINE=y | ||
683 | # CONFIG_VIA_RHINE_MMIO is not set | ||
684 | # CONFIG_VIA_RHINE_NAPI is not set | ||
685 | # CONFIG_SC92031 is not set | ||
686 | CONFIG_NETDEV_1000=y | ||
687 | # CONFIG_ACENIC is not set | ||
688 | # CONFIG_DL2K is not set | ||
689 | # CONFIG_E1000 is not set | ||
690 | # CONFIG_E1000E is not set | ||
691 | # CONFIG_IP1000 is not set | ||
692 | # CONFIG_NS83820 is not set | ||
693 | # CONFIG_HAMACHI is not set | ||
694 | # CONFIG_YELLOWFIN is not set | ||
695 | # CONFIG_R8169 is not set | ||
696 | # CONFIG_SIS190 is not set | ||
697 | # CONFIG_SKGE is not set | ||
698 | # CONFIG_SKY2 is not set | ||
699 | # CONFIG_SK98LIN is not set | ||
700 | # CONFIG_VIA_VELOCITY is not set | ||
701 | CONFIG_TIGON3=y | ||
702 | # CONFIG_BNX2 is not set | ||
703 | # CONFIG_QLA3XXX is not set | ||
704 | # CONFIG_ATL1 is not set | ||
705 | # CONFIG_NETDEV_10000 is not set | ||
706 | # CONFIG_TR is not set | ||
707 | |||
708 | # | ||
709 | # Wireless LAN | ||
710 | # | ||
711 | # CONFIG_WLAN_PRE80211 is not set | ||
712 | # CONFIG_WLAN_80211 is not set | ||
713 | # CONFIG_WAN is not set | ||
714 | # CONFIG_FDDI is not set | ||
715 | # CONFIG_HIPPI is not set | ||
716 | CONFIG_PPP=y | ||
717 | CONFIG_PPP_MULTILINK=y | ||
718 | CONFIG_PPP_FILTER=y | ||
719 | CONFIG_PPP_ASYNC=y | ||
720 | CONFIG_PPP_SYNC_TTY=y | ||
721 | CONFIG_PPP_DEFLATE=y | ||
722 | CONFIG_PPP_BSDCOMP=y | ||
723 | CONFIG_PPP_MPPE=y | ||
724 | CONFIG_PPPOE=y | ||
725 | # CONFIG_PPPOL2TP is not set | ||
726 | CONFIG_SLIP=y | ||
727 | # CONFIG_SLIP_COMPRESSED is not set | ||
728 | CONFIG_SLHC=y | ||
729 | # CONFIG_SLIP_SMART is not set | ||
730 | # CONFIG_SLIP_MODE_SLIP6 is not set | ||
731 | # CONFIG_NET_FC is not set | ||
732 | # CONFIG_SHAPER is not set | ||
733 | CONFIG_NETCONSOLE=y | ||
734 | # CONFIG_NETCONSOLE_DYNAMIC is not set | ||
735 | CONFIG_NETPOLL=y | ||
736 | # CONFIG_NETPOLL_TRAP is not set | ||
737 | CONFIG_NET_POLL_CONTROLLER=y | ||
738 | # CONFIG_ISDN is not set | ||
739 | # CONFIG_PHONE is not set | ||
740 | |||
741 | # | ||
742 | # Input device support | ||
743 | # | ||
744 | CONFIG_INPUT=y | ||
745 | CONFIG_INPUT_FF_MEMLESS=y | ||
746 | CONFIG_INPUT_POLLDEV=y | ||
747 | |||
748 | # | ||
749 | # Userland interfaces | ||
750 | # | ||
751 | CONFIG_INPUT_MOUSEDEV=y | ||
752 | CONFIG_INPUT_MOUSEDEV_PSAUX=y | ||
753 | CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 | ||
754 | CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 | ||
755 | CONFIG_INPUT_JOYDEV=y | ||
756 | CONFIG_INPUT_EVDEV=y | ||
757 | # CONFIG_INPUT_EVBUG is not set | ||
758 | |||
759 | # | ||
760 | # Input Device Drivers | ||
761 | # | ||
762 | CONFIG_INPUT_KEYBOARD=y | ||
763 | CONFIG_KEYBOARD_ATKBD=y | ||
764 | # CONFIG_KEYBOARD_SUNKBD is not set | ||
765 | # CONFIG_KEYBOARD_LKKBD is not set | ||
766 | # CONFIG_KEYBOARD_XTKBD is not set | ||
767 | # CONFIG_KEYBOARD_NEWTON is not set | ||
768 | CONFIG_KEYBOARD_STOWAWAY=y | ||
769 | CONFIG_INPUT_MOUSE=y | ||
770 | CONFIG_MOUSE_PS2=y | ||
771 | CONFIG_MOUSE_PS2_ALPS=y | ||
772 | CONFIG_MOUSE_PS2_LOGIPS2PP=y | ||
773 | CONFIG_MOUSE_PS2_SYNAPTICS=y | ||
774 | CONFIG_MOUSE_PS2_LIFEBOOK=y | ||
775 | CONFIG_MOUSE_PS2_TRACKPOINT=y | ||
776 | # CONFIG_MOUSE_PS2_TOUCHKIT is not set | ||
777 | CONFIG_MOUSE_SERIAL=y | ||
778 | # CONFIG_MOUSE_INPORT is not set | ||
779 | # CONFIG_MOUSE_LOGIBM is not set | ||
780 | # CONFIG_MOUSE_PC110PAD is not set | ||
781 | # CONFIG_MOUSE_VSXXXAA is not set | ||
782 | CONFIG_INPUT_JOYSTICK=y | ||
783 | CONFIG_JOYSTICK_ANALOG=y | ||
784 | CONFIG_JOYSTICK_A3D=y | ||
785 | CONFIG_JOYSTICK_ADI=y | ||
786 | CONFIG_JOYSTICK_COBRA=y | ||
787 | CONFIG_JOYSTICK_GF2K=y | ||
788 | CONFIG_JOYSTICK_GRIP=y | ||
789 | CONFIG_JOYSTICK_GRIP_MP=y | ||
790 | CONFIG_JOYSTICK_GUILLEMOT=y | ||
791 | CONFIG_JOYSTICK_INTERACT=y | ||
792 | CONFIG_JOYSTICK_SIDEWINDER=y | ||
793 | CONFIG_JOYSTICK_TMDC=y | ||
794 | CONFIG_JOYSTICK_IFORCE=y | ||
795 | CONFIG_JOYSTICK_IFORCE_232=y | ||
796 | CONFIG_JOYSTICK_WARRIOR=y | ||
797 | CONFIG_JOYSTICK_MAGELLAN=y | ||
798 | CONFIG_JOYSTICK_SPACEORB=y | ||
799 | CONFIG_JOYSTICK_SPACEBALL=y | ||
800 | CONFIG_JOYSTICK_STINGER=y | ||
801 | CONFIG_JOYSTICK_TWIDJOY=y | ||
802 | # CONFIG_JOYSTICK_JOYDUMP is not set | ||
803 | # CONFIG_INPUT_TABLET is not set | ||
804 | CONFIG_INPUT_TOUCHSCREEN=y | ||
805 | # CONFIG_TOUCHSCREEN_ADS7846 is not set | ||
806 | # CONFIG_TOUCHSCREEN_FUJITSU is not set | ||
807 | CONFIG_TOUCHSCREEN_GUNZE=y | ||
808 | CONFIG_TOUCHSCREEN_ELO=y | ||
809 | CONFIG_TOUCHSCREEN_MTOUCH=y | ||
810 | CONFIG_TOUCHSCREEN_MK712=y | ||
811 | CONFIG_TOUCHSCREEN_PENMOUNT=y | ||
812 | CONFIG_TOUCHSCREEN_TOUCHRIGHT=y | ||
813 | CONFIG_TOUCHSCREEN_TOUCHWIN=y | ||
814 | # CONFIG_TOUCHSCREEN_UCB1400 is not set | ||
815 | CONFIG_INPUT_MISC=y | ||
816 | CONFIG_INPUT_PCSPKR=y | ||
817 | # CONFIG_INPUT_WISTRON_BTNS is not set | ||
818 | CONFIG_INPUT_UINPUT=y | ||
819 | |||
820 | # | ||
821 | # Hardware I/O ports | ||
822 | # | ||
823 | CONFIG_SERIO=y | ||
824 | CONFIG_SERIO_I8042=y | ||
825 | CONFIG_SERIO_SERPORT=y | ||
826 | CONFIG_SERIO_CT82C710=y | ||
827 | CONFIG_SERIO_PCIPS2=y | ||
828 | CONFIG_SERIO_LIBPS2=y | ||
829 | CONFIG_SERIO_RAW=y | ||
830 | CONFIG_GAMEPORT=y | ||
831 | CONFIG_GAMEPORT_NS558=y | ||
832 | CONFIG_GAMEPORT_L4=y | ||
833 | CONFIG_GAMEPORT_EMU10K1=y | ||
834 | CONFIG_GAMEPORT_FM801=y | ||
835 | |||
836 | # | ||
837 | # Character devices | ||
838 | # | ||
839 | CONFIG_VT=y | ||
840 | CONFIG_VT_CONSOLE=y | ||
841 | CONFIG_HW_CONSOLE=y | ||
842 | CONFIG_VT_HW_CONSOLE_BINDING=y | ||
843 | CONFIG_SERIAL_NONSTANDARD=y | ||
844 | # CONFIG_COMPUTONE is not set | ||
845 | CONFIG_ROCKETPORT=y | ||
846 | CONFIG_CYCLADES=y | ||
847 | # CONFIG_CYZ_INTR is not set | ||
848 | # CONFIG_DIGIEPCA is not set | ||
849 | # CONFIG_ESPSERIAL is not set | ||
850 | # CONFIG_MOXA_INTELLIO is not set | ||
851 | # CONFIG_MOXA_SMARTIO is not set | ||
852 | # CONFIG_MOXA_SMARTIO_NEW is not set | ||
853 | # CONFIG_ISI is not set | ||
854 | CONFIG_SYNCLINK=y | ||
855 | CONFIG_SYNCLINKMP=y | ||
856 | # CONFIG_SYNCLINK_GT is not set | ||
857 | CONFIG_N_HDLC=y | ||
858 | # CONFIG_SPECIALIX is not set | ||
859 | # CONFIG_SX is not set | ||
860 | # CONFIG_RIO is not set | ||
861 | # CONFIG_STALDRV is not set | ||
862 | |||
863 | # | ||
864 | # Serial drivers | ||
865 | # | ||
866 | CONFIG_SERIAL_8250=y | ||
867 | CONFIG_SERIAL_8250_CONSOLE=y | ||
868 | CONFIG_FIX_EARLYCON_MEM=y | ||
869 | CONFIG_SERIAL_8250_PCI=y | ||
870 | CONFIG_SERIAL_8250_PNP=y | ||
871 | CONFIG_SERIAL_8250_NR_UARTS=4 | ||
872 | CONFIG_SERIAL_8250_RUNTIME_UARTS=4 | ||
873 | # CONFIG_SERIAL_8250_EXTENDED is not set | ||
874 | |||
875 | # | ||
876 | # Non-8250 serial port support | ||
877 | # | ||
878 | CONFIG_SERIAL_CORE=y | ||
879 | CONFIG_SERIAL_CORE_CONSOLE=y | ||
880 | CONFIG_SERIAL_JSM=y | ||
881 | CONFIG_UNIX98_PTYS=y | ||
882 | # CONFIG_LEGACY_PTYS is not set | ||
883 | CONFIG_IPMI_HANDLER=y | ||
884 | # CONFIG_IPMI_PANIC_EVENT is not set | ||
885 | CONFIG_IPMI_DEVICE_INTERFACE=y | ||
886 | CONFIG_IPMI_SI=y | ||
887 | CONFIG_IPMI_WATCHDOG=y | ||
888 | CONFIG_IPMI_POWEROFF=y | ||
889 | CONFIG_HW_RANDOM=y | ||
890 | CONFIG_HW_RANDOM_INTEL=y | ||
891 | CONFIG_HW_RANDOM_AMD=y | ||
892 | CONFIG_HW_RANDOM_GEODE=y | ||
893 | CONFIG_HW_RANDOM_VIA=y | ||
894 | CONFIG_NVRAM=y | ||
895 | CONFIG_RTC=y | ||
896 | # CONFIG_DTLK is not set | ||
897 | # CONFIG_R3964 is not set | ||
898 | # CONFIG_APPLICOM is not set | ||
899 | # CONFIG_SONYPI is not set | ||
900 | # CONFIG_MWAVE is not set | ||
901 | # CONFIG_PC8736x_GPIO is not set | ||
902 | # CONFIG_NSC_GPIO is not set | ||
903 | # CONFIG_CS5535_GPIO is not set | ||
904 | # CONFIG_RAW_DRIVER is not set | ||
905 | CONFIG_HANGCHECK_TIMER=y | ||
906 | CONFIG_TCG_TPM=y | ||
907 | CONFIG_TCG_ATMEL=y | ||
908 | # CONFIG_TELCLOCK is not set | ||
909 | CONFIG_DEVPORT=y | ||
910 | # CONFIG_I2C is not set | ||
911 | |||
912 | # | ||
913 | # SPI support | ||
914 | # | ||
915 | CONFIG_SPI=y | ||
916 | # CONFIG_SPI_DEBUG is not set | ||
917 | CONFIG_SPI_MASTER=y | ||
918 | |||
919 | # | ||
920 | # SPI Master Controller Drivers | ||
921 | # | ||
922 | # CONFIG_SPI_BITBANG is not set | ||
923 | |||
924 | # | ||
925 | # SPI Protocol Masters | ||
926 | # | ||
927 | # CONFIG_SPI_AT25 is not set | ||
928 | # CONFIG_SPI_SPIDEV is not set | ||
929 | # CONFIG_SPI_TLE62X0 is not set | ||
930 | # CONFIG_W1 is not set | ||
931 | # CONFIG_POWER_SUPPLY is not set | ||
932 | # CONFIG_HWMON is not set | ||
933 | # CONFIG_WATCHDOG is not set | ||
934 | |||
935 | # | ||
936 | # Sonics Silicon Backplane | ||
937 | # | ||
938 | CONFIG_SSB_POSSIBLE=y | ||
939 | CONFIG_SSB=y | ||
940 | CONFIG_SSB_PCIHOST_POSSIBLE=y | ||
941 | CONFIG_SSB_PCIHOST=y | ||
942 | # CONFIG_SSB_DEBUG is not set | ||
943 | CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y | ||
944 | CONFIG_SSB_DRIVER_PCICORE=y | ||
945 | |||
946 | # | ||
947 | # Multifunction device drivers | ||
948 | # | ||
949 | # CONFIG_MFD_SM501 is not set | ||
950 | |||
951 | # | ||
952 | # Multimedia devices | ||
953 | # | ||
954 | # CONFIG_VIDEO_DEV is not set | ||
955 | # CONFIG_DVB_CORE is not set | ||
956 | CONFIG_DAB=y | ||
957 | |||
958 | # | ||
959 | # Graphics support | ||
960 | # | ||
961 | CONFIG_AGP=y | ||
962 | CONFIG_AGP_ALI=y | ||
963 | CONFIG_AGP_ATI=y | ||
964 | CONFIG_AGP_AMD=y | ||
965 | CONFIG_AGP_AMD64=y | ||
966 | CONFIG_AGP_INTEL=y | ||
967 | CONFIG_AGP_NVIDIA=y | ||
968 | CONFIG_AGP_SIS=y | ||
969 | CONFIG_AGP_SWORKS=y | ||
970 | CONFIG_AGP_VIA=y | ||
971 | CONFIG_AGP_EFFICEON=y | ||
972 | CONFIG_DRM=y | ||
973 | # CONFIG_DRM_TDFX is not set | ||
974 | CONFIG_DRM_R128=y | ||
975 | CONFIG_DRM_RADEON=y | ||
976 | # CONFIG_DRM_I810 is not set | ||
977 | CONFIG_DRM_I830=y | ||
978 | # CONFIG_DRM_I915 is not set | ||
979 | CONFIG_DRM_MGA=y | ||
980 | CONFIG_DRM_SIS=y | ||
981 | CONFIG_DRM_VIA=y | ||
982 | CONFIG_DRM_SAVAGE=y | ||
983 | # CONFIG_VGASTATE is not set | ||
984 | # CONFIG_VIDEO_OUTPUT_CONTROL is not set | ||
985 | CONFIG_FB=y | ||
986 | CONFIG_FIRMWARE_EDID=y | ||
987 | # CONFIG_FB_DDC is not set | ||
988 | CONFIG_FB_CFB_FILLRECT=y | ||
989 | CONFIG_FB_CFB_COPYAREA=y | ||
990 | CONFIG_FB_CFB_IMAGEBLIT=y | ||
991 | # CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set | ||
992 | # CONFIG_FB_SYS_FILLRECT is not set | ||
993 | # CONFIG_FB_SYS_COPYAREA is not set | ||
994 | # CONFIG_FB_SYS_IMAGEBLIT is not set | ||
995 | # CONFIG_FB_SYS_FOPS is not set | ||
996 | CONFIG_FB_DEFERRED_IO=y | ||
997 | # CONFIG_FB_SVGALIB is not set | ||
998 | # CONFIG_FB_MACMODES is not set | ||
999 | # CONFIG_FB_BACKLIGHT is not set | ||
1000 | CONFIG_FB_MODE_HELPERS=y | ||
1001 | # CONFIG_FB_TILEBLITTING is not set | ||
1002 | |||
1003 | # | ||
1004 | # Frame buffer hardware drivers | ||
1005 | # | ||
1006 | # CONFIG_FB_CIRRUS is not set | ||
1007 | # CONFIG_FB_PM2 is not set | ||
1008 | # CONFIG_FB_CYBER2000 is not set | ||
1009 | # CONFIG_FB_ARC is not set | ||
1010 | # CONFIG_FB_ASILIANT is not set | ||
1011 | # CONFIG_FB_IMSTT is not set | ||
1012 | # CONFIG_FB_VGA16 is not set | ||
1013 | # CONFIG_FB_UVESA is not set | ||
1014 | CONFIG_FB_VESA=y | ||
1015 | # CONFIG_FB_EFI is not set | ||
1016 | # CONFIG_FB_HECUBA is not set | ||
1017 | # CONFIG_FB_HGA is not set | ||
1018 | # CONFIG_FB_S1D13XXX is not set | ||
1019 | # CONFIG_FB_NVIDIA is not set | ||
1020 | # CONFIG_FB_RIVA is not set | ||
1021 | # CONFIG_FB_I810 is not set | ||
1022 | # CONFIG_FB_LE80578 is not set | ||
1023 | # CONFIG_FB_INTEL is not set | ||
1024 | # CONFIG_FB_MATROX is not set | ||
1025 | # CONFIG_FB_RADEON is not set | ||
1026 | # CONFIG_FB_ATY128 is not set | ||
1027 | # CONFIG_FB_ATY is not set | ||
1028 | # CONFIG_FB_S3 is not set | ||
1029 | # CONFIG_FB_SAVAGE is not set | ||
1030 | # CONFIG_FB_SIS is not set | ||
1031 | # CONFIG_FB_NEOMAGIC is not set | ||
1032 | # CONFIG_FB_KYRO is not set | ||
1033 | # CONFIG_FB_3DFX is not set | ||
1034 | # CONFIG_FB_VOODOO1 is not set | ||
1035 | # CONFIG_FB_VT8623 is not set | ||
1036 | # CONFIG_FB_CYBLA is not set | ||
1037 | # CONFIG_FB_TRIDENT is not set | ||
1038 | # CONFIG_FB_ARK is not set | ||
1039 | # CONFIG_FB_PM3 is not set | ||
1040 | # CONFIG_FB_GEODE is not set | ||
1041 | # CONFIG_FB_VIRTUAL is not set | ||
1042 | CONFIG_BACKLIGHT_LCD_SUPPORT=y | ||
1043 | CONFIG_LCD_CLASS_DEVICE=y | ||
1044 | # CONFIG_LCD_LTV350QV is not set | ||
1045 | CONFIG_BACKLIGHT_CLASS_DEVICE=y | ||
1046 | # CONFIG_BACKLIGHT_CORGI is not set | ||
1047 | # CONFIG_BACKLIGHT_PROGEAR is not set | ||
1048 | |||
1049 | # | ||
1050 | # Display device support | ||
1051 | # | ||
1052 | # CONFIG_DISPLAY_SUPPORT is not set | ||
1053 | |||
1054 | # | ||
1055 | # Console display driver support | ||
1056 | # | ||
1057 | CONFIG_VGA_CONSOLE=y | ||
1058 | # CONFIG_VGACON_SOFT_SCROLLBACK is not set | ||
1059 | CONFIG_VIDEO_SELECT=y | ||
1060 | # CONFIG_MDA_CONSOLE is not set | ||
1061 | CONFIG_DUMMY_CONSOLE=y | ||
1062 | CONFIG_FRAMEBUFFER_CONSOLE=y | ||
1063 | # CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set | ||
1064 | # CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set | ||
1065 | CONFIG_FONTS=y | ||
1066 | # CONFIG_FONT_8x8 is not set | ||
1067 | CONFIG_FONT_8x16=y | ||
1068 | # CONFIG_FONT_6x11 is not set | ||
1069 | # CONFIG_FONT_7x14 is not set | ||
1070 | # CONFIG_FONT_PEARL_8x8 is not set | ||
1071 | # CONFIG_FONT_ACORN_8x8 is not set | ||
1072 | # CONFIG_FONT_MINI_4x6 is not set | ||
1073 | # CONFIG_FONT_SUN8x16 is not set | ||
1074 | # CONFIG_FONT_SUN12x22 is not set | ||
1075 | # CONFIG_FONT_10x18 is not set | ||
1076 | CONFIG_LOGO=y | ||
1077 | CONFIG_LOGO_LINUX_MONO=y | ||
1078 | CONFIG_LOGO_LINUX_VGA16=y | ||
1079 | CONFIG_LOGO_LINUX_CLUT224=y | ||
1080 | |||
1081 | # | ||
1082 | # Sound | ||
1083 | # | ||
1084 | # CONFIG_SOUND is not set | ||
1085 | CONFIG_HID_SUPPORT=y | ||
1086 | CONFIG_HID=y | ||
1087 | CONFIG_HID_DEBUG=y | ||
1088 | # CONFIG_HIDRAW is not set | ||
1089 | # CONFIG_USB_SUPPORT is not set | ||
1090 | # CONFIG_MMC is not set | ||
1091 | # CONFIG_NEW_LEDS is not set | ||
1092 | # CONFIG_INFINIBAND is not set | ||
1093 | # CONFIG_EDAC is not set | ||
1094 | CONFIG_RTC_LIB=y | ||
1095 | CONFIG_RTC_CLASS=y | ||
1096 | CONFIG_RTC_HCTOSYS=y | ||
1097 | CONFIG_RTC_HCTOSYS_DEVICE="rtc0" | ||
1098 | # CONFIG_RTC_DEBUG is not set | ||
1099 | |||
1100 | # | ||
1101 | # RTC interfaces | ||
1102 | # | ||
1103 | CONFIG_RTC_INTF_SYSFS=y | ||
1104 | CONFIG_RTC_INTF_PROC=y | ||
1105 | CONFIG_RTC_INTF_DEV=y | ||
1106 | CONFIG_RTC_INTF_DEV_UIE_EMUL=y | ||
1107 | CONFIG_RTC_DRV_TEST=y | ||
1108 | |||
1109 | # | ||
1110 | # SPI RTC drivers | ||
1111 | # | ||
1112 | CONFIG_RTC_DRV_RS5C348=y | ||
1113 | CONFIG_RTC_DRV_MAX6902=y | ||
1114 | |||
1115 | # | ||
1116 | # Platform RTC drivers | ||
1117 | # | ||
1118 | # CONFIG_RTC_DRV_CMOS is not set | ||
1119 | CONFIG_RTC_DRV_DS1553=y | ||
1120 | # CONFIG_RTC_DRV_STK17TA8 is not set | ||
1121 | CONFIG_RTC_DRV_DS1742=y | ||
1122 | CONFIG_RTC_DRV_M48T86=y | ||
1123 | # CONFIG_RTC_DRV_M48T59 is not set | ||
1124 | CONFIG_RTC_DRV_V3020=y | ||
1125 | |||
1126 | # | ||
1127 | # on-CPU RTC drivers | ||
1128 | # | ||
1129 | # CONFIG_DMADEVICES is not set | ||
1130 | # CONFIG_VIRTUALIZATION is not set | ||
1131 | |||
1132 | # | ||
1133 | # Userspace I/O | ||
1134 | # | ||
1135 | # CONFIG_UIO is not set | ||
1136 | |||
1137 | # | ||
1138 | # Firmware Drivers | ||
1139 | # | ||
1140 | CONFIG_EDD=y | ||
1141 | CONFIG_DELL_RBU=y | ||
1142 | CONFIG_DCDBAS=y | ||
1143 | CONFIG_DMIID=y | ||
1144 | |||
1145 | # | ||
1146 | # File systems | ||
1147 | # | ||
1148 | CONFIG_EXT2_FS=y | ||
1149 | CONFIG_EXT2_FS_XATTR=y | ||
1150 | CONFIG_EXT2_FS_POSIX_ACL=y | ||
1151 | CONFIG_EXT2_FS_SECURITY=y | ||
1152 | # CONFIG_EXT2_FS_XIP is not set | ||
1153 | CONFIG_EXT3_FS=y | ||
1154 | CONFIG_EXT3_FS_XATTR=y | ||
1155 | CONFIG_EXT3_FS_POSIX_ACL=y | ||
1156 | CONFIG_EXT3_FS_SECURITY=y | ||
1157 | # CONFIG_EXT4DEV_FS is not set | ||
1158 | CONFIG_JBD=y | ||
1159 | # CONFIG_JBD_DEBUG is not set | ||
1160 | CONFIG_FS_MBCACHE=y | ||
1161 | # CONFIG_REISERFS_FS is not set | ||
1162 | # CONFIG_JFS_FS is not set | ||
1163 | CONFIG_FS_POSIX_ACL=y | ||
1164 | # CONFIG_XFS_FS is not set | ||
1165 | # CONFIG_GFS2_FS is not set | ||
1166 | # CONFIG_OCFS2_FS is not set | ||
1167 | # CONFIG_MINIX_FS is not set | ||
1168 | # CONFIG_ROMFS_FS is not set | ||
1169 | CONFIG_INOTIFY=y | ||
1170 | CONFIG_INOTIFY_USER=y | ||
1171 | # CONFIG_QUOTA is not set | ||
1172 | CONFIG_DNOTIFY=y | ||
1173 | # CONFIG_AUTOFS_FS is not set | ||
1174 | # CONFIG_AUTOFS4_FS is not set | ||
1175 | CONFIG_FUSE_FS=y | ||
1176 | CONFIG_GENERIC_ACL=y | ||
1177 | |||
1178 | # | ||
1179 | # CD-ROM/DVD Filesystems | ||
1180 | # | ||
1181 | CONFIG_ISO9660_FS=y | ||
1182 | CONFIG_JOLIET=y | ||
1183 | CONFIG_ZISOFS=y | ||
1184 | CONFIG_UDF_FS=y | ||
1185 | CONFIG_UDF_NLS=y | ||
1186 | |||
1187 | # | ||
1188 | # DOS/FAT/NT Filesystems | ||
1189 | # | ||
1190 | CONFIG_FAT_FS=y | ||
1191 | # CONFIG_MSDOS_FS is not set | ||
1192 | CONFIG_VFAT_FS=y | ||
1193 | CONFIG_FAT_DEFAULT_CODEPAGE=437 | ||
1194 | CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" | ||
1195 | # CONFIG_NTFS_FS is not set | ||
1196 | |||
1197 | # | ||
1198 | # Pseudo filesystems | ||
1199 | # | ||
1200 | CONFIG_PROC_FS=y | ||
1201 | CONFIG_PROC_KCORE=y | ||
1202 | CONFIG_PROC_SYSCTL=y | ||
1203 | CONFIG_SYSFS=y | ||
1204 | CONFIG_TMPFS=y | ||
1205 | CONFIG_TMPFS_POSIX_ACL=y | ||
1206 | # CONFIG_HUGETLBFS is not set | ||
1207 | # CONFIG_HUGETLB_PAGE is not set | ||
1208 | CONFIG_CONFIGFS_FS=y | ||
1209 | |||
1210 | # | ||
1211 | # Miscellaneous filesystems | ||
1212 | # | ||
1213 | # CONFIG_ADFS_FS is not set | ||
1214 | # CONFIG_AFFS_FS is not set | ||
1215 | # CONFIG_HFS_FS is not set | ||
1216 | # CONFIG_HFSPLUS_FS is not set | ||
1217 | # CONFIG_BEFS_FS is not set | ||
1218 | # CONFIG_BFS_FS is not set | ||
1219 | # CONFIG_EFS_FS is not set | ||
1220 | # CONFIG_CRAMFS is not set | ||
1221 | # CONFIG_VXFS_FS is not set | ||
1222 | # CONFIG_HPFS_FS is not set | ||
1223 | # CONFIG_QNX4FS_FS is not set | ||
1224 | # CONFIG_SYSV_FS is not set | ||
1225 | # CONFIG_UFS_FS is not set | ||
1226 | # CONFIG_NETWORK_FILESYSTEMS is not set | ||
1227 | |||
1228 | # | ||
1229 | # Partition Types | ||
1230 | # | ||
1231 | # CONFIG_PARTITION_ADVANCED is not set | ||
1232 | CONFIG_MSDOS_PARTITION=y | ||
1233 | CONFIG_NLS=y | ||
1234 | CONFIG_NLS_DEFAULT="iso8859-1" | ||
1235 | CONFIG_NLS_CODEPAGE_437=y | ||
1236 | # CONFIG_NLS_CODEPAGE_737 is not set | ||
1237 | # CONFIG_NLS_CODEPAGE_775 is not set | ||
1238 | # CONFIG_NLS_CODEPAGE_850 is not set | ||
1239 | # CONFIG_NLS_CODEPAGE_852 is not set | ||
1240 | # CONFIG_NLS_CODEPAGE_855 is not set | ||
1241 | # CONFIG_NLS_CODEPAGE_857 is not set | ||
1242 | # CONFIG_NLS_CODEPAGE_860 is not set | ||
1243 | # CONFIG_NLS_CODEPAGE_861 is not set | ||
1244 | # CONFIG_NLS_CODEPAGE_862 is not set | ||
1245 | # CONFIG_NLS_CODEPAGE_863 is not set | ||
1246 | # CONFIG_NLS_CODEPAGE_864 is not set | ||
1247 | # CONFIG_NLS_CODEPAGE_865 is not set | ||
1248 | # CONFIG_NLS_CODEPAGE_866 is not set | ||
1249 | # CONFIG_NLS_CODEPAGE_869 is not set | ||
1250 | # CONFIG_NLS_CODEPAGE_936 is not set | ||
1251 | # CONFIG_NLS_CODEPAGE_950 is not set | ||
1252 | # CONFIG_NLS_CODEPAGE_932 is not set | ||
1253 | # CONFIG_NLS_CODEPAGE_949 is not set | ||
1254 | # CONFIG_NLS_CODEPAGE_874 is not set | ||
1255 | # CONFIG_NLS_ISO8859_8 is not set | ||
1256 | # CONFIG_NLS_CODEPAGE_1250 is not set | ||
1257 | # CONFIG_NLS_CODEPAGE_1251 is not set | ||
1258 | # CONFIG_NLS_ASCII is not set | ||
1259 | # CONFIG_NLS_ISO8859_1 is not set | ||
1260 | # CONFIG_NLS_ISO8859_2 is not set | ||
1261 | # CONFIG_NLS_ISO8859_3 is not set | ||
1262 | # CONFIG_NLS_ISO8859_4 is not set | ||
1263 | # CONFIG_NLS_ISO8859_5 is not set | ||
1264 | # CONFIG_NLS_ISO8859_6 is not set | ||
1265 | # CONFIG_NLS_ISO8859_7 is not set | ||
1266 | # CONFIG_NLS_ISO8859_9 is not set | ||
1267 | # CONFIG_NLS_ISO8859_13 is not set | ||
1268 | # CONFIG_NLS_ISO8859_14 is not set | ||
1269 | # CONFIG_NLS_ISO8859_15 is not set | ||
1270 | # CONFIG_NLS_KOI8_R is not set | ||
1271 | # CONFIG_NLS_KOI8_U is not set | ||
1272 | # CONFIG_NLS_UTF8 is not set | ||
1273 | # CONFIG_DLM is not set | ||
1274 | # CONFIG_INSTRUMENTATION is not set | ||
1275 | |||
1276 | # | ||
1277 | # Kernel hacking | ||
1278 | # | ||
1279 | CONFIG_TRACE_IRQFLAGS_SUPPORT=y | ||
1280 | CONFIG_PRINTK_TIME=y | ||
1281 | CONFIG_ENABLE_WARN_DEPRECATED=y | ||
1282 | CONFIG_ENABLE_MUST_CHECK=y | ||
1283 | CONFIG_MAGIC_SYSRQ=y | ||
1284 | # CONFIG_UNUSED_SYMBOLS is not set | ||
1285 | CONFIG_DEBUG_FS=y | ||
1286 | # CONFIG_HEADERS_CHECK is not set | ||
1287 | CONFIG_DEBUG_KERNEL=y | ||
1288 | # CONFIG_DEBUG_SHIRQ is not set | ||
1289 | CONFIG_DETECT_SOFTLOCKUP=y | ||
1290 | CONFIG_SCHED_DEBUG=y | ||
1291 | # CONFIG_SCHEDSTATS is not set | ||
1292 | CONFIG_TIMER_STATS=y | ||
1293 | CONFIG_DEBUG_SLAB=y | ||
1294 | # CONFIG_DEBUG_SLAB_LEAK is not set | ||
1295 | CONFIG_DEBUG_PREEMPT=y | ||
1296 | # CONFIG_DEBUG_RT_MUTEXES is not set | ||
1297 | # CONFIG_RT_MUTEX_TESTER is not set | ||
1298 | CONFIG_DEBUG_SPINLOCK=y | ||
1299 | CONFIG_DEBUG_MUTEXES=y | ||
1300 | CONFIG_DEBUG_LOCK_ALLOC=y | ||
1301 | CONFIG_PROVE_LOCKING=y | ||
1302 | CONFIG_LOCKDEP=y | ||
1303 | CONFIG_LOCK_STAT=y | ||
1304 | # CONFIG_DEBUG_LOCKDEP is not set | ||
1305 | CONFIG_TRACE_IRQFLAGS=y | ||
1306 | CONFIG_DEBUG_SPINLOCK_SLEEP=y | ||
1307 | CONFIG_DEBUG_LOCKING_API_SELFTESTS=y | ||
1308 | CONFIG_STACKTRACE=y | ||
1309 | # CONFIG_DEBUG_KOBJECT is not set | ||
1310 | # CONFIG_DEBUG_HIGHMEM is not set | ||
1311 | CONFIG_DEBUG_BUGVERBOSE=y | ||
1312 | CONFIG_DEBUG_INFO=y | ||
1313 | CONFIG_DEBUG_VM=y | ||
1314 | CONFIG_DEBUG_LIST=y | ||
1315 | # CONFIG_DEBUG_SG is not set | ||
1316 | CONFIG_FRAME_POINTER=y | ||
1317 | CONFIG_FORCED_INLINING=y | ||
1318 | # CONFIG_BOOT_PRINTK_DELAY is not set | ||
1319 | # CONFIG_FAULT_INJECTION is not set | ||
1320 | # CONFIG_SAMPLES is not set | ||
1321 | CONFIG_EARLY_PRINTK=y | ||
1322 | # CONFIG_DEBUG_STACKOVERFLOW is not set | ||
1323 | # CONFIG_DEBUG_STACK_USAGE is not set | ||
1324 | # CONFIG_DEBUG_PAGEALLOC is not set | ||
1325 | # CONFIG_DEBUG_RODATA is not set | ||
1326 | # CONFIG_4KSTACKS is not set | ||
1327 | CONFIG_X86_FIND_SMP_CONFIG=y | ||
1328 | CONFIG_X86_MPPARSE=y | ||
1329 | CONFIG_DOUBLEFAULT=y | ||
1330 | |||
1331 | # | ||
1332 | # Security options | ||
1333 | # | ||
1334 | # CONFIG_KEYS is not set | ||
1335 | # CONFIG_SECURITY is not set | ||
1336 | # CONFIG_SECURITY_FILE_CAPABILITIES is not set | ||
1337 | CONFIG_CRYPTO=y | ||
1338 | CONFIG_CRYPTO_ALGAPI=y | ||
1339 | CONFIG_CRYPTO_BLKCIPHER=y | ||
1340 | CONFIG_CRYPTO_HASH=y | ||
1341 | CONFIG_CRYPTO_MANAGER=y | ||
1342 | CONFIG_CRYPTO_HMAC=y | ||
1343 | # CONFIG_CRYPTO_XCBC is not set | ||
1344 | CONFIG_CRYPTO_NULL=y | ||
1345 | CONFIG_CRYPTO_MD4=y | ||
1346 | CONFIG_CRYPTO_MD5=y | ||
1347 | CONFIG_CRYPTO_SHA1=y | ||
1348 | CONFIG_CRYPTO_SHA256=y | ||
1349 | CONFIG_CRYPTO_SHA512=y | ||
1350 | CONFIG_CRYPTO_WP512=y | ||
1351 | CONFIG_CRYPTO_TGR192=y | ||
1352 | # CONFIG_CRYPTO_GF128MUL is not set | ||
1353 | CONFIG_CRYPTO_ECB=y | ||
1354 | CONFIG_CRYPTO_CBC=y | ||
1355 | # CONFIG_CRYPTO_PCBC is not set | ||
1356 | # CONFIG_CRYPTO_LRW is not set | ||
1357 | # CONFIG_CRYPTO_XTS is not set | ||
1358 | # CONFIG_CRYPTO_CRYPTD is not set | ||
1359 | CONFIG_CRYPTO_DES=y | ||
1360 | # CONFIG_CRYPTO_FCRYPT is not set | ||
1361 | CONFIG_CRYPTO_BLOWFISH=y | ||
1362 | CONFIG_CRYPTO_TWOFISH=y | ||
1363 | CONFIG_CRYPTO_TWOFISH_COMMON=y | ||
1364 | CONFIG_CRYPTO_TWOFISH_586=y | ||
1365 | CONFIG_CRYPTO_SERPENT=y | ||
1366 | CONFIG_CRYPTO_AES=y | ||
1367 | CONFIG_CRYPTO_AES_586=y | ||
1368 | CONFIG_CRYPTO_CAST5=y | ||
1369 | CONFIG_CRYPTO_CAST6=y | ||
1370 | CONFIG_CRYPTO_TEA=y | ||
1371 | CONFIG_CRYPTO_ARC4=y | ||
1372 | CONFIG_CRYPTO_KHAZAD=y | ||
1373 | CONFIG_CRYPTO_ANUBIS=y | ||
1374 | # CONFIG_CRYPTO_SEED is not set | ||
1375 | CONFIG_CRYPTO_DEFLATE=y | ||
1376 | CONFIG_CRYPTO_MICHAEL_MIC=y | ||
1377 | CONFIG_CRYPTO_CRC32C=y | ||
1378 | # CONFIG_CRYPTO_CAMELLIA is not set | ||
1379 | # CONFIG_CRYPTO_AUTHENC is not set | ||
1380 | CONFIG_CRYPTO_HW=y | ||
1381 | CONFIG_CRYPTO_DEV_PADLOCK=y | ||
1382 | CONFIG_CRYPTO_DEV_PADLOCK_AES=y | ||
1383 | CONFIG_CRYPTO_DEV_PADLOCK_SHA=y | ||
1384 | CONFIG_CRYPTO_DEV_GEODE=y | ||
1385 | |||
1386 | # | ||
1387 | # Library routines | ||
1388 | # | ||
1389 | CONFIG_BITREVERSE=y | ||
1390 | CONFIG_CRC_CCITT=y | ||
1391 | CONFIG_CRC16=y | ||
1392 | # CONFIG_CRC_ITU_T is not set | ||
1393 | CONFIG_CRC32=y | ||
1394 | # CONFIG_CRC7 is not set | ||
1395 | CONFIG_LIBCRC32C=y | ||
1396 | CONFIG_ZLIB_INFLATE=y | ||
1397 | CONFIG_ZLIB_DEFLATE=y | ||
1398 | CONFIG_PLIST=y | ||
1399 | CONFIG_HAS_IOMEM=y | ||
1400 | CONFIG_HAS_IOPORT=y | ||
1401 | CONFIG_HAS_DMA=y | ||
1402 | |||
1403 | # | ||
1404 | # LITMUS^RT | ||
1405 | # | ||
1406 | |||
1407 | # | ||
1408 | # Real-Time Synchronization | ||
1409 | # | ||
1410 | CONFIG_NP_SECTION=y | ||
1411 | CONFIG_SRP=y | ||
1412 | CONFIG_FMLP=y | ||
1413 | |||
1414 | # | ||
1415 | # Tracing | ||
1416 | # | ||
1417 | CONFIG_SCHED_TASK_TRACE=y | ||
1418 | CONFIG_SCHED_DEBUG_TRACE=y | ||
1419 | CONFIG_FEATHER_TRACE=y | ||
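The tail of the provided qemu-config enables the LITMUS^RT-specific synchronization and tracing options. When configuring a kernel by hand instead, a quick sanity check is to grep the generated .config for the same switches; a minimal sketch, assuming the patched kernel tree is the current working directory:

    # all six options should report =y, matching the reference configuration
    grep -E 'CONFIG_(NP_SECTION|SRP|FMLP|SCHED_TASK_TRACE|SCHED_DEBUG_TRACE|FEATHER_TRACE)' .config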
@@ -26,7 +26,7 @@ | |||
26 | <a href="#publications">publications</a> - | 26 | <a href="#publications">publications</a> - |
27 | <a href="#download">download</a> - | 27 | <a href="#download">download</a> - |
28 | <a href="#install">installation</a> - | 28 | <a href="#install">installation</a> - |
29 | <a href="#links">documentation</a> | 29 | <a href="#doc">documentation</a> |
30 | </p> | 30 | </p> |
31 | </div> | 31 | </div> |
32 | 32 | ||
@@ -41,8 +41,8 @@ | |||
41 | is supported. | 41 | is supported. |
42 | </p> | 42 | </p> |
43 | <p> | 43 | <p> |
44 | The current version of LITMUS<sup>RT</sup> is <strong>2008.1</strong> and is based on Linux 2.6.24. | 44 | The current version of LITMUS<sup>RT</sup> is <strong>2008.2</strong> and is based on Linux 2.6.24. |
45 | It was released on 7/19/2008 and includes plugins for the following | 45 | It was released on 12/01/2008 and includes plugins for the following |
46 | scheduling policies: | 46 | scheduling policies: |
47 | </p> | 47 | </p> |
48 | <ul> | 48 | <ul> |
@@ -66,7 +66,7 @@ | |||
66 | <h2 id="support">Support</h2> | 66 | <h2 id="support">Support</h2> |
67 | <div class="box"> | 67 | <div class="box"> |
68 | <p class="nomargin"> | 68 | <p class="nomargin"> |
69 | The LITMUS<sup>RT</sup> development effort is being supported by grants from | 69 | The LITMUS<sup>RT</sup> development effort is being supported by grants from SUN Corp., |
70 | Intel Corp., IBM Corp., The National Science Foundation (grant CCR 0615197), and The U.S. | 70 | Intel Corp., IBM Corp., The National Science Foundation (grant CCR 0615197), and The U.S. |
71 | Army Research Office (grant W911NF-06-1-0425). | 71 | Army Research Office (grant W911NF-06-1-0425). |
72 | </p> | 72 | </p> |
@@ -85,7 +85,7 @@ | |||
85 | </p> | 85 | </p> |
86 | <ul> | 86 | <ul> |
87 | <li> | 87 | <li> |
88 | <a href="http://www.cs.unc.edu/~bbb/">Björn B. Brandenburg</a> | 88 | <a href="http://www.cs.unc.edu/~bbb/">Björn B. Brandenburg</a> (current maintainer) |
89 | </li> | 89 | </li> |
90 | <li> <a href="http://www.cs.unc.edu/~jmc/">John M. Calandrino</a> | 90 | <li> <a href="http://www.cs.unc.edu/~jmc/">John M. Calandrino</a> |
91 | </li> | 91 | </li> |
@@ -214,7 +214,7 @@ Technology and Applications Symposium</cite>, pp. 342-353, April 2008. | |||
214 | General Public License (GPL)</a>. | 214 | General Public License (GPL)</a>. |
215 | </p> | 215 | </p> |
216 | <p> | 216 | <p> |
217 | The current release of LITMUS<sup>RT</sup> is 2008.1. | 217 | The current release of LITMUS<sup>RT</sup> is 2008.2. |
218 | It consists of our Linux kernel modifications in the form of | 218 | It consists of our Linux kernel modifications in the form of |
219 | a patch against Linux 2.6.24 and | 219 | a patch against Linux 2.6.24 and |
220 | <span class="src">liblitmus</span>, the user-space API for real-time tasks. | 220 | <span class="src">liblitmus</span>, the user-space API for real-time tasks. |
@@ -223,11 +223,36 @@ Technology and Applications Symposium</cite>, pp. 342-353, April 2008. | |||
223 | <h3 class="relname">LITMUS<sup>RT</sup> 2008.2</h3> | 223 | <h3 class="relname">LITMUS<sup>RT</sup> 2008.2</h3> |
224 | <div class="release"> | 224 | <div class="release"> |
225 | <p> | 225 | <p> |
226 | The next release is currently in preparation and will be released in | 226 | Based on Linux 2.6.24. Released in December 2008. |
227 | early December for <a href="http://www.rtss.org">RTSS 2008</a>. | 227 | |
228 | It will contain numerous bug fixes and performance improvements. At this point, | ||
229 | please do not start new development efforts based on the previous 2008.1 version. | ||
230 | </p> | 228 | </p> |
229 | <h4>Files:</h4> | ||
230 | <ul> | ||
231 | <li> | ||
232 | <a href="download/2008.2/litmus-rt-2008.2.patch">litmus-rt-2008.2.patch</a> | ||
233 | </li> | ||
234 | <li> | ||
235 | <a href="download/2008.2/liblitmus-2008.2.tgz">liblitmus-2008.2.tgz</a> | ||
236 | </li> | ||
237 | <li><a href="download/2008.2/SHA256SUMS">SHA256 check sums</a> (see the verification sketch below) | ||
238 | </li> | ||
239 | </ul> | ||
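The SHA256 check sums cover both release files and can be verified with GNU coreutils before building; a minimal sketch, assuming the patch and the tarball have already been downloaded into $DIR:

    cd $DIR
    wget http://www.cs.unc.edu/~anderson/litmus-rt/download/2008.2/SHA256SUMS
    sha256sum -c SHA256SUMS   # should report OK for both files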
240 | <h4>Major changes (since LITMUS<sup>RT</sup> 2008.1):</h4> | ||
241 | <ul> | ||
242 | <li>The PFAIR implementation can now recover from missed tick interrupts. | ||
243 | </li> | ||
244 | <li>A bug in the PFAIR prioritization function was corrected. | ||
245 | </li> | ||
246 | <li>Support for synchronous task system releases in the EDF-based schedulers was fixed. | ||
247 | </li> | ||
248 | <li><span class="src">sched_trace()</span> support was re-implemented based on Feather-Trace. | ||
249 | </li> | ||
250 | <li>Added the tool <span class="src">showst</span> to liblitmus, which can convert <span class="src">sched_trace()</span> binary data to a human-readable format. | ||
251 | </li> | ||
252 | <li> | ||
253 | Assorted bug fixes. | ||
254 | </li> | ||
255 | </ul> | ||
231 | </div> | 256 | </div> |
232 | 257 | ||
233 | <h3 class="relname">LITMUS<sup>RT</sup> 2008.1</h3> | 258 | <h3 class="relname">LITMUS<sup>RT</sup> 2008.1</h3> |
@@ -282,6 +307,7 @@ Technology and Applications Symposium</cite>, pp. 342-353, April 2008. | |||
282 | </div> | 307 | </div> |
283 | 308 | ||
284 | 309 | ||
310 | |||
285 | <h2 id="install">Installation</h2> | 311 | <h2 id="install">Installation</h2> |
286 | <div class="box"> | 312 | <div class="box"> |
287 | <p class="notopmargin"> | 313 | <p class="notopmargin"> |
@@ -310,12 +336,17 @@ cd $DIR | |||
310 | # get Linux 2.6.24 | 336 | # get Linux 2.6.24 |
311 | wget http://www.kernel.org/pub/linux/kernel/v2.6/linux-2.6.24.tar.bz2 | 337 | wget http://www.kernel.org/pub/linux/kernel/v2.6/linux-2.6.24.tar.bz2 |
312 | tar xjf linux-2.6.24.tar.bz2 | 338 | tar xjf linux-2.6.24.tar.bz2 |
313 | wget http://www.cs.unc.edu/~anderson/litmus-rt/download/2008.1/litmus-rt-2008.1.patch | 339 | wget http://www.cs.unc.edu/~anderson/litmus-rt/download/2008.2/litmus-rt-2008.2.patch |
314 | mv linux-2.6.24 litmus2008 | 340 | mv linux-2.6.24 litmus2008 |
315 | # apply the LITMUS RT patch | 341 | # apply the LITMUS RT patch |
316 | cd litmus2008 | 342 | cd litmus2008 |
317 | patch -p1 < ../litmus-rt-2008.1.patch | 343 | patch -p1 < ../litmus-rt-2008.2.patch |
318 | # create a working kernel configuration with HZ=1000 | 344 | # create a working kernel configuration |
345 | # - select HZ=1000 | ||
346 | # - enable in-kernel preemptions | ||
347 | # - disable NO_HZ | ||
348 | # - don't use power management options like frequency scaling | ||
349 | # - disable support for group scheduling | ||
319 | make menuconfig | 350 | make menuconfig |
320 | # compile the kernel | 351 | # compile the kernel |
321 | make bzImage | 352 | make bzImage |
@@ -325,7 +356,7 @@ make modules | |||
325 | </pre> | 356 | </pre> |
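The listing stops after compilation; to boot the result, the modules and the kernel image still have to be installed. A sketch using the standard kbuild targets (it assumes a conventional x86 setup where the distribution supplies an installkernel script):

    make modules_install   # copies modules to /lib/modules/<kernel release>
    make install           # installs bzImage via the distribution's installkernel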
326 | <p> | 357 | <p> |
327 | When configuring the kernel, note that there is a menu (at the very end of the list) | 358 | When configuring the kernel, note that there is a menu (at the very end of the list) |
328 | with LITMUS<sup>RT</sup>-specific configuration options. | 359 | with LITMUS<sup>RT</sup>-specific configuration options. For reference, <a href="download/2008.2/qemu-config">we provide a configuration that is known to work under QEMU</a>. |
329 | </p> | 360 | </p> |
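One way to start from that known-good configuration instead of configuring from scratch is to seed the build with it; a sketch, assuming the commands are run inside the patched kernel tree:

    wget http://www.cs.unc.edu/~anderson/litmus-rt/download/2008.2/qemu-config
    cp qemu-config .config
    make oldconfig   # accept the recorded answers, then adjust via menuconfig if needed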
330 | 361 | ||
331 | <h3>Libraries</h3> | 362 | <h3>Libraries</h3> |
@@ -338,8 +369,8 @@ make modules | |||
338 | </p> | 369 | </p> |
339 | <pre class="shell"> | 370 | <pre class="shell"> |
340 | cd $DIR | 371 | cd $DIR |
341 | wget http://www.cs.unc.edu/~anderson/litmus-rt/download/2008.1/liblitmus-2008.1.tgz | 372 | wget http://www.cs.unc.edu/~anderson/litmus-rt/download/2008.2/liblitmus-2008.2.tgz |
342 | tar xzf liblitmus-2008.1.tgz | 373 | tar xzf liblitmus-2008.2.tgz |
343 | cd liblitmus | 374 | cd liblitmus |
344 | # change KERNEL_DIR in Makefile to point to the kernel source | 375 | # change KERNEL_DIR in Makefile to point to the kernel source |
345 | make | 376 | make |
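# Once liblitmus has built, the patched kernel can be smoke-tested under QEMU
# (which matches the provided qemu-config). A sketch only: the disk image
# name, root device, and memory size below are illustrative assumptions.
qemu -m 512 -hda litmus.img \
     -kernel $DIR/litmus2008/arch/x86/boot/bzImage \
     -append "root=/dev/hda console=ttyS0"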